repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
jrfastab/Linux-Kernel-QOS | arch/arm/mach-kirkwood/mpp.c | 2616 | 1076 | /*
* arch/arm/mach-kirkwood/mpp.c
*
* MPP functions for Marvell Kirkwood SoCs
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <plat/mpp.h>
#include "common.h"
#include "mpp.h"
/*
 * Identify which Kirkwood SoC variant we are running on and return the
 * matching MPP capability mask, or 0 (with an error message) when the
 * PCIe device/revision IDs are not recognised.
 */
static unsigned int __init kirkwood_variant(void)
{
	u32 dev, rev;

	kirkwood_pcie_id(&dev, &rev);

	switch (dev) {
	case MV88F6281_DEV_ID:
		/* only revision A0 and later is supported */
		if (rev >= MV88F6281_REV_A0)
			return MPP_F6281_MASK;
		break;
	case MV88F6282_DEV_ID:
		return MPP_F6282_MASK;
	case MV88F6192_DEV_ID:
		/* only revision A0 and later is supported */
		if (rev >= MV88F6192_REV_A0)
			return MPP_F6192_MASK;
		break;
	case MV88F6180_DEV_ID:
		return MPP_F6180_MASK;
	}

	pr_err("MPP setup: unknown kirkwood variant (dev %#x rev %#x)\n",
	       dev, rev);
	return 0;
}
/*
 * Apply a zero-terminated MPP configuration list, filtered through the
 * capability mask of the detected SoC variant.
 */
void __init kirkwood_mpp_conf(unsigned int *mpp_list)
{
	orion_mpp_conf(mpp_list, kirkwood_variant(),
		       MPP_MAX, DEV_BUS_VIRT_BASE);
}
| gpl-2.0 |
johnhubbard/pnotify-linux-3.0.52 | drivers/input/touchscreen/tps6507x-ts.c | 3128 | 8923 | /*
* drivers/input/touchscreen/tps6507x_ts.c
*
* Touchscreen driver for the tps6507x chip.
*
* Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
*
* Credits:
*
* Using code from tsc2007, MtekVision Co., Ltd.
*
* For licencing details see kernel-base/COPYING
*
* TPS65070, TPS65073, TPS650731, and TPS650732 support
* 10 bit touch screen interface.
*/
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/mfd/tps6507x.h>
#include <linux/input/tps6507x-ts.h>
#include <linux/delay.h>
#define TSC_DEFAULT_POLL_PERIOD 30 /* ms */
#define TPS_DEFAULT_MIN_PRESSURE 0x30
#define MAX_10BIT ((1 << 10) - 1)
#define TPS6507X_ADCONFIG_CONVERT_TS (TPS6507X_ADCONFIG_AD_ENABLE | \
TPS6507X_ADCONFIG_START_CONVERSION | \
TPS6507X_ADCONFIG_INPUT_REAL_TSC)
#define TPS6507X_ADCONFIG_POWER_DOWN_TS (TPS6507X_ADCONFIG_INPUT_REAL_TSC)
/* One sampled touch point, as read back from the TPS6507x 10-bit ADC. */
struct ts_event {
	u16 x;		/* raw X position */
	u16 y;		/* raw Y position */
	u16 pressure;	/* raw pressure reading */
};
/* Per-device driver state for the TPS6507x touchscreen function. */
struct tps6507x_ts {
	struct input_dev	*input_dev;	/* registered input device */
	struct device		*dev;		/* parent MFD device */
	char			phys[32];	/* input device phys path */
	struct delayed_work	work;		/* polling work item */
	unsigned		polling;	/* polling is active */
	struct ts_event		tc;		/* last sampled touch event */
	struct tps6507x_dev	*mfd;		/* MFD core (register access) */
	u16			model;
	unsigned		pendown;	/* pen currently down */
	int			irq;
	void			(*clear_penirq)(void);
	unsigned long		poll_period;	/* ms */
	u16			min_pressure;	/* pen-down threshold */
	int			vref;		/* non-zero to leave vref on */
};
/*
 * Read one byte from a TPS6507x register via the MFD core.
 * Returns 0 on success or the accessor's negative error code.
 */
static int tps6507x_read_u8(struct tps6507x_ts *tsc, u8 reg, u8 *data)
{
	return tsc->mfd->read_dev(tsc->mfd, reg, 1, data);
}
/* Write one byte to a TPS6507x register via the MFD core. */
static int tps6507x_write_u8(struct tps6507x_ts *tsc, u8 reg, u8 data)
{
	return tsc->mfd->write_dev(tsc->mfd, reg, 1, &data);
}
/*
 * Run one A/D conversion for the given TSC mode (pressure, X or Y) and
 * return the 10-bit result in *value.
 *
 * Sequence: select the input, kick off the conversion, busy-poll until
 * the START_CONVERSION bit clears, then assemble the result from the
 * two result registers (high bits first).
 *
 * Returns 0 on success or a negative error code from the bus accessors.
 */
static s32 tps6507x_adc_conversion(struct tps6507x_ts *tsc,
				   u8 tsc_mode, u16 *value)
{
	s32 ret;
	u8 adc_status;
	u8 result;

	/* Route input signal to A/D converter */
	ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE, tsc_mode);
	if (ret) {
		/* fixed: this is a register write, not a read */
		dev_err(tsc->dev, "TSC mode write failed\n");
		goto err;
	}

	/* Start A/D conversion */
	ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG,
				TPS6507X_ADCONFIG_CONVERT_TS);
	if (ret) {
		dev_err(tsc->dev, "ADC config write failed\n");
		return ret;
	}

	/* Busy-wait for the conversion-in-progress bit to clear. */
	do {
		ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADCONFIG,
				       &adc_status);
		if (ret) {
			dev_err(tsc->dev, "ADC config read failed\n");
			goto err;
		}
	} while (adc_status & TPS6507X_ADCONFIG_START_CONVERSION);

	/* High bits of the result. */
	ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_2, &result);
	if (ret) {
		dev_err(tsc->dev, "ADC result 2 read failed\n");
		goto err;
	}
	*value = (result & TPS6507X_REG_ADRESULT_2_MASK) << 8;

	/* Low byte of the result. */
	ret = tps6507x_read_u8(tsc, TPS6507X_REG_ADRESULT_1, &result);
	if (ret) {
		dev_err(tsc->dev, "ADC result 1 read failed\n");
		goto err;
	}
	*value |= result;

	dev_dbg(tsc->dev, "TSC channel %d = 0x%X\n", tsc_mode, *value);

err:
	return ret;
}
/* Need to call tps6507x_adc_standby() after using A/D converter for the
 * touch screen interrupt to work properly.
 *
 * Puts the ADC input back on the TSC and the TSC into standby mode, then
 * waits for the pending touchscreen interrupt bit to clear.
 *
 * Returns 0 on success or a negative error code.
 */
static s32 tps6507x_adc_standby(struct tps6507x_ts *tsc)
{
	s32 ret;
	u8 val;

	ret = tps6507x_write_u8(tsc, TPS6507X_REG_ADCONFIG,
				TPS6507X_ADCONFIG_INPUT_TSC);
	if (ret)
		return ret;

	ret = tps6507x_write_u8(tsc, TPS6507X_REG_TSCMODE,
				TPS6507X_TSCMODE_STANDBY);
	if (ret)
		return ret;

	ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val);
	if (ret)
		return ret;

	/*
	 * NOTE(review): this wait is unbounded; if the hardware never
	 * clears TSC_INT we spin forever in 10 ms steps. A retry limit
	 * may be worth adding — confirm against the TPS6507x datasheet.
	 * (The previous write-only `loops` counter was unused and has
	 * been removed.)
	 */
	while (val & TPS6507X_REG_TSC_INT) {
		mdelay(10);
		ret = tps6507x_read_u8(tsc, TPS6507X_REG_INT, &val);
		if (ret)
			return ret;
	}

	return ret;
}
/*
 * Delayed-work poll handler: sample pressure, report pen up/down and
 * (when down) X/Y/pressure to the input core, then re-arm the work.
 */
static void tps6507x_ts_handler(struct work_struct *work)
{
	struct tps6507x_ts *tsc = container_of(work,
				struct tps6507x_ts, work.work);
	struct input_dev *input_dev = tsc->input_dev;
	int pendown;
	int schd;
	int poll = 0;
	s32 ret;

	ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_PRESSURE,
				      &tsc->tc.pressure);
	if (ret)
		goto done;

	/* Pen is considered down when pressure exceeds the threshold. */
	pendown = tsc->tc.pressure > tsc->min_pressure;

	/* Pen-up transition: report release once. */
	if (unlikely(!pendown && tsc->pendown)) {
		dev_dbg(tsc->dev, "UP\n");
		input_report_key(input_dev, BTN_TOUCH, 0);
		input_report_abs(input_dev, ABS_PRESSURE, 0);
		input_sync(input_dev);
		tsc->pendown = 0;
	}

	if (pendown) {

		if (!tsc->pendown) {
			dev_dbg(tsc->dev, "DOWN\n");
			input_report_key(input_dev, BTN_TOUCH, 1);
		} else
			dev_dbg(tsc->dev, "still down\n");

		/* Sample X then Y while the pen is down. */
		ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_X_POSITION,
					      &tsc->tc.x);
		if (ret)
			goto done;
		ret = tps6507x_adc_conversion(tsc, TPS6507X_TSCMODE_Y_POSITION,
					      &tsc->tc.y);
		if (ret)
			goto done;

		input_report_abs(input_dev, ABS_X, tsc->tc.x);
		input_report_abs(input_dev, ABS_Y, tsc->tc.y);
		input_report_abs(input_dev, ABS_PRESSURE, tsc->tc.pressure);
		input_sync(input_dev);
		tsc->pendown = 1;
		poll = 1;
	}

done:
	/* always poll if not using interrupts */
	/* NOTE: this deliberately overrides the pen-based poll decision
	 * above, since this driver has no working pen-down interrupt. */
	poll = 1;
	if (poll) {
		schd = schedule_delayed_work(&tsc->work,
					msecs_to_jiffies(tsc->poll_period));
		if (schd)
			tsc->polling = 1;
		else {
			tsc->polling = 0;
			dev_err(tsc->dev, "re-schedule failed");
		}
	} else
		tsc->polling = 0;

	/* Required so the (unused) touch interrupt line behaves. */
	ret = tps6507x_adc_standby(tsc);
}
/*
 * Probe: allocate driver state and an input device, program the TSC
 * into standby, register the input device and start the polling work.
 *
 * Fix vs. previous version: when scheduling the initial poll failed
 * after input_register_device() had already succeeded, the old error
 * path (a) returned `error == 0`, i.e. reported success while freeing
 * everything, and (b) called input_free_device() on a *registered*
 * device instead of input_unregister_device(). A dedicated err3 path
 * now unregisters the device and returns a real error code.
 */
static int tps6507x_ts_probe(struct platform_device *pdev)
{
	int error;
	struct tps6507x_ts *tsc;
	struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
	struct touchscreen_init_data *init_data;
	struct input_dev *input_dev;
	struct tps6507x_board *tps_board;
	int schd;

	/**
	 * tps_board points to pmic related constants
	 * coming from the board-evm file.
	 */
	tps_board = (struct tps6507x_board *)tps6507x_dev->dev->platform_data;
	if (!tps_board) {
		dev_err(tps6507x_dev->dev,
			"Could not find tps6507x platform data\n");
		return -EIO;
	}

	/**
	 * init_data points to array of regulator_init structures
	 * coming from the board-evm file.
	 */
	init_data = tps_board->tps6507x_ts_init_data;

	tsc = kzalloc(sizeof(struct tps6507x_ts), GFP_KERNEL);
	if (!tsc) {
		dev_err(tps6507x_dev->dev, "failed to allocate driver data\n");
		error = -ENOMEM;
		goto err0;
	}

	tps6507x_dev->ts = tsc;
	tsc->mfd = tps6507x_dev;
	tsc->dev = tps6507x_dev->dev;

	input_dev = input_allocate_device();
	if (!input_dev) {
		dev_err(tsc->dev, "Failed to allocate input device.\n");
		error = -ENOMEM;
		goto err1;
	}

	/* Single-touch device: BTN_TOUCH plus absolute X/Y/pressure. */
	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
	input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
	input_set_abs_params(input_dev, ABS_X, 0, MAX_10BIT, 0, 0);
	input_set_abs_params(input_dev, ABS_Y, 0, MAX_10BIT, 0, 0);
	input_set_abs_params(input_dev, ABS_PRESSURE, 0, MAX_10BIT, 0, 0);

	input_dev->name = "TPS6507x Touchscreen";
	input_dev->id.bustype = BUS_I2C;
	input_dev->dev.parent = tsc->dev;

	snprintf(tsc->phys, sizeof(tsc->phys),
		 "%s/input0", dev_name(tsc->dev));
	input_dev->phys = tsc->phys;

	dev_dbg(tsc->dev, "device: %s\n", input_dev->phys);

	input_set_drvdata(input_dev, tsc);

	tsc->input_dev = input_dev;

	INIT_DELAYED_WORK(&tsc->work, tps6507x_ts_handler);

	/* Board-supplied tuning, with driver defaults as fallback. */
	if (init_data) {
		tsc->poll_period = init_data->poll_period;
		tsc->vref = init_data->vref;
		tsc->min_pressure = init_data->min_pressure;
		input_dev->id.vendor = init_data->vendor;
		input_dev->id.product = init_data->product;
		input_dev->id.version = init_data->version;
	} else {
		tsc->poll_period = TSC_DEFAULT_POLL_PERIOD;
		tsc->min_pressure = TPS_DEFAULT_MIN_PRESSURE;
	}

	error = tps6507x_adc_standby(tsc);
	if (error)
		goto err2;

	error = input_register_device(input_dev);
	if (error)
		goto err2;

	schd = schedule_delayed_work(&tsc->work,
				     msecs_to_jiffies(tsc->poll_period));
	if (schd)
		tsc->polling = 1;
	else {
		tsc->polling = 0;
		dev_err(tsc->dev, "schedule failed");
		error = -EIO;
		goto err3;
	}
	platform_set_drvdata(pdev, tps6507x_dev);

	return 0;

err3:
	/* input_dev is registered here: unregister (which also frees it). */
	cancel_delayed_work_sync(&tsc->work);
	input_unregister_device(input_dev);
	tps6507x_dev->ts = NULL;
	kfree(tsc);
	return error;

err2:
	cancel_delayed_work_sync(&tsc->work);
	input_free_device(input_dev);
err1:
	kfree(tsc);
	tps6507x_dev->ts = NULL;
err0:
	return error;
}
/*
 * Remove: stop the polling work, then unregister the input device
 * (which also releases it) and free the driver state.
 */
static int __devexit tps6507x_ts_remove(struct platform_device *pdev)
{
	struct tps6507x_dev *tps6507x_dev = platform_get_drvdata(pdev);
	struct tps6507x_ts *tsc = tps6507x_dev->ts;
	struct input_dev *input_dev = tsc->input_dev;

	/* Make sure no poll is running or pending before teardown. */
	cancel_delayed_work_sync(&tsc->work);

	input_unregister_device(input_dev);

	tps6507x_dev->ts = NULL;
	kfree(tsc);

	return 0;
}
/* Platform driver glue; matched by the "tps6507x-ts" MFD cell name. */
static struct platform_driver tps6507x_ts_driver = {
	.driver = {
		.name = "tps6507x-ts",
		.owner = THIS_MODULE,
	},
	.probe = tps6507x_ts_probe,
	.remove = __devexit_p(tps6507x_ts_remove),
};
/* Module entry point: register the platform driver. */
static int __init tps6507x_ts_init(void)
{
	return platform_driver_register(&tps6507x_ts_driver);
}
module_init(tps6507x_ts_init);
/* Module exit point: unregister the platform driver. */
static void __exit tps6507x_ts_exit(void)
{
	platform_driver_unregister(&tps6507x_ts_driver);
}
module_exit(tps6507x_ts_exit);
/* Module metadata. */
MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>");
MODULE_DESCRIPTION("TPS6507x - TouchScreen driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:tps6507x-tsc");
| gpl-2.0 |
Stane1983/kernel-amlogic-mx | arch/powerpc/sysdev/qe_lib/qe_io.c | 3384 | 5573 | /*
* arch/powerpc/sysdev/qe_lib/qe_io.c
*
* QE Parallel I/O ports configuration routines
*
* Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
*
* Author: Li Yang <LeoLi@freescale.com>
* Based on code from Shlomi Gridish <gridish@freescale.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <asm/io.h>
#include <asm/qe.h>
#include <asm/prom.h>
#include <sysdev/fsl_soc.h>
#undef DEBUG

/* Mapped base of the QE parallel I/O register banks (set by par_io_init). */
static struct qe_pio_regs __iomem *par_io;
/* Number of ports present, taken from the "num-ports" DT property. */
static int num_par_io_ports = 0;
int par_io_init(struct device_node *np)
{
struct resource res;
int ret;
const u32 *num_ports;
/* Map Parallel I/O ports registers */
ret = of_address_to_resource(np, 0, &res);
if (ret)
return ret;
par_io = ioremap(res.start, res.end - res.start + 1);
num_ports = of_get_property(np, "num-ports", NULL);
if (num_ports)
num_par_io_ports = *num_ports;
return 0;
}
/*
 * Configure a single pin of one parallel-I/O port bank.
 *
 * @par_io:      mapped register bank for the port being configured
 * @pin:         pin number within the port (0 = MSB of the registers)
 * @dir:         2-bit direction code written into cpdir1/cpdir2
 * @open_drain:  non-zero to enable open-drain on the pin
 * @assignment:  2-bit pin-mux assignment written into cppar1/cppar2
 * @has_irq:     currently unused
 *
 * Each port has one 1-bit-per-pin open-drain register and pairs of
 * 2-bit-per-pin direction/assignment registers, split so the first
 * half of the pins live in *1 and the second half in *2.
 */
void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
			 int open_drain, int assignment, int has_irq)
{
	u32 pin_mask1bit;
	u32 pin_mask2bits;
	u32 new_mask2bits;
	u32 tmp_val;

	/* calculate pin location for single and 2 bits information */
	pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));

	/* Set open drain, if required */
	tmp_val = in_be32(&par_io->cpodr);
	if (open_drain)
		out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
	else
		out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);

	/* define direction */
	tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
		in_be32(&par_io->cpdir2) :
		in_be32(&par_io->cpdir1);

	/* get all bits mask for 2 bit per port */
	pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
				(pin % (QE_PIO_PINS / 2) + 1) * 2));

	/* Get the final mask we need for the right definition */
	new_mask2bits = (u32) (dir << (QE_PIO_PINS -
				(pin % (QE_PIO_PINS / 2) + 1) * 2));

	/* clear and set 2 bits mask */
	/* NOTE: the field is first cleared with a separate register write,
	 * then rewritten with the new value OR'd in. */
	if (pin > (QE_PIO_PINS / 2) - 1) {
		out_be32(&par_io->cpdir2,
			 ~pin_mask2bits & tmp_val);
		tmp_val &= ~pin_mask2bits;
		out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
	} else {
		out_be32(&par_io->cpdir1,
			 ~pin_mask2bits & tmp_val);
		tmp_val &= ~pin_mask2bits;
		out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
	}
	/* define pin assignment */
	tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
		in_be32(&par_io->cppar2) :
		in_be32(&par_io->cppar1);

	new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
			(pin % (QE_PIO_PINS / 2) + 1) * 2));
	/* clear and set 2 bits mask */
	if (pin > (QE_PIO_PINS / 2) - 1) {
		out_be32(&par_io->cppar2,
			 ~pin_mask2bits & tmp_val);
		tmp_val &= ~pin_mask2bits;
		out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
	} else {
		out_be32(&par_io->cppar1,
			 ~pin_mask2bits & tmp_val);
		tmp_val &= ~pin_mask2bits;
		out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
	}
}
EXPORT_SYMBOL(__par_io_config_pin);
/*
 * Validated wrapper around __par_io_config_pin().
 *
 * Fix vs. previous version: also reject out-of-range pin numbers, for
 * consistency with par_io_data_set(); an invalid pin would otherwise
 * shift by a negative/huge amount inside __par_io_config_pin().
 */
int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
		      int assignment, int has_irq)
{
	if (!par_io || port >= num_par_io_ports || pin >= QE_PIO_PINS)
		return -EINVAL;

	__par_io_config_pin(&par_io[port], pin, dir, open_drain, assignment,
			    has_irq);
	return 0;
}
EXPORT_SYMBOL(par_io_config_pin);
/*
 * Drive one output pin high or low.
 * Returns 0 on success, -EINVAL for an out-of-range port or pin.
 */
int par_io_data_set(u8 port, u8 pin, u8 val)
{
	u32 mask, data;

	if (port >= num_par_io_ports)
		return -EINVAL;
	if (pin >= QE_PIO_PINS)
		return -EINVAL;

	/* Pins are numbered from the MSB of the data register. */
	mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));

	/* Read-modify-write the port data register. */
	data = in_be32(&par_io[port].cpdata);
	if (val)
		data |= mask;	/* set */
	else
		data &= ~mask;	/* clear */
	out_be32(&par_io[port].cpdata, data);

	return 0;
}
EXPORT_SYMBOL(par_io_data_set);
/*
 * Configure all pins listed in the "pio-map" property of the node
 * referenced by @np's "pio-handle" phandle. Each map entry is six
 * cells: port, pin, dir, open_drain, assignment, has_irq.
 *
 * Fixes vs. previous version:
 *  - of_find_node_by_phandle() can return NULL; that was dereferenced
 *    indirectly and is now checked.
 *  - the node reference taken by of_find_node_by_phandle() was leaked
 *    on the error paths; of_node_put() is now called on every exit.
 *
 * Returns 0 on success, -1 on any configuration error (existing
 * convention kept for callers).
 */
int par_io_of_config(struct device_node *np)
{
	struct device_node *pio;
	const phandle *ph;
	int pio_map_len;
	const unsigned int *pio_map;

	if (par_io == NULL) {
		printk(KERN_ERR "par_io not initialized\n");
		return -1;
	}

	ph = of_get_property(np, "pio-handle", NULL);
	if (ph == NULL) {
		printk(KERN_ERR "pio-handle not available\n");
		return -1;
	}

	pio = of_find_node_by_phandle(*ph);
	if (pio == NULL) {
		printk(KERN_ERR "pio-handle does not point to a node\n");
		return -1;
	}

	pio_map = of_get_property(pio, "pio-map", &pio_map_len);
	if (pio_map == NULL) {
		printk(KERN_ERR "pio-map is not set!\n");
		of_node_put(pio);
		return -1;
	}
	pio_map_len /= sizeof(unsigned int);
	if ((pio_map_len % 6) != 0) {
		printk(KERN_ERR "pio-map format wrong!\n");
		of_node_put(pio);
		return -1;
	}

	while (pio_map_len > 0) {
		par_io_config_pin((u8) pio_map[0], (u8) pio_map[1],
				  (int) pio_map[2], (int) pio_map[3],
				  (int) pio_map[4], (int) pio_map[5]);
		pio_map += 6;
		pio_map_len -= 6;
	}
	of_node_put(pio);
	return 0;
}
EXPORT_SYMBOL(par_io_of_config);
#ifdef DEBUG
/* Debug helper (DEBUG builds only): dump every port's PIO registers. */
static void dump_par_io(void)
{
	unsigned int i;

	printk(KERN_INFO "%s: par_io=%p\n", __func__, par_io);
	for (i = 0; i < num_par_io_ports; i++) {
		printk(KERN_INFO "	cpodr[%u]=%08x\n", i,
		       in_be32(&par_io[i].cpodr));
		printk(KERN_INFO "	cpdata[%u]=%08x\n", i,
		       in_be32(&par_io[i].cpdata));
		printk(KERN_INFO "	cpdir1[%u]=%08x\n", i,
		       in_be32(&par_io[i].cpdir1));
		printk(KERN_INFO "	cpdir2[%u]=%08x\n", i,
		       in_be32(&par_io[i].cpdir2));
		printk(KERN_INFO "	cppar1[%u]=%08x\n", i,
		       in_be32(&par_io[i].cppar1));
		printk(KERN_INFO "	cppar2[%u]=%08x\n", i,
		       in_be32(&par_io[i].cppar2));
	}
}
EXPORT_SYMBOL(dump_par_io);
#endif /* DEBUG */
| gpl-2.0 |
allenway/PIS-kernel | drivers/media/video/tlg2300/pd-radio.c | 3640 | 10389 | #include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <media/v4l2-dev.h>
#include <linux/version.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <media/v4l2-ioctl.h>
#include <linux/sched.h>
#include "pd-common.h"
#include "vendorcmds.h"
static int set_frequency(struct poseidon *p, __u32 frequency);
static int poseidon_fm_close(struct file *filp);
static int poseidon_fm_open(struct file *filp);
#define TUNER_FREQ_MIN_FM 76000000
#define TUNER_FREQ_MAX_FM 108000000
#define MAX_PREEMPHASIS (V4L2_PREEMPHASIS_75_uS + 1)
/* Map V4L2 pre-emphasis control values to Telegent audio standards. */
static int preemphasis[MAX_PREEMPHASIS] = {
	TLG_TUNE_ASTD_NONE,   /* V4L2_PREEMPHASIS_DISABLED */
	TLG_TUNE_ASTD_FM_EUR, /* V4L2_PREEMPHASIS_50_uS */
	TLG_TUNE_ASTD_FM_US,  /* V4L2_PREEMPHASIS_75_uS */
};
/*
 * Switch the device into FM radio mode: select the bulk alternate
 * interface, set the tuner mode, and program audio source/standard/
 * sample-rate plus an initial frequency.
 *
 * Returns 0 on success, or a (possibly OR'd) non-zero error otherwise.
 */
static int poseidon_check_mode_radio(struct poseidon *p)
{
	int ret;
	u32 status;

	/* Give the firmware time to settle before reconfiguring. */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(HZ/2);
	ret = usb_set_interface(p->udev, 0, BULK_ALTERNATE_IFACE);
	if (ret < 0)
		goto out;

	ret = set_tuner_mode(p, TLG_MODE_FM_RADIO);
	if (ret != 0)
		goto out;

	/* NOTE(review): the result of this first request is overwritten
	 * by the next assignment, so a SGNL_SRC_SEL failure is ignored —
	 * confirm whether that is intentional best-effort behavior. */
	ret = send_set_req(p, SGNL_SRC_SEL, TLG_SIG_SRC_ANTENNA, &status);

	ret = send_set_req(p, TUNER_AUD_ANA_STD,
				p->radio_data.pre_emphasis, &status);
	ret |= send_set_req(p, TUNER_AUD_MODE,
				TLG_TUNE_TVAUDIO_MODE_STEREO, &status);
	ret |= send_set_req(p, AUDIO_SAMPLE_RATE_SEL,
				ATV_AUDIO_RATE_48K, &status);
	ret |= send_set_req(p, TUNE_FREQ_SELECT, TUNER_FREQ_MIN_FM, &status);
out:
	return ret;
}
#ifdef CONFIG_PM
/* PM suspend hook while in FM mode: quiesce ALSA, drop the interface. */
static int pm_fm_suspend(struct poseidon *p)
{
	logpm(p);
	pm_alsa_suspend(p);
	usb_set_interface(p->udev, 0, 0);
	/* let the device settle before power-down */
	msleep(300);
	return 0;
}
/* PM resume hook: re-enter radio mode, restore frequency, resume ALSA. */
static int pm_fm_resume(struct poseidon *p)
{
	logpm(p);
	poseidon_check_mode_radio(p);
	set_frequency(p, p->radio_data.fm_freq);
	pm_alsa_resume(p);
	return 0;
}
#endif
/*
 * Radio device open. The first opener (p->state == 0) switches the
 * hardware into FM mode; later openers just take references. Open is
 * refused while the device is disconnecting or busy in a non-FM mode.
 */
static int poseidon_fm_open(struct file *filp)
{
	struct video_device *vfd = video_devdata(filp);
	struct poseidon *p = video_get_drvdata(vfd);
	int ret = 0;

	if (!p)
		return -1;

	mutex_lock(&p->lock);
	if (p->state & POSEIDON_STATE_DISCONNECT) {
		ret = -ENODEV;
		goto out;
	}

	/* Another function (e.g. analog TV) owns the device. */
	if (p->state && !(p->state & POSEIDON_STATE_FM)) {
		ret = -EBUSY;
		goto out;
	}
	usb_autopm_get_interface(p->interface);
	if (0 == p->state) {
		/* default pre-emphasis */
		if (p->radio_data.pre_emphasis == 0)
			p->radio_data.pre_emphasis = TLG_TUNE_ASTD_FM_EUR;
		set_debug_mode(vfd, debug_mode);

		ret = poseidon_check_mode_radio(p);
		if (ret < 0) {
			usb_autopm_put_interface(p->interface);
			goto out;
		}
		p->state |= POSEIDON_STATE_FM;
	}
	p->radio_data.users++;
	kref_get(&p->kref);
	filp->private_data = p;
out:
	mutex_unlock(&p->lock);
	return ret;
}
/*
 * Radio device release. The last closer clears the FM state; if this
 * file also owned the audio stream, streaming is stopped first.
 */
static int poseidon_fm_close(struct file *filp)
{
	struct poseidon *p = filp->private_data;
	struct radio_data *fm = &p->radio_data;
	uint32_t status;

	mutex_lock(&p->lock);
	fm->users--;
	if (0 == fm->users)
		p->state &= ~POSEIDON_STATE_FM;

	if (fm->is_radio_streaming && filp == p->file_for_stream) {
		fm->is_radio_streaming = 0;
		send_set_req(p, PLAY_SERVICE, TLG_TUNE_PLAY_SVC_STOP, &status);
	}
	usb_autopm_put_interface(p->interface);
	mutex_unlock(&p->lock);

	kref_put(&p->kref, poseidon_delete);
	filp->private_data = NULL;
	return 0;
}
/* VIDIOC_QUERYCAP: report driver/card identity and capabilities. */
static int vidioc_querycap(struct file *file, void *priv,
			struct v4l2_capability *v)
{
	struct poseidon *p = file->private_data;

	strlcpy(v->driver, "tele-radio", sizeof(v->driver));
	strlcpy(v->card, "Telegent Poseidon", sizeof(v->card));
	usb_make_path(p->udev, v->bus_info, sizeof(v->bus_info));
	v->version = KERNEL_VERSION(0, 0, 1);
	v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
	return 0;
}
/* File operations for the FM radio device node. */
static const struct v4l2_file_operations poseidon_fm_fops = {
	.owner		= THIS_MODULE,
	.open		= poseidon_fm_open,
	.release	= poseidon_fm_close,
	.ioctl		= video_ioctl2,
};
/*
 * VIDIOC_G_TUNER: fill in the fixed FM tuner properties and translate
 * the firmware's signal status into a 16-bit signal strength. Retries
 * up to 5 times (1 s apart) while the tuner reports it is still locking.
 */
static int tlg_fm_vidioc_g_tuner(struct file *file, void *priv,
				 struct v4l2_tuner *vt)
{
	struct tuner_fm_sig_stat_s fm_stat = {};
	int ret, status, count = 5;
	struct poseidon *p = file->private_data;

	if (vt->index != 0)
		return -EINVAL;

	vt->type	= V4L2_TUNER_RADIO;
	vt->capability	= V4L2_TUNER_CAP_STEREO;
	/* range is expressed in units of 62.5 kHz */
	vt->rangelow	= TUNER_FREQ_MIN_FM / 62500;
	vt->rangehigh	= TUNER_FREQ_MAX_FM / 62500;
	vt->rxsubchans	= V4L2_TUNER_SUB_STEREO;
	vt->audmode	= V4L2_TUNER_MODE_STEREO;
	vt->signal	= 0;
	vt->afc		= 0;

	mutex_lock(&p->lock);
	ret = send_get_req(p, TUNER_STATUS, TLG_MODE_FM_RADIO,
			      &fm_stat, &status, sizeof(fm_stat));
	/* poll while the tuner is still busy locking */
	while (fm_stat.sig_lock_busy && count-- && !ret) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);

		ret = send_get_req(p, TUNER_STATUS, TLG_MODE_FM_RADIO,
				  &fm_stat, &status, sizeof(fm_stat));
	}
	mutex_unlock(&p->lock);

	if (ret || status) {
		vt->signal = 0;
	} else if ((fm_stat.sig_present || fm_stat.sig_locked)
			&& fm_stat.sig_strength == 0) {
		/* locked but strength unreported: treat as full strength */
		vt->signal = 0xffff;
	} else
		vt->signal = (fm_stat.sig_strength * 255 / 10) << 8;

	return 0;
}
/* VIDIOC_G_FREQUENCY: report the last frequency set via fm_set_freq(). */
static int fm_get_freq(struct file *file, void *priv,
		       struct v4l2_frequency *argp)
{
	struct poseidon *pd = file->private_data;

	argp->frequency = pd->radio_data.fm_freq;
	return 0;
}
/*
 * Program a new tuning frequency. @frequency is in V4L2 units of
 * 62.5 kHz (so freq = frequency * 62.5 is the value in kHz). Starts
 * the radio audio stream on first use.
 */
static int set_frequency(struct poseidon *p, __u32 frequency)
{
	__u32 freq ;
	int ret, status;

	mutex_lock(&p->lock);

	/* NOTE(review): the pre-emphasis register is written before the
	 * range check below, so an out-of-range request still touches
	 * hardware — confirm whether this ordering matters. */
	ret = send_set_req(p, TUNER_AUD_ANA_STD,
				p->radio_data.pre_emphasis, &status);

	/* 62.5 kHz units -> kHz: * 125 * 500 / 1000 == * 62.5 */
	freq = (frequency * 125) * 500 / 1000;/* kHZ */
	if (freq < TUNER_FREQ_MIN_FM/1000 || freq > TUNER_FREQ_MAX_FM/1000) {
		ret = -EINVAL;
		goto error;
	}

	ret = send_set_req(p, TUNE_FREQ_SELECT, freq, &status);
	if (ret < 0)
		goto error ;
	ret = send_set_req(p, TAKE_REQUEST, 0, &status);

	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(HZ/4);
	if (!p->radio_data.is_radio_streaming) {
		/* first tune: kick off the audio streaming service */
		ret = send_set_req(p, TAKE_REQUEST, 0, &status);
		ret = send_set_req(p, PLAY_SERVICE,
				TLG_TUNE_PLAY_SVC_START, &status);
		p->radio_data.is_radio_streaming = 1;
	}
	p->radio_data.fm_freq = frequency;
error:
	mutex_unlock(&p->lock);
	return ret;
}
/*
 * VIDIOC_S_FREQUENCY: remember this file as the stream owner, install
 * the FM power-management hooks, and tune.
 */
static int fm_set_freq(struct file *file, void *priv,
			struct v4l2_frequency *argp)
{
	struct poseidon *p = file->private_data;

	p->file_for_stream  = file;
#ifdef CONFIG_PM
	p->pm_suspend = pm_fm_suspend;
	p->pm_resume  = pm_fm_resume;
#endif
	return set_frequency(p, argp->frequency);
}
/* VIDIOC_G_CTRL: no plain controls are supported; accept and ignore. */
static int tlg_fm_vidioc_g_ctrl(struct file *file, void *priv,
		struct v4l2_control *arg)
{
	return 0;
}
/*
 * VIDIOC_G_EXT_CTRLS: only V4L2_CID_TUNE_PREEMPHASIS is handled.
 *
 * NOTE(review): `ctrl->value` is filled with the stored raw TLG
 * constant (set by s_exts_ctrl), not the V4L2_PREEMPHASIS_* index the
 * API expects — confirm against the V4L2 FM_TX control spec. Also the
 * `i < MAX_PREEMPHASIS` guard compares the *loop index*, which looks
 * unrelated to the value being returned.
 */
static int tlg_fm_vidioc_g_exts_ctrl(struct file *file, void *fh,
				struct v4l2_ext_controls *ctrls)
{
	struct poseidon *p = file->private_data;
	int i;

	if (ctrls->ctrl_class != V4L2_CTRL_CLASS_FM_TX)
		return -EINVAL;

	for (i = 0; i < ctrls->count; i++) {
		struct v4l2_ext_control *ctrl = ctrls->controls + i;

		if (ctrl->id != V4L2_CID_TUNE_PREEMPHASIS)
			continue;

		if (i < MAX_PREEMPHASIS)
			ctrl->value = p->radio_data.pre_emphasis;
	}
	return 0;
}
/*
 * VIDIOC_S_EXT_CTRLS: apply a new pre-emphasis setting. The V4L2
 * index is translated through the preemphasis[] table into a TLG
 * audio-standard constant, which is both written to the tuner and
 * cached in radio_data.pre_emphasis.
 */
static int tlg_fm_vidioc_s_exts_ctrl(struct file *file, void *fh,
			struct v4l2_ext_controls *ctrls)
{
	int i;

	if (ctrls->ctrl_class != V4L2_CTRL_CLASS_FM_TX)
		return -EINVAL;

	for (i = 0; i < ctrls->count; i++) {
		struct v4l2_ext_control *ctrl = ctrls->controls + i;

		if (ctrl->id != V4L2_CID_TUNE_PREEMPHASIS)
			continue;

		if (ctrl->value >= 0 && ctrl->value < MAX_PREEMPHASIS) {
			struct poseidon *p = file->private_data;
			int pre_emphasis = preemphasis[ctrl->value];
			u32 status;

			send_set_req(p, TUNER_AUD_ANA_STD,
						pre_emphasis, &status);
			p->radio_data.pre_emphasis = pre_emphasis;
		}
	}
	return 0;
}
/* VIDIOC_S_CTRL: no plain controls are supported; accept and ignore. */
static int tlg_fm_vidioc_s_ctrl(struct file *file, void *priv,
		struct v4l2_control *ctrl)
{
	return 0;
}
/*
 * VIDIOC_QUERYCTRL: only NEXT_CTRL enumeration is supported, and the
 * single advertised control is V4L2_CID_TUNE_PREEMPHASIS.
 */
static int tlg_fm_vidioc_queryctrl(struct file *file, void *priv,
		struct v4l2_queryctrl *ctrl)
{
	if (!(ctrl->id & V4L2_CTRL_FLAG_NEXT_CTRL))
		return -EINVAL;

	ctrl->id &= ~V4L2_CTRL_FLAG_NEXT_CTRL;
	if (ctrl->id != V4L2_CID_TUNE_PREEMPHASIS) {
		/* return the next supported control */
		ctrl->id = V4L2_CID_TUNE_PREEMPHASIS;
		v4l2_ctrl_query_fill(ctrl, V4L2_PREEMPHASIS_DISABLED,
					V4L2_PREEMPHASIS_75_uS, 1,
					V4L2_PREEMPHASIS_50_uS);
		ctrl->flags = V4L2_CTRL_FLAG_UPDATE;
		return 0;
	}
	/* no control after PREEMPHASIS: enumeration ends here */
	return -EINVAL;
}
/* VIDIOC_QUERYMENU: delegate to the V4L2 core's standard menus. */
static int tlg_fm_vidioc_querymenu(struct file *file, void *fh,
				   struct v4l2_querymenu *qmenu)
{
	return v4l2_ctrl_query_menu(qmenu, NULL, NULL);
}
/* VIDIOC_S_TUNER: only tuner 0 exists and nothing is configurable. */
static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *vt)
{
	if (vt->index > 0)
		return -EINVAL;
	return 0;
}
/* VIDIOC_S_AUDIO: a single fixed audio input; only index 0 is valid. */
static int vidioc_s_audio(struct file *file, void *priv, struct v4l2_audio *va)
{
	if (va->index != 0)
		return -EINVAL;
	return 0;
}
/*
 * VIDIOC_G_AUDIO: describe the single fixed stereo audio input.
 *
 * Uses a bounded strlcpy (consistent with vidioc_querycap) instead of
 * an unbounded strcpy for the name field.
 */
static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
{
	a->index = 0;
	a->mode = 0;
	a->capability = V4L2_AUDCAP_STEREO;
	strlcpy(a->name, "Radio", sizeof(a->name));
	return 0;
}
/* VIDIOC_S_INPUT: only input 0 exists. */
static int vidioc_s_input(struct file *filp, void *priv, u32 i)
{
	if (i != 0)
		return -EINVAL;
	return 0;
}
/* VIDIOC_G_INPUT: validate that the reported input is 0. */
static int vidioc_g_input(struct file *filp, void *priv, u32 *i)
{
	if (*i != 0)
		return -EINVAL;
	return 0;
}
/* V4L2 ioctl dispatch table for the FM radio node. */
static const struct v4l2_ioctl_ops poseidon_fm_ioctl_ops = {
	.vidioc_querycap    = vidioc_querycap,
	.vidioc_g_audio     = vidioc_g_audio,
	.vidioc_s_audio     = vidioc_s_audio,
	.vidioc_g_input     = vidioc_g_input,
	.vidioc_s_input     = vidioc_s_input,
	.vidioc_queryctrl   = tlg_fm_vidioc_queryctrl,
	.vidioc_querymenu   = tlg_fm_vidioc_querymenu,
	.vidioc_g_ctrl      = tlg_fm_vidioc_g_ctrl,
	.vidioc_s_ctrl      = tlg_fm_vidioc_s_ctrl,
	.vidioc_s_ext_ctrls = tlg_fm_vidioc_s_exts_ctrl,
	.vidioc_g_ext_ctrls = tlg_fm_vidioc_g_exts_ctrl,
	.vidioc_s_tuner     = vidioc_s_tuner,
	.vidioc_g_tuner     = tlg_fm_vidioc_g_tuner,
	.vidioc_g_frequency = fm_get_freq,
	.vidioc_s_frequency = fm_set_freq,
};
/* Template cloned by vdev_init() for the radio video_device. */
static struct video_device poseidon_fm_template = {
	.name       = "Telegent-Radio",
	.fops       = &poseidon_fm_fops,
	.minor      = -1,
	.release    = video_device_release,
	.ioctl_ops  = &poseidon_fm_ioctl_ops,
};
/*
 * Create and register the FM radio device node.
 * Returns 0 on success, -1 on allocation or registration failure.
 */
int poseidon_fm_init(struct poseidon *p)
{
	struct video_device *fm_dev;

	fm_dev = vdev_init(p, &poseidon_fm_template);
	if (fm_dev == NULL)
		return -1;

	if (video_register_device(fm_dev, VFL_TYPE_RADIO, -1) < 0) {
		/* registration failed: release the allocated device */
		video_device_release(fm_dev);
		return -1;
	}
	p->radio_data.fm_dev = fm_dev;
	return 0;
}
/* Tear down the FM radio device node created by poseidon_fm_init(). */
int poseidon_fm_exit(struct poseidon *p)
{
	destroy_video_device(&p->radio_data.fm_dev);
	return 0;
}
| gpl-2.0 |
Hacker432-Y550/android_kernel_huawei_msm8916 | fs/reiserfs/stree.c | 3896 | 65952 | /*
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
/*
* Written by Anatoly P. Pinchuk pap@namesys.botik.ru
* Programm System Institute
* Pereslavl-Zalessky Russia
*/
/*
* This file contains functions dealing with S+tree
*
* B_IS_IN_TREE
* copy_item_head
* comp_short_keys
* comp_keys
* comp_short_le_keys
* le_key2cpu_key
* comp_le_keys
* bin_search
* get_lkey
* get_rkey
* key_in_buffer
* decrement_bcount
* reiserfs_check_path
* pathrelse_and_restore
* pathrelse
* search_by_key_reada
* search_by_key
* search_for_position_by_key
* comp_items
* prepare_for_direct_item
* prepare_for_direntry_item
* prepare_for_delete_or_cut
* calc_deleted_bytes_number
* init_tb_struct
* padd_item
* reiserfs_delete_item
* reiserfs_delete_solid_item
* reiserfs_delete_object
* maybe_indirect_to_direct
* indirect_to_direct_roll_back
* reiserfs_cut_from_item
* truncate_directory
* reiserfs_do_truncate
* reiserfs_paste_into_item
* reiserfs_insert_item
*/
#include <linux/time.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include "reiserfs.h"
#include <linux/buffer_head.h>
#include <linux/quotaops.h>
/* Does the buffer contain a disk block which is in the tree. */
/* True when the buffer holds a node that is currently part of the
 * S+tree (its level field is not FREE_LEVEL). */
inline int B_IS_IN_TREE(const struct buffer_head *bh)
{
	RFALSE(B_LEVEL(bh) > MAX_HEIGHT,
	       "PAP-1010: block (%b) has too big level (%z)", bh, bh);

	return (B_LEVEL(bh) != FREE_LEVEL);
}
//
// to gets item head in le form
//
/* Copy an on-disk (little-endian) item head verbatim. */
inline void copy_item_head(struct item_head *to,
			   const struct item_head *from)
{
	memcpy(to, from, IH_SIZE);
}
/* k1 is pointer to on-disk structure which is stored in little-endian
form. k2 is pointer to cpu variable. For key of items of the same
object this returns 0.
Returns: -1 if key1 < key2
0 if key1 == key2
1 if key1 > key2 */
/*
 * Compare only the dir_id/objectid ("short") part of an on-disk
 * little-endian key against a CPU-order key.
 *
 * Returns -1 if le_key < cpu_key, 1 if le_key > cpu_key, 0 when both
 * short fields match (i.e. keys of items of the same object).
 */
inline int comp_short_keys(const struct reiserfs_key *le_key,
			   const struct cpu_key *cpu_key)
{
	__u32 disk;

	disk = le32_to_cpu(le_key->k_dir_id);
	if (disk != cpu_key->on_disk_key.k_dir_id)
		return disk < cpu_key->on_disk_key.k_dir_id ? -1 : 1;

	disk = le32_to_cpu(le_key->k_objectid);
	if (disk != cpu_key->on_disk_key.k_objectid)
		return disk < cpu_key->on_disk_key.k_objectid ? -1 : 1;

	return 0;
}
/* k1 is pointer to on-disk structure which is stored in little-endian
form. k2 is pointer to cpu variable.
Compare keys using all 4 key fields.
Returns: -1 if key1 < key2 0
if key1 = key2 1 if key1 > key2 */
/*
 * Full 4-field comparison of an on-disk (little-endian) key against a
 * CPU-order key: dir_id, objectid, offset, then type.
 * Returns -1 / 0 / 1 like comp_short_keys().
 */
static inline int comp_keys(const struct reiserfs_key *le_key,
			    const struct cpu_key *cpu_key)
{
	int retval;

	retval = comp_short_keys(le_key, cpu_key);
	if (retval)
		return retval;

	if (le_key_k_offset(le_key_version(le_key), le_key) <
	    cpu_key_k_offset(cpu_key))
		return -1;
	if (le_key_k_offset(le_key_version(le_key), le_key) >
	    cpu_key_k_offset(cpu_key))
		return 1;

	/* a 3-field cpu key does not participate in the type compare */
	if (cpu_key->key_length == 3)
		return 0;

	/* this part is needed only when tail conversion is in progress */
	if (le_key_k_type(le_key_version(le_key), le_key) <
	    cpu_key_k_type(cpu_key))
		return -1;

	if (le_key_k_type(le_key_version(le_key), le_key) >
	    cpu_key_k_type(cpu_key))
		return 1;

	return 0;
}
/*
 * Compare the short (dir_id/objectid) parts of two on-disk keys, both
 * little-endian, by walking them as 32-bit words.
 * Returns -1 / 0 / 1.
 */
inline int comp_short_le_keys(const struct reiserfs_key *key1,
			      const struct reiserfs_key *key2)
{
	__u32 *k1_u32, *k2_u32;
	int key_length = REISERFS_SHORT_KEY_LEN;

	k1_u32 = (__u32 *) key1;
	k2_u32 = (__u32 *) key2;
	for (; key_length--; ++k1_u32, ++k2_u32) {
		if (le32_to_cpu(*k1_u32) < le32_to_cpu(*k2_u32))
			return -1;
		if (le32_to_cpu(*k1_u32) > le32_to_cpu(*k2_u32))
			return 1;
	}
	return 0;
}
/* Convert an on-disk little-endian key into CPU byte order, decoding
 * the offset/type fields according to the key's format version. */
inline void le_key2cpu_key(struct cpu_key *to, const struct reiserfs_key *from)
{
	int version;

	to->on_disk_key.k_dir_id = le32_to_cpu(from->k_dir_id);
	to->on_disk_key.k_objectid = le32_to_cpu(from->k_objectid);

	// find out version of the key
	version = le_key_version(from);
	to->version = version;
	to->on_disk_key.k_offset = le_key_k_offset(version, from);
	to->on_disk_key.k_type = le_key_k_type(version, from);
}
// this does not say which one is bigger, it only returns 1 if keys
// are not equal, 0 otherwise
inline int comp_le_keys(const struct reiserfs_key *k1,
const struct reiserfs_key *k2)
{
return memcmp(k1, k2, sizeof(struct reiserfs_key));
}
/**************************************************************************
* Binary search toolkit function *
* Search for an item in the array by the item key *
* Returns: 1 if found, 0 if not found; *
* *pos = number of the searched element if found, else the *
* number of the first element that is larger than key. *
**************************************************************************/
/* For those not familiar with binary search: lbound is the leftmost item that it
could be, rbound the rightmost item that it could be. We examine the item
halfway between lbound and rbound, and that tells us either that we can increase
lbound, or decrease rbound, or that we have found it, or if lbound <= rbound that
there are no possible items, and we have not found it. With each examination we
cut the number of possible items it could be by one more than half rounded down,
or we find it. */
/*
 * Classic binary search over an array of fixed-width records compared
 * with comp_keys(). On success *pos is the index of the matching
 * element; otherwise *pos is the index of the first element greater
 * than the key (i.e. the insertion point).
 */
static inline int bin_search(const void *key,	/* Key to search for. */
			     const void *base,	/* First item in the array. */
			     int num,	/* Number of items in the array. */
			     int width,	/* Item size in the array.
					   searched. Lest the reader be
					   confused, note that this is crafted
					   as a general function, and when it
					   is applied specifically to the array
					   of item headers in a node, width
					   is actually the item header size not
					   the item size. */
			     int *pos /* Number of the searched for element. */
    )
{
	int rbound, lbound, j;

	for (j = ((rbound = num - 1) + (lbound = 0)) / 2;
	     lbound <= rbound; j = (rbound + lbound) / 2)
		switch (comp_keys
			((struct reiserfs_key *)((char *)base + j * width),
			 (struct cpu_key *)key)) {
		case -1:
			lbound = j + 1;
			continue;
		case 1:
			rbound = j - 1;
			continue;
		case 0:
			*pos = j;
			return ITEM_FOUND;	/* Key found in the array. */
		}

	/* bin_search did not find given key, it returns position of key,
	   that is minimal and greater than the given one. */
	*pos = lbound;
	return ITEM_NOT_FOUND;
}
/* Minimal possible key. It is never in the tree. */
/* Minimal possible key. It is never in the tree. */
const struct reiserfs_key MIN_KEY = { 0, 0, {{0, 0},} };

/* Maximal possible key. It is never in the tree. Used as a sentinel
 * delimiting key by get_lkey()/get_rkey(). */
static const struct reiserfs_key MAX_KEY = {
	__constant_cpu_to_le32(0xffffffff),
	__constant_cpu_to_le32(0xffffffff),
	{{__constant_cpu_to_le32(0xffffffff),
	  __constant_cpu_to_le32(0xffffffff)},}
};
/* Get delimiting key of the buffer by looking for it in the buffers in the path, starting from the bottom
of the path, and going upwards. We must check the path's validity at each step. If the key is not in
the path, there is no delimiting key in the tree (buffer is first or last buffer in tree), and in this
case we return a special key, either MIN_KEY or MAX_KEY. */
/*
 * Find the left delimiting key of the buffer at the bottom of the
 * path, by walking the path upwards until an ancestor provides one.
 * Returns &MIN_KEY when the buffer is leftmost in the tree, and
 * &MAX_KEY when the path has become invalid (a buffer left the tree
 * or a parent no longer points at its child), which forces a re-search.
 */
static inline const struct reiserfs_key *get_lkey(const struct treepath *chk_path,
						  const struct super_block *sb)
{
	int position, path_offset = chk_path->path_length;
	struct buffer_head *parent;

	RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET,
	       "PAP-5010: invalid offset in the path");

	/* While not higher in path than first element. */
	while (path_offset-- > FIRST_PATH_ELEMENT_OFFSET) {

		RFALSE(!buffer_uptodate
		       (PATH_OFFSET_PBUFFER(chk_path, path_offset)),
		       "PAP-5020: parent is not uptodate");

		/* Parent at the path is not in the tree now. */
		if (!B_IS_IN_TREE
		    (parent =
		     PATH_OFFSET_PBUFFER(chk_path, path_offset)))
			return &MAX_KEY;
		/* Check whether position in the parent is correct. */
		if ((position =
		     PATH_OFFSET_POSITION(chk_path,
					  path_offset)) >
		    B_NR_ITEMS(parent))
			return &MAX_KEY;
		/* Check whether parent at the path really points to the child. */
		if (B_N_CHILD_NUM(parent, position) !=
		    PATH_OFFSET_PBUFFER(chk_path,
					path_offset + 1)->b_blocknr)
			return &MAX_KEY;
		/* Return delimiting key if position in the parent is not equal to zero. */
		if (position)
			return B_N_PDELIM_KEY(parent, position - 1);
	}
	/* Return MIN_KEY if we are in the root of the buffer tree. */
	if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)->
	    b_blocknr == SB_ROOT_BLOCK(sb))
		return &MIN_KEY;
	return &MAX_KEY;
}
/* Get the right delimiting key of the bottom buffer of a path.
   MAX_KEY means the buffer is the rightmost in the tree; MIN_KEY is
   returned when the path has gone stale and the search must be
   repeated. */
inline const struct reiserfs_key *get_rkey(const struct treepath *chk_path,
					   const struct super_block *sb)
{
	struct buffer_head *parent;
	int offset;
	int pos;

	RFALSE(chk_path->path_length < FIRST_PATH_ELEMENT_OFFSET,
	       "PAP-5030: invalid offset in the path");

	/* Walk from the bottom buffer's parent up towards the root. */
	for (offset = chk_path->path_length - 1;
	     offset >= FIRST_PATH_ELEMENT_OFFSET; offset--) {

		RFALSE(!buffer_uptodate(PATH_OFFSET_PBUFFER(chk_path, offset)),
		       "PAP-5040: parent is not uptodate");

		/* Parent at the path is not in the tree now. */
		parent = PATH_OFFSET_PBUFFER(chk_path, offset);
		if (!B_IS_IN_TREE(parent))
			return &MIN_KEY;
		/* Check whether position in the parent is correct. */
		pos = PATH_OFFSET_POSITION(chk_path, offset);
		if (pos > B_NR_ITEMS(parent))
			return &MIN_KEY;
		/* Check whether parent at the path really points to the child. */
		if (B_N_CHILD_NUM(parent, pos) !=
		    PATH_OFFSET_PBUFFER(chk_path, offset + 1)->b_blocknr)
			return &MIN_KEY;
		/* Not the rightmost child: the delimiting key lives here. */
		if (pos != B_NR_ITEMS(parent))
			return B_N_PDELIM_KEY(parent, pos);
	}

	/* Return MAX_KEY if we are in the root of the buffer tree. */
	if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)->
	    b_blocknr == SB_ROOT_BLOCK(sb))
		return &MAX_KEY;
	return &MIN_KEY;
}
/* Check whether a key is contained in the tree rooted from a buffer
   at a path.  The left and right delimiting keys for that buffer,
   fetched from one or more levels above it via get_lkey/get_rkey,
   bound the keys the subtree may contain; MIN_KEY/MAX_KEY stand in
   for a missing bound when the buffer is at the edge of the tree. */
static inline int key_in_buffer(struct treepath *chk_path,	/* Path which should be checked. */
				const struct cpu_key *key,	/* Key which should be checked. */
				struct super_block *sb
    )
{
	RFALSE(!key || chk_path->path_length < FIRST_PATH_ELEMENT_OFFSET
	       || chk_path->path_length > MAX_HEIGHT,
	       "PAP-5050: pointer to the key(%p) is NULL or invalid path length(%d)",
	       key, chk_path->path_length);
	RFALSE(!PATH_PLAST_BUFFER(chk_path)->b_bdev,
	       "PAP-5060: device must not be NODEV");

	/* The key must not be smaller than the left delimiting key ... */
	if (comp_keys(get_lkey(chk_path, sb), key) == 1)
		return 0;
	/* ... and must be strictly smaller than the right one. */
	if (comp_keys(get_rkey(chk_path, sb), key) != 1)
		return 0;
	return 1;
}
/*
 * Debug helper: assert that a path has been fully released (pathrelse
 * resets path_length to ILLEGAL_PATH_ELEMENT_OFFSET).  Always returns
 * 0; the check only fires on CONFIG_REISERFS_CHECK builds.
 *
 * Fix: the assertion message read "relsed" — corrected to "released".
 */
int reiserfs_check_path(struct treepath *p)
{
	RFALSE(p->path_length != ILLEGAL_PATH_ELEMENT_OFFSET,
	       "path not properly released");
	return 0;
}
/* Drop the reference to each buffer in a path and restore
 * dirty bits clean when preparing the buffer for the log.
 * This version should only be called from fix_nodes() */
void pathrelse_and_restore(struct super_block *sb,
			   struct treepath *search_path)
{
	int offset;

	RFALSE(search_path->path_length < ILLEGAL_PATH_ELEMENT_OFFSET,
	       "clm-4000: invalid path offset");

	/* Undo journal preparation and release each buffer, bottom up. */
	for (offset = search_path->path_length;
	     offset > ILLEGAL_PATH_ELEMENT_OFFSET; offset--) {
		struct buffer_head *bh =
		    PATH_OFFSET_PBUFFER(search_path, offset);

		reiserfs_restore_prepared_buffer(sb, bh);
		brelse(bh);
	}
	search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
}
/* Drop the reference to each buffer in a path */
void pathrelse(struct treepath *search_path)
{
	int offset;

	RFALSE(search_path->path_length < ILLEGAL_PATH_ELEMENT_OFFSET,
	       "PAP-5090: invalid path offset");

	/* Release every buffer from the bottom of the path upwards. */
	for (offset = search_path->path_length;
	     offset > ILLEGAL_PATH_ELEMENT_OFFSET; offset--)
		brelse(PATH_OFFSET_PBUFFER(search_path, offset));

	search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
}
/*
 * Sanity-check that a disk block looks like a valid reiserfs leaf
 * node: the block head must carry the leaf level, the item count and
 * free-space accounting must be consistent with the block size, and
 * the item-head table must describe sanely-sized items packed
 * back-to-back from the end of the block downwards.
 * Returns 1 if the block passes, 0 (after a warning) otherwise.
 */
static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
{
	struct block_head *blkh;
	struct item_head *ih;
	int used_space;
	int prev_location;
	int i;
	int nr;

	blkh = (struct block_head *)buf;
	if (blkh_level(blkh) != DISK_LEAF_NODE_LEVEL) {
		reiserfs_warning(NULL, "reiserfs-5080",
				 "this should be caught earlier");
		return 0;
	}

	nr = blkh_nr_item(blkh);
	if (nr < 1 || nr > ((blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN))) {
		/* item number is too big or too small */
		reiserfs_warning(NULL, "reiserfs-5081",
				 "nr_item seems wrong: %z", bh);
		return 0;
	}
	/* The last item header gives the lowest item body location;
	   everything from there to the end of the block is item data. */
	ih = (struct item_head *)(buf + BLKH_SIZE) + nr - 1;
	used_space = BLKH_SIZE + IH_SIZE * nr + (blocksize - ih_location(ih));
	if (used_space != blocksize - blkh_free_space(blkh)) {
		/* free space does not match to calculated amount of use space */
		reiserfs_warning(NULL, "reiserfs-5082",
				 "free space seems wrong: %z", bh);
		return 0;
	}
	// FIXME: it is_leaf will hit performance too much - we may have
	// return 1 here

	/* check tables of item heads */
	ih = (struct item_head *)(buf + BLKH_SIZE);
	prev_location = blocksize;
	for (i = 0; i < nr; i++, ih++) {
		if (le_ih_k_type(ih) == TYPE_ANY) {
			reiserfs_warning(NULL, "reiserfs-5083",
					 "wrong item type for item %h",
					 ih);
			return 0;
		}
		/* item bodies must lie inside the block, above the header table */
		if (ih_location(ih) >= blocksize
		    || ih_location(ih) < IH_SIZE * nr) {
			reiserfs_warning(NULL, "reiserfs-5084",
					 "item location seems wrong: %h",
					 ih);
			return 0;
		}
		if (ih_item_len(ih) < 1
		    || ih_item_len(ih) > MAX_ITEM_LEN(blocksize)) {
			reiserfs_warning(NULL, "reiserfs-5085",
					 "item length seems wrong: %h",
					 ih);
			return 0;
		}
		/* items must be packed back-to-back, highest location first */
		if (prev_location - ih_location(ih) != ih_item_len(ih)) {
			reiserfs_warning(NULL, "reiserfs-5086",
					 "item location seems wrong "
					 "(second one): %h", ih);
			return 0;
		}
		prev_location = ih_location(ih);
	}

	// one may imagine much more checks
	return 1;
}
/* returns 1 if buf looks like an internal node, 0 otherwise */
static int is_internal(char *buf, int blocksize, struct buffer_head *bh)
{
	struct block_head *blkh = (struct block_head *)buf;
	int level;
	int nr;
	int used_space;

	/* Internal nodes live strictly between the leaf level and the
	   maximal tree height. */
	level = blkh_level(blkh);
	if (level <= DISK_LEAF_NODE_LEVEL || level > MAX_HEIGHT) {
		/* this level is not possible for internal nodes */
		reiserfs_warning(NULL, "reiserfs-5087",
				 "this should be caught earlier");
		return 0;
	}

	nr = blkh_nr_item(blkh);
	if (nr > (blocksize - BLKH_SIZE - DC_SIZE) / (KEY_SIZE + DC_SIZE)) {
		/* for internal which is not root we might check min number of keys */
		reiserfs_warning(NULL, "reiserfs-5088",
				 "number of key seems wrong: %z", bh);
		return 0;
	}

	/* n keys are always accompanied by n + 1 disk-child pointers. */
	used_space = BLKH_SIZE + KEY_SIZE * nr + DC_SIZE * (nr + 1);
	if (used_space != blocksize - blkh_free_space(blkh)) {
		reiserfs_warning(NULL, "reiserfs-5089",
				 "free space seems wrong: %z", bh);
		return 0;
	}

	// one may imagine much more checks
	return 1;
}
// make sure that bh contains formatted node of reiserfs tree of
// 'level'-th level
// (dispatches to is_leaf() for the leaf level, is_internal() otherwise;
//  returns 1 when the node passes, 0 after a warning when it does not)
static int is_tree_node(struct buffer_head *bh, int level)
{
	if (B_LEVEL(bh) != level) {
		reiserfs_warning(NULL, "reiserfs-5090", "node level %d does "
				 "not match to the expected one %d",
				 B_LEVEL(bh), level);
		return 0;
	}
	if (level == DISK_LEAF_NODE_LEVEL)
		return is_leaf(bh->b_data, bh->b_size, bh);

	return is_internal(bh->b_data, bh->b_size, bh);
}
/* Number of sibling leaf blocks prefetched ahead of the one needed. */
#define SEARCH_BY_KEY_READA 16

/*
 * The function is NOT SCHEDULE-SAFE!
 * It might unlock the write lock if we needed to wait for a block
 * to be read. Note that in this case it won't recover the lock to avoid
 * high contention resulting from too much lock requests, especially
 * the caller (search_by_key) will perform other schedule-unsafe
 * operations just after calling this function.
 *
 * @return true if we have unlocked
 */
static bool search_by_key_reada(struct super_block *s,
				struct buffer_head **bh,
				b_blocknr_t *b, int num)
{
	int i, j;
	bool unlocked = false;

	/* Take a reference on each block first ... */
	for (i = 0; i < num; i++) {
		bh[i] = sb_getblk(s, b[i]);
	}
	/*
	 * We are going to read some blocks on which we
	 * have a reference. It's safe, though we might be
	 * reading blocks concurrently changed if we release
	 * the lock. But it's still fine because we check later
	 * if the tree changed
	 */
	for (j = 0; j < i; j++) {
		/*
		 * note, this needs attention if we are getting rid of the BKL
		 * you have to make sure the prepared bit isn't set on this buffer
		 */
		if (!buffer_uptodate(bh[j])) {
			/* Drop the write lock only once, before the first
			   read-ahead request is queued. */
			if (!unlocked) {
				reiserfs_write_unlock(s);
				unlocked = true;
			}
			ll_rw_block(READA, 1, bh + j);
		}
		brelse(bh[j]);
	}
	return unlocked;
}
/**************************************************************************
 * Algorithm   SearchByKey                                                *
 *             look for item in the Disk S+Tree by its key                *
 * Input:  sb   -  super block                                            *
 *         key  - pointer to the key to search                            *
 * Output: ITEM_FOUND, ITEM_NOT_FOUND or IO_ERROR                         *
 *         search_path - path from the root to the needed leaf            *
 **************************************************************************/

/* This function fills up the path from the root to the leaf as it
   descends the tree looking for the key.  It uses reiserfs_bread to
   try to find buffers in the cache given their block number.  If it
   does not find them in the cache it reads them from disk.  For each
   node search_by_key finds using reiserfs_bread it then uses
   bin_search to look through that node.  bin_search will find the
   position of the block_number of the next node if it is looking
   through an internal node.  If it is looking through a leaf node
   bin_search will find the position of the item which has key either
   equal to given key, or which is the maximal key less than the given
   key.  search_by_key returns a path that must be checked for the
   correctness of the top of the path but need not be checked for the
   correctness of the bottom of the path */
/* The function is NOT SCHEDULE-SAFE! */
int search_by_key(struct super_block *sb, const struct cpu_key *key,	/* Key to search. */
		  struct treepath *search_path,/* This structure was
						   allocated and initialized
						   by the calling
						   function. It is filled up
						   by this function. */
		  int stop_level	/* How far down the tree to search. To
					   stop at leaf level - set to
					   DISK_LEAF_NODE_LEVEL */
    )
{
	b_blocknr_t block_number;
	int expected_level;
	struct buffer_head *bh;
	struct path_element *last_element;
	int node_level, retval;
	int right_neighbor_of_leaf_node;
	int fs_gen;
	struct buffer_head *reada_bh[SEARCH_BY_KEY_READA];
	b_blocknr_t reada_blocks[SEARCH_BY_KEY_READA];
	int reada_count = 0;

#ifdef CONFIG_REISERFS_CHECK
	int repeat_counter = 0;
#endif

	PROC_INFO_INC(sb, search_by_key);

	/* As we add each node to a path we increase its count.  This means that
	   we must be careful to release all nodes in a path before we either
	   discard the path struct or re-use the path struct, as we do here. */
	pathrelse(search_path);

	right_neighbor_of_leaf_node = 0;

	/* With each iteration of this loop we search through the items in the
	   current node, and calculate the next current node(next path element)
	   for the next iteration of this loop.. */
	block_number = SB_ROOT_BLOCK(sb);
	expected_level = -1;
	while (1) {

#ifdef CONFIG_REISERFS_CHECK
		if (!(++repeat_counter % 50000))
			reiserfs_warning(sb, "PAP-5100",
					 "%s: there were %d iterations of "
					 "while loop looking for key %K",
					 current->comm, repeat_counter,
					 key);
#endif

		/* prep path to have another element added to it. */
		last_element =
		    PATH_OFFSET_PELEMENT(search_path,
					 ++search_path->path_length);
		fs_gen = get_generation(sb);

		/* Read the next tree node, and set the last element in the path to
		   have a pointer to it. */
		if ((bh = last_element->pe_buffer =
		     sb_getblk(sb, block_number))) {
			bool unlocked = false;

			if (!buffer_uptodate(bh) && reada_count > 1)
				/* may unlock the write lock */
				unlocked = search_by_key_reada(sb, reada_bh,
						    reada_blocks, reada_count);
			/*
			 * If we haven't already unlocked the write lock,
			 * then we need to do that here before reading
			 * the current block
			 */
			if (!buffer_uptodate(bh) && !unlocked) {
				reiserfs_write_unlock(sb);
				unlocked = true;
			}
			ll_rw_block(READ, 1, &bh);
			wait_on_buffer(bh);

			if (unlocked)
				reiserfs_write_lock(sb);
			if (!buffer_uptodate(bh))
				goto io_error;
		} else {
		      io_error:
			search_path->path_length--;
			pathrelse(search_path);
			return IO_ERROR;
		}
		/* the read-ahead batch has been consumed (or discarded) */
		reada_count = 0;
		if (expected_level == -1)
			expected_level = SB_TREE_HEIGHT(sb);
		expected_level--;

		/* It is possible that schedule occurred. We must check whether the key
		   to search is still in the tree rooted from the current buffer. If
		   not then repeat search from the root. */
		if (fs_changed(fs_gen, sb) &&
		    (!B_IS_IN_TREE(bh) ||
		     B_LEVEL(bh) != expected_level ||
		     !key_in_buffer(search_path, key, sb))) {
			PROC_INFO_INC(sb, search_by_key_fs_changed);
			PROC_INFO_INC(sb, search_by_key_restarted);
			PROC_INFO_INC(sb,
				      sbk_restarted[expected_level - 1]);
			pathrelse(search_path);

			/* Get the root block number so that we can repeat the search
			   starting from the root. */
			block_number = SB_ROOT_BLOCK(sb);
			expected_level = -1;
			right_neighbor_of_leaf_node = 0;

			/* repeat search from the root */
			continue;
		}

		/* only check that the key is in the buffer if key is not
		   equal to the MAX_KEY. Latter case is only possible in
		   "finish_unfinished()" processing during mount. */
		RFALSE(comp_keys(&MAX_KEY, key) &&
		       !key_in_buffer(search_path, key, sb),
		       "PAP-5130: key is not in the buffer");
#ifdef CONFIG_REISERFS_CHECK
		if (REISERFS_SB(sb)->cur_tb) {
			print_cur_tb("5140");
			reiserfs_panic(sb, "PAP-5140",
				       "schedule occurred in do_balance!");
		}
#endif

		// make sure, that the node contents look like a node of
		// certain level
		if (!is_tree_node(bh, expected_level)) {
			reiserfs_error(sb, "vs-5150",
				       "invalid format found in block %ld. "
				       "Fsck?", bh->b_blocknr);
			pathrelse(search_path);
			return IO_ERROR;
		}

		/* ok, we have acquired next formatted node in the tree */
		node_level = B_LEVEL(bh);

		PROC_INFO_BH_STAT(sb, bh, node_level - 1);

		RFALSE(node_level < stop_level,
		       "vs-5152: tree level (%d) is less than stop level (%d)",
		       node_level, stop_level);

		/* internal nodes are searched by key stride, leaves by
		   item-header stride */
		retval = bin_search(key, B_N_PITEM_HEAD(bh, 0),
				      B_NR_ITEMS(bh),
				      (node_level ==
				       DISK_LEAF_NODE_LEVEL) ? IH_SIZE :
				      KEY_SIZE,
				      &(last_element->pe_position));
		if (node_level == stop_level) {
			return retval;
		}

		/* we are not in the stop level */
		if (retval == ITEM_FOUND)
			/* item has been found, so we choose the pointer which is to the right of the found one */
			last_element->pe_position++;

		/* if item was not found we choose the position which is to
		   the left of the found item. This requires no code,
		   bin_search did it already. */

		/* So we have chosen a position in the current node which is
		   an internal node.  Now we calculate child block number by
		   position in the node. */
		block_number =
		    B_N_CHILD_NUM(bh, last_element->pe_position);

		/* if we are going to read leaf nodes, try for read ahead as well */
		if ((search_path->reada & PATH_READA) &&
		    node_level == DISK_LEAF_NODE_LEVEL + 1) {
			int pos = last_element->pe_position;
			int limit = B_NR_ITEMS(bh);
			struct reiserfs_key *le_key;

			if (search_path->reada & PATH_READA_BACK)
				limit = 0;
			while (reada_count < SEARCH_BY_KEY_READA) {
				if (pos == limit)
					break;
				reada_blocks[reada_count++] =
				    B_N_CHILD_NUM(bh, pos);
				if (search_path->reada & PATH_READA_BACK)
					pos--;
				else
					pos++;

				/*
				 * check to make sure we're in the same object
				 */
				le_key = B_N_PDELIM_KEY(bh, pos);
				if (le32_to_cpu(le_key->k_objectid) !=
				    key->on_disk_key.k_objectid) {
					break;
				}
			}
		}
	}
}
/* Form the path to an item and position in this item which contains
   file byte defined by key. If there is no such item
   corresponding to the key, we point the path to the item with
   maximal key less than key, and *pos_in_item is set to one
   past the last entry/byte in the item.  If searching for entry in a
   directory item, and it is not found, *pos_in_item is set to one
   entry more than the entry with maximal key which is less than the
   sought key.

   Note that if there is no entry in this same node which is one more,
   then we point to an imaginary entry.  for direct items, the
   position is in units of bytes, for indirect items the position is
   in units of blocknr entries, for directory items the position is in
   units of directory entries.  */
/* The function is NOT SCHEDULE-SAFE! */
/* Returns POSITION_FOUND, POSITION_NOT_FOUND, FILE_NOT_FOUND or IO_ERROR. */
int search_for_position_by_key(struct super_block *sb,	/* Pointer to the super block.  */
			       const struct cpu_key *p_cpu_key,	/* Key to search (cpu variable) */
			       struct treepath *search_path	/* Filled up by this function.  */
    )
{
	struct item_head *p_le_ih;	/* pointer to on-disk structure */
	int blk_size;
	loff_t item_offset, offset;
	struct reiserfs_dir_entry de;
	int retval;

	/* If searching for directory entry. */
	if (is_direntry_cpu_key(p_cpu_key))
		return search_by_entry_key(sb, p_cpu_key, search_path,
					   &de);

	/* If not searching for directory entry. */

	/* If item is found. */
	retval = search_item(sb, p_cpu_key, search_path);
	if (retval == IO_ERROR)
		return retval;
	if (retval == ITEM_FOUND) {

		RFALSE(!ih_item_len
		       (B_N_PITEM_HEAD
			(PATH_PLAST_BUFFER(search_path),
			 PATH_LAST_POSITION(search_path))),
		       "PAP-5165: item length equals zero");

		pos_in_item(search_path) = 0;
		return POSITION_FOUND;
	}

	RFALSE(!PATH_LAST_POSITION(search_path),
	       "PAP-5170: position equals zero");

	/* Item is not found. Set path to the previous item. */
	p_le_ih =
	    B_N_PITEM_HEAD(PATH_PLAST_BUFFER(search_path),
			   --PATH_LAST_POSITION(search_path));
	blk_size = sb->s_blocksize;

	/* Previous item belongs to a different object: the sought byte
	   does not exist in this file at all. */
	if (comp_short_keys(&(p_le_ih->ih_key), p_cpu_key)) {
		return FILE_NOT_FOUND;
	}
	// FIXME: quite ugly this far

	item_offset = le_ih_k_offset(p_le_ih);
	offset = cpu_key_k_offset(p_cpu_key);

	/* Needed byte is contained in the item pointed to by the path. */
	if (item_offset <= offset &&
	    item_offset + op_bytes_number(p_le_ih, blk_size) > offset) {
		pos_in_item(search_path) = offset - item_offset;
		if (is_indirect_le_ih(p_le_ih)) {
			/* indirect positions are counted in block pointers,
			   not bytes */
			pos_in_item(search_path) /= blk_size;
		}
		return POSITION_FOUND;
	}

	/* Needed byte is not contained in the item pointed to by the
	   path. Set pos_in_item out of the item. */
	if (is_indirect_le_ih(p_le_ih))
		pos_in_item(search_path) =
		    ih_item_len(p_le_ih) / UNFM_P_SIZE;
	else
		pos_in_item(search_path) = ih_item_len(p_le_ih);

	return POSITION_NOT_FOUND;
}
/* Compare given item and item pointed to by the path.  Returns 0 when
   the path still points at an identical item header, non-zero when the
   path has gone stale or the headers differ. */
int comp_items(const struct item_head *stored_ih, const struct treepath *path)
{
	struct buffer_head *bh = PATH_PLAST_BUFFER(path);

	/* Last buffer at the path is not in the tree. */
	if (!B_IS_IN_TREE(bh))
		return 1;

	/* Last path position is invalid. */
	if (PATH_LAST_POSITION(path) >= B_NR_ITEMS(bh))
		return 1;

	/* we need only to know, whether it is the same item */
	return memcmp(stored_ih, get_ih(path), IH_SIZE);
}
/* unformatted nodes are not logged anymore, ever. This is safe
** now
*/
/* true when somebody besides us holds a reference on the buffer */
#define held_by_others(bh) (atomic_read(&(bh)->b_count) > 1)

// block can not be forgotten as it is in I/O or held by someone
#define block_in_use(bh) (buffer_locked(bh) || (held_by_others(bh)))
// prepare for delete or cut of direct item
/* Decide how balancing must treat a direct item when its file is
 * deleted or truncated to new_file_length: M_DELETE drops the whole
 * item, M_CUT removes only its tail.  *cut_size receives the
 * (negative) number of bytes to remove; for M_CUT, pos_in_item(path)
 * is set to the first byte being cut. */
static inline int prepare_for_direct_item(struct treepath *path,
					  struct item_head *le_ih,
					  struct inode *inode,
					  loff_t new_file_length, int *cut_size)
{
	loff_t round_len;

	if (new_file_length == max_reiserfs_offset(inode)) {
		/* item has to be deleted */
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		return M_DELETE;
	}
	// new file gets truncated
	if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) {
		/* 3.6 keys measure offsets in bytes; round the new length
		   up before comparing against the item offset. */
		round_len = ROUND_UP(new_file_length);
		/* this was new_file_length < le_ih ... */
		if (round_len < le_ih_k_offset(le_ih)) {
			*cut_size = -(IH_SIZE + ih_item_len(le_ih));
			return M_DELETE;	/* Delete this item. */
		}
		/* Calculate first position and size for cutting from item. */
		pos_in_item(path) = round_len - (le_ih_k_offset(le_ih) - 1);
		*cut_size = -(ih_item_len(le_ih) - pos_in_item(path));

		return M_CUT;	/* Cut from this item. */
	}

	// old file: items may have any length
	if (new_file_length < le_ih_k_offset(le_ih)) {
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		return M_DELETE;	/* Delete this item. */
	}
	/* Calculate first position and size for cutting from item. */
	*cut_size = -(ih_item_len(le_ih) -
		      (pos_in_item(path) =
		       new_file_length + 1 - le_ih_k_offset(le_ih)));
	return M_CUT;	/* Cut from this item. */
}
/* Decide how balancing must treat a directory item during delete or
   truncate: remove the whole item (M_DELETE) or cut a single entry
   out of it (M_CUT).  *cut_size receives the (negative) number of
   bytes to remove. */
static inline int prepare_for_direntry_item(struct treepath *path,
					    struct item_head *le_ih,
					    struct inode *inode,
					    loff_t new_file_length,
					    int *cut_size)
{
	/* An empty directory being wiped entirely: drop the item that
	   holds the "." and ".." entries. */
	if (le_ih_k_offset(le_ih) == DOT_OFFSET &&
	    new_file_length == max_reiserfs_offset(inode)) {
		RFALSE(ih_entry_count(le_ih) != 2,
		       "PAP-5220: incorrect empty directory item (%h)", le_ih);
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		/* Delete the directory item containing "." and ".." entry. */
		return M_DELETE;
	}

	/* Only one record left in the item: delete the item itself. */
	if (ih_entry_count(le_ih) == 1) {
		*cut_size = -(IH_SIZE + ih_item_len(le_ih));
		return M_DELETE;
	}

	/* Cut one record from the directory item. */
	*cut_size = -(DEH_SIZE + entry_length(get_last_bh(path), le_ih,
					      pos_in_item(path)));
	return M_CUT;
}
/* Journal space needed to free one block and update the stat data. */
#define JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD (2 * JOURNAL_PER_BALANCE_CNT + 1)

/* If the path points to a directory or direct item, calculate mode and the size cut, for balance.
   If the path points to an indirect item, remove some number of its unformatted nodes.
   In case of file truncate calculate whether this item must be deleted/truncated or last
   unformatted node of this item will be converted to a direct item.
   This function returns a determination of what balance mode the calling function should employ. */
static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, struct inode *inode, struct treepath *path, const struct cpu_key *item_key, int *removed,	/* Number of unformatted nodes which were removed
										   from end of the file. */
				      int *cut_size, unsigned long long new_file_length	/* MAX_KEY_OFFSET in case of delete. */
    )
{
	struct super_block *sb = inode->i_sb;
	struct item_head *p_le_ih = PATH_PITEM_HEAD(path);
	struct buffer_head *bh = PATH_PLAST_BUFFER(path);

	BUG_ON(!th->t_trans_id);

	/* Stat_data item. */
	if (is_statdata_le_ih(p_le_ih)) {

		RFALSE(new_file_length != max_reiserfs_offset(inode),
		       "PAP-5210: mode must be M_DELETE");

		*cut_size = -(IH_SIZE + ih_item_len(p_le_ih));
		return M_DELETE;
	}

	/* Directory item. */
	if (is_direntry_le_ih(p_le_ih))
		return prepare_for_direntry_item(path, p_le_ih, inode,
						 new_file_length,
						 cut_size);

	/* Direct item. */
	if (is_direct_le_ih(p_le_ih))
		return prepare_for_direct_item(path, p_le_ih, inode,
					       new_file_length, cut_size);

	/* Case of an indirect item. */
	{
		int blk_size = sb->s_blocksize;
		struct item_head s_ih;
		int need_re_search;
		int delete = 0;
		int result = M_CUT;
		int pos = 0;

		if ( new_file_length == max_reiserfs_offset (inode) ) {
			/* prepare_for_delete_or_cut() is called by
			 * reiserfs_delete_item() */
			new_file_length = 0;
			delete = 1;
		}

		do {
			need_re_search = 0;
			*cut_size = 0;
			bh = PATH_PLAST_BUFFER(path);
			copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
			pos = I_UNFM_NUM(&s_ih);

			/* Free unformatted blocks from the item tail down
			   towards new_file_length, one pointer at a time. */
			while (le_ih_k_offset (&s_ih) + (pos - 1) * blk_size > new_file_length) {
				__le32 *unfm;
				__u32 block;

				/* Each unformatted block deletion may involve one additional
				 * bitmap block into the transaction, thereby the initial
				 * journal space reservation might not be enough. */
				if (!delete && (*cut_size) != 0 &&
				    reiserfs_transaction_free_space(th) < JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD)
					break;

				unfm = (__le32 *)B_I_PITEM(bh, &s_ih) + pos - 1;
				block = get_block_num(unfm, 0);

				if (block != 0) {
					reiserfs_prepare_for_journal(sb, bh, 1);
					put_block_num(unfm, 0, 0);
					journal_mark_dirty(th, sb, bh);
					reiserfs_free_block(th, inode, block, 1);
				}

				/* Give other tasks a chance to run; the tree
				   may change while the lock is dropped, hence
				   the item_moved() check below. */
				reiserfs_write_unlock(sb);
				cond_resched();
				reiserfs_write_lock(sb);

				if (item_moved (&s_ih, path))  {
					need_re_search = 1;
					break;
				}

				pos --;
				(*removed)++;
				(*cut_size) -= UNFM_P_SIZE;

				if (pos == 0) {
					(*cut_size) -= IH_SIZE;
					result = M_DELETE;
					break;
				}
			}
			/* a trick.  If the buffer has been logged, this will do nothing.  If
			** we've broken the loop without logging it, it will restore the
			** buffer */
			reiserfs_restore_prepared_buffer(sb, bh);
		} while (need_re_search &&
			 search_for_position_by_key(sb, item_key, path) == POSITION_FOUND);
		pos_in_item(path) = pos * UNFM_P_SIZE;

		if (*cut_size == 0) {
			/* Nothing were cut. maybe convert last unformatted node to the
			 * direct item? */
			result = M_CONVERT;
		}
		return result;
	}
}
/* Calculate number of bytes which will be deleted or cut during balance */
static int calc_deleted_bytes_number(struct tree_balance *tb, char mode)
{
	struct item_head *p_le_ih = PATH_PITEM_HEAD(tb->tb_path);
	int bytes;

	/* Stat-data items carry no file body. */
	if (is_statdata_le_ih(p_le_ih))
		return 0;

	bytes = (mode == M_DELETE) ? ih_item_len(p_le_ih)
				   : -tb->insert_size[0];

	if (is_direntry_le_ih(p_le_ih)) {
		/* return EMPTY_DIR_SIZE; We delete emty directoris only.
		 * we can't use EMPTY_DIR_SIZE, as old format dirs have a different
		 * empty size. ick. FIXME, is this right? */
		return bytes;
	}

	/* For indirect items every pointer stands for a whole block. */
	if (is_indirect_le_ih(p_le_ih))
		bytes = (bytes / UNFM_P_SIZE) *
		    (PATH_PLAST_BUFFER(tb->tb_path)->b_size);

	return bytes;
}
/* Zero a tree_balance descriptor and attach the transaction handle,
   super block, path and initial insert size to it. */
static void init_tb_struct(struct reiserfs_transaction_handle *th,
			   struct tree_balance *tb,
			   struct super_block *sb,
			   struct treepath *path, int size)
{
	BUG_ON(!th->t_trans_id);

	memset(tb, 0, sizeof(*tb));
	tb->transaction_handle = th;
	tb->tb_sb = sb;
	tb->tb_path = path;

	/* Reset the guard element below the first real path slot. */
	PATH_OFFSET_PBUFFER(path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL;
	PATH_OFFSET_POSITION(path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0;

	tb->insert_size[0] = size;
}
/*
 * Zero-fill the tail of an item buffer: bytes [length, total_length)
 * of @item are set to 0.  Used to pad item bodies to their on-disk
 * size.  A no-op when total_length <= length (matching the original
 * byte-by-byte loop, and avoiding a huge unsigned memset size).
 */
void padd_item(char *item, int total_length, int length)
{
	/* memset replaces the original hand-rolled loop: same effect,
	 * clearer intent, and the libc/arch-optimized fill is used. */
	if (total_length > length)
		memset(item + length, 0, total_length - length);
}
#ifdef REISERQUOTA_DEBUG
/* Map an on-disk key to a one-letter type tag for quota debugging:
   'd'irentry, 'D'irect, 'i'ndirect, 's'tat-data or 'u'nknown. */
char key2type(struct reiserfs_key *ih)
{
	return is_direntry_le_key(2, ih) ? 'd' :
	       is_direct_le_key(2, ih) ? 'D' :
	       is_indirect_le_key(2, ih) ? 'i' :
	       is_statdata_le_key(2, ih) ? 's' : 'u';
}
/* Map an item head to a one-letter type tag for quota debugging:
   'd'irentry, 'D'irect, 'i'ndirect, 's'tat-data or 'u'nknown. */
char head2type(struct item_head *ih)
{
	return is_direntry_le_ih(ih) ? 'd' :
	       is_direct_le_ih(ih) ? 'D' :
	       is_indirect_le_ih(ih) ? 'i' :
	       is_statdata_le_ih(ih) ? 's' : 'u';
}
#endif
/* Delete object item.
 * th       - active transaction handle
 * path     - path to the deleted item
 * item_key - key to search for the deleted item
 * inode    - used for updating i_blocks and quotas
 * un_bh    - NULL or unformatted node pointer
 *
 * Returns the deleted body length on success, 0 when fix_nodes could
 * not gather the resources needed for the balance.
 */
int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
			 struct treepath *path, const struct cpu_key *item_key,
			 struct inode *inode, struct buffer_head *un_bh)
{
	struct super_block *sb = inode->i_sb;
	struct tree_balance s_del_balance;
	struct item_head s_ih;
	struct item_head *q_ih;
	int quota_cut_bytes;
	int ret_value, del_size, removed;

#ifdef CONFIG_REISERFS_CHECK
	char mode;
	int iter = 0;
#endif

	BUG_ON(!th->t_trans_id);

	init_tb_struct(th, &s_del_balance, sb, path,
		       0 /*size is unknown */ );

	/* Retry loop: fix_nodes may ask for a repeated search when the
	   tree changed while gathering balance resources. */
	while (1) {
		removed = 0;

#ifdef CONFIG_REISERFS_CHECK
		iter++;
		mode =
#endif
		    prepare_for_delete_or_cut(th, inode, path,
					      item_key, &removed,
					      &del_size,
					      max_reiserfs_offset(inode));

		RFALSE(mode != M_DELETE, "PAP-5320: mode must be M_DELETE");

		copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
		s_del_balance.insert_size[0] = del_size;

		ret_value = fix_nodes(M_DELETE, &s_del_balance, NULL, NULL);
		if (ret_value != REPEAT_SEARCH)
			break;

		PROC_INFO_INC(sb, delete_item_restarted);

		// file system changed, repeat search
		ret_value =
		    search_for_position_by_key(sb, item_key, path);
		if (ret_value == IO_ERROR)
			break;
		if (ret_value == FILE_NOT_FOUND) {
			reiserfs_warning(sb, "vs-5340",
					 "no items of the file %K found",
					 item_key);
			break;
		}
	}			/* while (1) */

	if (ret_value != CARRY_ON) {
		unfix_nodes(&s_del_balance);
		return 0;
	}
	// reiserfs_delete_item returns item length when success
	ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE);
	q_ih = get_ih(path);
	quota_cut_bytes = ih_item_len(q_ih);

	/* hack so the quota code doesn't have to guess if the file
	 ** has a tail. On tail insert, we allocate quota for 1 unformatted node.
	 ** We test the offset because the tail might have been
	 ** split into multiple items, and we only want to decrement for
	 ** the unfm node once
	 */
	if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(q_ih)) {
		if ((le_ih_k_offset(q_ih) & (sb->s_blocksize - 1)) == 1) {
			quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE;
		} else {
			quota_cut_bytes = 0;
		}
	}

	if (un_bh) {
		int off;
		char *data;

		/* We are in direct2indirect conversion, so move tail contents
		   to the unformatted node */
		/* note, we do the copy before preparing the buffer because we
		 ** don't care about the contents of the unformatted node yet.
		 ** the only thing we really care about is the direct item's data
		 ** is in the unformatted node.
		 **
		 ** Otherwise, we would have to call reiserfs_prepare_for_journal on
		 ** the unformatted node, which might schedule, meaning we'd have to
		 ** loop all the way back up to the start of the while loop.
		 **
		 ** The unformatted node must be dirtied later on.  We can't be
		 ** sure here if the entire tail has been deleted yet.
		 **
		 ** un_bh is from the page cache (all unformatted nodes are
		 ** from the page cache) and might be a highmem page.  So, we
		 ** can't use un_bh->b_data.
		 ** -clm
		 */

		data = kmap_atomic(un_bh->b_page);
		off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
		memcpy(data + off,
		       B_I_PITEM(PATH_PLAST_BUFFER(path), &s_ih),
		       ret_value);
		kunmap_atomic(data);
	}
	/* Perform balancing after all resources have been collected at once. */
	do_balance(&s_del_balance, NULL, NULL, M_DELETE);

#ifdef REISERQUOTA_DEBUG
	reiserfs_debug(sb, REISERFS_DEBUG_CODE,
		       "reiserquota delete_item(): freeing %u, id=%u type=%c",
		       quota_cut_bytes, inode->i_uid, head2type(&s_ih));
#endif
	dquot_free_space_nodirty(inode, quota_cut_bytes);

	/* Return deleted body length */
	return ret_value;
}
/* Summary Of Mechanisms For Handling Collisions Between Processes:
deletion of the body of the object is performed by iput(), with the
result that if multiple processes are operating on a file, the
deletion of the body of the file is deferred until the last process
that has an open inode performs its iput().
writes and truncates are protected from collisions by use of
semaphores.
creates, linking, and mknod are protected from collisions with other
processes by making the reiserfs_add_entry() the last step in the
creation, and then rolling back all changes if there was a collision.
- Hans
*/
/* this deletes item which never gets split */
/* Used for stat-data and save-link items: find 'key' and balance the
 * whole item out of the tree, retrying while fix_nodes asks for a
 * repeated search.  Quota is released only when 'inode' is non-NULL
 * (save-links are not quota-accounted). */
void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
				struct inode *inode, struct reiserfs_key *key)
{
	struct tree_balance tb;
	INITIALIZE_PATH(path);
	int item_len = 0;
	int tb_init = 0;
	struct cpu_key cpu_key;
	int retval;
	int quota_cut_bytes = 0;

	BUG_ON(!th->t_trans_id);

	le_key2cpu_key(&cpu_key, key);

	while (1) {
		retval = search_item(th->t_super, &cpu_key, &path);
		if (retval == IO_ERROR) {
			reiserfs_error(th->t_super, "vs-5350",
				       "i/o failure occurred trying "
				       "to delete %K", &cpu_key);
			break;
		}
		if (retval != ITEM_FOUND) {
			pathrelse(&path);
			// No need for a warning, if there is just no free space to insert '..' item into the newly-created subdir
			if (!
			    ((unsigned long long)
			     GET_HASH_VALUE(le_key_k_offset
					    (le_key_version(key), key)) == 0
			     && (unsigned long long)
			     GET_GENERATION_NUMBER(le_key_k_offset
						   (le_key_version(key),
						    key)) == 1))
				reiserfs_warning(th->t_super, "vs-5355",
						 "%k not found", key);
			break;
		}
		/* The tree_balance is set up once, on the first pass. */
		if (!tb_init) {
			tb_init = 1;
			item_len = ih_item_len(PATH_PITEM_HEAD(&path));
			init_tb_struct(th, &tb, th->t_super, &path,
				       -(IH_SIZE + item_len));
		}
		quota_cut_bytes = ih_item_len(PATH_PITEM_HEAD(&path));

		retval = fix_nodes(M_DELETE, &tb, NULL, NULL);
		if (retval == REPEAT_SEARCH) {
			PROC_INFO_INC(th->t_super, delete_solid_item_restarted);
			continue;
		}

		if (retval == CARRY_ON) {
			do_balance(&tb, NULL, NULL, M_DELETE);
			if (inode) {	/* Should we count quota for item? (we don't count quotas for save-links) */
#ifdef REISERQUOTA_DEBUG
				reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
					       "reiserquota delete_solid_item(): freeing %u id=%u type=%c",
					       quota_cut_bytes, inode->i_uid,
					       key2type(key));
#endif
				dquot_free_space_nodirty(inode,
							 quota_cut_bytes);
			}
			break;
		}
		// IO_ERROR, NO_DISK_SPACE, etc
		reiserfs_warning(th->t_super, "vs-5360",
				 "could not delete %K due to fix_nodes failure",
				 &cpu_key);
		unfix_nodes(&tb);
		break;
	}

	reiserfs_check_path(&path);
}
/*
 * Delete the whole body and stat data of @inode: truncate the body to
 * zero length, bump the superblock inode-generation counter (new format
 * only), then remove the stat-data item itself.  Returns 0 or a negative
 * error from the truncate step.
 */
int reiserfs_delete_object(struct reiserfs_transaction_handle *th,
			   struct inode *inode)
{
	int err;
	inode->i_size = 0;
	BUG_ON(!th->t_trans_id);

	/* for directory this deletes item containing "." and ".." */
	err =
	    reiserfs_do_truncate(th, inode, NULL, 0 /*no timestamp updates */ );
	if (err)
		return err;

#if defined( USE_INODE_GENERATION_COUNTER )
	if (!old_format_only(th->t_super)) {
		__le32 *inode_generation;

		inode_generation =
		    &REISERFS_SB(th->t_super)->s_rs->s_inode_generation;
		le32_add_cpu(inode_generation, 1);
	}
/* USE_INODE_GENERATION_COUNTER */
#endif
	/* finally remove the stat-data item keyed by the inode's packing key */
	reiserfs_delete_solid_item(th, inode, INODE_PKEY(inode));

	return err;
}
/*
 * Unmap every buffer of @page that lies at or beyond the file tail at
 * byte offset @pos.  Buffers before the tail are left alone: they may
 * hold dirty data still waiting to reach disk.  A NULL page or a page
 * without buffers is a no-op.
 */
static void unmap_buffers(struct page *page, loff_t pos)
{
	struct buffer_head *head, *cur, *nxt;
	unsigned long tail_off, scanned;

	if (!page || !page_has_buffers(page))
		return;

	tail_off = pos & (PAGE_CACHE_SIZE - 1);
	scanned = 0;
	head = page_buffers(page);
	cur = head;
	do {
		nxt = cur->b_this_page;
		/* we want to unmap the buffers that contain the tail, and
		 * all the buffers after it (since the tail must be at the
		 * end of the file).  We don't want to unmap file data
		 * before the tail, since it might be dirty and waiting to
		 * reach disk. */
		scanned += cur->b_size;
		if (scanned > tail_off)
			reiserfs_unmap_buffer(cur);
		cur = nxt;
	} while (cur != head);
}
/*
 * Decide whether the file tail should be converted from an unformatted
 * (indirect) node into a direct item during truncate.  If conversion is
 * not wanted (file still open elsewhere, tail packing disabled, no page,
 * or nopack flag set) the tail stays unformatted: *mode is set to
 * M_SKIP_BALANCING and the number of bytes to cut from the unformatted
 * node is returned.  Otherwise the actual indirect2direct conversion is
 * performed and its result returned.
 */
static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th,
				    struct inode *inode,
				    struct page *page,
				    struct treepath *path,
				    const struct cpu_key *item_key,
				    loff_t new_file_size, char *mode)
{
	struct super_block *sb = inode->i_sb;
	int block_size = sb->s_blocksize;
	int cut_bytes;
	BUG_ON(!th->t_trans_id);
	BUG_ON(new_file_size != inode->i_size);

	/* the page being sent in could be NULL if there was an i/o error
	 ** reading in the last block.  The user will hit problems trying to
	 ** read the file, but for now we just skip the indirect2direct
	 */
	if (atomic_read(&inode->i_count) > 1 ||
	    !tail_has_to_be_packed(inode) ||
	    !page || (REISERFS_I(inode)->i_flags & i_nopack_mask)) {
		/* leave tail in an unformatted node */
		*mode = M_SKIP_BALANCING;
		/* bytes past the new size within the last block */
		cut_bytes =
		    block_size - (new_file_size & (block_size - 1));
		pathrelse(path);
		return cut_bytes;
	}
	/* Perform the conversion to a direct_item. */
	/* return indirect_to_direct(inode, path, item_key,
				     new_file_size, mode); */
	return indirect2direct(th, inode, page, path, item_key,
			       new_file_size, mode);
}
/* we did indirect_to_direct conversion. And we have inserted direct
   item successesfully, but there were no disk space to cut unfm
   pointer being converted. Therefore we have to delete inserted
   direct item(s) */
static void indirect_to_direct_roll_back(struct reiserfs_transaction_handle *th,
					 struct inode *inode, struct treepath *path)
{
	struct cpu_key tail_key;
	int tail_len;
	int removed;
	BUG_ON(!th->t_trans_id);

	make_cpu_key(&tail_key, inode, inode->i_size + 1, TYPE_DIRECT, 4);	// !!!!
	tail_key.key_length = 4;

	/* number of tail bytes that were converted into direct item(s) */
	tail_len =
	    (cpu_key_k_offset(&tail_key) & (inode->i_sb->s_blocksize - 1)) - 1;
	while (tail_len) {
		/* look for the last byte of the tail */
		if (search_for_position_by_key(inode->i_sb, &tail_key, path) ==
		    POSITION_NOT_FOUND)
			reiserfs_panic(inode->i_sb, "vs-5615",
				       "found invalid item");
		RFALSE(path->pos_in_item !=
		       ih_item_len(PATH_PITEM_HEAD(path)) - 1,
		       "vs-5616: appended bytes found");
		PATH_LAST_POSITION(path)--;

		/* delete the direct item we inserted; repeat until the
		   whole converted tail is gone */
		removed =
		    reiserfs_delete_item(th, path, &tail_key, inode,
					 NULL /*unbh not needed */ );
		RFALSE(removed <= 0
		       || removed > tail_len,
		       "vs-5617: there was tail %d bytes, removed item length %d bytes",
		       tail_len, removed);
		tail_len -= removed;
		set_cpu_key_k_offset(&tail_key,
				     cpu_key_k_offset(&tail_key) - removed);
	}
	reiserfs_warning(inode->i_sb, "reiserfs-5091", "indirect_to_direct "
			 "conversion has been rolled back due to "
			 "lack of disk space");
	//mark_file_without_tail (inode);
	mark_inode_dirty(inode);
}
/* (Truncate or cut entry) or delete object item. Returns < 0 on failure */
/*
 * Cut bytes from (or delete) the last item of @inode during truncate.
 * prepare_for_delete_or_cut() chooses the balance mode; M_CONVERT means
 * the tail must first be converted (maybe_indirect_to_direct), after
 * which the loop restarts to remove the last unformatted node pointer.
 * On success returns the number of bytes actually removed from the file.
 */
int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
			   struct treepath *path,
			   struct cpu_key *item_key,
			   struct inode *inode,
			   struct page *page, loff_t new_file_size)
{
	struct super_block *sb = inode->i_sb;
	/* Every function which is going to call do_balance must first
	   create a tree_balance structure.  Then it must fill up this
	   structure by using the init_tb_struct and fix_nodes functions.
	   After that we can make tree balancing. */
	struct tree_balance s_cut_balance;
	struct item_head *p_le_ih;
	int cut_size = 0,	/* Amount to be cut. */
	    ret_value = CARRY_ON, removed = 0,	/* Number of the removed unformatted nodes. */
	    is_inode_locked = 0;	/* set once an indirect2direct conversion started */
	char mode;		/* Mode of the balance. */
	int retval2 = -1;	/* saved byte count from the conversion step */
	int quota_cut_bytes;
	loff_t tail_pos = 0;

	BUG_ON(!th->t_trans_id);

	init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
		       cut_size);

	/* Repeat this loop until we either cut the item without needing
	   to balance, or we fix_nodes without schedule occurring */
	while (1) {
		/* Determine the balance mode, position of the first byte to
		   be cut, and size to be cut.  In case of the indirect item
		   free unformatted nodes which are pointed to by the cut
		   pointers. */

		mode =
		    prepare_for_delete_or_cut(th, inode, path,
					      item_key, &removed,
					      &cut_size, new_file_size);
		if (mode == M_CONVERT) {
			/* convert last unformatted node to direct item or leave
			   tail in the unformatted node */
			RFALSE(ret_value != CARRY_ON,
			       "PAP-5570: can not convert twice");

			ret_value =
			    maybe_indirect_to_direct(th, inode, page,
						     path, item_key,
						     new_file_size, &mode);
			if (mode == M_SKIP_BALANCING)
				/* tail has been left in the unformatted node */
				return ret_value;

			is_inode_locked = 1;

			/* removing of last unformatted node will change value we
			   have to return to truncate. Save it */
			retval2 = ret_value;
			/*retval2 = sb->s_blocksize - (new_file_size & (sb->s_blocksize - 1)); */

			/* So, we have performed the first part of the conversion:
			   inserting the new direct item.  Now we are removing the
			   last unformatted node pointer. Set key to search for
			   it. */
			set_cpu_key_k_type(item_key, TYPE_INDIRECT);
			item_key->key_length = 4;
			new_file_size -=
			    (new_file_size & (sb->s_blocksize - 1));
			tail_pos = new_file_size;
			set_cpu_key_k_offset(item_key, new_file_size + 1);
			if (search_for_position_by_key
			    (sb, item_key,
			     path) == POSITION_NOT_FOUND) {
				print_block(PATH_PLAST_BUFFER(path), 3,
					    PATH_LAST_POSITION(path) - 1,
					    PATH_LAST_POSITION(path) + 1);
				reiserfs_panic(sb, "PAP-5580", "item to "
					       "convert does not exist (%K)",
					       item_key);
			}
			continue;
		}
		if (cut_size == 0) {
			/* nothing to cut — item boundary already matches */
			pathrelse(path);
			return 0;
		}

		s_cut_balance.insert_size[0] = cut_size;

		ret_value = fix_nodes(mode, &s_cut_balance, NULL, NULL);
		if (ret_value != REPEAT_SEARCH)
			break;

		/* fix_nodes slept; the tree may have changed, re-search */
		PROC_INFO_INC(sb, cut_from_item_restarted);

		ret_value =
		    search_for_position_by_key(sb, item_key, path);
		if (ret_value == POSITION_FOUND)
			continue;

		reiserfs_warning(sb, "PAP-5610", "item %K not found",
				 item_key);
		unfix_nodes(&s_cut_balance);
		return (ret_value == IO_ERROR) ? -EIO : -ENOENT;
	}			/* while */

	// check fix_nodes results (IO_ERROR or NO_DISK_SPACE)
	if (ret_value != CARRY_ON) {
		if (is_inode_locked) {
			// FIXME: this seems to be not needed: we are always able
			// to cut item
			indirect_to_direct_roll_back(th, inode, path);
		}
		if (ret_value == NO_DISK_SPACE)
			reiserfs_warning(sb, "reiserfs-5092",
					 "NO_DISK_SPACE");
		unfix_nodes(&s_cut_balance);
		return -EIO;
	}

	/* go ahead and perform balancing */

	RFALSE(mode == M_PASTE || mode == M_INSERT, "invalid mode");

	/* Calculate number of bytes that need to be cut from the item. */
	quota_cut_bytes =
	    (mode ==
	     M_DELETE) ? ih_item_len(get_ih(path)) : -s_cut_balance.
	    insert_size[0];
	if (retval2 == -1)
		ret_value = calc_deleted_bytes_number(&s_cut_balance, mode);
	else
		ret_value = retval2;

	/* For direct items, we only change the quota when deleting the last
	 ** item.
	 */
	p_le_ih = PATH_PITEM_HEAD(s_cut_balance.tb_path);
	if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(p_le_ih)) {
		if (mode == M_DELETE &&
		    (le_ih_k_offset(p_le_ih) & (sb->s_blocksize - 1)) ==
		    1) {
			// FIXME: this is to keep 3.5 happy
			REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
			quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE;
		} else {
			quota_cut_bytes = 0;
		}
	}
#ifdef CONFIG_REISERFS_CHECK
	if (is_inode_locked) {
		struct item_head *le_ih =
		    PATH_PITEM_HEAD(s_cut_balance.tb_path);
		/* we are going to complete indirect2direct conversion. Make
		   sure, that we exactly remove last unformatted node pointer
		   of the item */
		if (!is_indirect_le_ih(le_ih))
			reiserfs_panic(sb, "vs-5652",
				       "item must be indirect %h", le_ih);

		if (mode == M_DELETE && ih_item_len(le_ih) != UNFM_P_SIZE)
			reiserfs_panic(sb, "vs-5653", "completing "
				       "indirect2direct conversion indirect "
				       "item %h being deleted must be of "
				       "4 byte long", le_ih);

		if (mode == M_CUT
		    && s_cut_balance.insert_size[0] != -UNFM_P_SIZE) {
			reiserfs_panic(sb, "vs-5654", "can not complete "
				       "indirect2direct conversion of %h "
				       "(CUT, insert_size==%d)",
				       le_ih, s_cut_balance.insert_size[0]);
		}
		/* it would be useful to make sure, that right neighboring
		   item is direct item of this file */
	}
#endif

	do_balance(&s_cut_balance, NULL, NULL, mode);
	if (is_inode_locked) {
		/* we've done an indirect->direct conversion.  when the data block
		 ** was freed, it was removed from the list of blocks that must
		 ** be flushed before the transaction commits, make sure to
		 ** unmap and invalidate it
		 */
		unmap_buffers(page, tail_pos);
		REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
	}
#ifdef REISERQUOTA_DEBUG
	reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
		       "reiserquota cut_from_item(): freeing %u id=%u type=%c",
		       quota_cut_bytes, inode->i_uid, '?');
#endif
	dquot_free_space_nodirty(inode, quota_cut_bytes);
	return ret_value;
}
/*
 * Remove the single directory item (holding "." and "..") of an
 * unlinked directory.  The inode's packing key is temporarily retargeted
 * at the directory item, then restored to point at the stat data so the
 * subsequent stat-data deletion finds the right item.
 */
static void truncate_directory(struct reiserfs_transaction_handle *th,
			       struct inode *inode)
{
	BUG_ON(!th->t_trans_id);
	if (inode->i_nlink)
		reiserfs_error(inode->i_sb, "vs-5655", "link count != 0");

	/* aim the key at the directory entry item and delete it */
	set_le_key_k_offset(KEY_FORMAT_3_5, INODE_PKEY(inode), DOT_OFFSET);
	set_le_key_k_type(KEY_FORMAT_3_5, INODE_PKEY(inode), TYPE_DIRENTRY);
	reiserfs_delete_solid_item(th, inode, INODE_PKEY(inode));
	reiserfs_update_sd(th, inode);
	/* restore the key so it again identifies the stat data */
	set_le_key_k_offset(KEY_FORMAT_3_5, INODE_PKEY(inode), SD_OFFSET);
	set_le_key_k_type(KEY_FORMAT_3_5, INODE_PKEY(inode), TYPE_STAT_DATA);
}
/* Truncate file to the new size. Note, this must be called with a transaction
   already started */
/*
 * Core truncate: repeatedly cuts the last item of the file until its
 * on-disk size reaches inode->i_size.  Politely ends and restarts the
 * journal transaction inside the loop so long truncates cannot overflow
 * a single transaction.  Returns 0 or a negative error.
 */
int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
			 struct inode *inode,	/* ->i_size contains new size */
			 struct page *page,	/* up to date for last block */
			 int update_timestamps	/* when it is called by
						   file_release to convert
						   the tail - no timestamps
						   should be updated */
    )
{
	INITIALIZE_PATH(s_search_path);	/* Path to the current object item. */
	struct item_head *p_le_ih;	/* Pointer to an item header. */
	struct cpu_key s_item_key;	/* Key to search for a previous file item. */
	loff_t file_size,	/* Old file size. */
	 new_file_size;	/* New file size. */
	int deleted;		/* Number of deleted or truncated bytes. */
	int retval;
	int err = 0;

	BUG_ON(!th->t_trans_id);
	/* only regular files, directories and symlinks have tree items */
	if (!
	    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
	     || S_ISLNK(inode->i_mode)))
		return 0;

	if (S_ISDIR(inode->i_mode)) {
		// deletion of directory - no need to update timestamps
		truncate_directory(th, inode);
		return 0;
	}

	/* Get new file size. */
	new_file_size = inode->i_size;

	// FIXME: note, that key type is unimportant here
	make_cpu_key(&s_item_key, inode, max_reiserfs_offset(inode),
		     TYPE_DIRECT, 3);

	retval =
	    search_for_position_by_key(inode->i_sb, &s_item_key,
				       &s_search_path);
	if (retval == IO_ERROR) {
		reiserfs_error(inode->i_sb, "vs-5657",
			       "i/o failure occurred trying to truncate %K",
			       &s_item_key);
		err = -EIO;
		goto out;
	}
	if (retval == POSITION_FOUND || retval == FILE_NOT_FOUND) {
		reiserfs_error(inode->i_sb, "PAP-5660",
			       "wrong result %d of search for %K", retval,
			       &s_item_key);

		err = -EIO;
		goto out;
	}

	/* search used the max offset; step back to the real last item */
	s_search_path.pos_in_item--;

	/* Get real file size (total length of all file items) */
	p_le_ih = PATH_PITEM_HEAD(&s_search_path);
	if (is_statdata_le_ih(p_le_ih))
		file_size = 0;
	else {
		loff_t offset = le_ih_k_offset(p_le_ih);
		int bytes =
		    op_bytes_number(p_le_ih, inode->i_sb->s_blocksize);

		/* this may mismatch with real file size: if last direct item
		   had no padding zeros and last unformatted node had no free
		   space, this file would have this file size */
		file_size = offset + bytes - 1;
	}
	/*
	 * are we doing a full truncate or delete, if so
	 * kick in the reada code
	 */
	if (new_file_size == 0)
		s_search_path.reada = PATH_READA | PATH_READA_BACK;

	if (file_size == 0 || file_size < new_file_size) {
		goto update_and_out;
	}

	/* Update key to search for the last file item. */
	set_cpu_key_k_offset(&s_item_key, file_size);

	do {
		/* Cut or delete file item. */
		deleted =
		    reiserfs_cut_from_item(th, &s_search_path, &s_item_key,
					   inode, page, new_file_size);
		if (deleted < 0) {
			/* NOTE(review): the negative error is swallowed and 0
			   returned — presumably deliberate best-effort; confirm
			   callers cannot use the error before changing this */
			reiserfs_warning(inode->i_sb, "vs-5665",
					 "reiserfs_cut_from_item failed");
			reiserfs_check_path(&s_search_path);
			return 0;
		}

		RFALSE(deleted > file_size,
		       "PAP-5670: reiserfs_cut_from_item: too many bytes deleted: deleted %d, file_size %lu, item_key %K",
		       deleted, file_size, &s_item_key);

		/* Change key to search the last file item. */
		file_size -= deleted;

		set_cpu_key_k_offset(&s_item_key, file_size);

		/* While there are bytes to truncate and previous file item is presented in the tree. */

		/*
		 ** This loop could take a really long time, and could log
		 ** many more blocks than a transaction can hold.  So, we do a polite
		 ** journal end here, and if the transaction needs ending, we make
		 ** sure the file is consistent before ending the current trans
		 ** and starting a new one
		 */
		if (journal_transaction_should_end(th, 0) ||
		    reiserfs_transaction_free_space(th) <= JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD) {
			int orig_len_alloc = th->t_blocks_allocated;
			pathrelse(&s_search_path);

			if (update_timestamps) {
				inode->i_mtime = CURRENT_TIME_SEC;
				inode->i_ctime = CURRENT_TIME_SEC;
			}
			reiserfs_update_sd(th, inode);

			err = journal_end(th, inode->i_sb, orig_len_alloc);
			if (err)
				goto out;
			err = journal_begin(th, inode->i_sb,
					    JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD + JOURNAL_PER_BALANCE_CNT * 4) ;
			if (err)
				goto out;
			reiserfs_update_inode_transaction(inode);
		}
	} while (file_size > ROUND_UP(new_file_size) &&
		 search_for_position_by_key(inode->i_sb, &s_item_key,
					    &s_search_path) == POSITION_FOUND);

	RFALSE(file_size > ROUND_UP(new_file_size),
	       "PAP-5680: truncate did not finish: new_file_size %Ld, current %Ld, oid %d",
	       new_file_size, file_size, s_item_key.on_disk_key.k_objectid);

      update_and_out:
	if (update_timestamps) {
		// this is truncate, not file closing
		inode->i_mtime = CURRENT_TIME_SEC;
		inode->i_ctime = CURRENT_TIME_SEC;
	}
	reiserfs_update_sd(th, inode);

      out:
	pathrelse(&s_search_path);
	return err;
}
#ifdef CONFIG_REISERFS_CHECK
// this makes sure, that we __append__, not overwrite or add holes
/*
 * Debug-only sanity check after a re-search during paste: the item found
 * must end exactly at the paste key's offset, and the paste position must
 * be at the item's end — otherwise the paste would overwrite data or
 * create a hole, and we panic.
 */
static void check_research_for_paste(struct treepath *path,
				     const struct cpu_key *key)
{
	struct item_head *found_ih = get_ih(path);

	if (is_direct_le_ih(found_ih)) {
		if (le_ih_k_offset(found_ih) +
		    op_bytes_number(found_ih,
				    get_last_bh(path)->b_size) !=
		    cpu_key_k_offset(key)
		    || op_bytes_number(found_ih,
				       get_last_bh(path)->b_size) !=
		    pos_in_item(path))
			reiserfs_panic(NULL, "PAP-5720", "found direct item "
				       "%h or position (%d) does not match "
				       "to key %K", found_ih,
				       pos_in_item(path), key);
	}
	if (is_indirect_le_ih(found_ih)) {
		if (le_ih_k_offset(found_ih) +
		    op_bytes_number(found_ih,
				    get_last_bh(path)->b_size) !=
		    cpu_key_k_offset(key)
		    || I_UNFM_NUM(found_ih) != pos_in_item(path)
		    || get_ih_free_space(found_ih) != 0)
			reiserfs_panic(NULL, "PAP-5730", "found indirect "
				       "item (%h) or position (%d) does not "
				       "match to key (%K)",
				       found_ih, pos_in_item(path), key);
	}
}
#endif				/* config reiserfs check */
/* Paste bytes to the existing item. Returns bytes number pasted into the item. */
/*
 * Append @pasted_size bytes from @body to the item located by
 * @search_path/@key.  Quota is charged up front and released again on
 * failure.  fix_nodes() may reschedule; the goto-into-loop re-search
 * handles the tree changing underneath us.  Returns 0 on success or a
 * negative error (-EIO, -EEXIST, -ENOSPC, quota error).
 */
int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct treepath *search_path,	/* Path to the pasted item. */
			     const struct cpu_key *key,	/* Key to search for the needed item. */
			     struct inode *inode,	/* Inode item belongs to */
			     const char *body,	/* Pointer to the bytes to paste. */
			     int pasted_size)
{				/* Size of pasted bytes. */
	struct tree_balance s_paste_balance;
	int retval;
	int fs_gen;

	BUG_ON(!th->t_trans_id);

	/* snapshot the generation so we can tell if dquot_* slept */
	fs_gen = get_generation(inode->i_sb);

#ifdef REISERQUOTA_DEBUG
	reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
		       "reiserquota paste_into_item(): allocating %u id=%u type=%c",
		       pasted_size, inode->i_uid,
		       key2type(&(key->on_disk_key)));
#endif

	/* drop the write lock while quota code may block */
	reiserfs_write_unlock(inode->i_sb);
	retval = dquot_alloc_space_nodirty(inode, pasted_size);
	reiserfs_write_lock(inode->i_sb);
	if (retval) {
		pathrelse(search_path);
		return retval;
	}
	init_tb_struct(th, &s_paste_balance, th->t_super, search_path,
		       pasted_size);
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	s_paste_balance.key = key->on_disk_key;
#endif

	/* DQUOT_* can schedule, must check before the fix_nodes */
	if (fs_changed(fs_gen, inode->i_sb)) {
		goto search_again;
	}

	while ((retval =
		fix_nodes(M_PASTE, &s_paste_balance, NULL,
			  body)) == REPEAT_SEARCH) {
	      search_again:
		/* file system changed while we were in the fix_nodes */
		PROC_INFO_INC(th->t_super, paste_into_item_restarted);
		retval =
		    search_for_position_by_key(th->t_super, key,
					       search_path);
		if (retval == IO_ERROR) {
			retval = -EIO;
			goto error_out;
		}
		if (retval == POSITION_FOUND) {
			reiserfs_warning(inode->i_sb, "PAP-5710",
					 "entry or pasted byte (%K) exists",
					 key);
			retval = -EEXIST;
			goto error_out;
		}
#ifdef CONFIG_REISERFS_CHECK
		check_research_for_paste(search_path, key);
#endif
	}

	/* Perform balancing after all resources are collected by fix_nodes, and
	   accessing them will not risk triggering schedule. */
	if (retval == CARRY_ON) {
		do_balance(&s_paste_balance, NULL /*ih */ , body, M_PASTE);
		return 0;
	}
	retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
      error_out:
	/* this also releases the path */
	unfix_nodes(&s_paste_balance);
#ifdef REISERQUOTA_DEBUG
	reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
		       "reiserquota paste_into_item(): freeing %u id=%u type=%c",
		       pasted_size, inode->i_uid,
		       key2type(&(key->on_disk_key)));
#endif
	/* give the charged quota back on any failure */
	dquot_free_space_nodirty(inode, pasted_size);
	return retval;
}
/* Insert new item into the buffer at the path.
 * th   - active transaction handle
 * path - path to the inserted item
 * ih	- pointer to the item header to insert
 * body - pointer to the bytes to insert
 *
 * If @inode is non-NULL the item length is charged to its quota (a whole
 * block + pointer for direct items of non-symlinks, as a tail heuristic)
 * and released again on failure.  Returns 0 on success or a negative
 * error (-EIO, -EEXIST, -ENOSPC, quota error).
 */
int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
			 struct treepath *path, const struct cpu_key *key,
			 struct item_head *ih, struct inode *inode,
			 const char *body)
{
	struct tree_balance s_ins_balance;
	int retval;
	int fs_gen = 0;
	int quota_bytes = 0;

	BUG_ON(!th->t_trans_id);

	if (inode) {		/* Do we count quotas for item? */
		fs_gen = get_generation(inode->i_sb);
		quota_bytes = ih_item_len(ih);

		/* hack so the quota code doesn't have to guess if the file has
		 ** a tail, links are always tails, so there's no guessing needed
		 */
		if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(ih))
			quota_bytes = inode->i_sb->s_blocksize + UNFM_P_SIZE;
#ifdef REISERQUOTA_DEBUG
		reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
			       "reiserquota insert_item(): allocating %u id=%u type=%c",
			       quota_bytes, inode->i_uid, head2type(ih));
#endif
		reiserfs_write_unlock(inode->i_sb);
		/* We can't dirty inode here. It would be immediately written but
		 * appropriate stat item isn't inserted yet... */
		retval = dquot_alloc_space_nodirty(inode, quota_bytes);
		reiserfs_write_lock(inode->i_sb);
		if (retval) {
			pathrelse(path);
			return retval;
		}
	}
	init_tb_struct(th, &s_ins_balance, th->t_super, path,
		       IH_SIZE + ih_item_len(ih));
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	s_ins_balance.key = key->on_disk_key;
#endif
	/* DQUOT_* can schedule, must check to be sure calling fix_nodes is safe */
	if (inode && fs_changed(fs_gen, inode->i_sb)) {
		goto search_again;
	}

	while ((retval =
		fix_nodes(M_INSERT, &s_ins_balance, ih,
			  body)) == REPEAT_SEARCH) {
	      search_again:
		/* file system changed while we were in the fix_nodes */
		PROC_INFO_INC(th->t_super, insert_item_restarted);
		retval = search_item(th->t_super, key, path);
		if (retval == IO_ERROR) {
			retval = -EIO;
			goto error_out;
		}
		if (retval == ITEM_FOUND) {
			reiserfs_warning(th->t_super, "PAP-5760",
					 "key %K already exists in the tree",
					 key);
			retval = -EEXIST;
			goto error_out;
		}
	}

	/* make balancing after all resources will be collected at a time */
	if (retval == CARRY_ON) {
		do_balance(&s_ins_balance, ih, body, M_INSERT);
		return 0;
	}

	retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
      error_out:
	/* also releases the path */
	unfix_nodes(&s_ins_balance);
#ifdef REISERQUOTA_DEBUG
	/* NOTE(review): this debug print dereferences inode->i_uid even though
	   inode may be NULL on this path — confirm before enabling
	   REISERQUOTA_DEBUG with save-link insertions */
	reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
		       "reiserquota insert_item(): freeing %u id=%u type=%c",
		       quota_bytes, inode->i_uid, head2type(ih));
#endif
	if (inode)
		dquot_free_space_nodirty(inode, quota_bytes);
	return retval;
}
| gpl-2.0 |
FrostedKernel/android_kernel_htc_msm8960 | arch/sh/kernel/kgdb.c | 4408 | 8200 | /*
* SuperH KGDB support
*
* Copyright (C) 2008 - 2009 Paul Mundt
*
* Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
/* Macros for single step instruction identification */
/*
 * SH-4 16-bit opcode matchers and displacement extractors used by
 * get_step_address().  The *_DISP macros sign-extend the 8- or 12-bit
 * branch displacement and shift it left once (displacements are in
 * units of 2-byte instructions).
 */
#define OPCODE_BT(op)		(((op) & 0xff00) == 0x8900)
#define OPCODE_BF(op)		(((op) & 0xff00) == 0x8b00)
#define OPCODE_BTF_DISP(op)	(((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
				 (((op) & 0x7f ) << 1))
#define OPCODE_BFS(op)		(((op) & 0xff00) == 0x8f00)
#define OPCODE_BTS(op)		(((op) & 0xff00) == 0x8d00)
#define OPCODE_BRA(op)		(((op) & 0xf000) == 0xa000)
#define OPCODE_BRA_DISP(op)	(((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
				 (((op) & 0x7ff) << 1))
#define OPCODE_BRAF(op)		(((op) & 0xf0ff) == 0x0023)
#define OPCODE_BRAF_REG(op)	(((op) & 0x0f00) >> 8)
#define OPCODE_BSR(op)		(((op) & 0xf000) == 0xb000)
#define OPCODE_BSR_DISP(op)	(((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
				 (((op) & 0x7ff) << 1))
#define OPCODE_BSRF(op)		(((op) & 0xf0ff) == 0x0003)
#define OPCODE_BSRF_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_JMP(op)		(((op) & 0xf0ff) == 0x402b)
#define OPCODE_JMP_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_JSR(op)		(((op) & 0xf0ff) == 0x400b)
#define OPCODE_JSR_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_RTS(op)		((op) == 0xb)
#define OPCODE_RTE(op)		((op) == 0x2b)

/* T (true/condition) bit in the status register */
#define SR_T_BIT_MASK		0x1
/* trapa #0x3d — opcode planted at the step target */
#define STEP_OPCODE		0xc33d
/* Calculate the new address for after a step */
/*
 * Decode the instruction at the current PC and compute where control
 * will go next: follow taken/not-taken conditional branches, PC-relative
 * branches, register-indirect jumps, and returns; fall through to
 * PC + insn size for everything else.
 */
static short *get_step_address(struct pt_regs *linux_regs)
{
	insn_size_t op = __raw_readw(linux_regs->pc);
	long addr;

	/* BT */
	if (OPCODE_BT(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BTS */
	else if (OPCODE_BTS(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4;	/* Not in delay slot */
	}

	/* BF */
	else if (OPCODE_BF(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BFS */
	else if (OPCODE_BFS(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4;	/* Not in delay slot */
	}

	/* BRA */
	else if (OPCODE_BRA(op))
		addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op);

	/* BRAF */
	else if (OPCODE_BRAF(op))
		addr = linux_regs->pc + 4
		    + linux_regs->regs[OPCODE_BRAF_REG(op)];

	/* BSR */
	else if (OPCODE_BSR(op))
		addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op);

	/* BSRF */
	else if (OPCODE_BSRF(op))
		addr = linux_regs->pc + 4
		    + linux_regs->regs[OPCODE_BSRF_REG(op)];

	/* JMP */
	else if (OPCODE_JMP(op))
		addr = linux_regs->regs[OPCODE_JMP_REG(op)];

	/* JSR */
	else if (OPCODE_JSR(op))
		addr = linux_regs->regs[OPCODE_JSR_REG(op)];

	/* RTS */
	else if (OPCODE_RTS(op))
		addr = linux_regs->pr;

	/* RTE */
	else if (OPCODE_RTE(op))
		addr = linux_regs->regs[15];

	/* Other */
	else
		addr = linux_regs->pc + instruction_size(op);

	flush_icache_range(addr, addr + instruction_size(op));
	return (short *)addr;
}
/*
 * Replace the instruction immediately after the current instruction
 * (i.e. next in the expected flow of control) with a trap instruction,
 * so that returning will cause only a single instruction to be executed.
 * Note that this model is slightly broken for instructions with delay
 * slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the
 * instruction in the delay slot will be executed.
 */

/* saved original opcode/address so undo_single_step() can restore them */
static unsigned long stepped_address;
static insn_size_t stepped_opcode;

static void do_single_step(struct pt_regs *linux_regs)
{
	/* Determine where the target instruction will send us to */
	unsigned short *addr = get_step_address(linux_regs);
	stepped_address = (int)addr;

	/* Replace it */
	stepped_opcode = __raw_readw((long)addr);
	*addr = STEP_OPCODE;

	/* Flush and return */
	flush_icache_range((long)addr, (long)addr +
			   instruction_size(stepped_opcode));
}
/* Undo a single step */
static void undo_single_step(struct pt_regs *linux_regs)
{
	/* If we have stepped, put back the old instruction */
	/* Use stepped_address in case we stopped elsewhere */
	if (stepped_opcode != 0) {
		__raw_writew(stepped_opcode, stepped_address);
		/* re-sync icache over the restored 2-byte opcode */
		flush_icache_range(stepped_address, stepped_address + 2);
	}
	/* always clear, so a stale opcode is never restored twice */
	stepped_opcode = 0;
}
/* Marshal the kernel pt_regs into the flat register array gdb expects. */
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	int i;

	for (i = 0; i < 16; i++)
		gdb_regs[GDB_R0 + i] = regs->regs[i];

	gdb_regs[GDB_PC] = regs->pc;
	gdb_regs[GDB_PR] = regs->pr;
	gdb_regs[GDB_SR] = regs->sr;
	gdb_regs[GDB_GBR] = regs->gbr;
	gdb_regs[GDB_MACH] = regs->mach;
	gdb_regs[GDB_MACL] = regs->macl;

	/* VBR is not in pt_regs; read it straight from the CPU */
	__asm__ __volatile__ ("stc vbr, %0" : "=r" (gdb_regs[GDB_VBR]));
}
/* Write the register values received from gdb back into pt_regs.
 * VBR is intentionally not written back (read-only from gdb's view). */
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	int i;

	for (i = 0; i < 16; i++)
		regs->regs[GDB_R0 + i] = gdb_regs[GDB_R0 + i];

	regs->pc = gdb_regs[GDB_PC];
	regs->pr = gdb_regs[GDB_PR];
	regs->sr = gdb_regs[GDB_SR];
	regs->gbr = gdb_regs[GDB_GBR];
	regs->mach = gdb_regs[GDB_MACH];
	regs->macl = gdb_regs[GDB_MACL];
}
/* For a task not currently on a CPU, only the saved stack pointer and
 * program counter from its thread struct are meaningful to gdb. */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
	gdb_regs[GDB_R15] = p->thread.sp;
	gdb_regs[GDB_PC] = p->thread.pc;
}
/*
 * Handle the gdb remote-protocol packets that need architecture help:
 * 'c' (continue), 's' (step), 'D' (detach) and 'k' (kill).  Returns 0
 * when the packet was consumed and execution should resume, -1 to stay
 * in the debugger.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	/* Undo any stepping we may have done */
	undo_single_step(linux_regs);

	switch (remcomInBuffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->pc = addr;
		/* fallthrough: continue/step share the resume path below */
	case 'D':
	case 'k':
		atomic_set(&kgdb_cpu_doing_single_step, -1);

		if (remcomInBuffer[0] == 's') {
			/* plant the step trap and mark this CPU as stepping */
			do_single_step(linux_regs);
			kgdb_single_step = 1;

			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}

		return 0;
	}

	/* this means that we do not want to exit from the handler: */
	return -1;
}
/*
 * Report the PC to present to gdb for this exception.  For exception
 * vector 60 the PC is rewound by one 2-byte instruction; other vectors
 * report the instruction pointer unchanged.
 */
unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	return (exception == 60) ? pc - 2 : pc;
}
/* Set the resume PC requested by the debugger. */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->pc = ip;
}
/*
 * The primary entry points for the kgdb debug trap table entries.
 */
BUILD_TRAP_HANDLER(singlestep)
{
	unsigned long flags;
	TRAP_HANDLER_DECL;

	local_irq_save(flags);
	/* rewind the PC over the planted trapa so gdb sees the step target */
	regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
	kgdb_handle_exception(0, SIGTRAP, 0, regs);
	local_irq_restore(flags);
}
/*
 * Die-notifier body: hand DIE_BREAKPOINT events to the kgdb core.
 * Returns NOTIFY_DONE to pass the event on (user single-step, or kgdb
 * declined it), NOTIFY_STOP otherwise.
 */
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
	int ret;

	switch (cmd) {
	case DIE_BREAKPOINT:
		/*
		 * This means a user thread is single stepping
		 * a system call which should be ignored
		 */
		if (test_thread_flag(TIF_SINGLESTEP))
			return NOTIFY_DONE;

		ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
					    args->err, args->regs);
		if (ret)
			return NOTIFY_DONE;
		break;
	}

	return NOTIFY_STOP;
}
/*
 * Notifier-chain entry: run the real handler with local interrupts
 * disabled so the debugger cannot be re-entered from an IRQ on this CPU.
 */
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	unsigned long irqflags;
	int rc;

	local_irq_save(irqflags);
	rc = __kgdb_notify(ptr, cmd);
	local_irq_restore(irqflags);

	return rc;
}
/* Die-chain hook through which kgdb receives breakpoint events. */
static struct notifier_block kgdb_notifier = {
	.notifier_call	= kgdb_notify,

	/*
	 * Lowest-prio notifier priority, we want to be notified last:
	 */
	.priority	= -INT_MAX,
};
/* Register with the die-notifier chain when kgdb is activated. */
int kgdb_arch_init(void)
{
	return register_die_notifier(&kgdb_notifier);
}
/* Unhook from the die-notifier chain when kgdb is torn down. */
void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}
/* Architecture hooks consumed by the generic kgdb core. */
struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: trapa #0x3c */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	.gdb_bpt_instr		= { 0x3c, 0xc3 },
#else
	.gdb_bpt_instr		= { 0xc3, 0x3c },
#endif
};
| gpl-2.0 |
Tim1928/DBK-3.4 | drivers/base/devtmpfs.c | 4664 | 9377 | /*
* devtmpfs - kernel-maintained tmpfs-based /dev
*
* Copyright (C) 2009, Kay Sievers <kay.sievers@vrfy.org>
*
* During bootup, before any driver core device is registered,
* devtmpfs, a tmpfs-based filesystem is created. Every driver-core
* device which requests a device node, will add a node in this
* filesystem.
* By default, all devices are named after the the name of the
* device, owned by root and have a default mode of 0600. Subsystems
* can overwrite the default setting if needed.
*/
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/ramfs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kthread.h>
/* kdevtmpfs worker thread; also doubles as an "is devtmpfs up" flag and
 * as the marker value stored in i_private of kernel-created inodes. */
static struct task_struct *thread;

/* auto-mount on /dev at boot, overridable via devtmpfs.mount= */
#if defined CONFIG_DEVTMPFS_MOUNT
static int mount_dev = 1;
#else
static int mount_dev;
#endif

/* protects the singly-linked list of pending requests below */
static DEFINE_SPINLOCK(req_lock);

/* one create/delete request, queued by producers and completed by the
 * worker thread; lives on the requester's stack until done is signalled */
static struct req {
	struct req *next;
	struct completion done;
	int err;
	const char *name;
	umode_t mode;	/* 0 => delete */
	struct device *dev;
} *requests;
/* Parse the "devtmpfs.mount=" kernel command-line override. */
static int __init mount_param(char *str)
{
	mount_dev = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("devtmpfs.mount=", mount_param);
/* Mount callback: back devtmpfs with tmpfs when available, ramfs otherwise. */
static struct dentry *dev_mount(struct file_system_type *fs_type, int flags,
		      const char *dev_name, void *data)
{
#ifdef CONFIG_TMPFS
	return mount_single(fs_type, flags, data, shmem_fill_super);
#else
	return mount_single(fs_type, flags, data, ramfs_fill_super);
#endif
}
/* The "devtmpfs" filesystem type registered with the VFS. */
static struct file_system_type dev_fs_type = {
	.name = "devtmpfs",
	.mount = dev_mount,
	.kill_sb = kill_litter_super,
};
/* Does @dev belong to the block class?  Always false without CONFIG_BLOCK. */
#ifdef CONFIG_BLOCK
static inline int is_blockdev(struct device *dev)
{
	return dev->class == &block_class;
}
#else
static inline int is_blockdev(struct device *dev) { return 0; }
#endif
/*
 * Queue a "create device node" request for @dev to the kdevtmpfs worker
 * and wait for it to finish.  A no-op (returns 0) before the worker
 * thread exists.  Returns the worker's result or -ENOMEM.
 */
int devtmpfs_create_node(struct device *dev)
{
	const char *tmp = NULL;
	struct req req;

	if (!thread)
		return 0;

	req.mode = 0;
	req.name = device_get_devnode(dev, &req.mode, &tmp);
	if (!req.name)
		return -ENOMEM;

	/* default permissions, then tag with the device node type */
	if (req.mode == 0)
		req.mode = 0600;
	if (is_blockdev(dev))
		req.mode |= S_IFBLK;
	else
		req.mode |= S_IFCHR;

	req.dev = dev;

	init_completion(&req.done);

	/* push onto the request list and kick the worker */
	spin_lock(&req_lock);
	req.next = requests;
	requests = &req;
	spin_unlock(&req_lock);

	wake_up_process(thread);
	wait_for_completion(&req.done);

	kfree(tmp);

	return req.err;
}
/*
 * Queue a "delete device node" request for @dev to the kdevtmpfs worker
 * and wait for it to finish.  mode == 0 marks the request as a delete.
 * Mirrors devtmpfs_create_node() above.
 */
int devtmpfs_delete_node(struct device *dev)
{
	const char *tmp = NULL;
	struct req req;

	if (!thread)
		return 0;

	req.name = device_get_devnode(dev, NULL, &tmp);
	if (!req.name)
		return -ENOMEM;

	req.mode = 0;	/* 0 => delete (see struct req) */
	req.dev = dev;

	init_completion(&req.done);

	spin_lock(&req_lock);
	req.next = requests;
	requests = &req;
	spin_unlock(&req_lock);

	wake_up_process(thread);
	wait_for_completion(&req.done);

	kfree(tmp);
	return req.err;
}
/*
 * Create one directory component inside devtmpfs and mark its inode as
 * kernel-created (i_private == &thread) so dev_rmdir() later only
 * removes directories we made ourselves.
 */
static int dev_mkdir(const char *name, umode_t mode)
{
	struct dentry *dentry;
	struct path path;
	int err;

	dentry = kern_path_create(AT_FDCWD, name, &path, 1);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	err = vfs_mkdir(path.dentry->d_inode, dentry, mode);
	if (!err)
		/* mark as kernel-created inode */
		dentry->d_inode->i_private = &thread;
	dput(dentry);

	/* kern_path_create() returned with the parent i_mutex held */
	mutex_unlock(&path.dentry->d_inode->i_mutex);
	path_put(&path);
	return err;
}
/*
 * Create all missing parent directories of @nodepath.
 * Each '/'-separated prefix is created in turn; an already existing
 * directory (-EEXIST) is not an error.
 */
static int create_path(const char *nodepath)
{
    char *path;
    char *slash;
    int err = 0;
    /* parent directories do not exist, create them */
    path = kstrdup(nodepath, GFP_KERNEL);
    if (!path)
        return -ENOMEM;
    for (slash = strchr(path, '/'); slash; slash = strchr(slash + 1, '/')) {
        /* temporarily terminate at this separator to get the prefix */
        *slash = '\0';
        err = dev_mkdir(path, 0755);
        if (err && err != -EEXIST)
            break;
        *slash = '/';
    }
    kfree(path);
    return err;
}
/*
 * Create the device node @nodename of type/permissions @mode for
 * @dev->devt.  Missing parent directories are created on demand.  The
 * requested mode is re-applied via notify_change() because vfs_mknod()
 * applies the caller's umask.
 */
static int handle_create(const char *nodename, umode_t mode, struct device *dev)
{
    struct dentry *dentry;
    struct path path;
    int err;
    dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
    if (dentry == ERR_PTR(-ENOENT)) {
        /* parent directories missing: create them and retry once */
        create_path(nodename);
        dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
    }
    if (IS_ERR(dentry))
        return PTR_ERR(dentry);
    err = vfs_mknod(path.dentry->d_inode,
            dentry, mode, dev->devt);
    if (!err) {
        struct iattr newattrs;
        /* fixup possibly umasked mode */
        newattrs.ia_mode = mode;
        newattrs.ia_valid = ATTR_MODE;
        mutex_lock(&dentry->d_inode->i_mutex);
        notify_change(dentry, &newattrs);
        mutex_unlock(&dentry->d_inode->i_mutex);
        /* mark as kernel-created inode */
        dentry->d_inode->i_private = &thread;
    }
    dput(dentry);
    /* kern_path_create() returned with the parent i_mutex held */
    mutex_unlock(&path.dentry->d_inode->i_mutex);
    path_put(&path);
    return err;
}
/*
 * Remove directory @name, but only if its inode was created by
 * devtmpfs itself (marked with i_private == &thread in dev_mkdir()).
 * Foreign directories yield -EPERM.
 */
static int dev_rmdir(const char *name)
{
    struct nameidata nd;
    struct dentry *dentry;
    int err;
    err = kern_path_parent(name, &nd);
    if (err)
        return err;
    /* lock the parent, then look up the last path component */
    mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
    dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
    if (!IS_ERR(dentry)) {
        if (dentry->d_inode) {
            if (dentry->d_inode->i_private == &thread)
                err = vfs_rmdir(nd.path.dentry->d_inode,
                        dentry);
            else
                err = -EPERM;    /* not ours - leave it alone */
        } else {
            err = -ENOENT;
        }
        dput(dentry);
    } else {
        err = PTR_ERR(dentry);
    }
    mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
    path_put(&nd.path);
    return err;
}
static int delete_path(const char *nodepath)
{
const char *path;
int err = 0;
path = kstrdup(nodepath, GFP_KERNEL);
if (!path)
return -ENOMEM;
for (;;) {
char *base;
base = strrchr(path, '/');
if (!base)
break;
base[0] = '\0';
err = dev_rmdir(path);
if (err)
break;
}
kfree(path);
return err;
}
/*
 * Decide whether the inode at @inode/@stat is the node devtmpfs
 * itself created for @dev: it must carry our kernel-created marker,
 * have the node type matching the device class, and the same dev_t.
 * Returns 1 if it is ours, 0 otherwise.
 */
static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
{
    /* only consider inodes this driver created itself */
    if (inode->i_private != &thread)
        return 0;
    /* node type must match the device class (block vs char) */
    if (is_blockdev(dev) ? !S_ISBLK(stat->mode) : !S_ISCHR(stat->mode))
        return 0;
    /* and the device number must match */
    return stat->rdev == dev->devt;
}
/*
 * Remove the device node @nodename for @dev, but only if devtmpfs
 * created it itself (checked via dev_mynode()).  After a successful
 * unlink, any now-empty parent directories created for the node are
 * cleaned up as well.
 *
 * Fix: @deleted must start at 0.  It was initialized to 1, which made
 * the later "deleted = 1" assignment dead code and caused delete_path()
 * to be attempted even when the node was not ours or was never
 * unlinked (mainline devtmpfs initializes it to 0).
 */
static int handle_remove(const char *nodename, struct device *dev)
{
    struct nameidata nd;
    struct dentry *dentry;
    struct kstat stat;
    int deleted = 0;    /* only clean up parent dirs after a real unlink */
    int err;
    err = kern_path_parent(nodename, &nd);
    if (err)
        return err;
    /* lock the parent, then look up the last path component */
    mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
    dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
    if (!IS_ERR(dentry)) {
        if (dentry->d_inode) {
            err = vfs_getattr(nd.path.mnt, dentry, &stat);
            if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
                struct iattr newattrs;
                /*
                 * before unlinking this node, reset permissions
                 * of possible references like hardlinks
                 */
                newattrs.ia_uid = 0;
                newattrs.ia_gid = 0;
                newattrs.ia_mode = stat.mode & ~0777;
                newattrs.ia_valid =
                    ATTR_UID|ATTR_GID|ATTR_MODE;
                mutex_lock(&dentry->d_inode->i_mutex);
                notify_change(dentry, &newattrs);
                mutex_unlock(&dentry->d_inode->i_mutex);
                err = vfs_unlink(nd.path.dentry->d_inode,
                         dentry);
                /* -ENOENT means someone else already removed it */
                if (!err || err == -ENOENT)
                    deleted = 1;
            }
        } else {
            err = -ENOENT;
        }
        dput(dentry);
    } else {
        err = PTR_ERR(dentry);
    }
    mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
    path_put(&nd.path);
    /* only try to prune parent dirs for nodes we actually removed */
    if (deleted && strchr(nodename, '/'))
        delete_path(nodename);
    return err;
}
/*
* If configured, or requested by the commandline, devtmpfs will be
* auto-mounted after the kernel mounted the root filesystem.
*/
int devtmpfs_mount(const char *mntdir)
{
    int err;
    /* nothing to do when disabled on the command line or never set up */
    if (!mount_dev || !thread)
        return 0;
    err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
    if (err)
        printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
    else
        printk(KERN_INFO "devtmpfs: mounted\n");
    return err;
}
static DECLARE_COMPLETION(setup_done);
/* dispatch one queued request: non-zero mode creates a node, zero removes */
static int handle(const char *name, umode_t mode, struct device *dev)
{
    return mode ? handle_create(name, mode, dev) : handle_remove(name, dev);
}
/*
 * Worker thread: mounts its own private devtmpfs instance (inside a
 * fresh mount namespace, chroot'ed to it) and then serially executes
 * the create/remove requests queued by devtmpfs_create_node() and
 * devtmpfs_delete_node().  Setup success/failure is reported back to
 * devtmpfs_init() through *p and the setup_done completion.
 */
static int devtmpfsd(void *p)
{
    char options[] = "mode=0755";
    int *err = p;
    /* private mount namespace: our mount stays invisible to userspace */
    *err = sys_unshare(CLONE_NEWNS);
    if (*err)
        goto out;
    *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
    if (*err)
        goto out;
    sys_chdir("/.."); /* will traverse into overmounted root */
    sys_chroot(".");
    /* unblock devtmpfs_init(): setup succeeded (*err == 0) */
    complete(&setup_done);
    while (1) {
        spin_lock(&req_lock);
        while (requests) {
            /* detach the whole pending list, process it unlocked */
            struct req *req = requests;
            requests = NULL;
            spin_unlock(&req_lock);
            while (req) {
                struct req *next = req->next;
                req->err = handle(req->name, req->mode, req->dev);
                complete(&req->done);    /* wake the submitter */
                req = next;
            }
            spin_lock(&req_lock);
        }
        /* set state while still holding req_lock so a submitter that
         * enqueues now will see us sleeping and wake us (no lost wakeup) */
        __set_current_state(TASK_INTERRUPTIBLE);
        spin_unlock(&req_lock);
        schedule();
    }
    return 0;
out:
    /* setup failed: unblock devtmpfs_init(), *err holds the cause */
    complete(&setup_done);
    return *err;
}
/*
 * Create devtmpfs instance, driver-core devices will add their device
 * nodes here.
 *
 * Registers the filesystem type, starts the kdevtmpfs worker thread
 * and waits until the thread reports its setup result (via &err and
 * the setup_done completion).  On any failure the filesystem type is
 * unregistered again.
 */
int __init devtmpfs_init(void)
{
    int err = register_filesystem(&dev_fs_type);
    if (err) {
        printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
               "type %i\n", err);
        return err;
    }
    /* the thread writes its setup result through &err before
     * completing setup_done */
    thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
    if (!IS_ERR(thread)) {
        wait_for_completion(&setup_done);
    } else {
        err = PTR_ERR(thread);
        thread = NULL;
    }
    if (err) {
        printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
        unregister_filesystem(&dev_fs_type);
        return err;
    }
    printk(KERN_INFO "devtmpfs: initialized\n");
    return 0;
}
| gpl-2.0 |
Trustonic/kernel-goldfish | drivers/net/wireless/ath/ath5k/pcu.c | 4920 | 28524 | /*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
* Copyright (c) 2007-2008 Matthew W. S. Bell <mentor@madwifi.org>
* Copyright (c) 2007-2008 Luis Rodriguez <mcgrof@winlab.rutgers.edu>
* Copyright (c) 2007-2008 Pavel Roskin <proski@gnu.org>
* Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/*********************************\
* Protocol Control Unit Functions *
\*********************************/
#include <asm/unaligned.h>
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/**
* DOC: Protocol Control Unit (PCU) functions
*
* Protocol control unit is responsible to maintain various protocol
* properties before a frame is send and after a frame is received to/from
* baseband. To be more specific, PCU handles:
*
* - Buffering of RX and TX frames (after QCU/DCUs)
*
* - Encrypting and decrypting (using the built-in engine)
*
* - Generating ACKs, RTS/CTS frames
*
* - Maintaining TSF
*
* - FCS
*
* - Updating beacon data (with TSF etc)
*
* - Generating virtual CCA
*
* - RX/Multicast filtering
*
* - BSSID filtering
*
* - Various statistics
*
* -Different operating modes: AP, STA, IBSS
*
* Note: Most of these functions can be tweaked/bypassed so you can do
* them on sw above for debugging or research. For more infos check out PCU
* registers on reg.h.
*/
/**
* DOC: ACK rates
*
* AR5212+ can use higher rates for ack transmission
* based on current tx rate instead of the base rate.
* It does this to better utilize channel usage.
* There is a mapping between G rates (that cover both
* CCK and OFDM) and ack rates that we use when setting
* rate -> duration table. This mapping is hw-based so
* don't change anything.
*
* To enable this functionality we must set
* ah->ah_ack_bitrate_high to true else base rate is
* used (1Mb for CCK, 6Mb for OFDM).
*/
static const unsigned int ack_rates_high[] =
/* Tx -> ACK */
/* 1Mb -> 1Mb */ { 0,
/* 2MB -> 2Mb */ 1,
/* 5.5Mb -> 2Mb */ 1,
/* 11Mb -> 2Mb */ 1,
/* 6Mb -> 6Mb */ 4,
/* 9Mb -> 6Mb */ 4,
/* 12Mb -> 12Mb */ 6,
/* 18Mb -> 12Mb */ 6,
/* 24Mb -> 24Mb */ 8,
/* 36Mb -> 24Mb */ 8,
/* 48Mb -> 24Mb */ 8,
/* 54Mb -> 24Mb */ 8 };
/*******************\
* Helper functions *
\*******************/
/**
* ath5k_hw_get_frame_duration() - Get tx time of a frame
* @ah: The &struct ath5k_hw
* @len: Frame's length in bytes
* @rate: The @struct ieee80211_rate
* @shortpre: Indicate short preample
*
* Calculate tx duration of a frame given it's rate and length
* It extends ieee80211_generic_frame_duration for non standard
* bwmodes.
*/
int
ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
        int len, struct ieee80211_rate *rate, bool shortpre)
{
    int sifs, preamble, plcp_bits, sym_time;
    int bitrate, bits, symbols, symbol_bits;
    int dur;
    /* Fallback: for the default bwmode let mac80211 do the math */
    if (!ah->ah_bwmode) {
        __le16 raw_dur = ieee80211_generic_frame_duration(ah->hw,
                    NULL, len, rate);
        /* subtract difference between long and short preamble */
        dur = le16_to_cpu(raw_dur);
        if (shortpre)
            dur -= 96;
        return dur;
    }
    bitrate = rate->bitrate;
    /* NOTE(review): "PREAMPLE" spelling matches this tree's header
     * constant - do not "fix" without checking ath5k.h */
    preamble = AR5K_INIT_OFDM_PREAMPLE_TIME;
    plcp_bits = AR5K_INIT_OFDM_PLCP_BITS;
    sym_time = AR5K_INIT_OFDM_SYMBOL_TIME;
    /* scale the OFDM timing parameters for non-default bandwidths */
    switch (ah->ah_bwmode) {
    case AR5K_BWMODE_40MHZ:
        sifs = AR5K_INIT_SIFS_TURBO;
        preamble = AR5K_INIT_OFDM_PREAMBLE_TIME_MIN;
        break;
    case AR5K_BWMODE_10MHZ:
        sifs = AR5K_INIT_SIFS_HALF_RATE;
        preamble *= 2;
        sym_time *= 2;
        break;
    case AR5K_BWMODE_5MHZ:
        sifs = AR5K_INIT_SIFS_QUARTER_RATE;
        preamble *= 4;
        sym_time *= 4;
        break;
    default:
        sifs = AR5K_INIT_SIFS_DEFAULT_BG;
        break;
    }
    /* payload bits incl. PLCP header (len is in bytes) */
    bits = plcp_bits + (len << 3);
    /* Bit rate is in 100Kbits */
    symbol_bits = bitrate * sym_time;
    symbols = DIV_ROUND_UP(bits * 10, symbol_bits);
    dur = sifs + preamble + (sym_time * symbols);
    return dur;
}
/**
* ath5k_hw_get_default_slottime() - Get the default slot time for current mode
* @ah: The &struct ath5k_hw
*/
unsigned int
ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
{
    struct ieee80211_channel *channel = ah->ah_current_channel;
    unsigned int slot_time;
    /* non-default bandwidths have their own fixed slot times */
    if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
        slot_time = AR5K_INIT_SLOT_TIME_TURBO;
    else if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
        slot_time = AR5K_INIT_SLOT_TIME_HALF_RATE;
    else if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
        slot_time = AR5K_INIT_SLOT_TIME_QUARTER_RATE;
    /* 802.11b without short-slot support uses the long slot time */
    else if (channel->hw_value == AR5K_MODE_11B && !ah->ah_short_slot)
        slot_time = AR5K_INIT_SLOT_TIME_B;
    else
        slot_time = AR5K_INIT_SLOT_TIME_DEFAULT;
    return slot_time;
}
/**
* ath5k_hw_get_default_sifs() - Get the default SIFS for current mode
* @ah: The &struct ath5k_hw
*/
unsigned int
ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
{
    struct ieee80211_channel *channel = ah->ah_current_channel;
    unsigned int sifs;
    switch (ah->ah_bwmode) {
    case AR5K_BWMODE_40MHZ:
        sifs = AR5K_INIT_SIFS_TURBO;
        break;
    case AR5K_BWMODE_10MHZ:
        sifs = AR5K_INIT_SIFS_HALF_RATE;
        break;
    case AR5K_BWMODE_5MHZ:
        sifs = AR5K_INIT_SIFS_QUARTER_RATE;
        break;
    case AR5K_BWMODE_DEFAULT:
        sifs = AR5K_INIT_SIFS_DEFAULT_BG;
        /* fall through - 5GHz channels override the B/G default below */
    default:
        if (channel->band == IEEE80211_BAND_5GHZ)
            sifs = AR5K_INIT_SIFS_DEFAULT_A;
        break;
    }
    return sifs;
}
/**
* ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics)
* @ah: The &struct ath5k_hw
*
* Reads MIB counters from PCU and updates sw statistics. Is called after a
* MIB interrupt, because one of these counters might have reached their maximum
* and triggered the MIB interrupt, to let us read and clear the counter.
*
* NOTE: Is called in interrupt context!
*/
void
ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
{
    struct ath5k_statistics *st = &ah->stats;
    /* these PCU counters are read-and-clear, so fold each read
     * into the running software totals */
    st->ack_fail  += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
    st->rts_fail  += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
    st->rts_ok    += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
    st->fcs_error += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
    st->beacons   += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
}
/******************\
* ACK/CTS Timeouts *
\******************/
/**
* ath5k_hw_write_rate_duration() - Fill rate code to duration table
* @ah: The &struct ath5k_hw
*
* Write the rate code to duration table upon hw reset. This is a helper for
* ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on
* the hardware, based on current mode, for each rate. The rates which are
* capable of short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have
* different rate code so we write their value twice (one for long preamble
* and one for short).
*
* Note: Band doesn't matter here, if we set the values for OFDM it works
* on both a and g modes. So all we have to do is set values for all g rates
* that include all OFDM and CCK rates.
*
*/
static inline void
ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
{
    struct ieee80211_rate *rate;
    unsigned int i;
    /* 802.11g covers both OFDM and CCK */
    u8 band = IEEE80211_BAND_2GHZ;
    /* Write rate duration table */
    for (i = 0; i < ah->sbands[band].n_bitrates; i++) {
        u32 reg;
        u16 tx_time;
        /* choose the ACK rate for tx rate i (see ack_rates_high map) */
        if (ah->ah_ack_bitrate_high)
            rate = &ah->sbands[band].bitrates[ack_rates_high[i]];
        /* CCK -> 1Mb */
        else if (i < 4)
            rate = &ah->sbands[band].bitrates[0];
        /* OFDM -> 6Mb */
        else
            rate = &ah->sbands[band].bitrates[4];
        /* Set ACK timeout */
        reg = AR5K_RATE_DUR(rate->hw_value);
        /* An ACK frame consists of 10 bytes. If you add the FCS,
         * which ieee80211_generic_frame_duration() adds,
         * its 14 bytes. Note we use the control rate and not the
         * actual rate for this rate. See mac80211 tx.c
         * ieee80211_duration() for a brief description of
         * what rate we should choose to TX ACKs. */
        tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false);
        ath5k_hw_reg_write(ah, tx_time, reg);
        /* rates without a short-preamble variant are done */
        if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
            continue;
        /* short-preamble duration lives at the shifted register offset */
        tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, true);
        ath5k_hw_reg_write(ah, tx_time,
            reg + (AR5K_SET_SHORT_PREAMBLE << 2));
    }
}
/**
* ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
static int
ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
    /* largest timeout the ACK field of AR5K_TIME_OUT can hold */
    unsigned int max_timeout =
        ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK));
    if (timeout >= max_timeout)
        return -EINVAL;
    AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
        ath5k_hw_htoclock(ah, timeout));
    return 0;
}
/**
* ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
static int
ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
    /* largest timeout the CTS field of AR5K_TIME_OUT can hold */
    unsigned int max_timeout =
        ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS));
    if (timeout >= max_timeout)
        return -EINVAL;
    AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
        ath5k_hw_htoclock(ah, timeout));
    return 0;
}
/*******************\
* RX filter Control *
\*******************/
/**
* ath5k_hw_set_lladdr() - Set station id
* @ah: The &struct ath5k_hw
* @mac: The card's mac address (array of octets)
*
* Set station id on hw using the provided mac address
*/
int
ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
{
    struct ath_common *common = ath5k_hw_common(ah);
    u32 sta_id1_flags;
    /* remember the new station address in the common state */
    memcpy(common->macaddr, mac, ETH_ALEN);
    /* keep the upper (flag) half of STA_ID1 intact */
    sta_id1_flags = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
    /* low 4 octets go to STA_ID0, high 2 octets to STA_ID1 */
    ath5k_hw_reg_write(ah, get_unaligned_le32(mac), AR5K_STA_ID0);
    ath5k_hw_reg_write(ah, sta_id1_flags | get_unaligned_le16(mac + 4),
               AR5K_STA_ID1);
    return 0;
}
/**
* ath5k_hw_set_bssid() - Set current BSSID on hw
* @ah: The &struct ath5k_hw
*
* Sets the current BSSID and BSSID mask we have from the
* common struct into the hardware
*/
void
ath5k_hw_set_bssid(struct ath5k_hw *ah)
{
    struct ath_common *common = ath5k_hw_common(ah);
    u16 tim_offset = 0;
    /*
     * Set BSSID mask on 5212
     */
    if (ah->ah_version == AR5K_AR5212)
        ath_hw_setbssidmask(common);
    /*
     * Set BSSID (low 4 octets in BSS_ID0, high 2 octets plus the
     * association ID in BSS_ID1)
     */
    ath5k_hw_reg_write(ah,
            get_unaligned_le32(common->curbssid),
            AR5K_BSS_ID0);
    ath5k_hw_reg_write(ah,
            get_unaligned_le16(common->curbssid + 4) |
            ((common->curaid & 0x3fff) << AR5K_BSS_ID1_AID_S),
            AR5K_BSS_ID1);
    if (common->curaid == 0) {
        /* not associated (no AID yet): PS-Poll can't be used */
        ath5k_hw_disable_pspoll(ah);
        return;
    }
    /* NOTE(review): tim_offset is always 0 here, so this always clears
     * the TIM field - confirm whether a real offset should be wired in */
    AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
            tim_offset ? tim_offset + 4 : 0);
    ath5k_hw_enable_pspoll(ah, NULL, 0);
}
/**
* ath5k_hw_set_bssid_mask() - Filter out bssids we listen
* @ah: The &struct ath5k_hw
* @mask: The BSSID mask to set (array of octets)
*
* BSSID masking is a method used by AR5212 and newer hardware to inform PCU
* which bits of the interface's MAC address should be looked at when trying
* to decide which packets to ACK. In station mode and AP mode with a single
* BSS every bit matters since we lock to only one BSS. In AP mode with
* multiple BSSes (virtual interfaces) not every bit matters because hw must
* accept frames for all BSSes and so we tweak some bits of our mac address
* in order to have multiple BSSes.
*
* For more information check out ../hw.c of the common ath module.
*/
void
ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
{
    struct ath_common *common = ath5k_hw_common(ah);
    /* Cache bssid mask so that we can restore it on reset */
    memcpy(common->bssidmask, mask, ETH_ALEN);
    /* only AR5212 and newer support BSSID masking in hardware */
    if (ah->ah_version != AR5K_AR5212)
        return;
    ath_hw_setbssidmask(common);
}
/**
* ath5k_hw_set_mcast_filter() - Set multicast filter
* @ah: The &struct ath5k_hw
* @filter0: Lower 32bits of muticast filter
* @filter1: Higher 16bits of multicast filter
*/
void
ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
{
    /* lower 32 bits of the multicast filter */
    ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
    /* upper bits of the multicast filter */
    ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
/**
* ath5k_hw_get_rx_filter() - Get current rx filter
* @ah: The &struct ath5k_hw
*
* Returns the RX filter by reading rx filter and
* phy error filter registers. RX filter is used
* to set the allowed frame types that PCU will accept
* and pass to the driver. For a list of frame types
* check out reg.h.
*/
u32
ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
{
    u32 filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER);
    /* On 5212 the radar and OFDM/CCK PHY-error settings live in the
     * separate PHY error filter register; fold them into the mask. */
    if (ah->ah_version == AR5K_AR5212) {
        u32 phy_err = ath5k_hw_reg_read(ah, AR5K_PHY_ERR_FIL);
        if (phy_err & AR5K_PHY_ERR_FIL_RADAR)
            filter |= AR5K_RX_FILTER_RADARERR;
        if (phy_err & (AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK))
            filter |= AR5K_RX_FILTER_PHYERR;
    }
    return filter;
}
/**
* ath5k_hw_set_rx_filter() - Set rx filter
* @ah: The &struct ath5k_hw
* @filter: RX filter mask (see reg.h)
*
* Sets RX filter register and also handles PHY error filter
* register on 5212 and newer chips so that we have proper PHY
* error reporting.
*/
void
ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
{
    u32 data = 0;
    /* Set PHY error filter register on 5212*/
    if (ah->ah_version == AR5K_AR5212) {
        if (filter & AR5K_RX_FILTER_RADARERR)
            data |= AR5K_PHY_ERR_FIL_RADAR;
        if (filter & AR5K_RX_FILTER_PHYERR)
            data |= AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK;
    }
    /*
     * The AR5210 uses promiscuous mode to detect radar activity
     */
    if (ah->ah_version == AR5K_AR5210 &&
            (filter & AR5K_RX_FILTER_RADARERR)) {
        filter &= ~AR5K_RX_FILTER_RADARERR;
        filter |= AR5K_RX_FILTER_PROM;
    }
    /*Zero length DMA (phy error reporting) */
    if (data)
        AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
    else
        AR5K_REG_DISABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
    /*Write RX Filter register - only the low byte goes to hardware */
    ath5k_hw_reg_write(ah, filter & 0xff, AR5K_RX_FILTER);
    /*Write PHY error filter register on 5212*/
    if (ah->ah_version == AR5K_AR5212)
        ath5k_hw_reg_write(ah, data, AR5K_PHY_ERR_FIL);
}
/****************\
* Beacon control *
\****************/
#define ATH5K_MAX_TSF_READ 10
/**
* ath5k_hw_get_tsf64() - Get the full 64bit TSF
* @ah: The &struct ath5k_hw
*
* Returns the current TSF
*/
u64
ath5k_hw_get_tsf64(struct ath5k_hw *ah)
{
    u32 tsf_lower, tsf_upper1, tsf_upper2;
    int i;
    unsigned long flags;
    /* This code is time critical - we don't want to be interrupted here */
    local_irq_save(flags);
    /*
     * While reading TSF upper and then lower part, the clock is still
     * counting (or jumping in case of IBSS merge) so we might get
     * inconsistent values. To avoid this, we read the upper part again
     * and check it has not been changed. We make the hypothesis that a
     * maximum of 3 changes can happens in a row (we use 10 as a safe
     * value).
     *
     * Impact on performance is pretty small, since in most cases, only
     * 3 register reads are needed.
     */
    tsf_upper1 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
    for (i = 0; i < ATH5K_MAX_TSF_READ; i++) {
        tsf_lower = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
        tsf_upper2 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
        if (tsf_upper2 == tsf_upper1)
            break;    /* upper half stable: lower read is consistent */
        tsf_upper1 = tsf_upper2;
    }
    local_irq_restore(flags);
    /* i reaching the limit means we never got a stable read */
    WARN_ON(i == ATH5K_MAX_TSF_READ);
    return ((u64)tsf_upper1 << 32) | tsf_lower;
}
#undef ATH5K_MAX_TSF_READ
/**
* ath5k_hw_set_tsf64() - Set a new 64bit TSF
* @ah: The &struct ath5k_hw
* @tsf64: The new 64bit TSF
*
* Sets the new TSF
*/
void
ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
{
    /* split the 64 bit value into the two 32 bit TSF registers */
    u32 lower = (u32)(tsf64 & 0xffffffff);
    u32 upper = (u32)((tsf64 >> 32) & 0xffffffff);
    ath5k_hw_reg_write(ah, lower, AR5K_TSF_L32);
    ath5k_hw_reg_write(ah, upper, AR5K_TSF_U32);
}
/**
* ath5k_hw_reset_tsf() - Force a TSF reset
* @ah: The &struct ath5k_hw
*
* Forces a TSF reset on PCU
*/
void
ath5k_hw_reset_tsf(struct ath5k_hw *ah)
{
    u32 val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF;
    int i;
    /*
     * Each write to the RESET_TSF bit toggles a hardware internal
     * signal to reset TSF, but if left high it will cause a TSF reset
     * on the next chip reset as well. Thus we always write the value
     * twice to clear the signal.
     */
    for (i = 0; i < 2; i++)
        ath5k_hw_reg_write(ah, val, AR5K_BEACON);
}
/**
* ath5k_hw_init_beacon_timers() - Initialize beacon timers
* @ah: The &struct ath5k_hw
* @next_beacon: Next TBTT
* @interval: Current beacon interval
*
* This function is used to initialize beacon timers based on current
* operation mode and settings.
*/
void
ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
{
    u32 timer1, timer2, timer3;
    /*
     * Set the additional timers by mode
     */
    switch (ah->opmode) {
    case NL80211_IFTYPE_MONITOR:
    case NL80211_IFTYPE_STATION:
        /* In STA mode timer1 is used as next wakeup
         * timer and timer2 as next CFP duration start
         * timer. Both in 1/8TUs. */
        /* TODO: PCF handling */
        if (ah->ah_version == AR5K_AR5210) {
            timer1 = 0xffffffff;
            timer2 = 0xffffffff;
        } else {
            timer1 = 0x0000ffff;
            timer2 = 0x0007ffff;
        }
        /* Mark associated AP as PCF incapable for now */
        AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PCF);
        break;
    case NL80211_IFTYPE_ADHOC:
        AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM);
        /* fall through - IBSS also uses the DBA/SWBA timers below */
    default:
        /* On non-STA modes timer1 is used as next DMA
         * beacon alert (DBA) timer and timer2 as next
         * software beacon alert. Both in 1/8TUs. */
        timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3;
        timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3;
        break;
    }
    /* Timer3 marks the end of our ATIM window
     * a zero length window is not allowed because
     * we 'll get no beacons */
    timer3 = next_beacon + 1;
    /*
     * Set the beacon register and enable all timers.
     */
    /* When in AP or Mesh Point mode zero timer0 to start TSF */
    if (ah->opmode == NL80211_IFTYPE_AP ||
        ah->opmode == NL80211_IFTYPE_MESH_POINT)
        ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
    ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
    ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1);
    ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2);
    ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3);
    /* Force a TSF reset if requested and enable beacons */
    if (interval & AR5K_BEACON_RESET_TSF)
        ath5k_hw_reset_tsf(ah);
    ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD |
                    AR5K_BEACON_ENABLE),
            AR5K_BEACON);
    /* Flush any pending BMISS interrupts on ISR by
     * performing a clear-on-write operation on PISR
     * register for the BMISS bit (writing a bit on
     * ISR toggles a reset for that bit and leaves
     * the remaining bits intact) */
    if (ah->ah_version == AR5K_AR5210)
        ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_ISR);
    else
        ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_PISR);
    /* TODO: Set enhanced sleep registers on AR5212
     * based on vif->bss_conf params, until then
     * disable power save reporting.*/
    AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PWR_SV);
}
/**
* ath5k_check_timer_win() - Check if timer B is timer A + window
* @a: timer a (before b)
* @b: timer b (after a)
* @window: difference between a and b
* @intval: timers are increased by this interval
*
* This helper function checks if timer B is timer A + window and covers
* cases where timer A or B might have already been updated or wrapped
* around (Timers are 16 bit).
*
* Returns true if O.K.
*/
static inline bool
ath5k_check_timer_win(int a, int b, int window, int intval)
{
    /* 1.) usually B should be A + window */
    if (b - a == window)
        return true;
    /* 2.) A already updated, B not updated yet */
    if (a - b == intval - window)
        return true;
    /* 3.) A already updated and has wrapped around (16 bit timers) */
    if ((a | 0x10000) - b == intval - window)
        return true;
    /* 4.) B has wrapped around */
    if ((b | 0x10000) - a == window)
        return true;
    return false;
}
/**
* ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct
* @ah: The &struct ath5k_hw
* @intval: beacon interval
*
* This is a workaround for IBSS mode
*
* The need for this function arises from the fact that we have 4 separate
* HW timer registers (TIMER0 - TIMER3), which are closely related to the
* next beacon target time (NBTT), and that the HW updates these timers
* separately based on the current TSF value. The hardware increments each
* timer by the beacon interval, when the local TSF converted to TU is equal
* to the value stored in the timer.
*
* The reception of a beacon with the same BSSID can update the local HW TSF
* at any time - this is something we can't avoid. If the TSF jumps to a
* time which is later than the time stored in a timer, this timer will not
* be updated until the TSF in TU wraps around at 16 bit (the size of the
* timers) and reaches the time which is stored in the timer.
*
* The problem is that these timers are closely related to TIMER0 (NBTT) and
* that they define a time "window". When the TSF jumps between two timers
* (e.g. ATIM and NBTT), the one in the past will be left behind (not
* updated), while the one in the future will be updated every beacon
* interval. This causes the window to get larger, until the TSF wraps
* around as described above and the timer which was left behind gets
* updated again. But - because the beacon interval is usually not an exact
* divisor of the size of the timers (16 bit), an unwanted "window" between
* these timers has developed!
*
* This is especially important with the ATIM window, because during
* the ATIM window only ATIM frames and no data frames are allowed to be
* sent, which creates transmission pauses after each beacon. This symptom
* has been described as "ramping ping" because ping times increase linearly
* for some time and then drop down again. A wrong window on the DMA beacon
* timer has the same effect, so we check for these two conditions.
*
* Returns true if O.K.
*/
bool
ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
{
    unsigned int nbtt = ath5k_hw_reg_read(ah, AR5K_TIMER0);
    unsigned int atim = ath5k_hw_reg_read(ah, AR5K_TIMER3);
    unsigned int dma = ath5k_hw_reg_read(ah, AR5K_TIMER1) >> 3;
    /* NOTE: SWBA is different. Having a wrong window there does not
     * stop us from sending data and this condition is caught by
     * other means (SWBA interrupt) */
    return ath5k_check_timer_win(nbtt, atim, 1, intval) &&
           ath5k_check_timer_win(dma, nbtt, AR5K_TUNE_DMA_BEACON_RESP,
                     intval);
}
/**
* ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class
* @ah: The &struct ath5k_hw
* @coverage_class: IEEE 802.11 coverage class number
*
* Sets IFS intervals and ACK/CTS timeouts for given coverage class.
*/
void
ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
{
    /* As defined by IEEE 802.11-2007 17.3.8.6 */
    int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
    int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time;
    ath5k_hw_set_ifs_intervals(ah, slot_time);
    ath5k_hw_set_ack_timeout(ah, ack_timeout);
    /* the CTS timeout is identical to the ACK timeout */
    ath5k_hw_set_cts_timeout(ah, ack_timeout);
    ah->ah_coverage_class = coverage_class;
}
/***************************\
* Init/Start/Stop functions *
\***************************/
/**
* ath5k_hw_start_rx_pcu() - Start RX engine
* @ah: The &struct ath5k_hw
*
* Starts RX engine on PCU so that hw can process RXed frames
* (ACK etc).
*
* NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
*/
void
ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
    /* clearing the RX-disable diagnostic bit lets the PCU receive */
    AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
 * ath5k_hw_stop_rx_pcu() - Stop RX engine
* @ah: The &struct ath5k_hw
*
* Stops RX engine on PCU
*/
void
ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
{
    /* setting the RX-disable diagnostic bit stops PCU reception */
    AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
* ath5k_hw_set_opmode() - Set PCU operating mode
* @ah: The &struct ath5k_hw
* @op_mode: One of enum nl80211_iftype
*
* Configure PCU for the various operating modes (AP/STA etc)
*/
int
ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
    struct ath_common *common = ath5k_hw_common(ah);
    u32 pcu_reg, beacon_reg, low_id, high_id;
    ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
    /* Preserve rest settings */
    pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
    /* clear the mode-specific bits we are about to re-set below */
    pcu_reg &= ~(AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_AP
            | AR5K_STA_ID1_KEYSRCH_MODE
            | (ah->ah_version == AR5K_AR5210 ?
            (AR5K_STA_ID1_PWR_SV | AR5K_STA_ID1_NO_PSPOLL) : 0));
    beacon_reg = 0;
    switch (op_mode) {
    case NL80211_IFTYPE_ADHOC:
        pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
        beacon_reg |= AR5K_BCR_ADHOC;
        if (ah->ah_version == AR5K_AR5210)
            pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
        else
            AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
        break;
    case NL80211_IFTYPE_AP:
    case NL80211_IFTYPE_MESH_POINT:
        pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE;
        beacon_reg |= AR5K_BCR_AP;
        if (ah->ah_version == AR5K_AR5210)
            pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
        else
            AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
        break;
    case NL80211_IFTYPE_STATION:
        pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
            | (ah->ah_version == AR5K_AR5210 ?
                AR5K_STA_ID1_PWR_SV : 0);
        /* fall through - STATION also gets the MONITOR bits below */
    case NL80211_IFTYPE_MONITOR:
        pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
            | (ah->ah_version == AR5K_AR5210 ?
                AR5K_STA_ID1_NO_PSPOLL : 0);
        break;
    default:
        return -EINVAL;
    }
    /*
     * Set PCU registers
     */
    low_id = get_unaligned_le32(common->macaddr);
    high_id = get_unaligned_le16(common->macaddr + 4);
    ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
    ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
    /*
     * Set Beacon Control Register on 5210
     */
    if (ah->ah_version == AR5K_AR5210)
        ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR);
    return 0;
}
/**
* ath5k_hw_pcu_init() - Initialize PCU
* @ah: The &struct ath5k_hw
* @op_mode: One of enum nl80211_iftype
* @mode: One of enum ath5k_driver_mode
*
* This function is used to initialize PCU by setting current
* operation mode and various other settings.
*/
void
ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
    /* Set bssid and bssid mask */
    ath5k_hw_set_bssid(ah);
    /* Set PCU config */
    ath5k_hw_set_opmode(ah, op_mode);
    /* Write rate duration table only on AR5212 and if
     * virtual interface has already been brought up
     * XXX: rethink this after new mode changes to
     * mac80211 are integrated */
    if (ah->ah_version == AR5K_AR5212 &&
        ah->nvifs)
        ath5k_hw_write_rate_duration(ah);
    /* Set RSSI/BRSSI thresholds
     *
     * Note: If we decide to set this value
     * dynamically, have in mind that when AR5K_RSSI_THR
     * register is read it might return 0x40 if we haven't
     * wrote anything to it plus BMISS RSSI threshold is zeroed.
     * So doing a save/restore procedure here isn't the right
     * choice. Instead store it on ath5k_hw */
    ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES |
                AR5K_TUNE_BMISS_THRES <<
                AR5K_RSSI_THR_BMISS_S),
                AR5K_RSSI_THR);
    /* MIC QoS support */
    if (ah->ah_mac_srev >= AR5K_SREV_AR2413) {
        ath5k_hw_reg_write(ah, 0x000100aa, AR5K_MIC_QOS_CTL);
        ath5k_hw_reg_write(ah, 0x00003210, AR5K_MIC_QOS_SEL);
    }
    /* QoS NOACK Policy */
    if (ah->ah_version == AR5K_AR5212) {
        ath5k_hw_reg_write(ah,
            AR5K_REG_SM(2, AR5K_QOS_NOACK_2BIT_VALUES) |
            AR5K_REG_SM(5, AR5K_QOS_NOACK_BIT_OFFSET) |
            AR5K_REG_SM(0, AR5K_QOS_NOACK_BYTE_OFFSET),
            AR5K_QOS_NOACK);
    }
    /* Restore slot time and ACK timeouts */
    if (ah->ah_coverage_class > 0)
        ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
    /* Set ACK bitrate mode (see ack_rates_high) - clearing these
     * bits enables the high ACK-rate mapping, setting them forces
     * the low base rates */
    if (ah->ah_version == AR5K_AR5212) {
        u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
        if (ah->ah_ack_bitrate_high)
            AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
        else
            AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
    }
    return;
}
| gpl-2.0 |
GuneetAtwal/kernel_n9005 | sound/isa/gus/gus_volume.c | 10040 | 5656 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/time.h>
#include <linux/export.h>
#include <sound/core.h>
#include <sound/gus.h>
#define __GUS_TABLES_ALLOC__
#include "gus_tables.h"
EXPORT_SYMBOL(snd_gf1_atten_table); /* for snd-gus-synth module */
/*
 * Convert a 16-bit linear volume (0-65535) into the GF1 raw volume
 * format: a 4-bit exponent in bits 8-15 and an 8-bit mantissa in
 * bits 0-7 (roughly a floating point value with an implied leading one).
 */
unsigned short snd_gf1_lvol_to_gvol_raw(unsigned int vol)
{
	unsigned short exp, mant, work;

	if (vol > 65535)
		vol = 65535;	/* clamp to the 16-bit input range */
	work = vol;
	/* locate the most significant set bit of the input */
	exp = 7;
	if (work < 128)
		for (; exp > 0 && work < (1 << exp); exp--)
			;
	else
		for (; work > 255; exp++)
			work >>= 1;
	/* mantissa = the bits below the leading one, scaled to 8 bits */
	mant = vol - (1 << exp);
	if (mant > 0) {
		if (exp > 8)
			mant >>= exp - 8;
		else if (exp < 8)
			mant <<= 8 - exp;
		mant &= 255;
	}
	return (exp << 8) | mant;
}
#if 0
/*
 * Inverse of snd_gf1_lvol_to_gvol_raw(): expand a GF1 raw volume
 * (exponent in bits 8-15, mantissa in bits 0-7) back into a linear
 * volume value.  Lossy, since the raw format drops low-order bits.
 */
unsigned int snd_gf1_gvol_to_lvol_raw(unsigned short gf1_vol)
{
	unsigned short exp, mant;
	unsigned int lin;

	if (!gf1_vol)
		return 0;
	exp = gf1_vol >> 8;
	mant = (unsigned char) gf1_vol;
	lin = 1 << exp;		/* the implied leading one */
	return (exp > 8) ? (lin | (mant << (exp - 8)))
			 : (lin | (mant >> (8 - exp)));
}
/*
 * Pick a GF1 volume ramp rate value (2-bit range in bits 6-7 plus a
 * 6-bit increment) that sweeps from 'start' to 'end' raw volume in
 * roughly 'us' microseconds.
 * NOTE(review): inside an #if 0 block - currently dead code.
 */
unsigned int snd_gf1_calc_ramp_rate(struct snd_gus_card * gus,
				    unsigned short start,
				    unsigned short end,
				    unsigned int us)
{
	/* microseconds per ramp step; indexed by (active_voices - 14)
	 * unless enhanced mode is on, which always uses entry 0 */
	static unsigned char vol_rates[19] =
	{
		23, 24, 26, 28, 29, 31, 32, 34,
		36, 37, 39, 40, 42, 44, 45, 47,
		49, 50, 52
	};
	unsigned short range, increment, value, i;

	/* work with the upper 12 bits of the raw volumes */
	start >>= 4;
	end >>= 4;
	/* us becomes microseconds per single volume step.
	 * NOTE(review): divides by zero when start == end after the
	 * shift - presumably callers guarantee distinct values; verify. */
	if (start < end)
		us /= end - start;
	else
		us /= start - end;
	range = 4;		/* sentinel: "no range found yet" */
	value = gus->gf1.enh_mode ?
	    vol_rates[0] :
	    vol_rates[gus->gf1.active_voices - 14];
	/* find the fastest range whose step period still covers 'us';
	 * each successive range is 8x (<< 3) slower */
	for (i = 0; i < 3; i++) {
		if (us < value) {
			range = i;
			break;
		} else
			value <<= 3;
	}
	if (range == 4) {	/* slower than the slowest range: floor it */
		range = 3;
		increment = 1;
	} else
		/* NOTE(review): divides by zero if us == 0 - verify callers */
		increment = (value + (value >> 1)) / us;
	return (range << 6) | (increment & 0x3f);
}
#endif /* 0 */
/*
 * Translate a frequency value into the GF1 frequency control register
 * format, scaled against the card's current playback frequency.
 */
unsigned short snd_gf1_translate_freq(struct snd_gus_card * gus, unsigned int freq16)
{
	unsigned int f = freq16 >> 3;

	if (f < 50)
		f = 50;		/* enforce the minimum supported value */
	if (f & 0xf8000000) {
		/* value would overflow the << 9 below: clamp and complain */
		f = ~0xf8000000;
		snd_printk(KERN_ERR "snd_gf1_translate_freq: overflow - freq = 0x%x\n", f);
	}
	/* scale by playback frequency, rounding to nearest */
	return ((f << 9) + (gus->gf1.playback_freq >> 1)) / gus->gf1.playback_freq;
}
#if 0
/*
 * Convert a vibrato depth given in cents (signed) into the GF1 LFO
 * depth value for the given frequency control register setting.
 * NOTE(review): inside an #if 0 block - currently dead code.
 */
short snd_gf1_compute_vibrato(short cents, unsigned short fc_register)
{
	static short vibrato_table[] =
	{
		0, 0, 32, 592, 61, 1175, 93, 1808,
		124, 2433, 152, 3007, 182, 3632, 213, 4290,
		241, 4834, 255, 5200
	};

	long depth;
	short *vi1, *vi2, pcents, v1;

	pcents = cents < 0 ? -cents : cents;
	/* walk the (cents, fc-delta) pairs until the bracketing pair is
	 * found.  NOTE(review): assumes |cents| <= 255 (the last cents
	 * entry); larger magnitudes walk past the end of the table -
	 * confirm callers guarantee the range. */
	for (vi1 = vibrato_table, vi2 = vi1 + 2; pcents > *vi2; vi1 = vi2, vi2 += 2);
	v1 = *(vi1 + 1);
	/* The FC table above is a list of pairs. The first number in the pair */
	/* is the cents index from 0-255 cents, and the second number in the */
	/* pair is the FC adjustment needed to change the pitch by the indexed */
	/* number of cents. The table was created for an FC of 32768. */
	/* The following expression does a linear interpolation against the */
	/* approximated log curve in the table above, and then scales the number */
	/* by the FC before the LFO. This calculation also adjusts the output */
	/* value to produce the appropriate depth for the hardware. The depth */
	/* is 2 * desired FC + 1. */
	/* NOTE(review): the slope term uses *vi1 (a cents entry) rather than
	 * *(vi1 + 1) (the matching FC entry); per the comment above an FC
	 * interpolation was intended - looks like an off-by-one, confirm. */
	depth = (((int) (*(vi2 + 1) - *vi1) * (pcents - *vi1) / (*vi2 - *vi1)) + v1) * fc_register >> 14;
	if (depth)
		depth++;
	if (depth > 255)
		depth = 255;	/* clamp to the 8-bit depth register */
	return cents < 0 ? -(short) depth : (short) depth;
}
/*
 * Convert a MIDI pitch wheel position (0-16383, centre 8192) and a bend
 * sensitivity into a GF1 frequency multiplier where 1024 means "no
 * change".  A 12-entry log table covers one octave in semitone steps.
 */
unsigned short snd_gf1_compute_pitchbend(unsigned short pitchbend, unsigned short sens)
{
	static long log_table[] = {1024, 1085, 1149, 1218, 1290, 1367, 1448, 1534, 1625, 1722, 1825, 1933};
	int amount, result;
	unsigned int frac, lo, hi;
	unsigned short steps;
	char negative = 0;

	if (!sens)
		return 1024;	/* zero sensitivity: pitch unchanged */
	/* signed wheel offset scaled by the sensitivity */
	amount = ((int) sens * ((int) pitchbend - 8192)) / 128;
	if (amount < 0) {
		negative = 1;	/* remember direction, continue with magnitude */
		amount = -amount;
	}
	steps = (unsigned int) (amount >> 13);	/* whole semitones */
	frac = amount % 8192;			/* fraction of a semitone */
	/* neighbouring semitone entries; shifting by the octave count
	 * doubles the multiplier per octave */
	lo = log_table[steps % 12] << (steps / 12);
	hi = log_table[(steps + 1) % 12] << ((steps + 1) / 12);
	/* linear interpolation between the two table entries */
	result = (int) ((((hi - lo) * frac) >> 13) + lo);
	if (negative)
		result = 1048576L / result;	/* bending down: reciprocal */
	return result;
}
/*
 * Convert a frequency/sample-rate pair into the 16-bit GF1 frequency
 * control value, normalized against a 44100 Hz reference mix rate.
 * NOTE(review): inside an #if 0 block - currently dead code.
 */
unsigned short snd_gf1_compute_freq(unsigned int freq,
				    unsigned int rate,
				    unsigned short mix_rate)
{
	unsigned int fc;
	int scale = 0;

	/* pre-scale large frequencies so (freq << 10) below cannot overflow */
	while (freq >= 4194304L) {
		scale++;
		freq >>= 1;
	}
	/* NOTE(review): divides by zero if rate == 0 or mix_rate == 0 -
	 * presumably guaranteed non-zero by callers; verify. */
	fc = (freq << 10) / rate;
	if (fc > 97391L) {
		fc = 97391;	/* clamp before the 44100 multiply */
		snd_printk(KERN_ERR "patch: (1) fc frequency overflow - %u\n", fc);
	}
	fc = (fc * 44100UL) / mix_rate;
	/* undo the pre-scaling */
	while (scale--)
		fc <<= 1;
	if (fc > 65535L) {
		fc = 65535;	/* final clamp to the 16-bit register width */
		snd_printk(KERN_ERR "patch: (2) fc frequency overflow - %u\n", fc);
	}
	return (unsigned short) fc;
}
#endif /* 0 */
| gpl-2.0 |
tinymac/123 | src/server/game/Chat/ChatLink.cpp | 57 | 26878 | /*
* Copyright (C) 2008-2012 TrinityCore <http://www.trinitycore.org/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "ChatLink.h"
#include "SpellMgr.h"
#include "ObjectMgr.h"
#include "SpellInfo.h"
// Supported shift-links (client generated and server side)
// |color|Hachievement:achievement_id:player_guid:0:0:0:0:0:0:0:0|h[name]|h|r
// - client, item icon shift click, not used in server currently
// |color|Harea:area_id|h[name]|h|r
// |color|Hcreature:creature_guid|h[name]|h|r
// |color|Hcreature_entry:creature_id|h[name]|h|r
// |color|Henchant:recipe_spell_id|h[prof_name: recipe_name]|h|r - client, at shift click in recipes list dialog
// |color|Hgameevent:id|h[name]|h|r
// |color|Hgameobject:go_guid|h[name]|h|r
// |color|Hgameobject_entry:go_id|h[name]|h|r
// |color|Hglyph:glyph_slot_id:glyph_prop_id|h[%s]|h|r - client, at shift click in glyphs dialog, GlyphSlot.dbc, GlyphProperties.dbc
// |color|Hitem:item_id:perm_ench_id:gem1:gem2:gem3:0:0:0:0:reporter_level|h[name]|h|r
// - client, item icon shift click
// |color|Hitemset:itemset_id|h[name]|h|r
// |color|Hplayer:name|h[name]|h|r - client, in some messages, at click copy only name instead link
// |color|Hquest:quest_id:quest_level|h[name]|h|r - client, quest list name shift-click
// |color|Hskill:skill_id|h[name]|h|r
// |color|Hspell:spell_id|h[name]|h|r - client, spellbook spell icon shift-click
// |color|Htalent:talent_id, rank|h[name]|h|r - client, talent icon shift-click
// |color|Htaxinode:id|h[name]|h|r
// |color|Htele:id|h[name]|h|r
// |color|Htitle:id|h[name]|h|r
// |color|Htrade:spell_id:cur_value:max_value:unk3int:unk3str|h[name]|h|r - client, spellbook profession icon shift-click
// Reads a decimal uint32 from the stream into res.
// Returns false on parse failure OR when the stream is exhausted afterwards -
// a valid link always has more payload (delimiter/closing pipe) after a number.
inline bool ReadUInt32(std::istringstream& iss, uint32& res)
{
    iss >> std::dec >> res;
    return !iss.fail() && !iss.eof();
}
// Reads a decimal int32 from the stream into res.
// Same failure semantics as ReadUInt32: parse error or end-of-stream => false.
inline bool ReadInt32(std::istringstream& iss, int32& res)
{
    iss >> std::dec >> res;
    return !iss.fail() && !iss.eof();
}
// Collects characters from the stream up to (but not consuming) 'term'.
// Also stops at end-of-stream: there std::istringstream::peek() returns
// traits_type::eof(), never '\0', so the original loop (which only tested
// for term and '\0') spun forever on a message that ends without the
// terminator - an easy client-triggered hang.
inline std::string ReadSkip(std::istringstream& iss, char term)
{
    std::string res;
    int c = iss.peek();
    while (c != std::istringstream::traits_type::eof() && c != term && c != '\0')
    {
        res += char(c);
        iss.ignore(1);
        c = iss.peek();
    }
    return res;
}
// Consumes exactly one character and verifies it equals 'delimiter'.
// On mismatch logs (with 'context' naming the link type) and returns false.
// NOTE(review): at end-of-stream peek() yields eof(); the comparison still
// fails as intended, but the '%c' printed in the log is then meaningless.
inline bool CheckDelimiter(std::istringstream& iss, char delimiter, const char* context)
{
    char c = iss.peek();
    if (c != delimiter)
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): invalid %s link structure ('%c' expected, '%c' found)", iss.str().c_str(), context, delimiter, c);
        return false;
    }
    iss.ignore(1);
    return true;
}
// Reads a hexadecimal number from the stream into res.
// If 'length' is non-zero, the number must occupy exactly 'length' characters
// (used e.g. for the fixed 8-digit color field).
inline bool ReadHex(std::istringstream& iss, uint32& res, uint32 length)
{
    std::istringstream::pos_type pos = iss.tellg();
    iss >> std::hex >> res;
    //uint32 size = uint32(iss.gcount());
    if (length && uint32(iss.tellg() - pos) != length)
        return false;
    return !iss.fail() && !iss.eof();
}
#define DELIMITER ':'
#define PIPE_CHAR '|'
// Base implementation: remember the link caption and accept it.
// Subclasses override this to validate the caption against localized names.
bool ChatLink::ValidateName(char* buffer, const char* /*context*/)
{
    _name = buffer;
    return true;
}
// |color|Hitem:item_id:perm_ench_id:gem1:gem2:gem3:0:random_property:0:reporter_level|h[name]|h|r
// |cffa335ee|Hitem:812:0:0:0:0:0:0:0:70|h[Glowing Brightwood Staff]|h|r
// Parses the payload of an |Hitem link: the item entry followed by eight
// colon-separated integer properties (enchant, gems, random property, ...).
// Validates that the item exists, that the claimed color matches its quality,
// and that the random property/suffix id (position 5) exists in the DBCs.
bool ItemChatLink::Initialize(std::istringstream& iss)
{
    // Read item entry
    uint32 itemEntry = 0;
    if (!ReadUInt32(iss, itemEntry))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading item entry", iss.str().c_str());
        return false;
    }
    // Validate item
    _item = sObjectMgr->GetItemTemplate(itemEntry);
    if (!_item)
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): got invalid itemEntry %u in |item command", iss.str().c_str(), itemEntry);
        return false;
    }
    // Validate item's color: must match the quality-derived color exactly,
    // otherwise the client could fake a higher-quality looking link
    if (_color != ItemQualityColors[_item->Quality])
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): linked item has color %u, but user claims %u", iss.str().c_str(), ItemQualityColors[_item->Quality], _color);
        return false;
    }
    // Number of various item properties after item entry
    const uint8 propsCount = 8;
    // position of random_property in the link format (see comment above)
    const uint8 randomPropertyPosition = 5;
    for (uint8 index = 0; index < propsCount; ++index)
    {
        if (!CheckDelimiter(iss, DELIMITER, "item"))
            return false;
        int32 id = 0;
        if (!ReadInt32(iss, id))
        {
            sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading item property (%u)", iss.str().c_str(), index);
            return false;
        }
        if (id && (index == randomPropertyPosition))
        {
            // Validate random property: positive = random property,
            // negative = random suffix (looked up by absolute value)
            if (id > 0)
            {
                _property = sItemRandomPropertiesStore.LookupEntry(id);
                if (!_property)
                {
                    sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): got invalid item property id %u in |item command", iss.str().c_str(), id);
                    return false;
                }
            }
            else if (id < 0)
            {
                _suffix = sItemRandomSuffixStore.LookupEntry(-id);
                if (!_suffix)
                {
                    sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): got invalid item suffix id %u in |item command", iss.str().c_str(), -id);
                    return false;
                }
            }
        }
        _data[index] = id;
    }
    return true;
}
// Builds the expected display name for locale 'index': the (possibly
// localized) base item name, plus the random suffix/property name if any.
// Falls back to the default Name1 when no localized entry exists.
inline std::string ItemChatLink::FormatName(uint8 index, ItemLocale const* locale, char* const* suffixStrings) const
{
    std::stringstream ss;
    if (locale == NULL || index >= locale->Name.size())
        ss << _item->Name1;
    else
        ss << locale->Name[index];
    if (suffixStrings)
        ss << ' ' << suffixStrings[index];
    return ss.str();
}
// Verifies the link caption matches the item name (with suffix, if present)
// in at least one supported locale; enUS is tried first, then the rest.
bool ItemChatLink::ValidateName(char* buffer, const char* context)
{
    ChatLink::ValidateName(buffer, context);
    // suffix strings come from the random suffix if set, else the random property
    char* const* suffixStrings = _suffix ? _suffix->nameSuffix : (_property ? _property->nameSuffix : NULL);
    bool res = (FormatName(LOCALE_enUS, NULL, suffixStrings) == buffer);
    if (!res)
    {
        ItemLocale const* il = sObjectMgr->GetItemLocale(_item->ItemId);
        for (uint8 index = LOCALE_koKR; index < TOTAL_LOCALES; ++index)
        {
            if (FormatName(index, il, suffixStrings) == buffer)
            {
                res = true;
                break;
            }
        }
    }
    if (!res)
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): linked item (id: %u) name wasn't found in any localization", context, _item->ItemId);
    return res;
}
// |color|Hquest:quest_id:quest_level|h[name]|h|r
// |cff808080|Hquest:2278:47|h[The Platinum Discs]|h|r
// Parses the payload of an |Hquest link: quest id and quest level.
// Validates that the quest template exists and the level is in range.
bool QuestChatLink::Initialize(std::istringstream& iss)
{
    // Read quest id
    uint32 questId = 0;
    if (!ReadUInt32(iss, questId))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading quest entry", iss.str().c_str());
        return false;
    }
    // Validate quest
    _quest = sObjectMgr->GetQuestTemplate(questId);
    if (!_quest)
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): quest template %u not found", iss.str().c_str(), questId);
        return false;
    }
    // Check delimiter
    if (!CheckDelimiter(iss, DELIMITER, "quest"))
        return false;
    // Read quest level
    if (!ReadInt32(iss, _questLevel))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading quest level", iss.str().c_str());
        return false;
    }
    // Validate quest level against the server's hard level cap
    if (_questLevel >= STRONG_MAX_LEVEL)
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): quest level %d is too big", iss.str().c_str(), _questLevel);
        return false;
    }
    return true;
}
// Verifies the link caption matches the quest title in the default locale
// or any available localization.
bool QuestChatLink::ValidateName(char* buffer, const char* context)
{
    ChatLink::ValidateName(buffer, context);
    bool res = (_quest->GetTitle() == buffer);
    if (!res)
        if (QuestLocale const* ql = sObjectMgr->GetQuestLocale(_quest->GetQuestId()))
            for (uint8 i = 0; i < ql->Title.size(); i++)
                if (ql->Title[i] == buffer)
                {
                    res = true;
                    break;
                }
    if (!res)
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): linked quest (id: %u) title wasn't found in any localization", context, _quest->GetQuestId());
    return res;
}
// |color|Hspell:spell_id|h[name]|h|r
// |cff71d5ff|Hspell:21563|h[Command]|h|r
// Parses the payload of an |Hspell link: a single spell id.
// Requires the fixed spell link color and a valid spell entry.
bool SpellChatLink::Initialize(std::istringstream& iss)
{
    if (_color != CHAT_LINK_COLOR_SPELL)
        return false;
    // Read spell id
    uint32 spellId = 0;
    if (!ReadUInt32(iss, spellId))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading spell entry", iss.str().c_str());
        return false;
    }
    // Validate spell
    _spell = sSpellMgr->GetSpellInfo(spellId);
    if (!_spell)
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): got invalid spell id %u in |spell command", iss.str().c_str(), spellId);
        return false;
    }
    return true;
}
// Verifies the link caption matches the spell name in some locale.
// Tradeskill spells carry a "$PROFESSION: " prefix in the caption, which is
// stripped in-place from 'buffer' before the name comparison.
bool SpellChatLink::ValidateName(char* buffer, const char* context)
{
    ChatLink::ValidateName(buffer, context);
    // spells with that flag have a prefix of "$PROFESSION: "
    if (_spell->Attributes & SPELL_ATTR0_TRADESPELL)
    {
        SkillLineAbilityMapBounds bounds = sSpellMgr->GetSkillLineAbilityMapBounds(_spell->Id);
        if (bounds.first == bounds.second)
        {
            sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): skill line not found for spell %u", context, _spell->Id);
            return false;
        }
        SkillLineAbilityEntry const* skillInfo = bounds.first->second;
        if (!skillInfo)
        {
            sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): skill line ability not found for spell %u", context, _spell->Id);
            return false;
        }
        SkillLineEntry const* skillLine = sSkillLineStore.LookupEntry(skillInfo->skillId);
        if (!skillLine)
        {
            sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): skill line not found for skill %u", context, skillInfo->skillId);
            return false;
        }
        // try every locale's skill line name as the prefix; on a match,
        // shift the remaining caption (the spell name) to the buffer start
        for (uint8 i = 0; i < TOTAL_LOCALES; ++i)
        {
            uint32 skillLineNameLength = strlen(skillLine->name[i]);
            if (skillLineNameLength > 0 && strncmp(skillLine->name[i], buffer, skillLineNameLength) == 0)
            {
                // found the prefix, remove it to perform spellname validation below
                // -2 = strlen(": ")
                uint32 spellNameLength = strlen(buffer) - skillLineNameLength - 2;
                memcpy(buffer, buffer + skillLineNameLength + 2, spellNameLength + 1);
            }
        }
    }
    // compare the (possibly prefix-stripped) caption to the spell name
    // in every locale
    bool res = false;
    for (uint8 i = 0; i < TOTAL_LOCALES; ++i)
        if (*_spell->SpellName[i] && strcmp(_spell->SpellName[i], buffer) == 0)
        {
            res = true;
            break;
        }
    if (!res)
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): linked spell (id: %u) name wasn't found in any localization", context, _spell->Id);
    return res;
}
// |color|Hachievement:achievement_id:player_guid:0:0:0:0:0:0:0:0|h[name]|h|r
// |cffffff00|Hachievement:546:0000000000000001:0:0:0:-1:0:0:0:0|h[Safe Deposit]|h|r
// Parses the payload of an |Hachievement link: achievement id, the owner's
// guid in hex, then eight colon-separated progress fields.
bool AchievementChatLink::Initialize(std::istringstream& iss)
{
    if (_color != CHAT_LINK_COLOR_ACHIEVEMENT)
        return false;
    // Read achievement Id
    uint32 achievementId = 0;
    if (!ReadUInt32(iss, achievementId))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading achievement entry", iss.str().c_str());
        return false;
    }
    // Validate achievement
    _achievement = sAchievementStore.LookupEntry(achievementId);
    if (!_achievement)
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): got invalid achivement id %u in |achievement command", iss.str().c_str(), achievementId);
        return false;
    }
    // Check delimiter
    if (!CheckDelimiter(iss, DELIMITER, "achievement"))
        return false;
    // Read the owner's guid (hex, variable length)
    if (!ReadHex(iss, _guid, 0))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): invalid hexadecimal number while reading char's guid", iss.str().c_str());
        return false;
    }
    // Read (but otherwise ignore) the progress fields
    const uint8 propsCount = 8;
    for (uint8 index = 0; index < propsCount; ++index)
    {
        if (!CheckDelimiter(iss, DELIMITER, "achievement"))
            return false;
        if (!ReadUInt32(iss, _data[index]))
        {
            sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading achievement property (%u)", iss.str().c_str(), index);
            return false;
        }
    }
    return true;
}
// Verifies the link caption matches the achievement name in some locale.
bool AchievementChatLink::ValidateName(char* buffer, const char* context)
{
    ChatLink::ValidateName(buffer, context);
    bool res = false;
    for (uint8 i = 0; i < TOTAL_LOCALES; ++i)
        if (*_achievement->name[i] && strcmp(_achievement->name[i], buffer) == 0)
        {
            res = true;
            break;
        }
    if (!res)
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): linked achievement (id: %u) name wasn't found in any localization", context, _achievement->ID);
    return res;
}
// |color|Htrade:spell_id:cur_value:max_value:player_guid:base64_data|h[name]|h|r
// |cffffd000|Htrade:4037:1:150:1:6AAAAAAAAAAAAAAAAAAAAAAOAADAAAAAAAAAAAAAAAAIAAAAAAAAA|h[Engineering]|h|r
// Parses the payload of an |Htrade link: profession spell id, current and
// maximum skill values, owner guid (hex) and a base64-encoded recipe blob.
// Fix: two debug messages were copy-pasted from the achievement handler and
// misleadingly reported "achievement entry" / "achievement's owner guid"
// while parsing a trade link.
bool TradeChatLink::Initialize(std::istringstream& iss)
{
    if (_color != CHAT_LINK_COLOR_TRADE)
        return false;
    // Spell Id
    uint32 spellId = 0;
    if (!ReadUInt32(iss, spellId))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading trade spell entry", iss.str().c_str());
        return false;
    }
    // Validate spell
    _spell = sSpellMgr->GetSpellInfo(spellId);
    if (!_spell)
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): got invalid spell id %u in |trade command", iss.str().c_str(), spellId);
        return false;
    }
    // Check delimiter
    if (!CheckDelimiter(iss, DELIMITER, "trade"))
        return false;
    // Minimum talent level
    if (!ReadInt32(iss, _minSkillLevel))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading minimum talent level", iss.str().c_str());
        return false;
    }
    // Check delimiter
    if (!CheckDelimiter(iss, DELIMITER, "trade"))
        return false;
    // Maximum talent level
    if (!ReadInt32(iss, _maxSkillLevel))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading maximum talent level", iss.str().c_str());
        return false;
    }
    // Check delimiter
    if (!CheckDelimiter(iss, DELIMITER, "trade"))
        return false;
    // Owner guid (hex, variable length)
    if (!ReadHex(iss, _guid, 0))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading trade's owner guid", iss.str().c_str());
        return false;
    }
    // Skip base64 encoded stuff
    _base64 = ReadSkip(iss, PIPE_CHAR);
    return true;
}
// |color|Htalent:talent_id:rank|h[name]|h|r
// |cff4e96f7|Htalent:2232:-1|h[Taste for Blood]|h|r
// Parses the payload of an |Htalent link: talent id and rank.
// Validates the talent entry and its rank-1 spell.
// Fix: the invalid-spell debug message said "|trade command" (copy-paste
// from the trade handler) - it now correctly reports "|talent command".
bool TalentChatLink::Initialize(std::istringstream& iss)
{
    if (_color != CHAT_LINK_COLOR_TALENT)
        return false;
    // Read talent entry
    if (!ReadUInt32(iss, _talentId))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading talent entry", iss.str().c_str());
        return false;
    }
    // Validate talent
    TalentEntry const* talentInfo = sTalentStore.LookupEntry(_talentId);
    if (!talentInfo)
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): got invalid talent id %u in |talent command", iss.str().c_str(), _talentId);
        return false;
    }
    // Validate talent's spell (first rank)
    _spell = sSpellMgr->GetSpellInfo(talentInfo->RankID[0]);
    if (!_spell)
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): got invalid spell id %u in |talent command", iss.str().c_str(), talentInfo->RankID[0]);
        return false;
    }
    // Delimiter
    if (!CheckDelimiter(iss, DELIMITER, "talent"))
        return false;
    // Rank
    if (!ReadInt32(iss, _rankId))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading talent rank", iss.str().c_str());
        return false;
    }
    return true;
}
// |color|Henchant:recipe_spell_id|h[prof_name: recipe_name]|h|r
// |cffffd000|Henchant:3919|h[Engineering: Rough Dynamite]|h|r
// Parses the payload of an |Henchant link: a single recipe spell id.
bool EnchantmentChatLink::Initialize(std::istringstream& iss)
{
    if (_color != CHAT_LINK_COLOR_ENCHANT)
        return false;
    // Spell Id
    uint32 spellId = 0;
    if (!ReadUInt32(iss, spellId))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading enchantment spell entry", iss.str().c_str());
        return false;
    }
    // Validate spell
    _spell = sSpellMgr->GetSpellInfo(spellId);
    if (!_spell)
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): got invalid spell id %u in |enchant command", iss.str().c_str(), spellId);
        return false;
    }
    return true;
}
// |color|Hglyph:glyph_slot_id:glyph_prop_id|h[%s]|h|r
// |cff66bbff|Hglyph:21:762|h[Glyph of Bladestorm]|h|r
// Parses the payload of an |Hglyph link: glyph slot id and glyph property id.
// Validates the glyph property entry and its associated spell.
bool GlyphChatLink::Initialize(std::istringstream& iss)
{
    if (_color != CHAT_LINK_COLOR_GLYPH)
        return false;
    // Slot
    if (!ReadUInt32(iss, _slotId))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading slot id", iss.str().c_str());
        return false;
    }
    // Check delimiter
    if (!CheckDelimiter(iss, DELIMITER, "glyph"))
        return false;
    // Glyph Id
    uint32 glyphId = 0;
    if (!ReadUInt32(iss, glyphId))
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly while reading glyph entry", iss.str().c_str());
        return false;
    }
    // Validate glyph
    _glyph = sGlyphPropertiesStore.LookupEntry(glyphId);
    if (!_glyph)
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): got invalid glyph id %u in |glyph command", iss.str().c_str(), glyphId);
        return false;
    }
    // Validate glyph's spell
    _spell = sSpellMgr->GetSpellInfo(_glyph->SpellId);
    if (!_spell)
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): got invalid spell id %u in |glyph command", iss.str().c_str(), _glyph->SpellId);
        return false;
    }
    return true;
}
// Wraps the raw chat message in a stream for sequential link parsing.
LinkExtractor::LinkExtractor(const char* msg) : _iss(msg)
{
}
// Releases all ChatLink objects allocated during IsValidMessage().
LinkExtractor::~LinkExtractor()
{
    for (Links::iterator itr = _links.begin(); itr != _links.end(); ++itr)
        delete *itr;
    _links.clear();
}
// Scans the whole message and validates every embedded chat link.
// Links must follow the pipe-command sequence |c...|H...|h[...]|h|r
// ('validSequence'); any deviation, unknown link type, or failed
// payload/name validation rejects the entire message.  Successfully
// parsed links are accumulated in _links (freed by the destructor).
bool LinkExtractor::IsValidMessage()
{
    const char validSequence[6] = "cHhhr";
    const char* validSequenceIterator = validSequence;
    char buffer[256];
    std::istringstream::pos_type startPos = 0;
    uint32 color = 0;
    ChatLink* link = NULL;
    while (!_iss.eof())
    {
        if (validSequence == validSequenceIterator)
        {
            // outside a link: skip plain text up to the next pipe command
            link = NULL;
            _iss.ignore(255, PIPE_CHAR);
            startPos = _iss.tellg() - std::istringstream::pos_type(1);
        }
        else if (_iss.get() != PIPE_CHAR)
        {
            sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence aborted unexpectedly", _iss.str().c_str());
            return false;
        }
        // pipe has always to be followed by at least one char
        if (_iss.peek() == '\0')
        {
            sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): pipe followed by '\\0'", _iss.str().c_str());
            return false;
        }
        // no further pipe commands
        if (_iss.eof())
            break;
        char commandChar;
        _iss >> commandChar;
        // | in normal messages is escaped by ||
        if (commandChar != PIPE_CHAR)
        {
            if (commandChar == *validSequenceIterator)
            {
                // 'r' is the last element: wrap back to the sequence start
                if (validSequenceIterator == validSequence+4)
                    validSequenceIterator = validSequence;
                else
                    ++validSequenceIterator;
            }
            else
            {
                sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): invalid sequence, expected '%c' but got '%c'", _iss.str().c_str(), *validSequenceIterator, commandChar);
                return false;
            }
        }
        else if (validSequence != validSequenceIterator)
        {
            // no escaped pipes in sequences
            sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): got escaped pipe in sequence", _iss.str().c_str());
            return false;
        }
        switch (commandChar)
        {
            case 'c':
                // |cAARRGGBB - fixed 8-digit hex color
                if (!ReadHex(_iss, color, 8))
                {
                    sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): invalid hexadecimal number while reading color", _iss.str().c_str());
                    return false;
                }
                break;
            case 'H':
                // read chars up to colon = link type
                _iss.getline(buffer, 256, DELIMITER);
                if (_iss.eof())
                {
                    sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly", _iss.str().c_str());
                    return false;
                }
                // dispatch to the matching link parser
                if (strcmp(buffer, "item") == 0)
                    link = new ItemChatLink();
                else if (strcmp(buffer, "quest") == 0)
                    link = new QuestChatLink();
                else if (strcmp(buffer, "trade") == 0)
                    link = new TradeChatLink();
                else if (strcmp(buffer, "talent") == 0)
                    link = new TalentChatLink();
                else if (strcmp(buffer, "spell") == 0)
                    link = new SpellChatLink();
                else if (strcmp(buffer, "enchant") == 0)
                    link = new EnchantmentChatLink();
                else if (strcmp(buffer, "achievement") == 0)
                    link = new AchievementChatLink();
                else if (strcmp(buffer, "glyph") == 0)
                    link = new GlyphChatLink();
                else
                {
                    sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): user sent unsupported link type '%s'", _iss.str().c_str(), buffer);
                    return false;
                }
                // store the link first so the destructor frees it even
                // when Initialize() fails below
                _links.push_back(link);
                link->SetColor(color);
                if (!link->Initialize(_iss))
                    return false;
                break;
            case 'h':
                // if h is next element in sequence, this one must contain the linked text :)
                if (*validSequenceIterator == 'h')
                {
                    // links start with '['
                    if (_iss.get() != '[')
                    {
                        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): link caption doesn't start with '['", _iss.str().c_str());
                        return false;
                    }
                    _iss.getline(buffer, 256, ']');
                    if (_iss.eof())
                    {
                        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): sequence finished unexpectedly", _iss.str().c_str());
                        return false;
                    }
                    if (!link)
                        return false;
                    if (!link->ValidateName(buffer, _iss.str().c_str()))
                        return false;
                }
                break;
            case 'r':
                if (link)
                    link->SetBounds(startPos, _iss.tellg());
                // intentional fallthrough - 'r' and '|' both carry no payload
            case '|':
                // no further payload
                break;
            default:
                sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): got invalid command |%c", _iss.str().c_str(), commandChar);
                return false;
        }
    }
    // check if every opened sequence was also closed properly
    if (validSequence != validSequenceIterator)
    {
        sLog->outDebug(LOG_FILTER_CHATSYS, "ChatHandler::isValidChatMessage('%s'): EOF in active sequence", _iss.str().c_str());
        return false;
    }
    return true;
}
| gpl-2.0 |
Tkkg1994/IronKernel | kernel/time/tick-broadcast.c | 569 | 16436 | /*
* linux/kernel/time/tick-broadcast.c
*
* This file contains functions which emulate a local clock-event
* device via a broadcast event source.
*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
*
* This code is licenced under the GPL version 2. For details see
* kernel-base/COPYING.
*/
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include "tick-internal.h"
/*
* Broadcast support for broken x86 hardware, where the local apic
* timer stops in C3 state.
*/
static struct tick_device tick_broadcast_device;
/* FIXME: Use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
static DECLARE_BITMAP(tmpmask, NR_CPUS);
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;
#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif
/*
* Debugging: see timer_list.c
*/
/* Return the broadcast clock event device (for timer_list debug output). */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}
/* Return the mask of CPUs currently served by the broadcast device. */
struct cpumask *tick_get_broadcast_mask(void)
{
	return to_cpumask(tick_broadcast_mask);
}
/*
* Start the device in periodic mode
*/
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	/* bc may be NULL when no broadcast device has been registered yet */
	if (bc)
		tick_setup_periodic(bc, 1);
}
/*
* Check, if the device can be utilized as broadcast device:
*/
/*
 * Returns 1 and installs 'dev' as the broadcast device, or 0 when it is
 * rejected: dummy devices, devices not rated higher than the current
 * broadcast device, and devices that themselves stop in deep C-states
 * (C3STOP) are unsuitable.
 */
int tick_check_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	     (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
	/* detach the old device's handler so stray events are ignored */
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	/* if CPUs already depend on broadcast, start ticking immediately */
	if (!cpumask_empty(tick_get_broadcast_mask()))
		tick_broadcast_start_periodic(dev);
	return 1;
}
/*
* Check, if the device is the broadcast device
*/
/* Return 1 when 'dev' is the currently installed broadcast device. */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	if (!dev)
		return 0;
	return tick_broadcast_device.evtdev == dev;
}
/*
* Check, if the device is disfunctional and a place holder, which
* needs to be handled by the broadcast device.
*/
/*
 * Returns 1 when 'dev' is a non-functional placeholder that must be
 * driven by the broadcast device, 0 when it can tick on its own.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		cpumask_set_cpu(cpu, tick_get_broadcast_mask());
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			/* NOTE(review): shadows the 'cpu' parameter; only
			 * equivalent if callers always pass the current
			 * CPU - verify call sites. */
			int cpu = smp_processor_id();

			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
			tick_broadcast_clear_oneshot(cpu);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
/*
* Broadcast the event to the cpus, which are set in the mask (mangled).
*/
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		/* handle the local CPU directly instead of sending an IPI
		 * to ourselves; note this mangles the caller's mask */
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}
/*
* Periodic broadcast:
* - invoke the broadcast handlers
*/
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	/* broadcast only to online CPUs that are in the broadcast mask */
	cpumask_and(to_cpumask(tmpmask),
		    cpu_online_mask, tick_get_broadcast_mask());
	tick_do_broadcast(to_cpumask(tmpmask));

	raw_spin_unlock(&tick_broadcast_lock);
}
/*
* Event handler for periodic broadcast ticks
*/
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		/* success: the event is armed in the future, we are done;
		 * otherwise the tick already expired - deliver it and
		 * try the following period */
		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}
/*
* Powerstate information: The system enters/leaves a state, where
* affected devices might stop
*/
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	/* remember whether the broadcast device was idle before the change */
	bc_stopped = cpumask_empty(tick_get_broadcast_mask());

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		/* add this CPU to the broadcast mask and stop its local
		 * device - the broadcast device ticks for it from now on */
		if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
			cpumask_set_cpu(cpu, tick_get_broadcast_mask());
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		/* FORCE keeps broadcast on even across later OFF requests */
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		/* remove the CPU from the mask and restart its local device,
		 * unless a FORCE request pinned broadcast mode */
		if (!tick_broadcast_force &&
		    cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	/* start or stop the broadcast device if the mask transitioned
	 * between empty and non-empty */
	if (cpumask_empty(tick_get_broadcast_mask())) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 *
 * Public entry point; rejects requests for cpus that are not online
 * and otherwise forwards to tick_do_broadcast_on_off().
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (cpumask_test_cpu(*oncpu, cpu_online_mask)) {
		tick_do_broadcast_on_off(&reason);
		return;
	}

	printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
	       "offline CPU #%d\n", *oncpu);
}
/*
 * Set the periodic handler depending on broadcast on/off
 *
 * Install either the plain periodic tick handler or the broadcast
 * variant on @dev.
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	dev->event_handler = broadcast ? tick_handle_periodic_broadcast
				       : tick_handle_periodic;
}
/*
 * Remove a CPU from broadcasting
 *
 * Clears the cpu's bit in the broadcast mask; if that leaves the mask
 * empty while in periodic mode, the broadcast device is shut down.
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bcdev;
	unsigned long irqflags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, irqflags);

	bcdev = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(*cpup, tick_get_broadcast_mask());

	if (bcdev &&
	    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC &&
	    cpumask_empty(tick_get_broadcast_mask()))
		clockevents_shutdown(bcdev);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, irqflags);
}
/*
 * Stop the broadcast device on system suspend, if one is registered.
 */
void tick_suspend_broadcast(void)
{
	struct clock_event_device *bcdev;
	unsigned long irqflags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, irqflags);

	bcdev = tick_broadcast_device.evtdev;
	if (bcdev)
		clockevents_shutdown(bcdev);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, irqflags);
}
/*
 * Restart the broadcast device on system resume.
 *
 * Returns non-zero when the current cpu is covered by the broadcast
 * mask, i.e. its local tick must come from the broadcast device.
 * Note the ordering: the device is moved to RESUME mode before it is
 * reprogrammed for periodic/oneshot operation.
 */
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_get_broadcast_mask()))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_get_broadcast_mask());
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_get_broadcast_mask()))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}
#ifdef CONFIG_TICK_ONESHOT

/* Cpus whose next oneshot event is delivered by the broadcast device. */
/* FIXME: use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return to_cpumask(tick_broadcast_oneshot_mask);
}
/*
 * Program the broadcast device to fire at @expires, switching it to
 * oneshot mode first when needed.  Returns the result of
 * clockevents_program_event() (non-zero when the event was in the past
 * and !@force).
 */
static int tick_broadcast_set_event(ktime_t expires, int force)
{
	struct clock_event_device *bcdev = tick_broadcast_device.evtdev;

	if (bcdev->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bcdev, CLOCK_EVT_MODE_ONESHOT);

	return clockevents_program_event(bcdev, expires, force);
}
/*
 * Resume path for a oneshot-mode broadcast device: just switch it back
 * to oneshot mode.  Always returns 0 (no forced broadcast needed).
 */
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}
/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	struct tick_device *td;

	if (!cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask)))
		return;

	td = &per_cpu(tick_cpu_device, cpu);

	/*
	 * We might be in the middle of switching over from periodic to
	 * oneshot. If the CPU has not yet switched over, leave the
	 * device alone.
	 */
	if (td->mode == TICKDEV_MODE_ONESHOT)
		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
}
/*
 * Handle oneshot mode broadcasting
 *
 * Runs on the cpu servicing the broadcast device interrupt: wakes all
 * cpus whose local event has expired and rearms the broadcast device
 * for the earliest still-pending event.  If rearming fails because
 * that event expired meanwhile, the whole scan is repeated.
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(to_cpumask(tmpmask));
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64)
			cpumask_set_cpu(cpu, to_cpumask(tmpmask));
		else if (td->evtdev->next_event.tv64 < next_event.tv64)
			next_event.tv64 = td->evtdev->next_event.tv64;
	}

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(to_cpumask(tmpmask));

	/*
	 * Two reasons for reprogram:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 *
 * On ENTER the cpu's local device is shut down and its next event is
 * handed to the broadcast device; on exit the local device is
 * reactivated and reprogrammed.
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	/* Only devices which stop in deep C-states need broadcasting. */
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
			cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			/* Pull the broadcast expiry forward if we need it sooner. */
			if (dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(dev->next_event, 1);
		}
	} else {
		if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
			cpumask_clear_cpu(cpu,
					  tick_get_broadcast_oneshot_mask());
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 != KTIME_MAX)
				tick_program_event(dev->next_event, 1);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}
/*
 * Seed next_event of every registered per-cpu device in @mask with
 * @expires, so the oneshot broadcast handler has a sane baseline.
 */
static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		struct tick_device *dev = &per_cpu(tick_cpu_device, cpu);

		if (dev->evtdev)
			dev->evtdev->next_event = expires;
	}
}
/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 *
 * Installs the oneshot broadcast handler exactly once and migrates any
 * cpus that were waiting for periodic broadcast into the oneshot mask.
 * Called with tick_broadcast_lock held.
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/* Take the do_timer update */
		tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
		cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
		cpumask_or(tick_get_broadcast_oneshot_mask(),
			   tick_get_broadcast_oneshot_mask(),
			   to_cpumask(tmpmask));

		if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(to_cpumask(tmpmask),
						       tick_next_period);
			tick_broadcast_set_event(tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}
/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bcdev;
	unsigned long irqflags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, irqflags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bcdev = tick_broadcast_device.evtdev;
	if (bcdev)
		tick_broadcast_setup_oneshot(bcdev);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, irqflags);
}
/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long irqflags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, irqflags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpumask_clear_cpu(*cpup, tick_get_broadcast_oneshot_mask());

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, irqflags);
}
/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}
/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bcdev = tick_broadcast_device.evtdev;

	if (!bcdev)
		return false;
	return (bcdev->features & CLOCK_EVT_FEAT_ONESHOT) != 0;
}

#endif
| gpl-2.0 |
sndnvaps/linux-1 | drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c | 825 | 21579 | /******************************************************************************
*
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
*
******************************************************************************/
#define _RTL8188E_XMIT_C_
#include <osdep_service.h>
#include <drv_types.h>
#include <wifi.h>
#include <osdep_intf.h>
#include <usb_ops_linux.h>
#include <rtl8188e_hal.h>
/*
 * Initialise the transmit side of @adapt: hook the xmit tasklet up to
 * the adapter.  Always returns _SUCCESS.
 */
s32 rtl8188eu_init_xmit_priv(struct adapter *adapt)
{
	struct xmit_priv *xmitpriv = &adapt->xmitpriv;

	tasklet_init(&xmitpriv->xmit_tasklet,
		     (void (*)(unsigned long))rtl8188eu_xmit_tasklet,
		     (unsigned long)adapt);
	return _SUCCESS;
}
/*
 * Return 1 when a transfer of @sz bytes plus the TX descriptor would be
 * an exact multiple of the USB bulk-out size (i.e. would need a zero
 * length packet), 0 otherwise.
 */
static u8 urb_zero_packet_chk(struct adapter *adapt, int sz)
{
	struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);

	if ((sz + TXDESC_SIZE) % haldata->UsbBulkOutSize)
		return 0;
	return 1;
}
static void rtl8188eu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
{
u16 *usptr = (u16 *)ptxdesc;
u32 count = 16; /* (32 bytes / 2 bytes per XOR) => 16 times */
u32 index;
u16 checksum = 0;
/* Clear first */
ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
for (index = 0; index < count; index++)
checksum = checksum ^ le16_to_cpu(*(__le16 *)(usptr + index));
ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff & checksum);
}
/* Description: In normal chip, we should send some packet to Hw which will be used by Fw */
/* in FW LPS mode. The function is to fill the Tx descriptor of this packets, then */
/* Fw can tell Hw to send these packets directly. */
void rtl8188e_fill_fake_txdesc(struct adapter *adapt, u8 *desc, u32 BufferLen, u8 ispspoll, u8 is_btqosnull)
{
	struct tx_desc *ptxdesc;

	/* Clear all status */
	ptxdesc = (struct tx_desc *)desc;
	memset(desc, 0, TXDESC_SIZE);

	/* offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); /* own, bFirstSeg, bLastSeg; */

	ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000); /* 32 bytes for TX Desc */

	ptxdesc->txdw0 |= cpu_to_le32(BufferLen&0x0000ffff); /* Buffer size + command header */

	/* offset 4 */
	ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT<<QSEL_SHT)&0x00001f00); /* Fixed queue of Mgnt queue */

	/* Set NAVUSEHDR to prevent the Ps-poll AID field from being changed to an error value by Hw. */
	if (ispspoll) {
		ptxdesc->txdw1 |= cpu_to_le32(NAVUSEHDR);
	} else {
		ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); /* Hw set sequence number */
		ptxdesc->txdw3 |= cpu_to_le32((8 << 28)); /* set bit3 to 1. Suggested by TimChen. 2009.12.29. */
	}

	if (is_btqosnull)
		ptxdesc->txdw2 |= cpu_to_le32(BIT(23)); /* BT NULL */

	/* offset 16 */
	ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */

	/* USB interface drops the packet if the checksum of the descriptor isn't correct. */
	/* Using this checksum lets the hardware recover from packet bulk out errors (e.g. Cancel URC, Bulk out error.). */
	rtl8188eu_cal_txdesc_chksum(ptxdesc);
}
static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc)
{
if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
switch (pattrib->encrypt) {
/* SEC_TYPE : 0:NO_ENC,1:WEP40/TKIP,2:WAPI,3:AES */
case _WEP40_:
case _WEP104_:
ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
break;
case _TKIP_:
case _TKIP_WTMIC_:
ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
break;
case _AES_:
ptxdesc->txdw1 |= cpu_to_le32((0x03<<SEC_TYPE_SHT)&0x00c00000);
ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
break;
case _NO_PRIVACY_:
default:
break;
}
}
}
/*
 * Program the virtual-carrier-sense bits (RTS/CTS-to-self) and, for HT
 * frames, the RTS bandwidth / secondary-channel placement into *pdw.
 */
static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
{
	switch (pattrib->vcs_mode) {
	case RTS_CTS:
		*pdw |= cpu_to_le32(RTS_EN);
		break;
	case CTS_TO_SELF:
		*pdw |= cpu_to_le32(CTS_2_SELF);
		break;
	case NONE_VCS:
	default:
		break;
	}

	if (!pattrib->vcs_mode)
		return;

	*pdw |= cpu_to_le32(HW_RTS_EN);

	if (!pattrib->ht_en)
		return;

	/* Set RTS BW */
	if (pattrib->bwmode & HT_CHANNEL_WIDTH_40)
		*pdw |= cpu_to_le32(BIT(27));

	switch (pattrib->ch_offset) {
	case HAL_PRIME_CHNL_OFFSET_LOWER:
		*pdw |= cpu_to_le32((0x01 << 28) & 0x30000000);
		break;
	case HAL_PRIME_CHNL_OFFSET_UPPER:
		*pdw |= cpu_to_le32((0x02 << 28) & 0x30000000);
		break;
	case HAL_PRIME_CHNL_OFFSET_DONT_CARE:
		break;
	default:
		*pdw |= cpu_to_le32((0x03 << 28) & 0x30000000);
		break;
	}
}
/*
 * For HT frames, program the data bandwidth and secondary-channel
 * placement bits into *pdw.  Non-HT frames are untouched.
 */
static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
{
	if (!pattrib->ht_en)
		return;

	if (pattrib->bwmode & HT_CHANNEL_WIDTH_40)
		*pdw |= cpu_to_le32(BIT(25));

	switch (pattrib->ch_offset) {
	case HAL_PRIME_CHNL_OFFSET_LOWER:
		*pdw |= cpu_to_le32((0x01 << DATA_SC_SHT) & 0x003f0000);
		break;
	case HAL_PRIME_CHNL_OFFSET_UPPER:
		*pdw |= cpu_to_le32((0x02 << DATA_SC_SHT) & 0x003f0000);
		break;
	case HAL_PRIME_CHNL_OFFSET_DONT_CARE:
		break;
	default:
		*pdw |= cpu_to_le32((0x03 << DATA_SC_SHT) & 0x003f0000);
		break;
	}
}
/*
 * Fill the 32 byte TX descriptor in front of one frame in @pmem.
 *
 * @pxmitframe: frame whose attributes drive the descriptor fields
 * @pmem:       start of the buffer (descriptor followed by frame data)
 * @sz:         payload size written into TXPKTSIZE
 * @bagg_pkt:   true when the frame is part of a USB aggregate
 *
 * Returns 1 ("pull") when the descriptor was shifted forward by
 * PACKET_OFFSET_SZ to avoid a bulk transfer that is an exact multiple
 * of the USB bulk-out size (which would require a zero length packet),
 * 0 otherwise.
 */
static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bagg_pkt)
{
	int pull = 0;
	uint qsel;
	u8 data_rate, pwr_status, offset;
	struct adapter *adapt = pxmitframe->padapter;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
	struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
	struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	int bmcst = IS_MCAST(pattrib->ra);

	if (adapt->registrypriv.mp_mode == 0) {
		if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) {
			/* shift the descriptor so the URB is not bulk-aligned */
			ptxdesc = (struct tx_desc *)(pmem+PACKET_OFFSET_SZ);
			pull = 1;
		}
	}

	memset(ptxdesc, 0, sizeof(struct tx_desc));

	/* 4 offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
	ptxdesc->txdw0 |= cpu_to_le32(sz & 0x0000ffff);/* update TXPKTSIZE */

	offset = TXDESC_SIZE + OFFSET_SZ;
	ptxdesc->txdw0 |= cpu_to_le32(((offset) << OFFSET_SHT) & 0x00ff0000);/* 32 bytes for TX Desc */

	if (bmcst)
		ptxdesc->txdw0 |= cpu_to_le32(BMC);

	if (adapt->registrypriv.mp_mode == 0) {
		if (!bagg_pkt) {
			/* the pull above consumed one reserved offset unit */
			if ((pull) && (pxmitframe->pkt_offset > 0))
				pxmitframe->pkt_offset = pxmitframe->pkt_offset - 1;
		}
	}

	/* pkt_offset, unit:8 bytes padding */
	if (pxmitframe->pkt_offset > 0)
		ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000);

	/* driver uses rate */
	ptxdesc->txdw4 |= cpu_to_le32(USERATE);/* rate control always by driver */

	if ((pxmitframe->frame_tag & 0x0f) == DATA_FRAMETAG) {
		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3F);

		qsel = (uint)(pattrib->qsel & 0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000F0000);

		fill_txdesc_sectype(pattrib, ptxdesc);

		if (pattrib->ampdu_en) {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_EN);/* AGG EN */
			ptxdesc->txdw6 = cpu_to_le32(0x6666f800);
		} else {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
		}

		/* offset 8 */

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);

		/* offset 16 , offset 20 */
		if (pattrib->qos_en)
			ptxdesc->txdw4 |= cpu_to_le32(QOS);/* QoS */

		/* offset 20 */
		if (pxmitframe->agg_num > 1)
			ptxdesc->txdw5 |= cpu_to_le32((pxmitframe->agg_num << USB_TXAGG_NUM_SHT) & 0xFF000000);

		if ((pattrib->ether_type != 0x888e) &&
		    (pattrib->ether_type != 0x0806) &&
		    (pattrib->ether_type != 0x88b4) &&
		    (pattrib->dhcp_pkt != 1)) {
			/* Non EAP & ARP & DHCP type data packet */

			fill_txdesc_vcs(pattrib, &ptxdesc->txdw4);
			fill_txdesc_phy(pattrib, &ptxdesc->txdw4);

			ptxdesc->txdw4 |= cpu_to_le32(0x00000008);/* RTS Rate=24M */
			ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00);/* DATA/RTS Rate FB LMT */

			if (pattrib->ht_en) {
				if (ODM_RA_GetShortGI_8188E(&haldata->odmpriv, pattrib->mac_id))
					ptxdesc->txdw5 |= cpu_to_le32(SGI);/* SGI */
			}

			data_rate = ODM_RA_GetDecisionRate_8188E(&haldata->odmpriv, pattrib->mac_id);
			ptxdesc->txdw5 |= cpu_to_le32(data_rate & 0x3F);

			pwr_status = ODM_RA_GetHwPwrStatus_8188E(&haldata->odmpriv, pattrib->mac_id);
			ptxdesc->txdw4 |= cpu_to_le32((pwr_status & 0x7) << PWR_STATUS_SHT);
		} else {
			/* EAP data packet and ARP packet and DHCP. */
			/* Use the 1M data rate to send the EAP/ARP packet. */
			/* This will maybe make the handshake smooth. */
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
			if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
				ptxdesc->txdw4 |= cpu_to_le32(BIT(24));/* DATA_SHORT */
			ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
		}
	} else if ((pxmitframe->frame_tag&0x0f) == MGNT_FRAMETAG) {
		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3f);

		qsel = (uint)(pattrib->qsel&0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000f0000);

		/* offset 8 */
		/* CCX-TXRPT ack for xmit mgmt frames. */
		if (pxmitframe->ack_report)
			ptxdesc->txdw2 |= cpu_to_le32(BIT(19));

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0FFF0000);

		/* offset 20 */
		ptxdesc->txdw5 |= cpu_to_le32(RTY_LMT_EN);/* retry limit enable */
		if (pattrib->retry_ctrl)
			ptxdesc->txdw5 |= cpu_to_le32(0x00180000);/* retry limit = 6 */
		else
			ptxdesc->txdw5 |= cpu_to_le32(0x00300000);/* retry limit = 12 */

		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	} else if ((pxmitframe->frame_tag&0x0f) == TXAGG_FRAMETAG) {
		DBG_88E("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
	} else {
		/* unknown frame tag: fall back to fixed CAM id / raid */
		DBG_88E("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);

		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32((4) & 0x3f);/* CAM_ID(MAC_ID) */
		ptxdesc->txdw1 |= cpu_to_le32((6 << RATE_ID_SHT) & 0x000f0000);/* raid */

		/* offset 8 */

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0fff0000);

		/* offset 20 */
		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	}

	/* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
	/* (1) The sequence number of each non-Qos frame / broadcast / multicast / */
	/* mgnt frame should be controlled by Hw because Fw will also send null data */
	/* which we cannot control when Fw LPS enable. */
	/* --> default enable non-Qos data sequense number. 2010.06.23. by tynli. */
	/* (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. */
	/* (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos packets. */
	/* 2010.06.23. Added by tynli. */
	if (!pattrib->qos_en) {
		ptxdesc->txdw3 |= cpu_to_le32(EN_HWSEQ); /* Hw set sequence number */
		ptxdesc->txdw4 |= cpu_to_le32(HW_SSN);	 /* Hw set sequence number */
	}

	rtl88eu_dm_set_tx_ant_by_tx_info(&haldata->odmpriv, pmem,
					 pattrib->mac_id);

	/* checksum must be computed last, after all fields are final */
	rtl8188eu_cal_txdesc_chksum(ptxdesc);
	_dbg_dump_tx_info(adapt, pxmitframe->frame_tag, ptxdesc);
	return pull;
}
/* for non-agg data frame or management frame */
/*
 * Write each fragment of @pxmitframe to the USB FIFO: build the TX
 * descriptor in front of the fragment, then bulk-out the descriptor
 * plus payload.  The frame is freed on all paths.  Returns _SUCCESS
 * unless any fragment's usb_write_port() failed.
 */
static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 ret = _SUCCESS;
	s32 inner_ret = _SUCCESS;
	int t, sz, w_sz, pull = 0;
	u8 *mem_addr;
	u32 ff_hwaddr;
	struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	struct security_priv *psecuritypriv = &adapt->securitypriv;

	/* kick off BA negotiation for ordinary data traffic */
	if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
	    (pxmitframe->attrib.ether_type != 0x0806) &&
	    (pxmitframe->attrib.ether_type != 0x888e) &&
	    (pxmitframe->attrib.ether_type != 0x88b4) &&
	    (pxmitframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pxmitframe);

	mem_addr = pxmitframe->buf_addr;

	RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_dump_xframe()\n"));

	for (t = 0; t < pattrib->nr_frags; t++) {
		/* remember a failure from the previous iteration */
		if (inner_ret != _SUCCESS && ret == _SUCCESS)
			ret = _FAIL;
		if (t != (pattrib->nr_frags - 1)) {
			RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("pattrib->nr_frags=%d\n", pattrib->nr_frags));

			sz = pxmitpriv->frag_len;
			sz = sz - 4 - (psecuritypriv->sw_encrypt ? 0 : pattrib->icv_len);
		} else {
			/* no frag */
			sz = pattrib->last_txcmdsz;
		}

		pull = update_txdesc(pxmitframe, mem_addr, sz, false);

		if (pull) {
			/* descriptor was shifted forward to dodge bulk alignment */
			mem_addr += PACKET_OFFSET_SZ; /* pull txdesc head */
			pxmitframe->buf_addr = mem_addr;
			w_sz = sz + TXDESC_SIZE;
		} else {
			w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ;
		}
		ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);

		inner_ret = usb_write_port(adapt, ff_hwaddr, w_sz, (unsigned char *)pxmitbuf);

		rtw_count_tx_stats(adapt, pxmitframe, sz);

		RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_write_port, w_sz=%d\n", w_sz));

		mem_addr += w_sz;

		mem_addr = (u8 *)round_up((size_t)mem_addr, 4);
	}

	rtw_free_xmitframe(pxmitpriv, pxmitframe);

	if (ret != _SUCCESS)
		rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);

	return ret;
}
/*
 * Compute the buffer space one frame needs: 802.11 header, IV, SNAP
 * header, ethertype, payload, plus ICV when software encryption is
 * used and an extra 8 bytes for TKIP.  Fragmentation is not taken
 * into account.
 */
static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
{
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	u32 needed;

	needed = pattrib->hdrlen + pattrib->iv_len +
		 SNAP_SIZE + sizeof(u16) + pattrib->pktlen;

	if (pattrib->bswenc)
		needed += pattrib->icv_len;

	if (pattrib->encrypt == _TKIP_)
		needed += 8;	/* extra room for TKIP */

	return needed;
}
/*
 * Aggregate pending frames of equal priority/destination into one USB
 * bulk transfer and hand the whole buffer to the hardware.
 *
 * Returns true when a transfer was queued, false when no frame (or no
 * buffer) was available.  Aggregation stops when the xmit buffer is
 * full, MAX_TX_AGG_PACKET_NUMBER is reached, or the per-bulk
 * descriptor budget (UsbTxAggDescNum) is exhausted.
 */
s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
{
	struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
	struct xmit_frame *pxmitframe = NULL;
	struct xmit_frame *pfirstframe = NULL;

	/* aggregate variable */
	struct hw_xmit *phwxmit;
	struct sta_info *psta = NULL;
	struct tx_servq *ptxservq = NULL;
	struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;

	u32 pbuf;	/* next pkt address */
	u32 pbuf_tail;	/* last pkt tail */
	u32 len;	/* packet length, except TXDESC_SIZE and PKT_OFFSET */

	u32 bulksize = haldata->UsbBulkOutSize;
	u8 desc_cnt;
	u32 bulkptr;

	/* dump frame variable */
	u32 ff_hwaddr;

	RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n"));

	/* check xmitbuffer is ok */
	if (pxmitbuf == NULL) {
		pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
		if (pxmitbuf == NULL)
			return false;
	}

	/* 3 1. pick up first frame */
	do {
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
		if (pxmitframe == NULL) {
			/* no more xmit frame, release xmit buffer */
			rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
			return false;
		}

		pxmitframe->pxmitbuf = pxmitbuf;
		pxmitframe->buf_addr = pxmitbuf->pbuf;
		pxmitbuf->priv_data = pxmitframe;

		pxmitframe->agg_num = 1; /* alloc xmitframe should assign to 1. */
		pxmitframe->pkt_offset = 1; /* first frame of aggregation, reserve offset */

		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

		/* always return ndis_packet after rtw_xmitframe_coalesce */
		rtw_os_xmit_complete(adapt, pxmitframe);

		break;
	} while (1);

	/* 3 2. aggregate same priority and same DA(AP or STA) frames */
	pfirstframe = pxmitframe;
	len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset*PACKET_OFFSET_SZ);
	pbuf_tail = len;
	pbuf = round_up(pbuf_tail, 8);

	/* check pkt amount in one bulk */
	desc_cnt = 0;
	bulkptr = bulksize;
	if (pbuf < bulkptr) {
		desc_cnt++;
	} else {
		desc_cnt = 0;
		bulkptr = ((pbuf / bulksize) + 1) * bulksize; /* round to next bulksize */
	}

	/* dequeue same priority packet from station tx queue */
	psta = pfirstframe->attrib.psta;
	switch (pfirstframe->attrib.priority) {
	case 1:
	case 2:
		ptxservq = &(psta->sta_xmitpriv.bk_q);
		phwxmit = pxmitpriv->hwxmits + 3;
		break;
	case 4:
	case 5:
		ptxservq = &(psta->sta_xmitpriv.vi_q);
		phwxmit = pxmitpriv->hwxmits + 1;
		break;
	case 6:
	case 7:
		ptxservq = &(psta->sta_xmitpriv.vo_q);
		phwxmit = pxmitpriv->hwxmits;
		break;
	case 0:
	case 3:
	default:
		ptxservq = &(psta->sta_xmitpriv.be_q);
		phwxmit = pxmitpriv->hwxmits + 2;
		break;
	}
	spin_lock_bh(&pxmitpriv->lock);

	xmitframe_phead = get_list_head(&ptxservq->sta_pending);
	xmitframe_plist = xmitframe_phead->next;

	while (xmitframe_phead != xmitframe_plist) {
		pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
		xmitframe_plist = xmitframe_plist->next;

		pxmitframe->agg_num = 0; /* not first frame of aggregation */
		pxmitframe->pkt_offset = 0; /* not first frame of aggregation, no need to reserve offset */

		len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset*PACKET_OFFSET_SZ);

		if (round_up(pbuf + len, 8) > MAX_XMITBUF_SZ) {
			/* buffer full: restore first-frame markers and stop */
			pxmitframe->agg_num = 1;
			pxmitframe->pkt_offset = 1;
			break;
		}

		list_del_init(&pxmitframe->list);
		ptxservq->qcnt--;
		phwxmit->accnt--;

		pxmitframe->buf_addr = pxmitbuf->pbuf + pbuf;

		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
		/* always return ndis_packet after rtw_xmitframe_coalesce */
		rtw_os_xmit_complete(adapt, pxmitframe);

		/* (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
		update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);

		/* don't need xmitframe any more */
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/* handle pointer and stop condition */
		pbuf_tail = pbuf + len;
		pbuf = round_up(pbuf_tail, 8);

		pfirstframe->agg_num++;
		if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num)
			break;

		if (pbuf < bulkptr) {
			desc_cnt++;
			if (desc_cnt == haldata->UsbTxAggDescNum)
				break;
		} else {
			desc_cnt = 0;
			bulkptr = ((pbuf / bulksize) + 1) * bulksize;
		}
	} /* end while (aggregate same priority and same DA(AP or STA) frames) */

	if (list_empty(&ptxservq->sta_pending.queue))
		list_del_init(&ptxservq->tx_pending);

	spin_unlock_bh(&pxmitpriv->lock);
	if ((pfirstframe->attrib.ether_type != 0x0806) &&
	    (pfirstframe->attrib.ether_type != 0x888e) &&
	    (pfirstframe->attrib.ether_type != 0x88b4) &&
	    (pfirstframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pfirstframe);
	/* 3 3. update first frame txdesc */
	if ((pbuf_tail % bulksize) == 0) {
		/* remove pkt_offset */
		pbuf_tail -= PACKET_OFFSET_SZ;
		pfirstframe->buf_addr += PACKET_OFFSET_SZ;
		pfirstframe->pkt_offset--;
	}

	update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz, true);

	/* 3 4. write xmit buffer to USB FIFO */
	ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
	usb_write_port(adapt, ff_hwaddr, pbuf_tail, (u8 *)pxmitbuf);

	/* 3 5. update statisitc */
	pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE);
	pbuf_tail -= (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);

	rtw_count_tx_stats(adapt, pfirstframe, pbuf_tail);

	rtw_free_xmitframe(pxmitpriv, pfirstframe);

	return true;
}
/*
 * Coalesce the frame into its xmit buffer and, on success, push it to
 * the hardware immediately.  Returns the coalesce result.
 */
static s32 xmitframe_direct(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

	if (res != _SUCCESS) {
		DBG_88E("==> %s xmitframe_coalsece failed\n", __func__);
		return res;
	}

	rtw_dump_xframe(adapt, pxmitframe);
	return res;
}
/*
 * Return
 *	true	dump packet directly
 *	false	enqueue packet
 *
 * Decides whether a frame can bypass the software queues.  Note the
 * locking: pxmitpriv->lock is dropped on the direct path before the
 * USB write, and on the enqueue path only after the frame is queued.
 */
static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 res;
	struct xmit_buf *pxmitbuf = NULL;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct mlme_priv *pmlmepriv = &adapt->mlmepriv;

	spin_lock_bh(&pxmitpriv->lock);

	/* frames of the same AC are already pending: keep ordering */
	if (rtw_txframes_sta_ac_pending(adapt, pattrib) > 0)
		goto enqueue;

	if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true)
		goto enqueue;

	pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
	if (pxmitbuf == NULL)
		goto enqueue;

	spin_unlock_bh(&pxmitpriv->lock);

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	if (xmitframe_direct(adapt, pxmitframe) != _SUCCESS) {
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		rtw_free_xmitframe(pxmitpriv, pxmitframe);
	}

	return true;

enqueue:
	res = rtw_xmitframe_enqueue(adapt, pxmitframe);
	spin_unlock_bh(&pxmitpriv->lock);

	if (res != _SUCCESS) {
		RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("pre_xmitframe: enqueue xmitframe fail\n"));
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/* Trick, make the statistics correct */
		pxmitpriv->tx_pkts--;
		pxmitpriv->tx_drop++;
		return true;
	}

	return false;
}
/* Transmit a management frame: dumped straight to hardware, no queueing. */
s32 rtl8188eu_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
{
	return rtw_dump_xframe(adapt, pmgntframe);
}
/*
 * Return
 *	true	dump packet directly ok
 *	false	temporary can't transmit packets to hardware
 *
 * HAL entry point for data frames; delegates to pre_xmitframe().
 */
s32 rtl8188eu_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	return pre_xmitframe(adapt, pxmitframe);
}
| gpl-2.0 |
janrinze/loox7xxport.loox2-6-26 | fs/jffs2/xattr_trusted.c | 825 | 1391 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2006 NEC Corporation
*
* Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/jffs2.h>
#include <linux/xattr.h>
#include <linux/mtd/mtd.h>
#include "nodelist.h"
/*
 * Read a "trusted." extended attribute.  An empty attribute name is
 * invalid; otherwise the lookup is delegated to the generic JFFS2
 * xattr code with the TRUSTED prefix index.
 */
static int jffs2_trusted_getxattr(struct inode *inode, const char *name,
				  void *buffer, size_t size)
{
	if (*name == '\0')
		return -EINVAL;
	return do_jffs2_getxattr(inode, JFFS2_XPREFIX_TRUSTED, name,
				 buffer, size);
}
/*
 * Write a "trusted." extended attribute.  An empty attribute name is
 * invalid; otherwise the update is delegated to the generic JFFS2
 * xattr code with the TRUSTED prefix index.
 */
static int jffs2_trusted_setxattr(struct inode *inode, const char *name, const void *buffer,
				  size_t size, int flags)
{
	if (*name == '\0')
		return -EINVAL;
	return do_jffs2_setxattr(inode, JFFS2_XPREFIX_TRUSTED, name,
				 buffer, size, flags);
}
/*
 * Emit one "trusted.<name>" entry into the listxattr buffer.  Always
 * returns the space the entry needs (prefix + name + NUL); the entry
 * is only written when @list is non-NULL and large enough.
 */
static size_t jffs2_trusted_listxattr(struct inode *inode, char *list, size_t list_size,
				      const char *name, size_t name_len)
{
	size_t total = XATTR_TRUSTED_PREFIX_LEN + name_len + 1;

	if (list && total <= list_size) {
		strcpy(list, XATTR_TRUSTED_PREFIX);
		strcpy(list + XATTR_TRUSTED_PREFIX_LEN, name);
	}
	return total;
}
/*
 * Handler plugged into the VFS xattr dispatch table for all attributes
 * in the "trusted." namespace on jffs2.
 */
struct xattr_handler jffs2_trusted_xattr_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.list = jffs2_trusted_listxattr,
	.set = jffs2_trusted_setxattr,
	.get = jffs2_trusted_getxattr
};
| gpl-2.0 |
BroadCyanMod/kernel_samsung_bcm21553-common | arch/mn10300/kernel/module.c | 1337 | 5425 | /* MN10300 Kernel module helper routines
*
* Copyright (C) 2007, 2008, 2009 Red Hat, Inc. All Rights Reserved.
* Written by Mark Salter (msalter@redhat.com)
* - Derived from arch/i386/kernel/module.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public Licence as published by
* the Free Software Foundation; either version 2 of the Licence, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public Licence for more details.
*
* You should have received a copy of the GNU General Public Licence
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt, ...)
#endif
/*
 * Allocate executable storage for a module.
 * Returns NULL for a zero-byte request, otherwise memory from
 * vmalloc_exec() (which may itself return NULL on failure).
 */
void *module_alloc(unsigned long size)
{
	return size ? vmalloc_exec(size) : NULL;
}
/*
 * free memory returned from module_alloc()
 * - simply hands the region back to vfree(); @mod is unused on this arch
 */
void module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}
/*
 * allow the arch to fix up the section table
 * - we don't need anything special on MN10300, so this is a no-op that
 *   always reports success
 */
int module_frob_arch_sections(Elf_Ehdr *hdr,
			      Elf_Shdr *sechdrs,
			      char *secstrings,
			      struct module *mod)
{
	return 0;
}
/*
 * Store the low 16 bits of @val at @p, little-endian, one byte at a
 * time (the target address may be unaligned).
 */
static void reloc_put16(uint8_t *p, uint32_t val)
{
	p[1] = (uint8_t)((val >> 8) & 0xff);
	p[0] = (uint8_t)(val & 0xff);
}
/*
 * Store the low 24 bits of @val at @p, little-endian, byte by byte
 * (the target address may be unaligned).
 */
static void reloc_put24(uint8_t *p, uint32_t val)
{
	p[0] = (uint8_t)(val & 0xff);
	p[1] = (uint8_t)((val >> 8) & 0xff);
	p[2] = (uint8_t)((val >> 16) & 0xff);
}
/*
 * Store all 32 bits of @val at @p, little-endian, byte by byte
 * (the target address may be unaligned).
 */
static void reloc_put32(uint8_t *p, uint32_t val)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		p[i] = (uint8_t)((val >> (8 * i)) & 0xff);
}
/*
 * apply a REL relocation
 * - plain REL sections are not supported on this architecture (modules
 *   are expected to carry RELA entries, handled by apply_relocate_add),
 *   so this always logs an error and rejects the module
 */
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	printk(KERN_ERR "module %s: RELOCATION unsupported\n",
	       me->name);
	return -ENOEXEC;
}
/*
 * apply a RELA relocation
 *
 * Walk the relocation entries of section @relsec and patch the already
 * loaded section data.  Multi-byte immediates are stored little-endian
 * and possibly unaligned, hence the reloc_put* byte helpers.
 * R_MN10300_SYM_DIFF adjusts the immediately following relocation (a
 * relaxation artifact); R_MN10300_ALIGN entries are padding hints and
 * are skipped.
 *
 * Returns 0 on success, -ENOEXEC on an unknown or ill-formed entry.
 *
 * Fix vs. previous revision: when a SYM_DIFF entry was the *last* entry
 * in the table, the error path read rel[i] with i one past the end of
 * the table (out-of-bounds).  The offending entry is necessarily the
 * last one processed, so report rel[i - 1] instead.
 */
int apply_relocate_add(Elf32_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i, sym_diff_seen = 0;
	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	Elf32_Addr relocation, sym_diff_val = 0;
	uint8_t *location;
	uint32_t value;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* this is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* this is the symbol the relocation is referring to (note that
		 * all undefined symbols have been resolved by the caller) */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		/* this is the adjustment to be made */
		relocation = sym->st_value + rel[i].r_addend;

		if (sym_diff_seen) {
			/* only a plain data reloc may consume a pending
			 * SYM_DIFF adjustment */
			switch (ELF32_R_TYPE(rel[i].r_info)) {
			case R_MN10300_32:
			case R_MN10300_24:
			case R_MN10300_16:
			case R_MN10300_8:
				relocation -= sym_diff_val;
				sym_diff_seen = 0;
				break;
			default:
				printk(KERN_ERR "module %s: Unexpected SYM_DIFF relocation: %u\n",
				       me->name, ELF32_R_TYPE(rel[i].r_info));
				return -ENOEXEC;
			}
		}

		switch (ELF32_R_TYPE(rel[i].r_info)) {
			/* for the first four relocation types, we simply
			 * store the adjustment at the location given */
		case R_MN10300_32:
			reloc_put32(location, relocation);
			break;
		case R_MN10300_24:
			reloc_put24(location, relocation);
			break;
		case R_MN10300_16:
			reloc_put16(location, relocation);
			break;
		case R_MN10300_8:
			*location = relocation;
			break;

			/* for the next three relocation types, we write the
			 * adjustment with the address subtracted over the
			 * value at the location given */
		case R_MN10300_PCREL32:
			value = relocation - (uint32_t) location;
			reloc_put32(location, value);
			break;
		case R_MN10300_PCREL16:
			value = relocation - (uint32_t) location;
			reloc_put16(location, value);
			break;
		case R_MN10300_PCREL8:
			*location = relocation - (uint32_t) location;
			break;

		case R_MN10300_SYM_DIFF:
			/* This is used to adjust the next reloc as required
			 * by relaxation. */
			sym_diff_seen = 1;
			sym_diff_val = sym->st_value;
			break;

		case R_MN10300_ALIGN:
			/* Just ignore the ALIGN relocs.
			 * Only interesting if kernel performed relaxation. */
			continue;

		default:
			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}

	if (sym_diff_seen) {
		/* i is one past the last entry here, so index i - 1: the
		 * dangling SYM_DIFF can only be the final entry processed */
		printk(KERN_ERR "module %s: Nothing follows SYM_DIFF relocation: %u\n",
		       me->name, ELF32_R_TYPE(rel[i - 1].r_info));
		return -ENOEXEC;
	}
	return 0;
}
/*
 * finish loading the module
 * - delegate to the generic helper that registers the module's bug
 *   table entries
 */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	return module_bug_finalize(hdr, sechdrs, me);
}
/*
 * finish clearing the module
 * - undo module_finalize(): remove the module's bug table entries
 */
void module_arch_cleanup(struct module *mod)
{
	module_bug_cleanup(mod);
}
| gpl-2.0 |
mikewadsten/asuswrt | release/src/router/jpeg/jidctred.c | 1337 | 13528 | /*
* jidctred.c
*
* Copyright (C) 1994-1998, Thomas G. Lane.
* This file is part of the Independent JPEG Group's software.
* For conditions of distribution and use, see the accompanying README file.
*
* This file contains inverse-DCT routines that produce reduced-size output:
* either 4x4, 2x2, or 1x1 pixels from an 8x8 DCT block.
*
* The implementation is based on the Loeffler, Ligtenberg and Moschytz (LL&M)
* algorithm used in jidctint.c. We simply replace each 8-to-8 1-D IDCT step
* with an 8-to-4 step that produces the four averages of two adjacent outputs
* (or an 8-to-2 step producing two averages of four outputs, for 2x2 output).
* These steps were derived by computing the corresponding values at the end
* of the normal LL&M code, then simplifying as much as possible.
*
* 1x1 is trivial: just take the DC coefficient divided by 8.
*
* See jidctint.c for additional comments.
*/
#define JPEG_INTERNALS
#include "jinclude.h"
#include "jpeglib.h"
#include "jdct.h" /* Private declarations for DCT subsystem */
#ifdef IDCT_SCALING_SUPPORTED
/*
* This module is specialized to the case DCTSIZE = 8.
*/
#if DCTSIZE != 8
Sorry, this code only copes with 8x8 DCTs. /* deliberate syntax err */
#endif
/* Scaling is the same as in jidctint.c. */
#if BITS_IN_JSAMPLE == 8
#define CONST_BITS 13
#define PASS1_BITS 2
#else
#define CONST_BITS 13
#define PASS1_BITS 1 /* lose a little precision to avoid overflow */
#endif
/* Some C compilers fail to reduce "FIX(constant)" at compile time, thus
* causing a lot of useless floating-point operations at run time.
* To get around this we use the following pre-calculated constants.
* If you change CONST_BITS you may want to add appropriate values.
* (With a reasonable C compiler, you can just rely on the FIX() macro...)
*/
#if CONST_BITS == 13
#define FIX_0_211164243 ((INT32) 1730) /* FIX(0.211164243) */
#define FIX_0_509795579 ((INT32) 4176) /* FIX(0.509795579) */
#define FIX_0_601344887 ((INT32) 4926) /* FIX(0.601344887) */
#define FIX_0_720959822 ((INT32) 5906) /* FIX(0.720959822) */
#define FIX_0_765366865 ((INT32) 6270) /* FIX(0.765366865) */
#define FIX_0_850430095 ((INT32) 6967) /* FIX(0.850430095) */
#define FIX_0_899976223 ((INT32) 7373) /* FIX(0.899976223) */
#define FIX_1_061594337 ((INT32) 8697) /* FIX(1.061594337) */
#define FIX_1_272758580 ((INT32) 10426) /* FIX(1.272758580) */
#define FIX_1_451774981 ((INT32) 11893) /* FIX(1.451774981) */
#define FIX_1_847759065 ((INT32) 15137) /* FIX(1.847759065) */
#define FIX_2_172734803 ((INT32) 17799) /* FIX(2.172734803) */
#define FIX_2_562915447 ((INT32) 20995) /* FIX(2.562915447) */
#define FIX_3_624509785 ((INT32) 29692) /* FIX(3.624509785) */
#else
#define FIX_0_211164243 FIX(0.211164243)
#define FIX_0_509795579 FIX(0.509795579)
#define FIX_0_601344887 FIX(0.601344887)
#define FIX_0_720959822 FIX(0.720959822)
#define FIX_0_765366865 FIX(0.765366865)
#define FIX_0_850430095 FIX(0.850430095)
#define FIX_0_899976223 FIX(0.899976223)
#define FIX_1_061594337 FIX(1.061594337)
#define FIX_1_272758580 FIX(1.272758580)
#define FIX_1_451774981 FIX(1.451774981)
#define FIX_1_847759065 FIX(1.847759065)
#define FIX_2_172734803 FIX(2.172734803)
#define FIX_2_562915447 FIX(2.562915447)
#define FIX_3_624509785 FIX(3.624509785)
#endif
/* Multiply an INT32 variable by an INT32 constant to yield an INT32 result.
* For 8-bit samples with the recommended scaling, all the variable
* and constant values involved are no more than 16 bits wide, so a
* 16x16->32 bit multiply can be used instead of a full 32x32 multiply.
* For 12-bit samples, a full 32-bit multiplication will be needed.
*/
#if BITS_IN_JSAMPLE == 8
#define MULTIPLY(var,const) MULTIPLY16C16(var,const)
#else
#define MULTIPLY(var,const) ((var) * (const))
#endif
/* Dequantize a coefficient by multiplying it by the multiplier-table
* entry; produce an int result. In this module, both inputs and result
* are 16 bits or less, so either int or short multiply will work.
*/
#define DEQUANTIZE(coef,quantval) (((ISLOW_MULT_TYPE) (coef)) * (quantval))
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 4x4 output block.
 *
 * Pass 1 runs a scaled 8-to-4 column IDCT into the integer workspace
 * (column 4 is skipped since pass 2 never reads it); pass 2 runs the
 * same 8-to-4 transform across the four workspace rows, range-limits
 * the samples and stores them at output_buf[row] + output_col.
 * All arithmetic is fixed-point; see jidctint.c for scaling details.
 */
GLOBAL(void)
jpeg_idct_4x4 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
	       JCOEFPTR coef_block,
	       JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp2, tmp10, tmp12;
  INT32 z1, z2, z3, z4;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[DCTSIZE*4];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = DCTSIZE; ctr > 0; inptr++, quantptr++, wsptr++, ctr--) {
    /* Don't bother to process column 4, because second pass won't use it */
    if (ctr == DCTSIZE-4)
      continue;
    if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 &&
	inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*5] == 0 &&
	inptr[DCTSIZE*6] == 0 && inptr[DCTSIZE*7] == 0) {
      /* AC terms all zero; we need not examine term 4 for 4x4 output */
      int dcval = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) << PASS1_BITS;

      wsptr[DCTSIZE*0] = dcval;
      wsptr[DCTSIZE*1] = dcval;
      wsptr[DCTSIZE*2] = dcval;
      wsptr[DCTSIZE*3] = dcval;

      continue;
    }

    /* Even part */

    tmp0 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp0 <<= (CONST_BITS+1);

    z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    tmp2 = MULTIPLY(z2, FIX_1_847759065) + MULTIPLY(z3, - FIX_0_765366865);

    tmp10 = tmp0 + tmp2;
    tmp12 = tmp0 - tmp2;

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    z2 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    z4 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);

    tmp0 = MULTIPLY(z1, - FIX_0_211164243) /* sqrt(2) * (c3-c1) */
	 + MULTIPLY(z2, FIX_1_451774981) /* sqrt(2) * (c3+c7) */
	 + MULTIPLY(z3, - FIX_2_172734803) /* sqrt(2) * (-c1-c5) */
	 + MULTIPLY(z4, FIX_1_061594337); /* sqrt(2) * (c5+c7) */

    tmp2 = MULTIPLY(z1, - FIX_0_509795579) /* sqrt(2) * (c7-c5) */
	 + MULTIPLY(z2, - FIX_0_601344887) /* sqrt(2) * (c5-c1) */
	 + MULTIPLY(z3, FIX_0_899976223) /* sqrt(2) * (c3-c7) */
	 + MULTIPLY(z4, FIX_2_562915447); /* sqrt(2) * (c1+c3) */

    /* Final output stage */

    wsptr[DCTSIZE*0] = (int) DESCALE(tmp10 + tmp2, CONST_BITS-PASS1_BITS+1);
    wsptr[DCTSIZE*3] = (int) DESCALE(tmp10 - tmp2, CONST_BITS-PASS1_BITS+1);
    wsptr[DCTSIZE*1] = (int) DESCALE(tmp12 + tmp0, CONST_BITS-PASS1_BITS+1);
    wsptr[DCTSIZE*2] = (int) DESCALE(tmp12 - tmp0, CONST_BITS-PASS1_BITS+1);
  }

  /* Pass 2: process 4 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 4; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* It's not clear whether a zero row test is worthwhile here ... */

#ifndef NO_ZERO_ROW_TEST
    if (wsptr[1] == 0 && wsptr[2] == 0 && wsptr[3] == 0 &&
	wsptr[5] == 0 && wsptr[6] == 0 && wsptr[7] == 0) {
      /* AC terms all zero */
      JSAMPLE dcval = range_limit[(int) DESCALE((INT32) wsptr[0], PASS1_BITS+3)
				  & RANGE_MASK];

      outptr[0] = dcval;
      outptr[1] = dcval;
      outptr[2] = dcval;
      outptr[3] = dcval;

      wsptr += DCTSIZE;		/* advance pointer to next row */
      continue;
    }
#endif

    /* Even part */

    tmp0 = ((INT32) wsptr[0]) << (CONST_BITS+1);

    tmp2 = MULTIPLY((INT32) wsptr[2], FIX_1_847759065)
	 + MULTIPLY((INT32) wsptr[6], - FIX_0_765366865);

    tmp10 = tmp0 + tmp2;
    tmp12 = tmp0 - tmp2;

    /* Odd part */

    z1 = (INT32) wsptr[7];
    z2 = (INT32) wsptr[5];
    z3 = (INT32) wsptr[3];
    z4 = (INT32) wsptr[1];

    tmp0 = MULTIPLY(z1, - FIX_0_211164243) /* sqrt(2) * (c3-c1) */
	 + MULTIPLY(z2, FIX_1_451774981) /* sqrt(2) * (c3+c7) */
	 + MULTIPLY(z3, - FIX_2_172734803) /* sqrt(2) * (-c1-c5) */
	 + MULTIPLY(z4, FIX_1_061594337); /* sqrt(2) * (c5+c7) */

    tmp2 = MULTIPLY(z1, - FIX_0_509795579) /* sqrt(2) * (c7-c5) */
	 + MULTIPLY(z2, - FIX_0_601344887) /* sqrt(2) * (c5-c1) */
	 + MULTIPLY(z3, FIX_0_899976223) /* sqrt(2) * (c3-c7) */
	 + MULTIPLY(z4, FIX_2_562915447); /* sqrt(2) * (c1+c3) */

    /* Final output stage */

    outptr[0] = range_limit[(int) DESCALE(tmp10 + tmp2,
					  CONST_BITS+PASS1_BITS+3+1)
			    & RANGE_MASK];
    outptr[3] = range_limit[(int) DESCALE(tmp10 - tmp2,
					  CONST_BITS+PASS1_BITS+3+1)
			    & RANGE_MASK];
    outptr[1] = range_limit[(int) DESCALE(tmp12 + tmp0,
					  CONST_BITS+PASS1_BITS+3+1)
			    & RANGE_MASK];
    outptr[2] = range_limit[(int) DESCALE(tmp12 - tmp0,
					  CONST_BITS+PASS1_BITS+3+1)
			    & RANGE_MASK];

    wsptr += DCTSIZE;		/* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 2x2 output block.
 *
 * Same two-pass structure as jpeg_idct_4x4, but each 1-D step is an
 * 8-to-2 transform: only the DC and the four odd coefficients
 * contribute, so columns/rows 2, 4 and 6 are skipped entirely.
 */
GLOBAL(void)
jpeg_idct_2x2 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
	       JCOEFPTR coef_block,
	       JSAMPARRAY output_buf, JDIMENSION output_col)
{
  INT32 tmp0, tmp10, z1;
  JCOEFPTR inptr;
  ISLOW_MULT_TYPE * quantptr;
  int * wsptr;
  JSAMPROW outptr;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int ctr;
  int workspace[DCTSIZE*2];	/* buffers data between passes */
  SHIFT_TEMPS

  /* Pass 1: process columns from input, store into work array. */

  inptr = coef_block;
  quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  wsptr = workspace;
  for (ctr = DCTSIZE; ctr > 0; inptr++, quantptr++, wsptr++, ctr--) {
    /* Don't bother to process columns 2,4,6 */
    if (ctr == DCTSIZE-2 || ctr == DCTSIZE-4 || ctr == DCTSIZE-6)
      continue;
    if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*3] == 0 &&
	inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*7] == 0) {
      /* AC terms all zero; we need not examine terms 2,4,6 for 2x2 output */
      int dcval = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) << PASS1_BITS;

      wsptr[DCTSIZE*0] = dcval;
      wsptr[DCTSIZE*1] = dcval;

      continue;
    }

    /* Even part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    tmp10 = z1 << (CONST_BITS+2);

    /* Odd part */

    z1 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    tmp0 = MULTIPLY(z1, - FIX_0_720959822); /* sqrt(2) * (c7-c5+c3-c1) */
    z1 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    tmp0 += MULTIPLY(z1, FIX_0_850430095); /* sqrt(2) * (-c1+c3+c5+c7) */
    z1 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    tmp0 += MULTIPLY(z1, - FIX_1_272758580); /* sqrt(2) * (-c1+c3-c5-c7) */
    z1 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);
    tmp0 += MULTIPLY(z1, FIX_3_624509785); /* sqrt(2) * (c1+c3+c5+c7) */

    /* Final output stage */

    wsptr[DCTSIZE*0] = (int) DESCALE(tmp10 + tmp0, CONST_BITS-PASS1_BITS+2);
    wsptr[DCTSIZE*1] = (int) DESCALE(tmp10 - tmp0, CONST_BITS-PASS1_BITS+2);
  }

  /* Pass 2: process 2 rows from work array, store into output array. */

  wsptr = workspace;
  for (ctr = 0; ctr < 2; ctr++) {
    outptr = output_buf[ctr] + output_col;
    /* It's not clear whether a zero row test is worthwhile here ... */

#ifndef NO_ZERO_ROW_TEST
    if (wsptr[1] == 0 && wsptr[3] == 0 && wsptr[5] == 0 && wsptr[7] == 0) {
      /* AC terms all zero */
      JSAMPLE dcval = range_limit[(int) DESCALE((INT32) wsptr[0], PASS1_BITS+3)
				  & RANGE_MASK];

      outptr[0] = dcval;
      outptr[1] = dcval;

      wsptr += DCTSIZE;		/* advance pointer to next row */
      continue;
    }
#endif

    /* Even part */

    tmp10 = ((INT32) wsptr[0]) << (CONST_BITS+2);

    /* Odd part */

    tmp0 = MULTIPLY((INT32) wsptr[7], - FIX_0_720959822) /* sqrt(2) * (c7-c5+c3-c1) */
	 + MULTIPLY((INT32) wsptr[5], FIX_0_850430095) /* sqrt(2) * (-c1+c3+c5+c7) */
	 + MULTIPLY((INT32) wsptr[3], - FIX_1_272758580) /* sqrt(2) * (-c1+c3-c5-c7) */
	 + MULTIPLY((INT32) wsptr[1], FIX_3_624509785); /* sqrt(2) * (c1+c3+c5+c7) */

    /* Final output stage */

    outptr[0] = range_limit[(int) DESCALE(tmp10 + tmp0,
					  CONST_BITS+PASS1_BITS+3+2)
			    & RANGE_MASK];
    outptr[1] = range_limit[(int) DESCALE(tmp10 - tmp0,
					  CONST_BITS+PASS1_BITS+3+2)
			    & RANGE_MASK];

    wsptr += DCTSIZE;		/* advance pointer to next row */
  }
}
/*
 * Perform dequantization and inverse DCT on one block of coefficients,
 * producing a reduced-size 1x1 output block.
 *
 * No transform is needed: the single output sample is the average pixel
 * value, i.e. the dequantized DC coefficient divided by 8, clamped via
 * the range-limit table.
 */
GLOBAL(void)
jpeg_idct_1x1 (j_decompress_ptr cinfo, jpeg_component_info * compptr,
	       JCOEFPTR coef_block,
	       JSAMPARRAY output_buf, JDIMENSION output_col)
{
  ISLOW_MULT_TYPE * quantptr = (ISLOW_MULT_TYPE *) compptr->dct_table;
  JSAMPLE *range_limit = IDCT_range_limit(cinfo);
  int dcval;
  SHIFT_TEMPS

  /* Dequantize the DC term and divide by DCTSIZE (2^3) with rounding. */
  dcval = (int) DESCALE((INT32) DEQUANTIZE(coef_block[0], quantptr[0]), 3);

  output_buf[0][output_col] = range_limit[dcval & RANGE_MASK];
}
#endif /* IDCT_SCALING_SUPPORTED */
| gpl-2.0 |
shminer/android_kernel_flounder | arch/powerpc/kernel/iommu.c | 1849 | 22806 | /*
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
*
* Rewrite, cleanup, new allocation schemes, virtual merging:
* Copyright (C) 2004 Olof Johansson, IBM Corporation
* and Ben. Herrenschmidt, IBM Corporation
*
* Dynamic DMA mapping support, bus-independent parts.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <linux/hash.h>
#include <linux/fault-inject.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
#include <asm/vio.h>
#define DBG(...)
static int novmerge;
static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
/*
 * Parse the "iommu=" boot parameter: "novmerge" disables virtual
 * merging of adjacent scatterlist entries, "vmerge" (the default)
 * enables it.  Anything else leaves the setting untouched.
 */
static int __init setup_iommu(char *str)
{
	if (strcmp(str, "novmerge") == 0)
		novmerge = 1;
	else if (strcmp(str, "vmerge") == 0)
		novmerge = 0;

	return 1;
}
__setup("iommu=", setup_iommu);
static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
/*
 * We precalculate the hash to avoid doing it on every allocation.
 *
 * The hash is important to spread CPUs across all the pools. For example,
 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
 * with 4 pools all primary threads would map to the same pool.
 */
static int __init setup_iommu_pool_hash(void)
{
	unsigned int i;

	/* one precomputed hash per possible CPU, fixed for the boot */
	for_each_possible_cpu(i)
		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);

	return 0;
}
subsys_initcall(setup_iommu_pool_hash);
#ifdef CONFIG_FAIL_IOMMU
static DECLARE_FAULT_ATTR(fail_iommu);
/* Parse the "fail_iommu=" boot parameter into the fault-injection attrs. */
static int __init setup_fail_iommu(char *str)
{
	return setup_fault_attr(&fail_iommu, str);
}
__setup("fail_iommu=", setup_fail_iommu);
/*
 * Decide whether fault injection should fail this device's allocation:
 * the device must have opted in (fail_iommu sysfs attribute) AND the
 * global fault-injection attributes must fire.
 */
static bool should_fail_iommu(struct device *dev)
{
	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
}
/* Expose the fault-injection control knobs under debugfs ("fail_iommu"). */
static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_RET(dir);
}
late_initcall(fail_iommu_debugfs);
/* sysfs read: report this device's fail_iommu opt-in flag. */
static ssize_t fail_iommu_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
}
/*
 * sysfs write: set this device's fail_iommu opt-in flag.  Any non-zero
 * integer enables injection for the device; zero disables it.  Input
 * that doesn't parse as an integer is silently ignored.
 */
static ssize_t fail_iommu_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	int val;

	if (count > 0 && sscanf(buf, "%d", &val) > 0)
		dev->archdata.fail_iommu = (val != 0);

	return count;
}
static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
fail_iommu_store);
/*
 * Bus notifier: create the per-device fail_iommu sysfs attribute when a
 * device is added and remove it when the device goes away.  All other
 * notifications are ignored.
 */
static int fail_iommu_bus_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		if (device_create_file(dev, &dev_attr_fail_iommu))
			pr_warn("Unable to create IOMMU fault injection sysfs "
				"entries\n");
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		device_remove_file(dev, &dev_attr_fail_iommu);
		break;
	}

	return 0;
}
static struct notifier_block fail_iommu_bus_notifier = {
.notifier_call = fail_iommu_bus_notify
};
/* Register the bus notifiers that add/remove the per-device fail_iommu
 * sysfs attribute as PCI and VIO devices come and go. */
static int __init fail_iommu_setup(void)
{
#ifdef CONFIG_PCI
	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
#endif
#ifdef CONFIG_IBMVIO
	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
#endif

	return 0;
}
/*
 * Must execute after PCI and VIO subsystem have initialised but before
 * devices are probed.
 */
arch_initcall(fail_iommu_setup);
#else
/* CONFIG_FAIL_IOMMU disabled: fault injection never fires. */
static inline bool should_fail_iommu(struct device *dev)
{
	return false;
}
#endif
/*
 * Allocate a run of @npages contiguous entries from the table's bitmap.
 *
 * The table is split into several pools, each with its own lock and
 * allocation hint, to spread lock contention across CPUs; allocations
 * of more than 15 pages go to a dedicated large pool.  @mask bounds the
 * highest usable DMA address, @align_order the required alignment of
 * the returned entry, and @handle (when non-NULL) carries the previous
 * allocation's end as a starting hint for scatterlist streams.
 *
 * Returns the first allocated entry number (table-relative, i.e.
 * without it_offset) or DMA_ERROR_CODE on failure.
 */
static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;
	unsigned long flags;
	unsigned int pool_nr;
	struct iommu_pool *pool;

	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (should_fail_iommu(dev))
		return DMA_ERROR_CODE;

	/*
	 * We don't need to disable preemption here because any CPU can
	 * safely use any IOMMU pool.
	 */
	pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);

	if (largealloc)
		pool = &(tbl->large_pool);
	else
		pool = &(tbl->pools[pool_nr]);

	spin_lock_irqsave(&(pool->lock), flags);

again:
	/* prefer the caller's stream handle over the pool hint on the
	 * first pass, provided it still lies inside this pool */
	if ((pass == 0) && handle && *handle &&
	    (*handle >= pool->start) && (*handle < pool->end))
		start = *handle;
	else
		start = pool->hint;

	limit = pool->end;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = pool->start;

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0 in pool 0.
		 */
		if ((start & mask) >= limit || pass > 0) {
			/* note: the irqsave state from the original pool
			 * lock is kept; only the plain lock moves */
			spin_unlock(&(pool->lock));
			pool = &(tbl->pools[0]);
			spin_lock(&(pool->lock));
			start = pool->start;
		} else {
			start &= mask;
		}
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass == 0)) {
			/* First try the pool from the start */
			pool->hint = pool->start;
			pass++;
			goto again;

		} else if (pass <= tbl->nr_pools) {
			/* Now try scanning all the other pools */
			spin_unlock(&(pool->lock));
			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
			pool = &tbl->pools[pool_nr];
			spin_lock(&(pool->lock));
			pool->hint = pool->start;
			pass++;
			goto again;

		} else {
			/* Give up */
			spin_unlock_irqrestore(&(pool->lock), flags);
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		pool->hint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		pool->hint = (end + tbl->it_blocksize - 1) &
			~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	spin_unlock_irqrestore(&(pool->lock), flags);

	return n;
}
/*
 * Allocate @npages table entries and program them to map @page for DMA
 * in @direction.  Returns the resulting DMA address, or DMA_ERROR_CODE
 * if no table space was available or the hardware TCE build failed
 * transiently.
 */
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE))
		return DMA_ERROR_CODE;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
	                              (unsigned long)page & IOMMU_PAGE_MASK,
	                              direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
/*
 * Validate that a free of @npages at @dma_addr lies entirely inside the
 * table.  On a bad range, dump the table parameters (rate-limited) and
 * return false so the caller skips the free instead of corrupting the
 * bitmap.
 */
static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
			     unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}

		return false;
	}

	return true;
}
/*
 * Map a table entry number to the pool that owns it.  Entries at or
 * above the large pool's start belong to the large pool (which sits at
 * the top of the table); everything below is divided evenly among the
 * small pools.
 */
static struct iommu_pool *get_pool(struct iommu_table *tbl,
				   unsigned long entry)
{
	unsigned int pool_nr;

	if (entry >= tbl->large_pool.start)
		return &tbl->large_pool;

	pool_nr = entry / tbl->poolsize;
	BUG_ON(pool_nr > tbl->nr_pools);
	return &tbl->pools[pool_nr];
}
/*
 * Release @npages table entries starting at @dma_addr: invalidate the
 * hardware TCEs and clear the bitmap under the owning pool's lock.
 * Does NOT flush the TLB cache - callers that need it use iommu_free().
 * A range that fails validation is silently skipped (already logged).
 */
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long flags;
	struct iommu_pool *pool;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	pool = get_pool(tbl, free_entry);

	if (!iommu_free_check(tbl, dma_addr, npages))
		return;

	ppc_md.tce_free(tbl, entry, npages);

	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(tbl->it_map, free_entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
/*
 * Free @npages entries at @dma_addr and flush the TLB cache if the
 * platform requires it.
 */
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}
/*
 * Map a scatterlist for DMA through the IOMMU.
 *
 * Walks @sglist, allocating and programming TCEs for each segment, and
 * virtually merges adjacent segments into one DMA segment when allowed
 * ("novmerge" boot option off, DMA addresses contiguous, and the merged
 * length within the device's max segment size).
 *
 * Returns the number of resulting DMA segments (<= @nelems), or 0 on
 * failure after unwinding every mapping made so far.
 */
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
		align = 0;
		/* request natural page alignment when a whole kernel page
		 * is being mapped and kernel pages are bigger than IOMMU
		 * pages */
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
		                              vaddr & IOMMU_PAGE_MASK,
		                              direction, attrs);
		if(unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	/* back out every mapping created before the failing element */
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	return 0;
}
/*
 * Unmap a scatterlist previously mapped by iommu_map_sg().  Walks the
 * list freeing each DMA segment; a zero dma_length marks the end of the
 * mapped portion (see the terminator written by iommu_map_sg), so the
 * walk may stop before @nelems entries.
 */
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}
/*
 * Prepare the TCE entries of a new table.  Normally the table is simply
 * wiped; in a kdump kernel the first kernel's live mappings must be
 * preserved (in-flight DMA may still target them), so they are marked
 * allocated in the bitmap instead.
 */
static void iommu_table_clear(struct iommu_table *tbl)
{
	/*
	 * In case of firmware assisted dump system goes through clean
	 * reboot process at the time of system crash. Hence it's safe to
	 * clear the TCE entries if firmware assisted dump is active.
	 */
	if (!is_kdump_kernel() || is_fadump_active()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		/* Keep a minimum number of entries free so the kdump
		 * kernel can still do I/O of its own. */
		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
			       KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
			     index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}
/*
* Build a iommu_table structure. This contains a bit map which
* is used to manage allocation of the tce space.
*/
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;	/* print the banner only once */
	struct page *page;
	unsigned int i;
	struct iommu_pool *p;

	/* number of bytes needed for the bitmap */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);

	/* The table is unusable without its bitmap, so failure is fatal. */
	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	/*
	 * Reserve page 0 so it will not be used for any mappings.
	 * This avoids buggy drivers that consider page 0 to be invalid
	 * to crash the machine or even lose data.
	 */
	if (tbl->it_offset == 0)
		set_bit(0, tbl->it_map);

	/* We only split the IOMMU table if we have 1GB or more of space */
	if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024))
		tbl->nr_pools = IOMMU_NR_POOLS;
	else
		tbl->nr_pools = 1;

	/* We reserve the top 1/4 of the table for large allocations */
	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;

	/* Carve the lower 3/4 into equally sized regular pools. */
	for (i = 0; i < tbl->nr_pools; i++) {
		p = &tbl->pools[i];
		spin_lock_init(&(p->lock));
		p->start = tbl->poolsize * i;
		p->hint = p->start;
		p->end = p->start + tbl->poolsize;
	}

	/* The large pool covers everything after the regular pools. */
	p = &tbl->large_pool;
	spin_lock_init(&(p->lock));
	p->start = tbl->poolsize * i;
	p->hint = p->start;
	p->end = tbl->it_size;

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}
/*
 * Release an iommu_table built by iommu_init_table(): free its
 * allocation bitmap and then the table itself.  Warns if any TCE
 * entries are still marked allocated.
 */
void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long sz;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
				node_name);
		return;
	}

	/*
	 * In case we have reserved the first bit, we should not emit
	 * the warning below.
	 */
	if (tbl->it_offset == 0)
		clear_bit(0, tbl->it_map);

	/* verify that table contains no entries */
	if (!bitmap_empty(tbl->it_map, tbl->it_size))
		pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);

	/* release the bitmap pages, then the table structure itself */
	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
	free_pages((unsigned long) tbl->it_map, get_order(sz));

	kfree(tbl);
}
/* Creates TCEs for a user provided buffer. The user buffer must be
* contiguous real kernel storage (not vmalloc). The address passed here
* comprises a page address and offset into that page. The dma_addr_t
* returned will point to the same byte within the page as was passed in.
*/
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
		/* If IOMMU pages are smaller than system pages and the
		 * buffer is system-page aligned and at least a page long,
		 * request a system-page-aligned IO address. */
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %p npages %d\n", tbl, vaddr,
					 npages);
			}
		} else
			/* Re-attach the intra-page byte offset. */
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}
/*
 * Release the TCE entries created by iommu_map_page() for a single
 * mapping.  A NULL table (direct DMA) means there is nothing to undo.
 */
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	iommu_free(tbl, dma_handle,
		   iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE));
}
/* Allocates a contiguous real buffer and creates mappings over it.
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (mapping) of the first page.
*/
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space. This is checked later
	 * anyway. It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
			 size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		/* No TCE space: give the pages back and fail the call. */
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}
/*
 * Tear down a buffer created by iommu_alloc_coherent(): release the
 * TCE entries covering it, then return the pages to the allocator.
 * A NULL table means no mapping was ever made, so nothing to do.
 */
void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		/* Mirror the alignment done at allocation time so the
		 * TCE count and the page order both match.  (The old
		 * code re-applied PAGE_ALIGN a second time before
		 * free_pages(); that was a no-op on an already aligned
		 * value and has been dropped.) */
		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
| gpl-2.0 |
Pesach85/PH85-KERNEL | arch/arm/mach-tegra/usb_phy.c | 2361 | 20421 | /*
* arch/arm/mach-tegra/usb_phy.c
*
* Copyright (C) 2010 Google, Inc.
*
* Author:
* Erik Gilling <konkers@google.com>
* Benoit Goby <benoit@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/resource.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
#include <asm/mach-types.h>
#include <mach/usb_phy.h>
#include <mach/iomap.h>
#define ULPI_VIEWPORT 0x170
#define USB_PORTSC1 0x184
#define USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
#define USB_PORTSC1_PSPD(x) (((x) & 0x3) << 26)
#define USB_PORTSC1_PHCD (1 << 23)
#define USB_PORTSC1_WKOC (1 << 22)
#define USB_PORTSC1_WKDS (1 << 21)
#define USB_PORTSC1_WKCN (1 << 20)
#define USB_PORTSC1_PTC(x) (((x) & 0xf) << 16)
#define USB_PORTSC1_PP (1 << 12)
#define USB_PORTSC1_SUSP (1 << 7)
#define USB_PORTSC1_PE (1 << 2)
#define USB_PORTSC1_CCS (1 << 0)
#define USB_SUSP_CTRL 0x400
#define USB_WAKE_ON_CNNT_EN_DEV (1 << 3)
#define USB_WAKE_ON_DISCON_EN_DEV (1 << 4)
#define USB_SUSP_CLR (1 << 5)
#define USB_PHY_CLK_VALID (1 << 7)
#define UTMIP_RESET (1 << 11)
#define UHSIC_RESET (1 << 11)
#define UTMIP_PHY_ENABLE (1 << 12)
#define ULPI_PHY_ENABLE (1 << 13)
#define USB_SUSP_SET (1 << 14)
#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
#define USB1_LEGACY_CTRL 0x410
#define USB1_NO_LEGACY_MODE (1 << 0)
#define USB1_VBUS_SENSE_CTL_MASK (3 << 1)
#define USB1_VBUS_SENSE_CTL_VBUS_WAKEUP (0 << 1)
#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD_OR_VBUS_WAKEUP \
(1 << 1)
#define USB1_VBUS_SENSE_CTL_AB_SESS_VLD (2 << 1)
#define USB1_VBUS_SENSE_CTL_A_SESS_VLD (3 << 1)
#define ULPI_TIMING_CTRL_0 0x424
#define ULPI_OUTPUT_PINMUX_BYP (1 << 10)
#define ULPI_CLKOUT_PINMUX_BYP (1 << 11)
#define ULPI_TIMING_CTRL_1 0x428
#define ULPI_DATA_TRIMMER_LOAD (1 << 0)
#define ULPI_DATA_TRIMMER_SEL(x) (((x) & 0x7) << 1)
#define ULPI_STPDIRNXT_TRIMMER_LOAD (1 << 16)
#define ULPI_STPDIRNXT_TRIMMER_SEL(x) (((x) & 0x7) << 17)
#define ULPI_DIR_TRIMMER_LOAD (1 << 24)
#define ULPI_DIR_TRIMMER_SEL(x) (((x) & 0x7) << 25)
#define UTMIP_PLL_CFG1 0x804
#define UTMIP_XTAL_FREQ_COUNT(x) (((x) & 0xfff) << 0)
#define UTMIP_PLLU_ENABLE_DLY_COUNT(x) (((x) & 0x1f) << 27)
#define UTMIP_XCVR_CFG0 0x808
#define UTMIP_XCVR_SETUP(x) (((x) & 0xf) << 0)
#define UTMIP_XCVR_LSRSLEW(x) (((x) & 0x3) << 8)
#define UTMIP_XCVR_LSFSLEW(x) (((x) & 0x3) << 10)
#define UTMIP_FORCE_PD_POWERDOWN (1 << 14)
#define UTMIP_FORCE_PD2_POWERDOWN (1 << 16)
#define UTMIP_FORCE_PDZI_POWERDOWN (1 << 18)
#define UTMIP_XCVR_HSSLEW_MSB(x) (((x) & 0x7f) << 25)
#define UTMIP_BIAS_CFG0 0x80c
#define UTMIP_OTGPD (1 << 11)
#define UTMIP_BIASPD (1 << 10)
#define UTMIP_HSRX_CFG0 0x810
#define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10)
#define UTMIP_IDLE_WAIT(x) (((x) & 0x1f) << 15)
#define UTMIP_HSRX_CFG1 0x814
#define UTMIP_HS_SYNC_START_DLY(x) (((x) & 0x1f) << 1)
#define UTMIP_TX_CFG0 0x820
#define UTMIP_FS_PREABMLE_J (1 << 19)
#define UTMIP_HS_DISCON_DISABLE (1 << 8)
#define UTMIP_MISC_CFG0 0x824
#define UTMIP_DPDM_OBSERVE (1 << 26)
#define UTMIP_DPDM_OBSERVE_SEL(x) (((x) & 0xf) << 27)
#define UTMIP_DPDM_OBSERVE_SEL_FS_J UTMIP_DPDM_OBSERVE_SEL(0xf)
#define UTMIP_DPDM_OBSERVE_SEL_FS_K UTMIP_DPDM_OBSERVE_SEL(0xe)
#define UTMIP_DPDM_OBSERVE_SEL_FS_SE1 UTMIP_DPDM_OBSERVE_SEL(0xd)
#define UTMIP_DPDM_OBSERVE_SEL_FS_SE0 UTMIP_DPDM_OBSERVE_SEL(0xc)
#define UTMIP_SUSPEND_EXIT_ON_EDGE (1 << 22)
#define UTMIP_MISC_CFG1 0x828
#define UTMIP_PLL_ACTIVE_DLY_COUNT(x) (((x) & 0x1f) << 18)
#define UTMIP_PLLU_STABLE_COUNT(x) (((x) & 0xfff) << 6)
#define UTMIP_DEBOUNCE_CFG0 0x82c
#define UTMIP_BIAS_DEBOUNCE_A(x) (((x) & 0xffff) << 0)
#define UTMIP_BAT_CHRG_CFG0 0x830
#define UTMIP_PD_CHRG (1 << 0)
#define UTMIP_SPARE_CFG0 0x834
#define FUSE_SETUP_SEL (1 << 3)
#define UTMIP_XCVR_CFG1 0x838
#define UTMIP_FORCE_PDDISC_POWERDOWN (1 << 0)
#define UTMIP_FORCE_PDCHRP_POWERDOWN (1 << 2)
#define UTMIP_FORCE_PDDR_POWERDOWN (1 << 4)
#define UTMIP_XCVR_TERM_RANGE_ADJ(x) (((x) & 0xf) << 18)
#define UTMIP_BIAS_CFG1 0x83c
#define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3)
/* Refcount and lock for the UTMIP bias pads, which are shared between
 * all UTMIP PHY instances on the SoC. */
static DEFINE_SPINLOCK(utmip_pad_lock);
static int utmip_pad_count;

/*
 * Per-crystal-rate UTMIP PLL timing parameters.  One tegra_freq_table
 * entry exists for each supported oscillator rate; the values are
 * written to UTMIP_PLL_CFG1, UTMIP_MISC_CFG1 and UTMIP_DEBOUNCE_CFG0
 * during utmi_phy_power_on().
 */
struct tegra_xtal_freq {
	int freq;		/* oscillator rate in Hz */
	u8 enable_delay;	/* PLLU enable delay count */
	u8 stable_count;	/* PLLU stable count */
	u8 active_delay;	/* PLL active delay count */
	u8 xtal_freq_count;	/* crystal frequency count */
	u16 debounce;		/* bias debounce A value */
};
/* Timing parameters for the four oscillator rates the PHY supports;
 * matched against the pll_u parent rate in tegra_usb_phy_open(). */
static const struct tegra_xtal_freq tegra_freq_table[] = {
	{
		.freq = 12000000,
		.enable_delay = 0x02,
		.stable_count = 0x2F,
		.active_delay = 0x04,
		.xtal_freq_count = 0x76,
		.debounce = 0x7530,
	},
	{
		.freq = 13000000,
		.enable_delay = 0x02,
		.stable_count = 0x33,
		.active_delay = 0x05,
		.xtal_freq_count = 0x7F,
		.debounce = 0x7EF4,
	},
	{
		.freq = 19200000,
		.enable_delay = 0x03,
		.stable_count = 0x4B,
		.active_delay = 0x06,
		.xtal_freq_count = 0xBB,
		.debounce = 0xBB80,
	},
	{
		.freq = 26000000,
		.enable_delay = 0x04,
		.stable_count = 0x66,
		.active_delay = 0x09,
		.xtal_freq_count = 0xFE,
		.debounce = 0xFDE8,
	},
};
/* Fallback UTMIP transceiver settings, indexed by controller instance
 * (0 and 2 are the UTMIP ports; instance 1 is ULPI and has no entry).
 * Used by tegra_usb_phy_open() when no board config is supplied. */
static struct tegra_utmip_config utmip_default[] = {
	[0] = {
		.hssync_start_delay = 9,
		.idle_wait_delay = 17,
		.elastic_limit = 16,
		.term_range_adj = 6,
		.xcvr_setup = 9,
		.xcvr_lsfslew = 1,
		.xcvr_lsrslew = 1,
	},
	[2] = {
		.hssync_start_delay = 9,
		.idle_wait_delay = 17,
		.elastic_limit = 16,
		.term_range_adj = 6,
		.xcvr_setup = 9,
		.xcvr_lsfslew = 2,
		.xcvr_lsrslew = 2,
	},
};
/* Controller instance 1 is wired to the ULPI PHY; 0 and 2 are UTMIP. */
static inline bool phy_is_ulpi(struct tegra_usb_phy *phy)
{
	return phy->instance == 1;
}
/*
 * Acquire what is needed to drive the shared UTMIP bias pads: the pad
 * clock plus a mapping of the register window that holds the pad
 * config.  Instance 0's own window already covers it; other instances
 * map it separately (unmapped again in utmip_pad_close()).
 */
static int utmip_pad_open(struct tegra_usb_phy *phy)
{
	phy->pad_clk = clk_get_sys("utmip-pad", NULL);
	if (IS_ERR(phy->pad_clk)) {
		pr_err("%s: can't get utmip pad clock\n", __func__);
		return PTR_ERR(phy->pad_clk);
	}

	if (phy->instance == 0) {
		phy->pad_regs = phy->regs;
	} else {
		phy->pad_regs = ioremap(TEGRA_USB_BASE, TEGRA_USB_SIZE);
		if (!phy->pad_regs) {
			pr_err("%s: can't remap usb registers\n", __func__);
			clk_put(phy->pad_clk);
			return -ENOMEM;
		}
	}
	return 0;
}
/* Undo utmip_pad_open(): drop the extra mapping (instances other than
 * 0 created one) and release the pad clock. */
static void utmip_pad_close(struct tegra_usb_phy *phy)
{
	if (phy->instance)
		iounmap(phy->pad_regs);
	clk_put(phy->pad_clk);
}
/*
 * Power up the shared UTMIP bias pads.  The global refcount (under
 * utmip_pad_lock) makes only the first user touch the hardware; the
 * pad clock is gated on just for the register access.
 */
static void utmip_pad_power_on(struct tegra_usb_phy *phy)
{
	unsigned long val, flags;
	void __iomem *base = phy->pad_regs;

	clk_enable(phy->pad_clk);

	spin_lock_irqsave(&utmip_pad_lock, flags);

	if (utmip_pad_count++ == 0) {
		/* First user: clear the OTG and bias power-down bits. */
		val = readl(base + UTMIP_BIAS_CFG0);
		val &= ~(UTMIP_OTGPD | UTMIP_BIASPD);
		writel(val, base + UTMIP_BIAS_CFG0);
	}

	spin_unlock_irqrestore(&utmip_pad_lock, flags);

	clk_disable(phy->pad_clk);
}
/*
 * Drop one reference on the shared UTMIP bias pads, powering them down
 * again when the last user goes away.  Returns -EINVAL on refcount
 * underflow (power_off without a matching power_on).
 */
static int utmip_pad_power_off(struct tegra_usb_phy *phy)
{
	unsigned long val, flags;
	void __iomem *base = phy->pad_regs;

	if (!utmip_pad_count) {
		pr_err("%s: utmip pad already powered off\n", __func__);
		return -EINVAL;
	}

	clk_enable(phy->pad_clk);

	spin_lock_irqsave(&utmip_pad_lock, flags);

	if (--utmip_pad_count == 0) {
		/* Last user gone: set the power-down bits again. */
		val = readl(base + UTMIP_BIAS_CFG0);
		val |= UTMIP_OTGPD | UTMIP_BIASPD;
		writel(val, base + UTMIP_BIAS_CFG0);
	}

	spin_unlock_irqrestore(&utmip_pad_lock, flags);

	clk_disable(phy->pad_clk);

	return 0;
}
/*
 * Busy-poll a register until (value & mask) == result, for at most
 * 2000 one-microsecond steps (~2ms).  Returns 0 on match, -1 on
 * timeout.
 */
static int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
{
	unsigned long remaining;

	for (remaining = 2000; remaining; remaining--) {
		if ((readl(reg) & mask) == result)
			return 0;
		udelay(1);
	}
	return -1;
}
/*
 * Stop the UTMIP PHY clock.  Instance 0 pulses the suspend-set bit in
 * USB_SUSP_CTRL; instance 2 uses the PORTSC PHCD bit.  Waits for
 * USB_PHY_CLK_VALID to drop before returning.
 */
static void utmi_phy_clk_disable(struct tegra_usb_phy *phy)
{
	unsigned long val;
	void __iomem *base = phy->regs;

	if (phy->instance == 0) {
		val = readl(base + USB_SUSP_CTRL);
		val |= USB_SUSP_SET;
		writel(val, base + USB_SUSP_CTRL);

		udelay(10);

		val = readl(base + USB_SUSP_CTRL);
		val &= ~USB_SUSP_SET;
		writel(val, base + USB_SUSP_CTRL);
	}

	if (phy->instance == 2) {
		val = readl(base + USB_PORTSC1);
		val |= USB_PORTSC1_PHCD;
		writel(val, base + USB_PORTSC1);
	}

	if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) < 0)
		pr_err("%s: timeout waiting for phy to stabilize\n", __func__);
}
/*
 * Restart the UTMIP PHY clock (mirror image of utmi_phy_clk_disable):
 * pulse the suspend-clear bit on instance 0, clear PHCD on instance 2,
 * then wait for USB_PHY_CLK_VALID to come up.
 */
static void utmi_phy_clk_enable(struct tegra_usb_phy *phy)
{
	unsigned long val;
	void __iomem *base = phy->regs;

	if (phy->instance == 0) {
		val = readl(base + USB_SUSP_CTRL);
		val |= USB_SUSP_CLR;
		writel(val, base + USB_SUSP_CTRL);

		udelay(10);

		val = readl(base + USB_SUSP_CTRL);
		val &= ~USB_SUSP_CLR;
		writel(val, base + USB_SUSP_CTRL);
	}

	if (phy->instance == 2) {
		val = readl(base + USB_PORTSC1);
		val &= ~USB_PORTSC1_PHCD;
		writel(val, base + USB_PORTSC1);
	}

	if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID,
						     USB_PHY_CLK_VALID))
		pr_err("%s: timeout waiting for phy to stabilize\n", __func__);
}
/*
 * Full power-up sequence for a UTMIP PHY: hold it in reset, program
 * the timing/transceiver registers from the board config and the
 * crystal-rate table, power the bias pads, release reset and wait for
 * the PHY clock.  The register order below follows the hardware
 * bring-up sequence and must not be rearranged.
 */
static int utmi_phy_power_on(struct tegra_usb_phy *phy)
{
	unsigned long val;
	void __iomem *base = phy->regs;
	struct tegra_utmip_config *config = phy->config;

	/* Assert PHY reset while reconfiguring it. */
	val = readl(base + USB_SUSP_CTRL);
	val |= UTMIP_RESET;
	writel(val, base + USB_SUSP_CTRL);

	if (phy->instance == 0) {
		val = readl(base + USB1_LEGACY_CTRL);
		val |= USB1_NO_LEGACY_MODE;
		writel(val, base + USB1_LEGACY_CTRL);
	}

	val = readl(base + UTMIP_TX_CFG0);
	val &= ~UTMIP_FS_PREABMLE_J;
	writel(val, base + UTMIP_TX_CFG0);

	/* Receive-path timing from the board configuration. */
	val = readl(base + UTMIP_HSRX_CFG0);
	val &= ~(UTMIP_IDLE_WAIT(~0) | UTMIP_ELASTIC_LIMIT(~0));
	val |= UTMIP_IDLE_WAIT(config->idle_wait_delay);
	val |= UTMIP_ELASTIC_LIMIT(config->elastic_limit);
	writel(val, base + UTMIP_HSRX_CFG0);

	val = readl(base + UTMIP_HSRX_CFG1);
	val &= ~UTMIP_HS_SYNC_START_DLY(~0);
	val |= UTMIP_HS_SYNC_START_DLY(config->hssync_start_delay);
	writel(val, base + UTMIP_HSRX_CFG1);

	/* PLL/debounce values from the matched crystal-rate entry. */
	val = readl(base + UTMIP_DEBOUNCE_CFG0);
	val &= ~UTMIP_BIAS_DEBOUNCE_A(~0);
	val |= UTMIP_BIAS_DEBOUNCE_A(phy->freq->debounce);
	writel(val, base + UTMIP_DEBOUNCE_CFG0);

	val = readl(base + UTMIP_MISC_CFG0);
	val &= ~UTMIP_SUSPEND_EXIT_ON_EDGE;
	writel(val, base + UTMIP_MISC_CFG0);

	val = readl(base + UTMIP_MISC_CFG1);
	val &= ~(UTMIP_PLL_ACTIVE_DLY_COUNT(~0) | UTMIP_PLLU_STABLE_COUNT(~0));
	val |= UTMIP_PLL_ACTIVE_DLY_COUNT(phy->freq->active_delay) |
		UTMIP_PLLU_STABLE_COUNT(phy->freq->stable_count);
	writel(val, base + UTMIP_MISC_CFG1);

	val = readl(base + UTMIP_PLL_CFG1);
	val &= ~(UTMIP_XTAL_FREQ_COUNT(~0) | UTMIP_PLLU_ENABLE_DLY_COUNT(~0));
	val |= UTMIP_XTAL_FREQ_COUNT(phy->freq->xtal_freq_count) |
		UTMIP_PLLU_ENABLE_DLY_COUNT(phy->freq->enable_delay);
	writel(val, base + UTMIP_PLL_CFG1);

	if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) {
		/* Device mode: leave connect/disconnect wakeups disabled. */
		val = readl(base + USB_SUSP_CTRL);
		val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV);
		writel(val, base + USB_SUSP_CTRL);
	}

	utmip_pad_power_on(phy);

	/* Un-powerdown the transceiver and load its tuning values. */
	val = readl(base + UTMIP_XCVR_CFG0);
	val &= ~(UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
		 UTMIP_FORCE_PDZI_POWERDOWN | UTMIP_XCVR_SETUP(~0) |
		 UTMIP_XCVR_LSFSLEW(~0) | UTMIP_XCVR_LSRSLEW(~0) |
		 UTMIP_XCVR_HSSLEW_MSB(~0));
	val |= UTMIP_XCVR_SETUP(config->xcvr_setup);
	val |= UTMIP_XCVR_LSFSLEW(config->xcvr_lsfslew);
	val |= UTMIP_XCVR_LSRSLEW(config->xcvr_lsrslew);
	writel(val, base + UTMIP_XCVR_CFG0);

	val = readl(base + UTMIP_XCVR_CFG1);
	val &= ~(UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
		 UTMIP_FORCE_PDDR_POWERDOWN | UTMIP_XCVR_TERM_RANGE_ADJ(~0));
	val |= UTMIP_XCVR_TERM_RANGE_ADJ(config->term_range_adj);
	writel(val, base + UTMIP_XCVR_CFG1);

	/* Take the battery-charger detection circuit out of power-down. */
	val = readl(base + UTMIP_BAT_CHRG_CFG0);
	val &= ~UTMIP_PD_CHRG;
	writel(val, base + UTMIP_BAT_CHRG_CFG0);

	val = readl(base + UTMIP_BIAS_CFG1);
	val &= ~UTMIP_BIAS_PDTRK_COUNT(~0);
	val |= UTMIP_BIAS_PDTRK_COUNT(0x5);
	writel(val, base + UTMIP_BIAS_CFG1);

	if (phy->instance == 0) {
		val = readl(base + UTMIP_SPARE_CFG0);
		if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE)
			val &= ~FUSE_SETUP_SEL;
		else
			val |= FUSE_SETUP_SEL;
		writel(val, base + UTMIP_SPARE_CFG0);
	}

	if (phy->instance == 2) {
		val = readl(base + USB_SUSP_CTRL);
		val |= UTMIP_PHY_ENABLE;
		writel(val, base + USB_SUSP_CTRL);
	}

	/* Release PHY reset. */
	val = readl(base + USB_SUSP_CTRL);
	val &= ~UTMIP_RESET;
	writel(val, base + USB_SUSP_CTRL);

	if (phy->instance == 0) {
		val = readl(base + USB1_LEGACY_CTRL);
		val &= ~USB1_VBUS_SENSE_CTL_MASK;
		val |= USB1_VBUS_SENSE_CTL_A_SESS_VLD;
		writel(val, base + USB1_LEGACY_CTRL);

		val = readl(base + USB_SUSP_CTRL);
		val &= ~USB_SUSP_SET;
		writel(val, base + USB_SUSP_CTRL);
	}

	utmi_phy_clk_enable(phy);

	if (phy->instance == 2) {
		/* Select the UTMI parallel transceiver in PORTSC. */
		val = readl(base + USB_PORTSC1);
		val &= ~USB_PORTSC1_PTS(~0);
		writel(val, base + USB_PORTSC1);
	}

	return 0;
}
/*
 * Power down a UTMIP PHY: stop its clock, assert reset, and force all
 * transceiver sections into power-down, then drop the bias-pad
 * reference.  In device mode, connect wakeup stays armed so cable
 * insertion can still be detected.
 */
static void utmi_phy_power_off(struct tegra_usb_phy *phy)
{
	unsigned long val;
	void __iomem *base = phy->regs;

	utmi_phy_clk_disable(phy);

	if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) {
		val = readl(base + USB_SUSP_CTRL);
		val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
		val |= USB_WAKE_ON_CNNT_EN_DEV | USB_WAKEUP_DEBOUNCE_COUNT(5);
		writel(val, base + USB_SUSP_CTRL);
	}

	val = readl(base + USB_SUSP_CTRL);
	val |= UTMIP_RESET;
	writel(val, base + USB_SUSP_CTRL);

	/* Power down the charger-detect and transceiver sections. */
	val = readl(base + UTMIP_BAT_CHRG_CFG0);
	val |= UTMIP_PD_CHRG;
	writel(val, base + UTMIP_BAT_CHRG_CFG0);

	val = readl(base + UTMIP_XCVR_CFG0);
	val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
	       UTMIP_FORCE_PDZI_POWERDOWN;
	writel(val, base + UTMIP_XCVR_CFG0);

	val = readl(base + UTMIP_XCVR_CFG1);
	val |= UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
	       UTMIP_FORCE_PDDR_POWERDOWN;
	writel(val, base + UTMIP_XCVR_CFG1);

	utmip_pad_power_off(phy);
}
/* Mask high-speed disconnect detection across a resume, so bus resume
 * signalling is not misread as a disconnect (undone in postresume). */
static void utmi_phy_preresume(struct tegra_usb_phy *phy)
{
	unsigned long val;
	void __iomem *base = phy->regs;

	val = readl(base + UTMIP_TX_CFG0);
	val |= UTMIP_HS_DISCON_DISABLE;
	writel(val, base + UTMIP_TX_CFG0);
}
/* Re-enable high-speed disconnect detection after resume has finished
 * (pairs with utmi_phy_preresume). */
static void utmi_phy_postresume(struct tegra_usb_phy *phy)
{
	unsigned long val;
	void __iomem *base = phy->regs;

	val = readl(base + UTMIP_TX_CFG0);
	val &= ~UTMIP_HS_DISCON_DISABLE;
	writel(val, base + UTMIP_TX_CFG0);
}
/*
 * Begin restoring port state after a controller reset: force the D+/D-
 * observe mux to hold the line state matching the connected device's
 * speed (K for low speed, J otherwise) while the controller comes back
 * up.  Ended by utmi_phy_restore_end().
 */
static void utmi_phy_restore_start(struct tegra_usb_phy *phy,
				   enum tegra_usb_phy_port_speed port_speed)
{
	unsigned long val;
	void __iomem *base = phy->regs;

	val = readl(base + UTMIP_MISC_CFG0);
	val &= ~UTMIP_DPDM_OBSERVE_SEL(~0);
	if (port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW)
		val |= UTMIP_DPDM_OBSERVE_SEL_FS_K;
	else
		val |= UTMIP_DPDM_OBSERVE_SEL_FS_J;
	writel(val, base + UTMIP_MISC_CFG0);
	udelay(1);

	val = readl(base + UTMIP_MISC_CFG0);
	val |= UTMIP_DPDM_OBSERVE;
	writel(val, base + UTMIP_MISC_CFG0);
	udelay(10);
}
/* Release the forced D+/D- observe state set by utmi_phy_restore_start(). */
static void utmi_phy_restore_end(struct tegra_usb_phy *phy)
{
	unsigned long val;
	void __iomem *base = phy->regs;

	val = readl(base + UTMIP_MISC_CFG0);
	val &= ~UTMIP_DPDM_OBSERVE;
	writel(val, base + UTMIP_MISC_CFG0);
	udelay(10);
}
/*
 * Power up the external ULPI PHY: pulse its reset GPIO, start the ULPI
 * clock, route the link pins, load the trimmer values, then configure
 * the PHY over the ULPI viewport and arm the port wakeups.
 *
 * Fix vs. the previous version: on a failed ULPI register write the
 * function used to return with phy->clk still enabled, leaking a clock
 * reference on every failed power-on.  Errors now disable the clock
 * before returning.
 */
static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
{
	int ret;
	unsigned long val;
	void __iomem *base = phy->regs;
	struct tegra_ulpi_config *config = phy->config;

	/* Pulse the external PHY reset line. */
	gpio_direction_output(config->reset_gpio, 0);
	msleep(5);
	gpio_direction_output(config->reset_gpio, 1);

	clk_enable(phy->clk);
	msleep(1);

	val = readl(base + USB_SUSP_CTRL);
	val |= UHSIC_RESET;
	writel(val, base + USB_SUSP_CTRL);

	/* Route the ULPI output/clock pins past the pinmux. */
	val = readl(base + ULPI_TIMING_CTRL_0);
	val |= ULPI_OUTPUT_PINMUX_BYP | ULPI_CLKOUT_PINMUX_BYP;
	writel(val, base + ULPI_TIMING_CTRL_0);

	val = readl(base + USB_SUSP_CTRL);
	val |= ULPI_PHY_ENABLE;
	writel(val, base + USB_SUSP_CTRL);

	/* Set the trimmer values, then latch them with the LOAD bits. */
	val = 0;
	writel(val, base + ULPI_TIMING_CTRL_1);

	val |= ULPI_DATA_TRIMMER_SEL(4);
	val |= ULPI_STPDIRNXT_TRIMMER_SEL(4);
	val |= ULPI_DIR_TRIMMER_SEL(4);
	writel(val, base + ULPI_TIMING_CTRL_1);
	udelay(10);

	val |= ULPI_DATA_TRIMMER_LOAD;
	val |= ULPI_STPDIRNXT_TRIMMER_LOAD;
	val |= ULPI_DIR_TRIMMER_LOAD;
	writel(val, base + ULPI_TIMING_CTRL_1);

	/* Fix VbusInvalid due to floating VBUS */
	ret = otg_io_write(phy->ulpi, 0x40, 0x08);
	if (ret) {
		pr_err("%s: ulpi write failed\n", __func__);
		goto err_clk;
	}

	ret = otg_io_write(phy->ulpi, 0x80, 0x0B);
	if (ret) {
		pr_err("%s: ulpi write failed\n", __func__);
		goto err_clk;
	}

	/* Arm wake-on-connect/disconnect/overcurrent on the port. */
	val = readl(base + USB_PORTSC1);
	val |= USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN;
	writel(val, base + USB_PORTSC1);

	val = readl(base + USB_SUSP_CTRL);
	val |= USB_SUSP_CLR;
	writel(val, base + USB_SUSP_CTRL);
	udelay(100);

	val = readl(base + USB_SUSP_CTRL);
	val &= ~USB_SUSP_CLR;
	writel(val, base + USB_SUSP_CTRL);

	return 0;

err_clk:
	/* Don't leave the ULPI clock running if the PHY never came up. */
	clk_disable(phy->clk);
	return ret;
}
/*
 * Power down the external ULPI PHY: disarm the port wakeups, hold the
 * PHY in reset via its GPIO, and stop its clock.
 */
static void ulpi_phy_power_off(struct tegra_usb_phy *phy)
{
	unsigned long val;
	void __iomem *base = phy->regs;
	struct tegra_ulpi_config *config = phy->config;

	/* Clear WKCN/WKDS/WKOC wake-on events that can cause the USB
	 * Controller to immediately bring the ULPI PHY out of low power
	 */
	val = readl(base + USB_PORTSC1);
	val &= ~(USB_PORTSC1_WKOC | USB_PORTSC1_WKDS | USB_PORTSC1_WKCN);
	writel(val, base + USB_PORTSC1);

	gpio_direction_output(config->reset_gpio, 0);
	clk_disable(phy->clk);
}
/*
 * Allocate and initialise a tegra_usb_phy for one controller instance.
 * Looks up pll_u, matches its parent rate against tegra_freq_table,
 * then prepares either the ULPI or the UTMIP side of the PHY.
 * Returns an ERR_PTR() on failure.
 *
 * Fix vs. the previous version: the structure was kmalloc'd, leaving
 * phy->freq uninitialised; if no frequency-table entry matched, the
 * "if (!phy->freq)" check below read uninitialised memory.  kzalloc
 * guarantees phy->freq starts out NULL.
 */
struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
			void *config, enum tegra_usb_phy_mode phy_mode)
{
	struct tegra_usb_phy *phy;
	struct tegra_ulpi_config *ulpi_config;
	unsigned long parent_rate;
	int i;
	int err;

	phy = kzalloc(sizeof(struct tegra_usb_phy), GFP_KERNEL);
	if (!phy)
		return ERR_PTR(-ENOMEM);

	phy->instance = instance;
	phy->regs = regs;
	phy->config = config;
	phy->mode = phy_mode;

	if (!phy->config) {
		if (phy_is_ulpi(phy)) {
			/* ULPI has board-specific wiring; no default exists. */
			pr_err("%s: ulpi phy configuration missing", __func__);
			err = -EINVAL;
			goto err0;
		} else {
			phy->config = &utmip_default[instance];
		}
	}

	phy->pll_u = clk_get_sys(NULL, "pll_u");
	if (IS_ERR(phy->pll_u)) {
		pr_err("Can't get pll_u clock\n");
		err = PTR_ERR(phy->pll_u);
		goto err0;
	}
	clk_enable(phy->pll_u);

	/* Find the timing parameters for the oscillator feeding pll_u. */
	parent_rate = clk_get_rate(clk_get_parent(phy->pll_u));
	for (i = 0; i < ARRAY_SIZE(tegra_freq_table); i++) {
		if (tegra_freq_table[i].freq == parent_rate) {
			phy->freq = &tegra_freq_table[i];
			break;
		}
	}
	if (!phy->freq) {
		pr_err("invalid pll_u parent rate %ld\n", parent_rate);
		err = -EINVAL;
		goto err1;
	}

	if (phy_is_ulpi(phy)) {
		ulpi_config = config;
		phy->clk = clk_get_sys(NULL, ulpi_config->clk);
		if (IS_ERR(phy->clk)) {
			pr_err("%s: can't get ulpi clock\n", __func__);
			err = -ENXIO;
			goto err1;
		}
		tegra_gpio_enable(ulpi_config->reset_gpio);
		gpio_request(ulpi_config->reset_gpio, "ulpi_phy_reset_b");
		gpio_direction_output(ulpi_config->reset_gpio, 0);
		phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
		phy->ulpi->io_priv = regs + ULPI_VIEWPORT;
	} else {
		err = utmip_pad_open(phy);
		if (err < 0)
			goto err1;
	}

	return phy;

err1:
	clk_disable(phy->pll_u);
	clk_put(phy->pll_u);
err0:
	kfree(phy);
	return ERR_PTR(err);
}
/* Dispatch power-on to the PHY type attached to this instance. */
int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
{
	return phy_is_ulpi(phy) ? ulpi_phy_power_on(phy)
				: utmi_phy_power_on(phy);
}
/* Dispatch power-off to the PHY type attached to this instance. */
void tegra_usb_phy_power_off(struct tegra_usb_phy *phy)
{
	if (phy_is_ulpi(phy)) {
		ulpi_phy_power_off(phy);
		return;
	}
	utmi_phy_power_off(phy);
}
/* Pre-resume hook; only UTMIP PHYs need it. */
void tegra_usb_phy_preresume(struct tegra_usb_phy *phy)
{
	if (phy_is_ulpi(phy))
		return;
	utmi_phy_preresume(phy);
}
/* Post-resume hook; only UTMIP PHYs need it. */
void tegra_usb_phy_postresume(struct tegra_usb_phy *phy)
{
	if (phy_is_ulpi(phy))
		return;
	utmi_phy_postresume(phy);
}
/* Begin post-reset port-state restore; a no-op for ULPI PHYs. */
void tegra_ehci_phy_restore_start(struct tegra_usb_phy *phy,
				  enum tegra_usb_phy_port_speed port_speed)
{
	if (phy_is_ulpi(phy))
		return;
	utmi_phy_restore_start(phy, port_speed);
}
/* Finish post-reset port-state restore; a no-op for ULPI PHYs. */
void tegra_ehci_phy_restore_end(struct tegra_usb_phy *phy)
{
	if (phy_is_ulpi(phy))
		return;
	utmi_phy_restore_end(phy);
}
/* Gate the PHY clock; only meaningful for UTMIP PHYs. */
void tegra_usb_phy_clk_disable(struct tegra_usb_phy *phy)
{
	if (phy_is_ulpi(phy))
		return;
	utmi_phy_clk_disable(phy);
}
/* Ungate the PHY clock; only meaningful for UTMIP PHYs. */
void tegra_usb_phy_clk_enable(struct tegra_usb_phy *phy)
{
	if (phy_is_ulpi(phy))
		return;
	utmi_phy_clk_enable(phy);
}
/* Release everything acquired by tegra_usb_phy_open(). */
void tegra_usb_phy_close(struct tegra_usb_phy *phy)
{
	/* Type-specific resource first, then the shared pll_u reference. */
	if (!phy_is_ulpi(phy))
		utmip_pad_close(phy);
	else
		clk_put(phy->clk);

	clk_disable(phy->pll_u);
	clk_put(phy->pll_u);
	kfree(phy);
}
| gpl-2.0 |
AICP/android_kernel_asus_tf201 | fs/adfs/inode.c | 2873 | 9536 | /*
* linux/fs/adfs/inode.c
*
* Copyright (C) 1997-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include "adfs.h"
/*
* Lookup/Create a block at offset 'block' into 'inode'. We currently do
* not support creation of new blocks, so we return -EIO for this case.
*/
static int
adfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh,
	       int create)
{
	/* Block allocation is not implemented for ADFS. */
	if (create)
		return -EIO;

	/* Reads past the end of the file are simply left unmapped. */
	if (block >= inode->i_blocks)
		return 0;

	block = __adfs_block_map(inode->i_sb, inode->i_ino, block);
	if (block)
		map_bh(bh, inode->i_sb, block);
	return 0;
}
/* Write one page back to disk via the generic block-mapping helper. */
static int adfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, adfs_get_block, wbc);
}
/* Read one page from disk via the generic block-mapping helper. */
static int adfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, adfs_get_block);
}
/*
 * Prepare a page for a write, using the contiguous-file helper (ADFS
 * files occupy contiguous blocks, tracked via mmu_private).
 */
static int adfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				adfs_get_block,
				&ADFS_I(mapping->host)->mmu_private);
	if (unlikely(ret)) {
		/* On failure, trim anything instantiated beyond i_size. */
		loff_t isize = mapping->host->i_size;
		if (pos + len > isize)
			vmtruncate(mapping->host, isize);
	}

	return ret;
}
/* Map a file block number to a device block for the bmap ioctl/swap. */
static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, adfs_get_block);
}
/* Address-space operations: everything goes through adfs_get_block. */
static const struct address_space_operations adfs_aops = {
	.readpage	= adfs_readpage,
	.writepage	= adfs_writepage,
	.write_begin	= adfs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= _adfs_bmap
};
/*
* Convert ADFS attributes and filetype to Linux permission.
*/
static umode_t
adfs_atts2mode(struct super_block *sb, struct inode *inode)
{
	unsigned int attr = ADFS_I(inode)->attr;
	umode_t mode, rmask;
	struct adfs_sb_info *asb = ADFS_SB(sb);

	/* Directories: always searchable, readability from the owner mask. */
	if (attr & ADFS_NDA_DIRECTORY) {
		mode = S_IRUGO & asb->s_owner_mask;
		return S_IFDIR | S_IXUGO | mode;
	}

	/* Some RISC OS filetypes map to special Unix semantics. */
	switch (ADFS_I(inode)->filetype) {
	case 0xfc0:	/* LinkFS */
		return S_IFLNK|S_IRWXUGO;

	case 0xfe6:	/* UnixExec */
		rmask = S_IRUGO | S_IXUGO;
		break;

	default:
		rmask = S_IRUGO;
	}

	/* Regular file: combine the ADFS owner/public attribute bits with
	 * the mount-time owner and other permission masks. */
	mode = S_IFREG;

	if (attr & ADFS_NDA_OWNER_READ)
		mode |= rmask & asb->s_owner_mask;

	if (attr & ADFS_NDA_OWNER_WRITE)
		mode |= S_IWUGO & asb->s_owner_mask;

	if (attr & ADFS_NDA_PUBLIC_READ)
		mode |= rmask & asb->s_other_mask;

	if (attr & ADFS_NDA_PUBLIC_WRITE)
		mode |= S_IWUGO & asb->s_other_mask;
	return mode;
}
/*
* Convert Linux permission to ADFS attribute. We try to do the reverse
* of atts2mode, but there is not a 1:1 translation.
*/
static int
adfs_mode2atts(struct super_block *sb, struct inode *inode)
{
	struct adfs_sb_info *asb = ADFS_SB(sb);
	umode_t bits;
	int attr;

	/* FIXME: should we be able to alter a link? */
	if (S_ISLNK(inode->i_mode))
		return ADFS_I(inode)->attr;

	attr = S_ISDIR(inode->i_mode) ? ADFS_NDA_DIRECTORY : 0;

	/* Owner attributes come from the owner-mask bits of the mode. */
	bits = inode->i_mode & asb->s_owner_mask;
	if (bits & S_IRUGO)
		attr |= ADFS_NDA_OWNER_READ;
	if (bits & S_IWUGO)
		attr |= ADFS_NDA_OWNER_WRITE;

	/* Public attributes come from other-mask bits not already owned. */
	bits = inode->i_mode & asb->s_other_mask & ~asb->s_owner_mask;
	if (bits & S_IRUGO)
		attr |= ADFS_NDA_PUBLIC_READ;
	if (bits & S_IWUGO)
		attr |= ADFS_NDA_PUBLIC_WRITE;

	return attr;
}
/*
* Convert an ADFS time to Unix time. ADFS has a 40-bit centi-second time
* referenced to 1 Jan 1900 (til 2248) so we need to discard 2208988800 seconds
* of time to convert from RISC OS epoch to Unix epoch.
*/
static void
adfs_adfs2unix_time(struct timespec *tv, struct inode *inode)
{
unsigned int high, low;
/* 01 Jan 1970 00:00:00 (Unix epoch) as nanoseconds since
* 01 Jan 1900 00:00:00 (RISC OS epoch)
*/
static const s64 nsec_unix_epoch_diff_risc_os_epoch =
2208988800000000000LL;
s64 nsec;
if (ADFS_I(inode)->stamped == 0)
goto cur_time;
high = ADFS_I(inode)->loadaddr & 0xFF; /* top 8 bits of timestamp */
low = ADFS_I(inode)->execaddr; /* bottom 32 bits of timestamp */
/* convert 40-bit centi-seconds to 32-bit seconds
* going via nanoseconds to retain precision
*/
nsec = (((s64) high << 32) | (s64) low) * 10000000; /* cs to ns */
/* Files dated pre 01 Jan 1970 00:00:00. */
if (nsec < nsec_unix_epoch_diff_risc_os_epoch)
goto too_early;
/* convert from RISC OS to Unix epoch */
nsec -= nsec_unix_epoch_diff_risc_os_epoch;
*tv = ns_to_timespec(nsec);
return;
cur_time:
*tv = CURRENT_TIME;
return;
too_early:
tv->tv_sec = tv->tv_nsec = 0;
return;
}
/*
* Convert an Unix time to ADFS time. We only do this if the entry has a
* time/date stamp already.
*/
static void
adfs_unix2adfs_time(struct inode *inode, unsigned int secs)
{
	unsigned int high, low;

	if (ADFS_I(inode)->stamped) {
		/* convert 32-bit seconds to 40-bit centi-seconds */
		low = (secs & 255) * 100;
		/* 0x336e996a == (2208988800 * 100) >> 8: the Unix-to-
		 * RISC OS epoch offset in centi-seconds, pre-shifted by
		 * the 8 bits handled in 'low' above. */
		high = (secs / 256) * 100 + (low >> 8) + 0x336e996a;
		/* Top 8 bits of the stamp go into the low byte of
		 * loadaddr, the remaining 32 bits into execaddr. */
		ADFS_I(inode)->loadaddr = (high >> 24) |
				(ADFS_I(inode)->loadaddr & ~0xff);
		ADFS_I(inode)->execaddr = (low & 255) | (high << 8);
	}
}
/*
* Fill in the inode information from the object information.
*
* Note that this is an inode-less filesystem, so we can't use the inode
* number to reference the metadata on the media. Instead, we use the
* inode number to hold the object ID, which in turn will tell us where
* the data is held. We also save the parent object ID, and with these
* two, we can locate the metadata.
*
* This does mean that we rely on an objects parent remaining the same at
* all times - we cannot cope with a cross-directory rename (yet).
*/
struct inode *
adfs_iget(struct super_block *sb, struct object_info *obj)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (!inode)
		goto out;

	/* ADFS has no per-file ownership: every inode gets the global
	 * uid/gid taken from the mount options. */
	inode->i_uid	 = ADFS_SB(sb)->s_uid;
	inode->i_gid	 = ADFS_SB(sb)->s_gid;
	/* the "inode number" is really the on-media object ID */
	inode->i_ino	 = obj->file_id;
	inode->i_size	 = obj->size;
	inode->i_nlink	 = 2;
	inode->i_blocks	 = (inode->i_size + sb->s_blocksize - 1) >>
			    sb->s_blocksize_bits;

	/*
	 * we need to save the parent directory ID so that
	 * write_inode can update the directory information
	 * for this file. This will need special handling
	 * for cross-directory renames.
	 */
	ADFS_I(inode)->parent_id = obj->parent_id;
	ADFS_I(inode)->loadaddr  = obj->loadaddr;
	ADFS_I(inode)->execaddr  = obj->execaddr;
	ADFS_I(inode)->attr      = obj->attr;
	ADFS_I(inode)->filetype  = obj->filetype;
	/* top 12 bits of the load address all set => the load/exec pair
	 * encodes a time stamp rather than addresses */
	ADFS_I(inode)->stamped   = ((obj->loadaddr & 0xfff00000) == 0xfff00000);

	/* 'stamped' and 'attr' must be set before these two calls: the
	 * mode and mtime are derived from them */
	inode->i_mode	 = adfs_atts2mode(sb, inode);
	adfs_adfs2unix_time(&inode->i_mtime, inode);
	inode->i_atime = inode->i_mtime;
	inode->i_ctime = inode->i_mtime;

	if (S_ISDIR(inode->i_mode)) {
		inode->i_op	= &adfs_dir_inode_operations;
		inode->i_fop	= &adfs_dir_operations;
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op	= &adfs_file_inode_operations;
		inode->i_fop	= &adfs_file_operations;
		inode->i_mapping->a_ops = &adfs_aops;
		ADFS_I(inode)->mmu_private = inode->i_size;
	}

	insert_inode_hash(inode);

out:
	return inode;
}
/*
* Validate and convert a changed access mode/time to their ADFS equivalents.
* adfs_write_inode will actually write the information back to the directory
* later.
*/
int
adfs_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	unsigned int ia_valid = attr->ia_valid;
	int error;

	error = inode_change_ok(inode, attr);

	/*
	 * we can't change the UID or GID of any file -
	 * we have a global UID/GID in the superblock
	 */
	if ((ia_valid & ATTR_UID && attr->ia_uid != ADFS_SB(sb)->s_uid) ||
	    (ia_valid & ATTR_GID && attr->ia_gid != ADFS_SB(sb)->s_gid))
		error = -EPERM;

	if (error)
		goto out;

	/* XXX: this is missing some actual on-disk truncation.. */
	if (ia_valid & ATTR_SIZE)
		truncate_setsize(inode, attr->ia_size);

	if (ia_valid & ATTR_MTIME) {
		inode->i_mtime = attr->ia_mtime;
		adfs_unix2adfs_time(inode, attr->ia_mtime.tv_sec);
	}
	/*
	 * FIXME: should we make these == to i_mtime since we don't
	 * have the ability to represent them in our filesystem?
	 */
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		/*
		 * Fix: apply the requested mode before converting it to
		 * ADFS attribute bits.  adfs_mode2atts() works from
		 * inode->i_mode, so without this assignment the new mode
		 * in attr->ia_mode was silently ignored and chmod was a
		 * no-op.  Round-tripping through adfs_atts2mode() then
		 * keeps only the mode bits ADFS can actually represent.
		 */
		inode->i_mode = attr->ia_mode;
		ADFS_I(inode)->attr = adfs_mode2atts(sb, inode);
		inode->i_mode = adfs_atts2mode(sb, inode);
	}

	/*
	 * FIXME: should we be marking this inode dirty even if
	 * we don't have any metadata to write back?
	 */
	if (ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MODE))
		mark_inode_dirty(inode);
out:
	return error;
}
/*
 * Write an inode's directory entry back to the media.  All of the
 * ADFS-specific state has already been brought up to date by
 * adfs_notify_change(), so this just repackages it into an
 * object_info and hands it to the directory code.
 */
int adfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct object_info obj;

	obj.file_id   = inode->i_ino;
	obj.name_len  = 0;
	obj.parent_id = ADFS_I(inode)->parent_id;
	obj.loadaddr  = ADFS_I(inode)->loadaddr;
	obj.execaddr  = ADFS_I(inode)->execaddr;
	obj.attr      = ADFS_I(inode)->attr;
	obj.size      = inode->i_size;

	return adfs_dir_update(inode->i_sb, &obj,
			       wbc->sync_mode == WB_SYNC_ALL);
}
| gpl-2.0 |
ptmr3/i717_JB_Kernel | net/dccp/ccid.c | 3641 | 5371 | /*
* net/dccp/ccid.c
*
* An implementation of the DCCP protocol
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* CCID infrastructure
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/slab.h>
#include "ccid.h"
#include "ccids/lib/tfrc.h"
/* Table of the CCIDs compiled into this kernel: CCID-2 is always built
 * in, CCID-3 only when CONFIG_IP_DCCP_CCID3 is set. */
static struct ccid_operations *ccids[] = {
	&ccid2_ops,
#ifdef CONFIG_IP_DCCP_CCID3
	&ccid3_ops,
#endif
};
static struct ccid_operations *ccid_by_number(const u8 id)
{
int i;
for (i = 0; i < ARRAY_SIZE(ccids); i++)
if (ccids[i]->ccid_id == id)
return ccids[i];
return NULL;
}
/* check that up to @array_len members in @ccid_array are supported */
bool ccid_support_check(u8 const *ccid_array, u8 array_len)
{
	u8 i;

	/* every requested id must resolve to a built-in CCID */
	for (i = 0; i < array_len; i++)
		if (ccid_by_number(ccid_array[i]) == NULL)
			return false;
	return true;
}
/**
* ccid_get_builtin_ccids - Populate a list of built-in CCIDs
* @ccid_array: pointer to copy into
* @array_len: value to return length into
* This function allocates memory - caller must see that it is freed after use.
*/
int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
{
	u8 i;

	/* one byte per built-in CCID id; freed by the caller */
	*ccid_array = kmalloc(ARRAY_SIZE(ccids), gfp_any());
	if (*ccid_array == NULL)
		return -ENOBUFS;

	for (i = 0; i < ARRAY_SIZE(ccids); i++)
		(*ccid_array)[i] = ccids[i]->ccid_id;
	*array_len = ARRAY_SIZE(ccids);
	return 0;
}
/*
 * Copy the list of built-in CCID ids to userspace.  The full list length
 * is always written to @optlen; at most min(@len, list length) id bytes
 * are copied to @optval, and only when @len > 0.  @sk is unused.
 */
int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
				  char __user *optval, int __user *optlen)
{
	u8 *ccid_array, array_len;
	int err = 0;

	if (ccid_get_builtin_ccids(&ccid_array, &array_len))
		return -ENOBUFS;

	if (put_user(array_len, optlen))
		err = -EFAULT;
	else if (len > 0 && copy_to_user(optval, ccid_array,
					 len > array_len ? array_len : len))
		err = -EFAULT;

	kfree(ccid_array);
	return err;
}
/*
 * Create a slab cache for per-socket CCID state.  The cache name is
 * vsnprintf()-formatted into @slab_name_fmt (CCID_SLAB_NAME_LENGTH
 * bytes, supplied by the caller), so that buffer must outlive the
 * cache.  Objects are sized sizeof(struct ccid) + @obj_size: the
 * CCID-private data directly follows the common header (see ccid_new()).
 */
static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...)
{
	struct kmem_cache *slab;
	va_list args;

	va_start(args, fmt);
	vsnprintf(slab_name_fmt, CCID_SLAB_NAME_LENGTH, fmt, args);
	va_end(args);

	slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
				 SLAB_HWCACHE_ALIGN, NULL);
	return slab;
}
/* Destroy a CCID slab cache; tolerates a NULL (never-created) cache. */
static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
{
	if (slab == NULL)
		return;
	kmem_cache_destroy(slab);
}
/*
 * Create the rx and tx slab caches of one CCID.  On any failure both
 * slab pointers are left NULL and -ENOBUFS is returned.
 */
static int ccid_activate(struct ccid_operations *ccid_ops)
{
	ccid_ops->ccid_hc_rx_slab =
			ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
					       ccid_ops->ccid_hc_rx_slab_name,
					       "ccid%u_hc_rx_sock",
					       ccid_ops->ccid_id);
	if (ccid_ops->ccid_hc_rx_slab == NULL)
		return -ENOBUFS;

	ccid_ops->ccid_hc_tx_slab =
			ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
					       ccid_ops->ccid_hc_tx_slab_name,
					       "ccid%u_hc_tx_sock",
					       ccid_ops->ccid_id);
	if (ccid_ops->ccid_hc_tx_slab == NULL) {
		/* undo the rx cache so activation is all-or-nothing */
		ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
		ccid_ops->ccid_hc_rx_slab = NULL;
		return -ENOBUFS;
	}

	pr_info("CCID: Activated CCID %d (%s)\n",
		ccid_ops->ccid_id, ccid_ops->ccid_name);
	return 0;
}
/* Tear down both slab caches of @ccid_ops (reverse of ccid_activate()). */
static void ccid_deactivate(struct ccid_operations *ccid_ops)
{
	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
	ccid_ops->ccid_hc_tx_slab = NULL;
	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
	ccid_ops->ccid_hc_rx_slab = NULL;

	pr_info("CCID: Deactivated CCID %d (%s)\n",
		ccid_ops->ccid_id, ccid_ops->ccid_name);
}
/*
 * ccid_new  -  allocate and initialise CCID state for one half-connection
 * @id: CCID identifier (must name a built-in CCID)
 * @sk: socket the instance belongs to
 * @rx: true for the receiver half-connection, false for the sender
 *
 * Returns the new instance, or NULL if @id is unknown, allocation fails,
 * or the CCID's init callback fails.
 */
struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx)
{
	struct ccid_operations *ccid_ops = ccid_by_number(id);
	struct ccid *ccid = NULL;

	if (ccid_ops == NULL)
		goto out;

	ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab :
				     ccid_ops->ccid_hc_tx_slab, gfp_any());
	if (ccid == NULL)
		goto out;
	ccid->ccid_ops = ccid_ops;
	if (rx) {
		/* zero only the CCID-private part that follows the header */
		memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size);
		if (ccid->ccid_ops->ccid_hc_rx_init != NULL &&
		    ccid->ccid_ops->ccid_hc_rx_init(ccid, sk) != 0)
			goto out_free_ccid;
	} else {
		memset(ccid + 1, 0, ccid_ops->ccid_hc_tx_obj_size);
		if (ccid->ccid_ops->ccid_hc_tx_init != NULL &&
		    ccid->ccid_ops->ccid_hc_tx_init(ccid, sk) != 0)
			goto out_free_ccid;
	}
out:
	return ccid;
out_free_ccid:
	kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab :
			     ccid_ops->ccid_hc_tx_slab, ccid);
	ccid = NULL;
	goto out;
}
/* Run the CCID's rx exit hook (if any) and free the rx half-connection
 * state.  A NULL @ccid is a no-op. */
void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk)
{
	if (ccid == NULL)
		return;
	if (ccid->ccid_ops->ccid_hc_rx_exit != NULL)
		ccid->ccid_ops->ccid_hc_rx_exit(sk);
	kmem_cache_free(ccid->ccid_ops->ccid_hc_rx_slab, ccid);
}
/* Run the CCID's tx exit hook (if any) and free the tx half-connection
 * state.  A NULL @ccid is a no-op. */
void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
{
	if (ccid == NULL)
		return;
	if (ccid->ccid_ops->ccid_hc_tx_exit != NULL)
		ccid->ccid_ops->ccid_hc_tx_exit(sk);
	kmem_cache_free(ccid->ccid_ops->ccid_hc_tx_slab, ccid);
}
/* Activate every built-in CCID; on failure unwind those already
 * activated (and the TFRC library) so nothing is left half-set-up. */
int __init ccid_initialize_builtins(void)
{
	int i, err;

	err = tfrc_lib_init();
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(ccids); i++) {
		err = ccid_activate(ccids[i]);
		if (err) {
			while (--i >= 0)
				ccid_deactivate(ccids[i]);
			tfrc_lib_exit();
			return err;
		}
	}
	return 0;
}
/* Deactivate all built-in CCIDs, then shut down the TFRC library. */
void ccid_cleanup_builtins(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ccids); i++)
		ccid_deactivate(ccids[i]);
	tfrc_lib_exit();
}
| gpl-2.0 |
rickyzhang82/linux-allwinner | drivers/media/video/pwc/pwc-dec23.c | 3641 | 25251 | /* Linux driver for Philips webcam
Decompression for chipset version 2 et 3
(C) 2004-2006 Luc Saillard (luc@saillard.org)
NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
driver and thus may have bugs that are not present in the original version.
Please send bug reports and support requests to <luc@saillard.org>.
The decompression routines have been implemented by reverse-engineering the
Nemosoft binary pwcx module. Caveat emptor.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "pwc-timon.h"
#include "pwc-kiara.h"
#include "pwc-dec23.h"
#include <media/pwc-ioctl.h>
#include <linux/string.h>
#include <linux/slab.h>
/*
* USE_LOOKUP_TABLE_TO_CLAMP
* 0: use a C version of this tests: { a<0?0:(a>255?255:a) }
* 1: use a faster lookup table for cpu with a big cache (intel)
*/
#define USE_LOOKUP_TABLE_TO_CLAMP 1
/*
* UNROLL_LOOP_FOR_COPYING_BLOCK
* 0: use a loop for a smaller code (but little slower)
* 1: when unrolling the loop, gcc produces some faster code (perhaps only
* valid for intel processor class). Activating this option, automaticaly
* activate USE_LOOKUP_TABLE_TO_CLAMP
*/
#define UNROLL_LOOP_FOR_COPY 1
#if UNROLL_LOOP_FOR_COPY
# undef USE_LOOKUP_TABLE_TO_CLAMP
# define USE_LOOKUP_TABLE_TO_CLAMP 1
#endif
/*
* ENABLE_BAYER_DECODER
* 0: bayer decoder is not build (save some space)
* 1: bayer decoder is build and can be used
*/
#define ENABLE_BAYER_DECODER 0
/*
 * Fill pdec->table_subblock: for each of 256 levels, the 12 pattern
 * coefficients start from a fixed base and grow by a fixed per-level
 * increment (negative values rely on unsigned wrap-around, as in the
 * original tables).
 */
static void build_subblock_pattern(struct pwc_dec23_private *pdec)
{
	static const unsigned int pattern_base[12] = {
		-0x526500, -0x221200, 0x221200, 0x526500,
			   -0x3de200, 0x3de200,
		-0x6db480, -0x2d5d00, 0x2d5d00, 0x6db480,
			   -0x12c200, 0x12c200
	};
	static const unsigned int pattern_step[12] = {
		0xa4ca, 0x4424, -0x4424, -0xa4ca,
			0x7bc4, -0x7bc4,
		0xdb69, 0x5aba, -0x5aba, -0xdb69,
			0x2584, -0x2584
	};
	unsigned int acc[12];
	int level, coef;

	memcpy(acc, pattern_base, sizeof(pattern_base));

	for (level = 0; level < 256; level++) {
		for (coef = 0; coef < 12; coef++) {
			pdec->table_subblock[level][coef] = acc[coef];
			acc[coef] += pattern_step[coef];
		}
	}
}
/*
 * Fill pdec->table_bitpowermask: for bit position b and byte v,
 * the entry is the low b bits of v, negated when bit b of v is set
 * (a sign/magnitude decode truncated to unsigned char).
 */
static void build_bit_powermask_table(struct pwc_dec23_private *pdec)
{
	unsigned int bit, byte;

	for (bit = 0; bit < 8; bit++) {
		unsigned int sign_bit = 1U << bit;
		unsigned int low_mask = sign_bit - 1;
		unsigned char *out = pdec->table_bitpowermask[bit];

		for (byte = 0; byte < 256; byte++) {
			unsigned int magnitude = byte & low_mask;

			*out++ = (byte & sign_bit) ? -magnitude : magnitude;
		}
	}
}
/*
 * Expand one set of 16 compression ROM tables into the two lookup
 * tables used by decode_block().  Each 32-bit ROM word packs six 3-bit
 * "bit" fields at bit offsets 15, 12, 9, 6, 3 and 0; which field is
 * used depends on the sub-index k (see the chained comparisons below).
 */
static void build_table_color(const unsigned int romtable[16][8],
			      unsigned char p0004[16][1024],
			      unsigned char p8004[16][256])
{
	int compression_mode, j, k, bit, pw;
	unsigned char *p0, *p8;
	const unsigned int *r;

	/* We have 16 compressions tables */
	for (compression_mode = 0; compression_mode < 16; compression_mode++) {
		p0 = p0004[compression_mode];
		p8 = p8004[compression_mode];
		r  = romtable[compression_mode];

		for (j = 0; j < 8; j++, r++, p0 += 128) {
			for (k = 0; k < 16; k++) {
				/* select the 3-bit field of r[0] that
				 * applies to this k */
				if (k == 0)
					bit = 1;
				else if (k >= 1 && k < 3)
					bit = (r[0] >> 15) & 7;
				else if (k >= 3 && k < 6)
					bit = (r[0] >> 12) & 7;
				else if (k >= 6 && k < 10)
					bit = (r[0] >> 9) & 7;
				else if (k >= 10 && k < 13)
					bit = (r[0] >> 6) & 7;
				else if (k >= 13 && k < 15)
					bit = (r[0] >> 3) & 7;
				else
					bit = (r[0]) & 7;

				/* p8004 stores (bit count, shift) pairs */
				if (k == 0)
					*p8++ = 8;
				else
					*p8++ = j - bit;
				*p8++ = bit;

				/* p0004 stores +-1..4 * 2^bit, biased by
				 * 0x80, in eight groups of 16 */
				pw = 1 << bit;

				p0[k + 0x00] = (1 * pw) + 0x80;
				p0[k + 0x10] = (2 * pw) + 0x80;
				p0[k + 0x20] = (3 * pw) + 0x80;
				p0[k + 0x30] = (4 * pw) + 0x80;
				p0[k + 0x40] = (-1 * pw) + 0x80;
				p0[k + 0x50] = (-2 * pw) + 0x80;
				p0[k + 0x60] = (-3 * pw) + 0x80;
				p0[k + 0x70] = (-4 * pw) + 0x80;
			} /* end of for (k=0; k<16; k++, p8++) */
		} /* end of for (j=0; j<8; j++ , table++) */
	} /* end of foreach compression_mode */
}
/*
 * Fill the two per-primary-color base tables used by decode_block():
 * table_dc00 for flat (single-color) blocks, table_d800 for patterned
 * blocks.  Both ramp in steps of 0x7bc4; table_dc00 additionally has
 * the ONE_HALF bit cleared.  SCALEBITS/ONE_HALF are also used by
 * pwc_dec23_init(), so the #defines must stay here.
 */
static void fill_table_dc00_d800(struct pwc_dec23_private *pdec)
{
#define SCALEBITS 15
#define ONE_HALF (1UL << (SCALEBITS - 1))
	int i;
	unsigned int dc = ONE_HALF;
	unsigned int d8 = 0x0000;

	for (i = 0; i < 256; i++, dc += 0x7bc4, d8 += 0x7bc4) {
		pdec->table_dc00[i] = dc & ~(ONE_HALF);
		pdec->table_d800[i] = d8;
	}
}
/*
* To decode the stream:
* if look_bits(2) == 0: # op == 2 in the lookup table
* skip_bits(2)
* end of the stream
* elif look_bits(3) == 7: # op == 1 in the lookup table
* skip_bits(3)
* yyyy = get_bits(4)
* xxxx = get_bits(8)
* else: # op == 0 in the lookup table
* skip_bits(x)
*
* For speedup processing, we build a lookup table and we takes the first 6 bits.
*
* struct {
* unsigned char op; // operation to execute
* unsigned char bits; // bits use to perform operation
* unsigned char offset1; // offset to add to access in the table_0004 % 16
* unsigned char offset2; // offset to add to access in the table_0004
* }
*
* How to build this table ?
* op == 2 when (i%4)==0
* op == 1 when (i%8)==7
* op == 0 otherwise
*
*/
static const unsigned char hash_table_ops[64*4] = {
	/* One 4-byte record per 6-bit lookahead value:
	 * { op, bits-to-skip, offset1 increment (used mod 16),
	 *   offset into table_0004 }  -- see decode_block() */
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x00,
	0x00, 0x04, 0x01, 0x10,
	0x00, 0x06, 0x01, 0x30,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x40,
	0x00, 0x05, 0x01, 0x20,
	0x01, 0x00, 0x00, 0x00,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x00,
	0x00, 0x04, 0x01, 0x50,
	0x00, 0x05, 0x02, 0x00,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x40,
	0x00, 0x05, 0x03, 0x00,
	0x01, 0x00, 0x00, 0x00,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x00,
	0x00, 0x04, 0x01, 0x10,
	0x00, 0x06, 0x02, 0x10,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x40,
	0x00, 0x05, 0x01, 0x60,
	0x01, 0x00, 0x00, 0x00,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x00,
	0x00, 0x04, 0x01, 0x50,
	0x00, 0x05, 0x02, 0x40,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x40,
	0x00, 0x05, 0x03, 0x40,
	0x01, 0x00, 0x00, 0x00,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x00,
	0x00, 0x04, 0x01, 0x10,
	0x00, 0x06, 0x01, 0x70,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x40,
	0x00, 0x05, 0x01, 0x20,
	0x01, 0x00, 0x00, 0x00,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x00,
	0x00, 0x04, 0x01, 0x50,
	0x00, 0x05, 0x02, 0x00,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x40,
	0x00, 0x05, 0x03, 0x00,
	0x01, 0x00, 0x00, 0x00,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x00,
	0x00, 0x04, 0x01, 0x10,
	0x00, 0x06, 0x02, 0x50,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x40,
	0x00, 0x05, 0x01, 0x60,
	0x01, 0x00, 0x00, 0x00,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x00,
	0x00, 0x04, 0x01, 0x50,
	0x00, 0x05, 0x02, 0x40,
	0x02, 0x00, 0x00, 0x00,
	0x00, 0x03, 0x01, 0x40,
	0x00, 0x05, 0x03, 0x40,
	0x01, 0x00, 0x00, 0x00
};
/*
 * MulIdx[offset1][pixel]: maps each of the 16 pixels of a 4x4 block
 * to one of the 12 coefficients of a table_subblock pattern row (the
 * selected pattern is added per pixel in decode_block()).
 */
static const unsigned int MulIdx[16][16] = {
	{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,},
	{0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3,},
	{0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3,},
	{4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4,},
	{6, 7, 8, 9, 7, 10, 11, 8, 8, 11, 10, 7, 9, 8, 7, 6,},
	{4, 5, 5, 4, 4, 5, 5, 4, 4, 5, 5, 4, 4, 5, 5, 4,},
	{1, 3, 0, 2, 1, 3, 0, 2, 1, 3, 0, 2, 1, 3, 0, 2,},
	{0, 3, 3, 0, 1, 2, 2, 1, 2, 1, 1, 2, 3, 0, 0, 3,},
	{0, 1, 2, 3, 3, 2, 1, 0, 3, 2, 1, 0, 0, 1, 2, 3,},
	{1, 1, 1, 1, 3, 3, 3, 3, 0, 0, 0, 0, 2, 2, 2, 2,},
	{7, 10, 11, 8, 9, 8, 7, 6, 6, 7, 8, 9, 8, 11, 10, 7,},
	{4, 5, 5, 4, 5, 4, 4, 5, 5, 4, 4, 5, 4, 5, 5, 4,},
	{7, 9, 6, 8, 10, 8, 7, 11, 11, 7, 8, 10, 8, 6, 9, 7,},
	{1, 3, 0, 2, 2, 0, 3, 1, 2, 0, 3, 1, 1, 3, 0, 2,},
	{1, 2, 2, 1, 3, 0, 0, 3, 0, 3, 3, 0, 2, 1, 1, 2,},
	{10, 8, 7, 11, 8, 6, 9, 7, 7, 9, 6, 8, 11, 7, 8, 10}
};
#if USE_LOOKUP_TABLE_TO_CLAMP
/* Clamp-to-[0,255] via table lookup: MAX_OUTER_CROP_VALUE zeros, the
 * 0..255 identity range, then MAX_OUTER_CROP_VALUE entries of 255.
 * The table is filled in pwc_dec23_init(); inputs must stay within
 * [-MAX_OUTER_CROP_VALUE, 255 + MAX_OUTER_CROP_VALUE). */
#define MAX_OUTER_CROP_VALUE	(512)
static unsigned char pwc_crop_table[256 + 2*MAX_OUTER_CROP_VALUE];
#define CLAMP(x) (pwc_crop_table[MAX_OUTER_CROP_VALUE+(x)])
#else
#define CLAMP(x) ((x)>255?255:((x)<0?0:x))
#endif
/* If the type or the command change, we rebuild the lookup table */
int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd)
{
	int flags, version, shift, i;
	struct pwc_dec23_private *pdec;

	/* allocate the private state lazily if pwc_dec23_alloc() was
	 * not called first */
	if (pwc->decompress_data == NULL) {
		pdec = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL);
		if (pdec == NULL)
			return -ENOMEM;
		pwc->decompress_data = pdec;
	}
	pdec = pwc->decompress_data;

	if (DEVICE_USE_CODEC3(type)) {
		/* Kiara ROM tables: bits 3-4 of cmd[2] pick the bit
		 * depth, bits 5-7 the table version */
		flags = cmd[2] & 0x18;
		if (flags == 8)
			pdec->nbits = 7;	/* More bits, mean more bits to encode the stream, but better quality */
		else if (flags == 0x10)
			pdec->nbits = 8;
		else
			pdec->nbits = 6;

		version = cmd[2] >> 5;
		build_table_color(KiaraRomTable[version][0], pdec->table_0004_pass1, pdec->table_8004_pass1);
		build_table_color(KiaraRomTable[version][1], pdec->table_0004_pass2, pdec->table_8004_pass2);
	} else {
		/* Timon ROM tables: bits 1-2 of cmd[2] pick the bit
		 * depth, bits 3-7 the table version */
		flags = cmd[2] & 6;
		if (flags == 2)
			pdec->nbits = 7;
		else if (flags == 4)
			pdec->nbits = 8;
		else
			pdec->nbits = 6;

		version = cmd[2] >> 3;
		build_table_color(TimonRomTable[version][0], pdec->table_0004_pass1, pdec->table_8004_pass1);
		build_table_color(TimonRomTable[version][1], pdec->table_0004_pass2, pdec->table_8004_pass2);
	}

	/* Informations can be coded on a variable number of bits but never less than 8 */
	shift = 8 - pdec->nbits;
	pdec->scalebits = SCALEBITS - shift;
	pdec->nbitsmask = 0xFF >> shift;

	fill_table_dc00_d800(pdec);
	build_subblock_pattern(pdec);
	build_bit_powermask_table(pdec);

#if USE_LOOKUP_TABLE_TO_CLAMP
	/* Build the static table to clamp value [0-255]:
	 * zeros, identity, then saturation at 255 */
	for (i = 0; i < MAX_OUTER_CROP_VALUE; i++)
		pwc_crop_table[i] = 0;
	for (i = 0; i < 256; i++)
		pwc_crop_table[MAX_OUTER_CROP_VALUE+i] = i;
	for (i = 0; i < MAX_OUTER_CROP_VALUE; i++)
		pwc_crop_table[MAX_OUTER_CROP_VALUE+256+i] = 255;
#endif

	return 0;
}
/*
* Copy the 4x4 image block to Y plane buffer
*/
/* Writes the 16 decoded values as four rows of four Y pixels,
 * rows spaced bytes_per_line apart, clamping each to [0,255]. */
static void copy_image_block_Y(const int *src, unsigned char *dst, unsigned int bytes_per_line, unsigned int scalebits)
{
#if UNROLL_LOOP_FOR_COPY
	const unsigned char *cm = pwc_crop_table+MAX_OUTER_CROP_VALUE;
	const int *c = src;
	unsigned char *d = dst;

	*d++ = cm[c[0] >> scalebits];
	*d++ = cm[c[1] >> scalebits];
	*d++ = cm[c[2] >> scalebits];
	*d++ = cm[c[3] >> scalebits];

	d = dst + bytes_per_line;
	*d++ = cm[c[4] >> scalebits];
	*d++ = cm[c[5] >> scalebits];
	*d++ = cm[c[6] >> scalebits];
	*d++ = cm[c[7] >> scalebits];

	d = dst + bytes_per_line*2;
	*d++ = cm[c[8] >> scalebits];
	*d++ = cm[c[9] >> scalebits];
	*d++ = cm[c[10] >> scalebits];
	*d++ = cm[c[11] >> scalebits];

	d = dst + bytes_per_line*3;
	*d++ = cm[c[12] >> scalebits];
	*d++ = cm[c[13] >> scalebits];
	*d++ = cm[c[14] >> scalebits];
	*d++ = cm[c[15] >> scalebits];
#else
	int i;
	const int *c = src;
	unsigned char *d = dst;

	for (i = 0; i < 4; i++, c++)
		*d++ = CLAMP((*c) >> scalebits);

	d = dst + bytes_per_line;
	for (i = 0; i < 4; i++, c++)
		*d++ = CLAMP((*c) >> scalebits);

	d = dst + bytes_per_line*2;
	for (i = 0; i < 4; i++, c++)
		*d++ = CLAMP((*c) >> scalebits);

	d = dst + bytes_per_line*3;
	for (i = 0; i < 4; i++, c++)
		*d++ = CLAMP((*c) >> scalebits);
#endif
}
/*
* Copy the 4x4 image block to a CrCb plane buffer
*
*/
/* Writes the 16 decoded values as two rows of eight chroma pixels:
 * row 1 interleaves c[0..3] with c[4..7], row 2 interleaves c[12..15]
 * with c[8..11], each value clamped to [0,255]. */
static void copy_image_block_CrCb(const int *src, unsigned char *dst, unsigned int bytes_per_line, unsigned int scalebits)
{
#if UNROLL_LOOP_FOR_COPY
	/* Unroll all loops */
	const unsigned char *cm = pwc_crop_table+MAX_OUTER_CROP_VALUE;
	const int *c = src;
	unsigned char *d = dst;

	*d++ = cm[c[0] >> scalebits];
	*d++ = cm[c[4] >> scalebits];
	*d++ = cm[c[1] >> scalebits];
	*d++ = cm[c[5] >> scalebits];
	*d++ = cm[c[2] >> scalebits];
	*d++ = cm[c[6] >> scalebits];
	*d++ = cm[c[3] >> scalebits];
	*d++ = cm[c[7] >> scalebits];

	d = dst + bytes_per_line;
	*d++ = cm[c[12] >> scalebits];
	*d++ = cm[c[8] >> scalebits];
	*d++ = cm[c[13] >> scalebits];
	*d++ = cm[c[9] >> scalebits];
	*d++ = cm[c[14] >> scalebits];
	*d++ = cm[c[10] >> scalebits];
	*d++ = cm[c[15] >> scalebits];
	*d++ = cm[c[11] >> scalebits];
#else
	int i;
	const int *c1 = src;
	const int *c2 = src + 4;
	unsigned char *d = dst;

	for (i = 0; i < 4; i++, c1++, c2++) {
		*d++ = CLAMP((*c1) >> scalebits);
		*d++ = CLAMP((*c2) >> scalebits);
	}
	/* second row: c1 restarts at 12, c2 continues from 8 */
	c1 = src + 12;
	d = dst + bytes_per_line;
	for (i = 0; i < 4; i++, c1++, c2++) {
		*d++ = CLAMP((*c1) >> scalebits);
		*d++ = CLAMP((*c2) >> scalebits);
	}
#endif
}
#if ENABLE_BAYER_DECODER
/*
* Format: 8x2 pixels
* . G . G . G . G . G . G . G
* . . . . . . . . . . . . . .
* . G . G . G . G . G . G . G
* . . . . . . . . . . . . . .
* or
* . . . . . . . . . . . . . .
* G . G . G . G . G . G . G .
* . . . . . . . . . . . . . .
* G . G . G . G . G . G . G .
*/
/* Writes the 16 decoded values as two rows of eight green samples,
 * one sample every second byte (the caller chooses the starting
 * offset to select the Bayer phase), clamped to [0,255]. */
static void copy_image_block_Green(const int *src, unsigned char *dst, unsigned int bytes_per_line, unsigned int scalebits)
{
#if UNROLL_LOOP_FOR_COPY
	/* Unroll all loops */
	const unsigned char *cm = pwc_crop_table+MAX_OUTER_CROP_VALUE;
	unsigned char *d = dst;
	const int *c = src;

	d[0] = cm[c[0] >> scalebits];
	d[2] = cm[c[1] >> scalebits];
	d[4] = cm[c[2] >> scalebits];
	d[6] = cm[c[3] >> scalebits];
	d[8] = cm[c[4] >> scalebits];
	d[10] = cm[c[5] >> scalebits];
	d[12] = cm[c[6] >> scalebits];
	d[14] = cm[c[7] >> scalebits];

	d = dst + bytes_per_line;
	d[0] = cm[c[8] >> scalebits];
	d[2] = cm[c[9] >> scalebits];
	d[4] = cm[c[10] >> scalebits];
	d[6] = cm[c[11] >> scalebits];
	d[8] = cm[c[12] >> scalebits];
	d[10] = cm[c[13] >> scalebits];
	d[12] = cm[c[14] >> scalebits];
	d[14] = cm[c[15] >> scalebits];
#else
	int i;
	unsigned char *d;
	const int *c = src;

	d = dst;
	for (i = 0; i < 8; i++, c++)
		d[i*2] = CLAMP((*c) >> scalebits);

	d = dst + bytes_per_line;
	for (i = 0; i < 8; i++, c++)
		d[i*2] = CLAMP((*c) >> scalebits);
#endif
}
#endif
#if ENABLE_BAYER_DECODER
/*
* Format: 4x4 pixels
* R . R . R . R
* . B . B . B .
* R . R . R . R
* . B . B . B .
*/
/* Writes the 16 decoded values into the R/B positions of a 4-row Bayer
 * region: even rows at even byte offsets, odd rows at odd offsets,
 * four samples per row, clamped to [0,255]. */
static void copy_image_block_RedBlue(const int *src, unsigned char *dst, unsigned int bytes_per_line, unsigned int scalebits)
{
#if UNROLL_LOOP_FOR_COPY
	/* Unroll all loops */
	const unsigned char *cm = pwc_crop_table+MAX_OUTER_CROP_VALUE;
	unsigned char *d = dst;
	const int *c = src;

	d[0] = cm[c[0] >> scalebits];
	d[2] = cm[c[1] >> scalebits];
	d[4] = cm[c[2] >> scalebits];
	d[6] = cm[c[3] >> scalebits];

	d = dst + bytes_per_line;
	d[1] = cm[c[4] >> scalebits];
	d[3] = cm[c[5] >> scalebits];
	d[5] = cm[c[6] >> scalebits];
	d[7] = cm[c[7] >> scalebits];

	d = dst + bytes_per_line*2;
	d[0] = cm[c[8] >> scalebits];
	d[2] = cm[c[9] >> scalebits];
	d[4] = cm[c[10] >> scalebits];
	d[6] = cm[c[11] >> scalebits];

	d = dst + bytes_per_line*3;
	d[1] = cm[c[12] >> scalebits];
	d[3] = cm[c[13] >> scalebits];
	d[5] = cm[c[14] >> scalebits];
	d[7] = cm[c[15] >> scalebits];
#else
	int i;
	unsigned char *d;
	const int *c = src;

	d = dst;
	for (i = 0; i < 4; i++, c++)
		d[i*2] = CLAMP((*c) >> scalebits);

	d = dst + bytes_per_line;
	for (i = 0; i < 4; i++, c++)
		d[i*2+1] = CLAMP((*c) >> scalebits);

	d = dst + bytes_per_line*2;
	for (i = 0; i < 4; i++, c++)
		d[i*2] = CLAMP((*c) >> scalebits);

	d = dst + bytes_per_line*3;
	for (i = 0; i < 4; i++, c++)
		d[i*2+1] = CLAMP((*c) >> scalebits);
#endif
}
#endif
/*
* To manage the stream, we keep bits in a 32 bits register.
* fill_nbits(n): fill the reservoir with at least n bits
* skip_bits(n): discard n bits from the reservoir
* get_bits(n): fill the reservoir, returns the first n bits and discard the
* bits from the reservoir.
* __get_nbits(n): faster version of get_bits(n), but asumes that the reservoir
* contains at least n bits. bits returned is discarded.
*/
/*
 * Implementation note: the do { } while (0) wrappers make each macro a
 * single statement.  The definitions deliberately end WITHOUT a trailing
 * semicolon (the previous ';' after while(0) defeated the idiom), so the
 * caller's own ';' completes the statement and the macros are safe even
 * in unbraced if/else bodies.
 */
#define fill_nbits(pdec, nbits_wanted) do { \
	while (pdec->nbits_in_reservoir < (nbits_wanted)) \
	{ \
		pdec->reservoir |= (*(pdec->stream)++) << (pdec->nbits_in_reservoir); \
		pdec->nbits_in_reservoir += 8; \
	} \
}  while (0)

#define skip_nbits(pdec, nbits_to_skip) do { \
	pdec->reservoir >>= (nbits_to_skip); \
	pdec->nbits_in_reservoir -= (nbits_to_skip); \
}  while (0)

#define get_nbits(pdec, nbits_wanted, result) do { \
	fill_nbits(pdec, nbits_wanted); \
	result = (pdec->reservoir) & ((1U<<(nbits_wanted))-1); \
	skip_nbits(pdec, nbits_wanted); \
}  while (0)

#define __get_nbits(pdec, nbits_wanted, result) do { \
	result = (pdec->reservoir) & ((1U<<(nbits_wanted))-1); \
	skip_nbits(pdec, nbits_wanted); \
}  while (0)

/* Peek at the low nbits_wanted bits without consuming them; the caller
 * must have filled the reservoir first. */
#define look_nbits(pdec, nbits_wanted) \
	((pdec->reservoir) & ((1U<<(nbits_wanted))-1))
/*
* Decode a 4x4 pixel block
*/
static void decode_block(struct pwc_dec23_private *pdec,
			 const unsigned char *ptable0004,
			 const unsigned char *ptable8004)
{
	unsigned int primary_color;
	unsigned int channel_v, offset1, op;
	int i;

	fill_nbits(pdec, 16);
	__get_nbits(pdec, pdec->nbits, primary_color);

	if (look_nbits(pdec, 2) == 0) {
		skip_nbits(pdec, 2);
		/* Very simple, the color is the same for all pixels of the square */
		for (i = 0; i < 16; i++)
			pdec->temp_colors[i] = pdec->table_dc00[primary_color];
		return;
	}

	/* This block is encoded with small pattern */
	for (i = 0; i < 16; i++)
		pdec->temp_colors[i] = pdec->table_d800[primary_color];

	__get_nbits(pdec, 3, channel_v);
	/* swap bit 0 and bit 2 of the 3-bit channel selector before
	 * using it to offset into the per-channel tables */
	channel_v = ((channel_v & 1) << 2) | (channel_v & 2) | ((channel_v & 4) >> 2);
	ptable0004 += (channel_v * 128);
	ptable8004 += (channel_v * 32);

	offset1 = 0;
	do
	{
		unsigned int htable_idx, rows = 0;
		const unsigned int *block;

		/* [ zzzz y x x ]
		 * xx == 00 :=> end of the block def, remove the two bits from the stream
		 * yxx == 111
		 * yxx == any other value
		 *
		 * The 6-bit lookahead indexes hash_table_ops, whose
		 * 4-byte records hold { op, bits-to-skip, offset1
		 * increment, table_0004 offset }.
		 */
		fill_nbits(pdec, 16);
		htable_idx = look_nbits(pdec, 6);
		op = hash_table_ops[htable_idx * 4];

		if (op == 2) {
			/* end of block */
			skip_nbits(pdec, 2);
		} else if (op == 1) {
			/* 15bits [ xxxx xxxx yyyy 111 ]
			 * yyy => offset in the table8004
			 * xxx => offset in the tabled004 (tree)
			 */
			unsigned int mask, shift;
			unsigned int nbits, col1;
			unsigned int yyyy;

			skip_nbits(pdec, 3);
			/* offset1 += yyyy */
			__get_nbits(pdec, 4, yyyy);
			offset1 += 1 + yyyy;
			offset1 &= 0x0F;

			nbits = ptable8004[offset1 * 2];

			/* col1 = xxxx xxxx */
			__get_nbits(pdec, nbits+1, col1);

			/* Bit mask table */
			mask = pdec->table_bitpowermask[nbits][col1];
			shift = ptable8004[offset1 * 2 + 1];
			rows = ((mask << shift) + 0x80) & 0xFF;

			block = pdec->table_subblock[rows];
			for (i = 0; i < 16; i++)
				pdec->temp_colors[i] += block[MulIdx[offset1][i]];
		} else {
			/* op == 0
			 * offset1 is coded on 3 bits
			 */
			unsigned int shift;

			offset1 += hash_table_ops[htable_idx * 4 + 2];
			offset1 &= 0x0F;

			rows = ptable0004[offset1 + hash_table_ops[htable_idx * 4 + 3]];
			block = pdec->table_subblock[rows];
			for (i = 0; i < 16; i++)
				pdec->temp_colors[i] += block[MulIdx[offset1][i]];

			shift = hash_table_ops[htable_idx * 4 + 1];
			skip_nbits(pdec, shift);
		}
	} while (op != 2);
}
/*
 * Decompress one band (4 output lines) of a planar YUV420P image.
 * @rawyuv holds the compressed band (its first byte is skipped); the
 * planar_* pointers address the top-left of this band in each output
 * plane.  Pass 1 decodes Y in 4x4 blocks, pass 2 decodes alternating
 * U and V blocks into the half-width chroma planes.
 */
static void DecompressBand23(struct pwc_dec23_private *pdec,
			     const unsigned char *rawyuv,
			     unsigned char *planar_y,
			     unsigned char *planar_u,
			     unsigned char *planar_v,
			     unsigned int compressed_image_width,
			     unsigned int real_image_width)
{
	int compression_index, nblocks;
	const unsigned char *ptable0004;
	const unsigned char *ptable8004;

	pdec->reservoir = 0;
	pdec->nbits_in_reservoir = 0;
	pdec->stream = rawyuv + 1;	/* The first byte of the stream is skipped */

	get_nbits(pdec, 4, compression_index);

	/* pass 1: uncompress Y component */
	nblocks = compressed_image_width / 4;

	ptable0004 = pdec->table_0004_pass1[compression_index];
	ptable8004 = pdec->table_8004_pass1[compression_index];

	/* Each block decode a square of 4x4 */
	while (nblocks) {
		decode_block(pdec, ptable0004, ptable8004);
		copy_image_block_Y(pdec->temp_colors, planar_y, real_image_width, pdec->scalebits);
		planar_y += 4;
		nblocks--;
	}

	/* pass 2: uncompress UV component */
	nblocks = compressed_image_width / 8;

	ptable0004 = pdec->table_0004_pass2[compression_index];
	ptable8004 = pdec->table_8004_pass2[compression_index];

	/* Each block decode a square of 4x4; blocks alternate U, V */
	while (nblocks) {
		decode_block(pdec, ptable0004, ptable8004);
		copy_image_block_CrCb(pdec->temp_colors, planar_u, real_image_width/2, pdec->scalebits);

		decode_block(pdec, ptable0004, ptable8004);
		copy_image_block_CrCb(pdec->temp_colors, planar_v, real_image_width/2, pdec->scalebits);

		planar_v += 8;
		planar_u += 8;
		nblocks -= 2;
	}
}
#if ENABLE_BAYER_DECODER
/*
* Size need to be a multiple of 8 in width
*
* Return a block of four line encoded like this:
*
* G R G R G R G R G R G R G R G R
* B G B G B G B G B G B G B G B G
* G R G R G R G R G R G R G R G R
* B G B G B G B G B G B G B G B G
*
*/
/*
 * Decompress one band of a raw Bayer image into @rgbbayer.  Pass 1
 * decodes the red/blue samples, pass 2 the two green phases.
 */
static void DecompressBandBayer(struct pwc_dec23_private *pdec,
				const unsigned char *rawyuv,
				unsigned char *rgbbayer,
				unsigned int compressed_image_width,
				unsigned int real_image_width)
{
	int compression_index, nblocks;
	const unsigned char *ptable0004;
	const unsigned char *ptable8004;
	unsigned char *dest;

	pdec->reservoir = 0;
	pdec->nbits_in_reservoir = 0;
	pdec->stream = rawyuv + 1;	/* The first byte of the stream is skipped */

	get_nbits(pdec, 4, compression_index);

	/* pass 1: uncompress RB component */
	nblocks = compressed_image_width / 4;

	ptable0004 = pdec->table_0004_pass1[compression_index];
	ptable8004 = pdec->table_8004_pass1[compression_index];
	dest = rgbbayer;

	/* Each block decode a square of 4x4 */
	while (nblocks) {
		decode_block(pdec, ptable0004, ptable8004);
		/* Fix: write each decoded block at the advancing cursor
		 * (dest), not at the band start (rgbbayer) -- previously
		 * every block overwrote the first 4x4 region while dest
		 * was incremented but never used. */
		copy_image_block_RedBlue(pdec->temp_colors, dest, real_image_width, pdec->scalebits);
		dest += 8;
		nblocks--;
	}

	/* pass 2: uncompress G component */
	nblocks = compressed_image_width / 8;

	ptable0004 = pdec->table_0004_pass2[compression_index];
	ptable8004 = pdec->table_8004_pass2[compression_index];

	/* Each block decode a square of 4x4; the two green Bayer phases
	 * alternate between odd columns of even rows and vice versa */
	while (nblocks) {
		decode_block(pdec, ptable0004, ptable8004);
		copy_image_block_Green(pdec->temp_colors, rgbbayer+1, real_image_width, pdec->scalebits);

		decode_block(pdec, ptable0004, ptable8004);
		copy_image_block_Green(pdec->temp_colors, rgbbayer+real_image_width, real_image_width, pdec->scalebits);

		rgbbayer += 16;
		nblocks -= 2;
	}
}
#endif
/**
*
* Uncompress a pwc23 buffer.
*
* pwc.view: size of the image wanted
* pwc.image: size of the image returned by the camera
* pwc.offset: (x,y) to displayer image in the view
*
* src: raw data
* dst: image output
* flags: PWCX_FLAG_PLANAR or PWCX_FLAG_BAYER
*/
void pwc_dec23_decompress(const struct pwc_device *pwc,
			  const void *src,
			  void *dst,
			  int flags)
{
	int bandlines_left, stride, bytes_per_block;

	/* the image is decoded band by band, 4 lines at a time */
	bandlines_left = pwc->image.y / 4;
	bytes_per_block = pwc->view.x * 4;

	if (flags & PWCX_FLAG_BAYER) {
#if ENABLE_BAYER_DECODER
		/* RGB Bayer format */
		unsigned char *rgbout;

		stride = pwc->view.x * pwc->offset.y;
		rgbout = dst + stride + pwc->offset.x;

		while (bandlines_left--) {
			DecompressBandBayer(pwc->decompress_data,
					    src,
					    rgbout,
					    pwc->image.x, pwc->view.x);

			src += pwc->vbandlength;
			rgbout += bytes_per_block;
		}
#else
		/* Bayer decoder compiled out: produce a blank frame */
		memset(dst, 0, pwc->view.x * pwc->view.y);
#endif
	} else {
		/* YUV420P image format */
		unsigned char *pout_planar_y;
		unsigned char *pout_planar_u;
		unsigned char *pout_planar_v;
		unsigned int plane_size;

		plane_size = pwc->view.x * pwc->view.y;

		/* offset in Y plane */
		stride = pwc->view.x * pwc->offset.y;
		pout_planar_y = dst + stride + pwc->offset.x;

		/* offsets in U/V planes (quarter-size planes, so the
		 * offsets are scaled down accordingly) */
		stride = (pwc->view.x * pwc->offset.y) / 4 + pwc->offset.x / 2;
		pout_planar_u = dst + plane_size + stride;
		pout_planar_v = dst + plane_size + plane_size / 4 + stride;

		while (bandlines_left--) {
			DecompressBand23(pwc->decompress_data,
					 src,
					 pout_planar_y, pout_planar_u, pout_planar_v,
					 pwc->image.x, pwc->view.x);
			src += pwc->vbandlength;
			pout_planar_y += bytes_per_block;
			pout_planar_u += pwc->view.x;
			pout_planar_v += pwc->view.x;
		}
	}
}
void pwc_dec23_exit(void)
{
	/* Do nothing: there is no global decoder state to release;
	 * per-device state is freed by whoever owns decompress_data
	 * (see the pwc_dec23_alloc() comment). */
}
/**
* Allocate a private structure used by lookup table.
* You must call kfree() to free the memory allocated.
*/
/**
 * Allocate the private lookup-table state used by the decompressor.
 * The caller owns the allocation and must release it with kfree().
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int pwc_dec23_alloc(struct pwc_device *pwc)
{
	pwc->decompress_data = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL);

	return pwc->decompress_data ? 0 : -ENOMEM;
}
/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
| gpl-2.0 |
imang/gcore_kernel | arch/cris/arch-v10/kernel/kgdb.c | 4665 | 50519 | /*!**************************************************************************
*!
*! FILE NAME : kgdb.c
*!
*! DESCRIPTION: Implementation of the gdb stub with respect to ETRAX 100.
*! It is a mix of arch/m68k/kernel/kgdb.c and cris_stub.c.
*!
*!---------------------------------------------------------------------------
*! HISTORY
*!
*! DATE NAME CHANGES
*! ---- ---- -------
*! Apr 26 1999 Hendrik Ruijter Initial version.
*! May 6 1999 Hendrik Ruijter Removed call to strlen in libc and removed
*! struct assignment as it generates calls to
*! memcpy in libc.
*! Jun 17 1999 Hendrik Ruijter Added gdb 4.18 support. 'X', 'qC' and 'qL'.
*! Jul 21 1999 Bjorn Wesen eLinux port
*!
*!---------------------------------------------------------------------------
*!
*! (C) Copyright 1999, Axis Communications AB, LUND, SWEDEN
*!
*!**************************************************************************/
/* @(#) cris_stub.c 1.3 06/17/99 */
/*
* kgdb usage notes:
* -----------------
*
* If you select CONFIG_ETRAX_KGDB in the configuration, the kernel will be
* built with different gcc flags: "-g" is added to get debug infos, and
* "-fomit-frame-pointer" is omitted to make debugging easier. Since the
 * resulting kernel will be quite big (approx. > 7 MB), it will be stripped
 * before compression. Such a kernel will behave just as usual, except if
 * given a "debug=<device>" command line option. (Only serial devices are
 * allowed for <device>, i.e. no printers or the like; possible values are
 * machine dependent and are the same as for the usual debug device, the one
 * for logging kernel messages.) If that option is given and the device can be
 * initialized, the kernel will connect to the remote gdb in trap_init(). The
 * serial parameters are fixed to 8N1 and 115200 bps, for ease of
 * implementation.
*
* To start a debugging session, start that gdb with the debugging kernel
* image (the one with the symbols, vmlinux.debug) named on the command line.
* This file will be used by gdb to get symbol and debugging infos about the
* kernel. Next, select remote debug mode by
* target remote <device>
* where <device> is the name of the serial device over which the debugged
* machine is connected. Maybe you have to adjust the baud rate by
* set remotebaud <rate>
* or also other parameters with stty:
* shell stty ... </dev/...
* If the kernel to debug has already booted, it waited for gdb and now
* connects, and you'll see a breakpoint being reported. If the kernel isn't
* running yet, start it now. The order of gdb and the kernel doesn't matter.
* Another thing worth knowing about in the getting-started phase is how to
* debug the remote protocol itself. This is activated with
* set remotedebug 1
* gdb will then print out each packet sent or received. You'll also get some
* messages about the gdb stub on the console of the debugged machine.
*
* If all that works, you can use lots of the usual debugging techniques on
* the kernel, e.g. inspecting and changing variables/memory, setting
* breakpoints, single stepping and so on. It's also possible to interrupt the
* debugged kernel by pressing C-c in gdb. Have fun! :-)
*
* The gdb stub is entered (and thus the remote gdb gets control) in the
* following situations:
*
* - If breakpoint() is called. This is just after kgdb initialization, or if
* a breakpoint() call has been put somewhere into the kernel source.
* (Breakpoints can of course also be set the usual way in gdb.)
* In eLinux, we call breakpoint() in init/main.c after IRQ initialization.
*
* - If there is a kernel exception, i.e. bad_super_trap() or die_if_kernel()
* are entered. All the CPU exceptions are mapped to (more or less..., see
* the hard_trap_info array below) appropriate signal, which are reported
* to gdb. die_if_kernel() is usually called after some kind of access
* error and thus is reported as SIGSEGV.
*
* - When panic() is called. This is reported as SIGABRT.
*
* - If C-c is received over the serial line, which is treated as
* SIGINT.
*
* Of course, all these signals are just faked for gdb, since there is no
* signal concept as such for the kernel. It also isn't possible --obviously--
* to set signal handlers from inside gdb, or restart the kernel with a
* signal.
*
* Current limitations:
*
* - While the kernel is stopped, interrupts are disabled for safety reasons
* (i.e., variables not changing magically or the like). But this also
* means that the clock isn't running anymore, and that interrupts from the
* hardware may get lost/not be served in time. This can cause some device
* errors...
*
* - When single-stepping, only one instruction of the current thread is
* executed, but interrupts are allowed for that time and will be serviced
* if pending. Be prepared for that.
*
* - All debugging happens in kernel virtual address space. There's no way to
* access physical memory not mapped in kernel space, or to access user
* space. A way to work around this is using get_user_long & Co. in gdb
* expressions, but only for the current process.
*
* - Interrupting the kernel only works if interrupts are currently allowed,
* and the interrupt of the serial line isn't blocked by some other means
* (IPL too high, disabled, ...)
*
* - The gdb stub is currently not reentrant, i.e. errors that happen therein
* (e.g. accessing invalid memory) may not be caught correctly. This could
* be removed in future by introducing a stack of struct registers.
*
*/
/*
* To enable debugger support, two things need to happen. One, a
* call to kgdb_init() is necessary in order to allow any breakpoints
* or error conditions to be properly intercepted and reported to gdb.
* Two, a breakpoint needs to be generated to begin communication. This
* is most easily accomplished by a call to breakpoint().
*
* The following gdb commands are supported:
*
* command function Return value
*
* g return the value of the CPU registers hex data or ENN
* G set the value of the CPU registers OK or ENN
*
* mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
* MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
*
* c Resume at current address SNN ( signal NN)
* cAA..AA Continue at address AA..AA SNN
*
* s Step one instruction SNN
* sAA..AA Step one instruction from AA..AA SNN
*
* k kill
*
* ? What was the last sigval ? SNN (signal NN)
*
* bBB..BB Set baud rate to BB..BB OK or BNN, then sets
* baud rate
*
* All commands and responses are sent with a packet which includes a
* checksum. A packet consists of
*
* $<packet info>#<checksum>.
*
* where
* <packet info> :: <characters representing the command or response>
* <checksum> :: < two hex digits computed as modulo 256 sum of <packetinfo>>
*
* When a packet is received, it is first acknowledged with either '+' or '-'.
* '+' indicates a successful transfer. '-' indicates a failed transfer.
*
* Example:
*
* Host: Reply:
* $m0,10#2a +$00010203040506070809101112131415#42
*
*/
#include <linux/string.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/linkage.h>
#include <linux/reboot.h>
#include <asm/setup.h>
#include <asm/ptrace.h>
#include <arch/svinto.h>
#include <asm/irq.h>
static int kgdb_started = 0;
/********************************* Register image ****************************/
/* Use the order of registers as defined in "AXIS ETRAX CRIS Programmer's
Reference", p. 1-1, with the additional register definitions of the
ETRAX 100LX in cris-opc.h.
There are 16 general 32-bit registers, R0-R15, where R14 is the stack
pointer, SP, and R15 is the program counter, PC.
There are 16 special registers, P0-P15, where three of the unimplemented
registers, P0, P4 and P8, are reserved as zero-registers. A read from
any of these registers returns zero and a write has no effect. */
/* Complete CPU register image saved on stub entry.  The layout mirrors
   the order used by the remote GDB ('g'/'G' packets).
   NOTE(review): the offset comments (e.g. mof at 0x46) only hold if the
   compiler inserts no padding between the 8/16-bit special registers and
   the following 32-bit ones — confirm against the CRIS ABI. */
typedef
struct register_image
{
	/* Offset */
	unsigned int r0; /* 0x00 */
	unsigned int r1; /* 0x04 */
	unsigned int r2; /* 0x08 */
	unsigned int r3; /* 0x0C */
	unsigned int r4; /* 0x10 */
	unsigned int r5; /* 0x14 */
	unsigned int r6; /* 0x18 */
	unsigned int r7; /* 0x1C */
	unsigned int r8; /* 0x20 Frame pointer */
	unsigned int r9; /* 0x24 */
	unsigned int r10; /* 0x28 */
	unsigned int r11; /* 0x2C */
	unsigned int r12; /* 0x30 */
	unsigned int r13; /* 0x34 */
	unsigned int sp; /* 0x38 Stack pointer */
	unsigned int pc; /* 0x3C Program counter */
	unsigned char p0; /* 0x40 8-bit zero-register */
	unsigned char vr; /* 0x41 Version register */
	unsigned short p4; /* 0x42 16-bit zero-register */
	unsigned short ccr; /* 0x44 Condition code register */
	unsigned int mof; /* 0x46 Multiply overflow register */
	unsigned int p8; /* 0x4A 32-bit zero-register */
	unsigned int ibr; /* 0x4E Interrupt base register */
	unsigned int irp; /* 0x52 Interrupt return pointer */
	unsigned int srp; /* 0x56 Subroutine return pointer */
	unsigned int bar; /* 0x5A Breakpoint address register */
	unsigned int dccr; /* 0x5E Double condition code register */
	unsigned int brp; /* 0x62 Breakpoint return pointer (pc in caller) */
	unsigned int usp; /* 0x66 User mode stack pointer */
} registers;
/************** Prototypes for local library functions ***********************/
/* Copy of strcpy from libc. */
static char *gdb_cris_strcpy (char *s1, const char *s2);
/* Copy of strlen from libc. */
static int gdb_cris_strlen (const char *s);
/* Copy of memchr from libc. */
static void *gdb_cris_memchr (const void *s, int c, int n);
/* Copy of strtol from libc. Does only support base 16. */
static int gdb_cris_strtol (const char *s, char **endptr, int base);
/********************** Prototypes for local functions. **********************/
/* Copy the content of a register image into another. The size n is
the size of the register image. Due to struct assignment generation of
memcpy in libc. */
static void copy_registers (registers *dptr, registers *sptr, int n);
/* Copy the stored registers from the stack. Put the register contents
of thread thread_id in the struct reg. */
static void copy_registers_from_stack (int thread_id, registers *reg);
/* Copy the registers to the stack. Put the register contents of thread
thread_id from struct reg to the stack. */
static void copy_registers_to_stack (int thread_id, registers *reg);
/* Write a value to a specified register regno in the register image
of the current thread. */
static int write_register (int regno, char *val);
/* Write a value to a specified register in the stack of a thread other
than the current thread. */
/* Missing return type restored: the definition below returns int; implicit
   int declarations are invalid since C99. */
static int write_stack_register (int thread_id, int regno, char *valptr);
/* Read a value from a specified register in the register image. Returns the
status of the read operation. The register value is returned in valptr. */
static int read_register (char regno, unsigned int *valptr);
/* Serial port, reads one character. ETRAX 100 specific. from debugport.c */
int getDebugChar (void);
/* Serial port, writes one character. ETRAX 100 specific. from debugport.c */
void putDebugChar (int val);
void enableDebugIRQ (void);
/* Returns the integer equivalent of a hexadecimal character. */
static int hex (char ch);
/* Convert the memory, pointed to by mem into hexadecimal representation.
Put the result in buf, and return a pointer to the last character
in buf (null). */
static char *mem2hex (char *buf, unsigned char *mem, int count);
/* Convert the array, in hexadecimal representation, pointed to by buf into
binary representation. Put the result in mem, and return a pointer to
the character after the last byte written. */
static unsigned char *hex2mem (unsigned char *mem, char *buf, int count);
/* Put the content of the array, in binary representation, pointed to by buf
into memory pointed to by mem, and return a pointer to
the character after the last byte written. */
static unsigned char *bin2mem (unsigned char *mem, unsigned char *buf, int count);
/* Await the sequence $<data>#<checksum> and store <data> in the array buffer
returned. */
static void getpacket (char *buffer);
/* Send $<data>#<checksum> from the <data> in the array buffer. */
static void putpacket (char *buffer);
/* Build and send a response packet in order to inform the host the
stub is stopped. */
static void stub_is_stopped (int sigval);
/* All expected commands are sent from remote.c. Send a response according
to the description in remote.c. */
static void handle_exception (int sigval);
/* Performs a complete re-start from scratch. ETRAX specific. */
static void kill_restart (void);
/******************** Prototypes for global functions. ***********************/
/* The string str is prepended with the GDB printout token and sent. */
void putDebugString (const unsigned char *str, int length); /* used by etrax100ser.c */
/* The hook for both static (compiled) and dynamic breakpoints set by GDB.
ETRAX 100 specific. */
void handle_breakpoint (void); /* used by irq.c */
/* The hook for an interrupt generated by GDB. ETRAX 100 specific. */
void handle_interrupt (void); /* used by irq.c */
/* A static breakpoint to be used at startup. */
void breakpoint (void); /* called by init/main.c */
/* From osys_int.c, executing_task contains the number of the current
executing task in osys. Does not know of object-oriented threads. */
extern unsigned char executing_task;
/* The number of characters used for a 64 bit thread identifier. */
#define HEXCHARS_IN_THREAD_ID 16
/* Avoid warning as the internal_stack is not used in the C-code. */
#define USEDVAR(name) { if (name) { ; } }
#define USEDFUN(name) { void (*pf)(void) = (void *)name; USEDVAR(pf) }
/********************************** Packet I/O ******************************/
/* BUFMAX defines the maximum number of characters in
inbound/outbound buffers */
#define BUFMAX 512
/* Run-length encoding maximum length. Send 64 at most. */
#define RUNLENMAX 64
/* The inbound/outbound buffers used in packet I/O */
static char remcomInBuffer[BUFMAX];
static char remcomOutBuffer[BUFMAX];
/* Error and warning messages. */
enum error_type
{
SUCCESS, E01, E02, E03, E04, E05, E06, E07
};
static char *error_message[] =
{
"",
"E01 Set current or general thread - H[c,g] - internal error.",
"E02 Change register content - P - cannot change read-only register.",
"E03 Thread is not alive.", /* T, not used. */
"E04 The command is not supported - [s,C,S,!,R,d,r] - internal error.",
"E05 Change register content - P - the register is not implemented..",
"E06 Change memory content - M - internal error.",
"E07 Change register content - P - the register is not stored on the stack"
};
/********************************* Register image ****************************/
/* Use the order of registers as defined in "AXIS ETRAX CRIS Programmer's
Reference", p. 1-1, with the additional register definitions of the
ETRAX 100LX in cris-opc.h.
There are 16 general 32-bit registers, R0-R15, where R14 is the stack
pointer, SP, and R15 is the program counter, PC.
There are 16 special registers, P0-P15, where three of the unimplemented
registers, P0, P4 and P8, are reserved as zero-registers. A read from
any of these registers returns zero and a write has no effect. */
enum register_name
{
R0, R1, R2, R3,
R4, R5, R6, R7,
R8, R9, R10, R11,
R12, R13, SP, PC,
P0, VR, P2, P3,
P4, CCR, P6, MOF,
P8, IBR, IRP, SRP,
BAR, DCCR, BRP, USP
};
/* The register sizes of the registers in register_name. An unimplemented register
is designated by size 0 in this array. */
static int register_size[] =
{
4, 4, 4, 4,
4, 4, 4, 4,
4, 4, 4, 4,
4, 4, 4, 4,
1, 1, 0, 0,
2, 2, 0, 4,
4, 4, 4, 4,
4, 4, 4, 4
};
/* Contains the register image of the executing thread in the assembler
part of the code in order to avoid horrible addressing modes. */
static registers reg;
/* FIXME: Should this be used? Delete otherwise. */
/* Contains the assumed consistency state of the register image. Uses the
enum error_type for state information. */
static int consistency_status = SUCCESS;
/********************************** Handle exceptions ************************/
/* The variable reg contains the register image associated with the
current_thread_c variable. It is a complete register image created at
entry. The reg_g contains a register image of a task where the general
registers are taken from the stack and all special registers are taken
from the executing task. It is associated with current_thread_g and used
in order to provide access mainly for 'g', 'G' and 'P'.
*/
/* Need two task id pointers in order to handle Hct and Hgt commands. */
static int current_thread_c = 0;
static int current_thread_g = 0;
/* Need two register images in order to handle Hct and Hgt commands. The
variable reg_g is in addition to reg above. */
static registers reg_g;
/********************************** Breakpoint *******************************/
/* Use an internal stack in the breakpoint and interrupt response routines */
#define INTERNAL_STACK_SIZE 1024
static char internal_stack[INTERNAL_STACK_SIZE];
/* Due to the breakpoint return pointer, a state variable is needed to keep
track of whether it is a static (compiled) or dynamic (gdb-invoked)
breakpoint to be handled. A static breakpoint uses the content of register
BRP as it is whereas a dynamic breakpoint requires subtraction with 2
in order to execute the instruction. The first breakpoint is static. */
static unsigned char is_dyn_brkp = 0;
/********************************* String library ****************************/
/* Single-step over library functions creates trap loops. */
/* Copy char s2[] to s1[]. */
/* Copy the NUL-terminated string s2 into s1 and return s1.
   Local strcpy replacement: single-stepping over libc creates trap loops. */
static char*
gdb_cris_strcpy (char *s1, const char *s2)
{
	char *dst = s1;

	while ((*dst++ = *s2++) != '\0')
		;
	return s1;
}
/* Find length of s[]. */
/* Return the length of s, excluding the terminating NUL.
   Local strlen replacement: single-stepping over libc creates trap loops. */
static int
gdb_cris_strlen (const char *s)
{
	int len = 0;

	while (s[len] != '\0')
		len++;
	return len;
}
/* Find first occurrence of c in s[n]. */
/* Return a pointer to the first occurrence of byte c within the first n
   bytes of s, or NULL if it does not occur.  Local memchr replacement. */
static void*
gdb_cris_memchr (const void *s, int c, int n)
{
	const unsigned char target = (unsigned char) c;
	const unsigned char *p = s;

	while (n-- > 0) {
		if (*p == target)
			return (void *) p;
		p++;
	}
	return NULL;
}
/******************************* Standard library ****************************/
/* Single-step over library functions creates trap loops. */
/* Convert string to long. */
/* Convert the leading digits of s to an integer in the given base by
   looking each character up in the kernel's hex_asc digit table; stops at
   the first character that is not a valid digit.  Only bases up to 16
   (the length of hex_asc) are meaningful.  If endptr is non-NULL, it
   receives a pointer to the first unconverted character. */
static int
gdb_cris_strtol (const char *s, char **endptr, int base)
{
	int value = 0;
	char *p = (char *) s;
	char *digit;

	while ((digit = gdb_cris_memchr (hex_asc, *p, base)) != NULL) {
		value = value * base + (int) (digit - hex_asc);
		p++;
	}

	if (endptr != NULL)
		*endptr = p;	/* first unconverted character */

	return value;
}
/********************************* Register image ****************************/
/* Copy the content of a register image into another. The size n is
the size of the register image. Due to struct assignment generation of
memcpy in libc. */
/* Byte-wise copy of n bytes from one register image to another.  Done by
   hand because a struct assignment would generate a libc memcpy call,
   which the stub must avoid. */
static void
copy_registers (registers *dptr, registers *sptr, int n)
{
	unsigned char *dst = (unsigned char *) dptr;
	unsigned char *src = (unsigned char *) sptr;

	while (n-- > 0)
		*dst++ = *src++;
}
#ifdef PROCESS_SUPPORT
/* Copy the stored registers from the stack. Put the register contents
of thread thread_id in the struct reg. */
static void
copy_registers_from_stack (int thread_id, registers *regptr)
{
	stack_registers *frame = (stack_registers *) stack_list[thread_id];
	unsigned int *out = (unsigned int *) regptr;
	int i;

	/* Fill the general-register words of the image from the stacked
	   frame, walking its array from r[13] down to r[0]. */
	for (i = 13; i >= 0; i--)
		*out++ = frame->r[i];

	regptr->sp = (unsigned int) stack_list[thread_id];
	regptr->pc = frame->pc;
	regptr->dccr = frame->dccr;
	regptr->srp = frame->srp;
}
/* Copy the registers to the stack. Put the register contents of thread
thread_id from struct reg to the stack. */
static void
copy_registers_to_stack (int thread_id, registers *regptr)
{
	stack_registers *frame = (stack_registers *) stack_list[thread_id];
	unsigned int *in = (unsigned int *) regptr;
	int i;

	/* The first 14 words of the register image are the general
	   registers; write them into the stacked frame in order. */
	for (i = 0; i < 14; i++)
		frame->r[i] = *in++;

	frame->pc = regptr->pc;
	frame->dccr = regptr->dccr;
	frame->srp = regptr->srp;
}
#endif
/* Write a value to a specified register in the register image of the current
thread. Returns status code SUCCESS, E02 or E05. */
static int
write_register (int regno, char *val)
{
int status = SUCCESS;
registers *current_reg = ®
if (regno >= R0 && regno <= PC) {
/* 32-bit register with simple offset. */
hex2mem ((unsigned char *)current_reg + regno * sizeof(unsigned int),
val, sizeof(unsigned int));
}
else if (regno == P0 || regno == VR || regno == P4 || regno == P8) {
/* Do not support read-only registers. */
status = E02;
}
else if (regno == CCR) {
/* 16 bit register with complex offset. (P4 is read-only, P6 is not implemented,
and P7 (MOF) is 32 bits in ETRAX 100LX. */
hex2mem ((unsigned char *)&(current_reg->ccr) + (regno-CCR) * sizeof(unsigned short),
val, sizeof(unsigned short));
}
else if (regno >= MOF && regno <= USP) {
/* 32 bit register with complex offset. (P8 has been taken care of.) */
hex2mem ((unsigned char *)&(current_reg->ibr) + (regno-IBR) * sizeof(unsigned int),
val, sizeof(unsigned int));
}
else {
/* Do not support nonexisting or unimplemented registers (P2, P3, and P6). */
status = E05;
}
return status;
}
#ifdef PROCESS_SUPPORT
/* Write a value to a specified register in the stack of a thread other
than the current thread. Returns status code SUCCESS or E07. */
/* Write a value to a specified register in the stack of a thread other
   than the current thread. Returns status code SUCCESS or E07. */
static int
write_stack_register (int thread_id, int regno, char *valptr)
{
	stack_registers *frame = (stack_registers *) stack_list[thread_id];
	unsigned int val;

	hex2mem ((unsigned char *) &val, valptr, sizeof(unsigned int));

	if (regno >= R0 && regno < SP) {
		frame->r[regno] = val;
		return SUCCESS;
	}

	switch (regno) {
	case SP:
		stack_list[thread_id] = val;
		return SUCCESS;
	case PC:
		frame->pc = val;
		return SUCCESS;
	case SRP:
		frame->srp = val;
		return SUCCESS;
	case DCCR:
		frame->dccr = val;
		return SUCCESS;
	default:
		/* The register is not stored on the stack. */
		return E07;
	}
}
#endif
/* Read a value from a specified register in the register image. Returns the
value in the register or -1 for non-implemented registers.
Should check consistency_status after a call which may be E05 after changes
in the implementation. */
static int
read_register (char regno, unsigned int *valptr)
{
registers *current_reg = ®
if (regno >= R0 && regno <= PC) {
/* 32-bit register with simple offset. */
*valptr = *(unsigned int *)((char *)current_reg + regno * sizeof(unsigned int));
return SUCCESS;
}
else if (regno == P0 || regno == VR) {
/* 8 bit register with complex offset. */
*valptr = (unsigned int)(*(unsigned char *)
((char *)&(current_reg->p0) + (regno-P0) * sizeof(char)));
return SUCCESS;
}
else if (regno == P4 || regno == CCR) {
/* 16 bit register with complex offset. */
*valptr = (unsigned int)(*(unsigned short *)
((char *)&(current_reg->p4) + (regno-P4) * sizeof(unsigned short)));
return SUCCESS;
}
else if (regno >= MOF && regno <= USP) {
/* 32 bit register with complex offset. */
*valptr = *(unsigned int *)((char *)&(current_reg->p8)
+ (regno-P8) * sizeof(unsigned int));
return SUCCESS;
}
else {
/* Do not support nonexisting or unimplemented registers (P2, P3, and P6). */
consistency_status = E05;
return E05;
}
}
/********************************** Packet I/O ******************************/
/* Returns the integer equivalent of a hexadecimal character. */
/* Return the value (0..15) of a hexadecimal digit character, or -1 if
   the character is not a hex digit. */
static int
hex (char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F')
		return ch - 'A' + 10;
	return -1;
}
/* Convert the memory, pointed to by mem into hexadecimal representation.
Put the result in buf, and return a pointer to the last character
in buf (null). */
static int do_printk = 0;
/* Convert count bytes at mem into two-digit hex pairs in buf.  A NULL
   mem is reported as all-zero bytes (bogus read from address 0).
   Returns a pointer to the terminating NUL written at the end of buf. */
static char *
mem2hex(char *buf, unsigned char *mem, int count)
{
	int i;

	if (mem == NULL) {
		/* Bogus read from m0. FIXME: What constitutes a valid address? */
		for (i = 0; i < count; i++) {
			*buf++ = '0';
			*buf++ = '0';
		}
	} else {
		/* Valid mem address: emit each byte as two hex digits. */
		for (i = 0; i < count; i++)
			buf = hex_byte_pack(buf, *mem++);
	}

	*buf = '\0';	/* Terminate properly. */
	return buf;
}
/* Convert the array, in hexadecimal representation, pointed to by buf into
binary representation. Put the result in mem, and return a pointer to
the character after the last byte written. */
/* Decode count bytes from the hex string buf (two digits per byte, high
   nibble first) into mem.  Returns a pointer just past the last byte
   written. */
static unsigned char*
hex2mem (unsigned char *mem, char *buf, int count)
{
	while (count-- > 0) {
		unsigned char byte;

		byte = hex (*buf++) << 4;
		byte += hex (*buf++);
		*mem++ = byte;
	}
	return mem;
}
/* Put the content of the array, in binary representation, pointed to by buf
into memory pointed to by mem, and return a pointer to the character after
the last byte written.
Gdb will escape $, #, and the escape char (0x7d). */
/* Copy count bytes of GDB binary-packet data from buf into mem,
   unescaping as we go, and return a pointer just past the last byte
   written.  GDB escapes '$', '#' and the escape character itself (0x7d)
   by sending 0x7d followed by the original byte minus 0x20; be paranoid
   and only unescape those three values. */
static unsigned char*
bin2mem (unsigned char *mem, unsigned char *buf, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (buf[0] == 0x7d &&
		    (buf[1] == 0x3 || buf[1] == 0x4 || buf[1] == 0x5D)) {
			/* Skip the escape byte and restore the original value. */
			buf++;
			*buf += 0x20;
		}
		*mem++ = *buf++;
	}
	return mem;
}
/* Await the sequence $<data>#<checksum> and store <data> in the array buffer
returned. */
/* Receive one GDB packet: wait for '$', accumulate payload bytes and a
   running modulo-256 checksum until '#', then read the two hex checksum
   digits that follow.  On mismatch a '-' is sent and the whole receive
   loop retries; on match a '+' ACK is sent.  The payload is stored
   NUL-terminated in buffer. */
static void
getpacket (char *buffer)
{
	unsigned char checksum;		/* our modulo-256 sum of the payload */
	unsigned char xmitcsum;		/* checksum transmitted by the host */
	int i;
	int count;
	char ch;

	do {
		while ((ch = getDebugChar ()) != '$')
			/* Wait for the start character $ and ignore all other characters */;
		checksum = 0;
		xmitcsum = -1;	/* forces a retry if no '#' terminator is seen */
		count = 0;
		/* Read until a # or the end of the buffer is reached */
		while (count < BUFMAX) {
			ch = getDebugChar ();
			if (ch == '#')
				break;
			checksum = checksum + ch;
			buffer[count] = ch;
			count = count + 1;
		}
		buffer[count] = '\0';
		if (ch == '#') {
			/* Two hex digits follow the '#', high nibble first. */
			xmitcsum = hex (getDebugChar ()) << 4;
			xmitcsum += hex (getDebugChar ());
			if (checksum != xmitcsum) {
				/* Wrong checksum */
				putDebugChar ('-');
			}
			else {
				/* Correct checksum */
				putDebugChar ('+');
				/* If sequence characters are received, reply with them */
				if (buffer[2] == ':') {
					putDebugChar (buffer[0]);
					putDebugChar (buffer[1]);
					/* Remove the sequence characters from the buffer */
					count = gdb_cris_strlen (buffer);
					for (i = 3; i <= count; i++)
						buffer[i - 3] = buffer[i];
				}
			}
		}
	} while (checksum != xmitcsum);
}
/* Send $<data>#<checksum> from the <data> in the array buffer. */
/* Transmit buffer as $<data>#<checksum>, applying GDB run-length
   encoding: a run longer than 3 identical characters is sent as the
   character, '*', and a count byte computed as (run length + ' ' - 4).
   Once kgdb is up (kgdb_started), retransmit until the host ACKs
   with '+'. */
static void
putpacket(char *buffer)
{
	int checksum;
	int runlen;
	int encode;

	do {
		char *src = buffer;
		putDebugChar ('$');
		checksum = 0;
		while (*src) {
			/* Do run length encoding */
			putDebugChar (*src);
			checksum += *src;
			runlen = 0;
			/* Count how many times the current character repeats. */
			while (runlen < RUNLENMAX && *src == src[runlen]) {
				runlen++;
			}
			if (runlen > 3) {
				/* Got a useful amount */
				putDebugChar ('*');
				checksum += '*';
				/* Count byte: (run length - 4) offset into printable range. */
				encode = runlen + ' ' - 4;
				putDebugChar (encode);
				checksum += encode;
				src += runlen;
			}
			else {
				src++;
			}
		}
		/* Trailer: '#' plus the modulo-256 checksum as two hex digits. */
		putDebugChar('#');
		putDebugChar(hex_asc_hi(checksum));
		putDebugChar(hex_asc_lo(checksum));
	} while(kgdb_started && (getDebugChar() != '+'));
}
/* The string str is prepended with the GDB printout token and sent. Required
in traditional implementations. */
/* Send str (length bytes) to the host as a GDB console-output ('O')
   packet: the payload is hex-encoded after the 'O' marker. */
void
putDebugString (const unsigned char *str, int length)
{
	char *out = remcomOutBuffer;

	*out = 'O';
	mem2hex(out + 1, (unsigned char *) str, length);
	putpacket(out);
}
/********************************** Handle exceptions ************************/
/* Build and send a response packet in order to inform the host the
stub is stopped. TAAn...:r...;n...:r...;n...:r...;
AA = signal number
n... = register number (hex)
r... = register contents
n... = `thread'
r... = thread process ID. This is a hex integer.
n... = other string not starting with valid hex digit.
gdb should ignore this n,r pair and go on to the next.
This way we can extend the protocol. */
static void
stub_is_stopped(int sigval)
{
	char *ptr = remcomOutBuffer;
	int regno;

	unsigned int reg_cont;
	int status;

	/* Send trap type (converted to signal) */
	*ptr++ = 'T';
	ptr = hex_byte_pack(ptr, sigval);

	/* Send register contents. We probably only need to send the
	 * PC, frame pointer and stack pointer here. Other registers will be
	 * explicitly asked for. But for now, send all.
	 */
	for (regno = R0; regno <= USP; regno++) {
		/* Store n...:r...; for the registers in the buffer. */
		/* Fix: '&reg_cont' had been mangled into the '(R)' sign by
		   HTML-entity decoding. */
		status = read_register (regno, &reg_cont);
		if (status == SUCCESS) {
			ptr = hex_byte_pack(ptr, regno);
			*ptr++ = ':';
			ptr = mem2hex(ptr, (unsigned char *)&reg_cont,
				      register_size[regno]);
			*ptr++ = ';';
		}
	}

#ifdef PROCESS_SUPPORT
	/* Store the registers of the executing thread. Assume that both step,
	   continue, and register content requests are with respect to this
	   thread. The executing task is from the operating system scheduler. */
	current_thread_c = executing_task;
	current_thread_g = executing_task;

	/* A struct assignment translates into a libc memcpy call. Avoid
	   all libc functions in order to prevent recursive break points. */
	copy_registers (&reg_g, &reg, sizeof(registers));

	/* Store thread:r...; with the executing task TID.
	   NOTE(review): 'pos' is not declared anywhere in this function, so
	   this branch cannot compile if PROCESS_SUPPORT is ever defined —
	   confirm before enabling it. */
	gdb_cris_strcpy (&remcomOutBuffer[pos], "thread:");
	pos += gdb_cris_strlen ("thread:");
	remcomOutBuffer[pos++] = hex_asc_hi(executing_task);
	remcomOutBuffer[pos++] = hex_asc_lo(executing_task);
	gdb_cris_strcpy (&remcomOutBuffer[pos], ";");
#endif

	/* null-terminate and send it off */
	*ptr = 0;

	putpacket (remcomOutBuffer);
}
/* All expected commands are sent from remote.c. Send a response according
to the description in remote.c. */
static void
handle_exception (int sigval)
{
/* Avoid warning of not used. */
USEDFUN(handle_exception);
USEDVAR(internal_stack[0]);
/* Send response. */
stub_is_stopped (sigval);
for (;;) {
remcomOutBuffer[0] = '\0';
getpacket (remcomInBuffer);
switch (remcomInBuffer[0]) {
case 'g':
/* Read registers: g
Success: Each byte of register data is described by two hex digits.
Registers are in the internal order for GDB, and the bytes
in a register are in the same order the machine uses.
Failure: void. */
{
#ifdef PROCESS_SUPPORT
/* Use the special register content in the executing thread. */
copy_registers (®_g, ®, sizeof(registers));
/* Replace the content available on the stack. */
if (current_thread_g != executing_task) {
copy_registers_from_stack (current_thread_g, ®_g);
}
mem2hex ((unsigned char *)remcomOutBuffer, (unsigned char *)®_g, sizeof(registers));
#else
mem2hex(remcomOutBuffer, (char *)®, sizeof(registers));
#endif
}
break;
case 'G':
/* Write registers. GXX..XX
Each byte of register data is described by two hex digits.
Success: OK
Failure: void. */
#ifdef PROCESS_SUPPORT
hex2mem ((unsigned char *)®_g, &remcomInBuffer[1], sizeof(registers));
if (current_thread_g == executing_task) {
copy_registers (®, ®_g, sizeof(registers));
}
else {
copy_registers_to_stack(current_thread_g, ®_g);
}
#else
hex2mem((char *)®, &remcomInBuffer[1], sizeof(registers));
#endif
gdb_cris_strcpy (remcomOutBuffer, "OK");
break;
case 'P':
/* Write register. Pn...=r...
Write register n..., hex value without 0x, with value r...,
which contains a hex value without 0x and two hex digits
for each byte in the register (target byte order). P1f=11223344 means
set register 31 to 44332211.
Success: OK
Failure: E02, E05 */
{
char *suffix;
int regno = gdb_cris_strtol (&remcomInBuffer[1], &suffix, 16);
int status;
#ifdef PROCESS_SUPPORT
if (current_thread_g != executing_task)
status = write_stack_register (current_thread_g, regno, suffix+1);
else
#endif
status = write_register (regno, suffix+1);
switch (status) {
case E02:
/* Do not support read-only registers. */
gdb_cris_strcpy (remcomOutBuffer, error_message[E02]);
break;
case E05:
/* Do not support non-existing registers. */
gdb_cris_strcpy (remcomOutBuffer, error_message[E05]);
break;
case E07:
/* Do not support non-existing registers on the stack. */
gdb_cris_strcpy (remcomOutBuffer, error_message[E07]);
break;
default:
/* Valid register number. */
gdb_cris_strcpy (remcomOutBuffer, "OK");
break;
}
}
break;
case 'm':
/* Read from memory. mAA..AA,LLLL
AA..AA is the address and LLLL is the length.
Success: XX..XX is the memory content. Can be fewer bytes than
requested if only part of the data may be read. m6000120a,6c means
retrieve 108 byte from base address 6000120a.
Failure: void. */
{
char *suffix;
unsigned char *addr = (unsigned char *)gdb_cris_strtol(&remcomInBuffer[1],
&suffix, 16); int length = gdb_cris_strtol(suffix+1, 0, 16);
mem2hex(remcomOutBuffer, addr, length);
}
break;
case 'X':
/* Write to memory. XAA..AA,LLLL:XX..XX
AA..AA is the start address, LLLL is the number of bytes, and
XX..XX is the binary data.
Success: OK
Failure: void. */
case 'M':
/* Write to memory. MAA..AA,LLLL:XX..XX
AA..AA is the start address, LLLL is the number of bytes, and
XX..XX is the hexadecimal data.
Success: OK
Failure: void. */
{
char *lenptr;
char *dataptr;
unsigned char *addr = (unsigned char *)gdb_cris_strtol(&remcomInBuffer[1],
&lenptr, 16);
int length = gdb_cris_strtol(lenptr+1, &dataptr, 16);
if (*lenptr == ',' && *dataptr == ':') {
if (remcomInBuffer[0] == 'M') {
hex2mem(addr, dataptr + 1, length);
}
else /* X */ {
bin2mem(addr, dataptr + 1, length);
}
gdb_cris_strcpy (remcomOutBuffer, "OK");
}
else {
gdb_cris_strcpy (remcomOutBuffer, error_message[E06]);
}
}
break;
case 'c':
/* Continue execution. cAA..AA
AA..AA is the address where execution is resumed. If AA..AA is
omitted, resume at the present address.
Success: return to the executing thread.
Failure: will never know. */
if (remcomInBuffer[1] != '\0') {
reg.pc = gdb_cris_strtol (&remcomInBuffer[1], 0, 16);
}
enableDebugIRQ();
return;
case 's':
/* Step. sAA..AA
AA..AA is the address where execution is resumed. If AA..AA is
omitted, resume at the present address. Success: return to the
executing thread. Failure: will never know.
Should never be invoked. The single-step is implemented on
the host side. If ever invoked, it is an internal error E04. */
gdb_cris_strcpy (remcomOutBuffer, error_message[E04]);
putpacket (remcomOutBuffer);
return;
case '?':
/* The last signal which caused a stop. ?
Success: SAA, where AA is the signal number.
Failure: void. */
remcomOutBuffer[0] = 'S';
remcomOutBuffer[1] = hex_asc_hi(sigval);
remcomOutBuffer[2] = hex_asc_lo(sigval);
remcomOutBuffer[3] = 0;
break;
case 'D':
/* Detach from host. D
Success: OK, and return to the executing thread.
Failure: will never know */
putpacket ("OK");
return;
case 'k':
case 'r':
/* kill request or reset request.
Success: restart of target.
Failure: will never know. */
kill_restart ();
break;
case 'C':
case 'S':
case '!':
case 'R':
case 'd':
/* Continue with signal sig. Csig;AA..AA
Step with signal sig. Ssig;AA..AA
Use the extended remote protocol. !
Restart the target system. R0
Toggle debug flag. d
Search backwards. tAA:PP,MM
Not supported: E04 */
gdb_cris_strcpy (remcomOutBuffer, error_message[E04]);
break;
#ifdef PROCESS_SUPPORT
case 'T':
/* Thread alive. TXX
Is thread XX alive?
Success: OK, thread XX is alive.
Failure: E03, thread XX is dead. */
{
int thread_id = (int)gdb_cris_strtol (&remcomInBuffer[1], 0, 16);
/* Cannot tell whether it is alive or not. */
if (thread_id >= 0 && thread_id < number_of_tasks)
gdb_cris_strcpy (remcomOutBuffer, "OK");
}
break;
case 'H':
/* Set thread for subsequent operations: Hct
c = 'c' for thread used in step and continue;
t can be -1 for all threads.
c = 'g' for thread used in other operations.
t = 0 means pick any thread.
Success: OK
Failure: E01 */
{
int thread_id = gdb_cris_strtol (&remcomInBuffer[2], 0, 16);
if (remcomInBuffer[1] == 'c') {
/* c = 'c' for thread used in step and continue */
/* Do not change current_thread_c here. It would create a mess in
the scheduler. */
gdb_cris_strcpy (remcomOutBuffer, "OK");
}
else if (remcomInBuffer[1] == 'g') {
/* c = 'g' for thread used in other operations.
t = 0 means pick any thread. Impossible since the scheduler does
not allow that. */
if (thread_id >= 0 && thread_id < number_of_tasks) {
current_thread_g = thread_id;
gdb_cris_strcpy (remcomOutBuffer, "OK");
}
else {
/* Not expected - send an error message. */
gdb_cris_strcpy (remcomOutBuffer, error_message[E01]);
}
}
else {
/* Not expected - send an error message. */
gdb_cris_strcpy (remcomOutBuffer, error_message[E01]);
}
}
break;
case 'q':
case 'Q':
/* Query of general interest. qXXXX
Set general value XXXX. QXXXX=yyyy */
{
int pos;
int nextpos;
int thread_id;
switch (remcomInBuffer[1]) {
case 'C':
/* Identify the remote current thread. */
gdb_cris_strcpy (&remcomOutBuffer[0], "QC");
remcomOutBuffer[2] = hex_asc_hi(current_thread_c);
remcomOutBuffer[3] = hex_asc_lo(current_thread_c);
remcomOutBuffer[4] = '\0';
break;
case 'L':
gdb_cris_strcpy (&remcomOutBuffer[0], "QM");
/* Reply with number of threads. */
if (os_is_started()) {
remcomOutBuffer[2] = hex_asc_hi(number_of_tasks);
remcomOutBuffer[3] = hex_asc_lo(number_of_tasks);
}
else {
remcomOutBuffer[2] = hex_asc_hi(0);
remcomOutBuffer[3] = hex_asc_lo(1);
}
/* Done with the reply. */
remcomOutBuffer[4] = hex_asc_lo(1);
pos = 5;
/* Expects the argument thread id. */
for (; pos < (5 + HEXCHARS_IN_THREAD_ID); pos++)
remcomOutBuffer[pos] = remcomInBuffer[pos];
/* Reply with the thread identifiers. */
if (os_is_started()) {
/* Store the thread identifiers of all tasks. */
for (thread_id = 0; thread_id < number_of_tasks; thread_id++) {
nextpos = pos + HEXCHARS_IN_THREAD_ID - 1;
for (; pos < nextpos; pos ++)
remcomOutBuffer[pos] = hex_asc_lo(0);
remcomOutBuffer[pos++] = hex_asc_lo(thread_id);
}
}
else {
/* Store the thread identifier of the boot task. */
nextpos = pos + HEXCHARS_IN_THREAD_ID - 1;
for (; pos < nextpos; pos ++)
remcomOutBuffer[pos] = hex_asc_lo(0);
remcomOutBuffer[pos++] = hex_asc_lo(current_thread_c);
}
remcomOutBuffer[pos] = '\0';
break;
default:
/* Not supported: "" */
/* Request information about section offsets: qOffsets. */
remcomOutBuffer[0] = 0;
break;
}
}
break;
#endif /* PROCESS_SUPPORT */
default:
/* The stub should ignore other request and send an empty
response ($#<checksum>). This way we can extend the protocol and GDB
can tell whether the stub it is talking to uses the old or the new. */
remcomOutBuffer[0] = 0;
break;
}
putpacket(remcomOutBuffer);
}
}
/*
 * Perform a complete re-start from scratch by rebooting the machine.
 * Used for the GDB 'k' (kill) and 'r' (reset) packets.
 *
 * Fix: the definition used an old-style empty parameter list
 * "kill_restart ()"; declare it as a proper (void) prototype.
 */
static void
kill_restart(void)
{
	/* Empty command string selects the platform's default restart. */
	machine_restart("");
}
/********************************** Breakpoint *******************************/
/* The hook for both a static (compiled) and a dynamic breakpoint set by GDB.
An internal stack is used by the stub. The register image of the caller is
stored in the structure register_image.
Interactive communication with the host is handled by handle_exception and
finally the register image is restored. */
void kgdb_handle_breakpoint(void);
/*
 * NOTE(review): the whole handler is a single top-level asm() whose body is
 * a GCC multi-line string literal (an old extension; modern compilers need
 * explicit "\n" continuations).  The ';' comments below are assembler
 * comments *inside* that string literal and are deliberately left untouched.
 *
 * Flow: snapshot the caller's registers into the 'reg' image, distinguish
 * static vs dynamic breakpoints via is_dyn_brkp, call
 * handle_exception(SIGTRAP) on the stub's internal stack, then restore the
 * image and resume through BRP.
 */
asm ("
.global kgdb_handle_breakpoint
kgdb_handle_breakpoint:
;;
;; Response to the break-instruction
;;
;; Create a register image of the caller
;;
move $dccr,[reg+0x5E] ; Save the flags in DCCR before disable interrupts
di ; Disable interrupts
move.d $r0,[reg] ; Save R0
move.d $r1,[reg+0x04] ; Save R1
move.d $r2,[reg+0x08] ; Save R2
move.d $r3,[reg+0x0C] ; Save R3
move.d $r4,[reg+0x10] ; Save R4
move.d $r5,[reg+0x14] ; Save R5
move.d $r6,[reg+0x18] ; Save R6
move.d $r7,[reg+0x1C] ; Save R7
move.d $r8,[reg+0x20] ; Save R8
move.d $r9,[reg+0x24] ; Save R9
move.d $r10,[reg+0x28] ; Save R10
move.d $r11,[reg+0x2C] ; Save R11
move.d $r12,[reg+0x30] ; Save R12
move.d $r13,[reg+0x34] ; Save R13
move.d $sp,[reg+0x38] ; Save SP (R14)
;; Due to the old assembler-versions BRP might not be recognized
.word 0xE670 ; move brp,$r0
subq 2,$r0 ; Set to address of previous instruction.
move.d $r0,[reg+0x3c] ; Save the address in PC (R15)
clear.b [reg+0x40] ; Clear P0
move $vr,[reg+0x41] ; Save special register P1
clear.w [reg+0x42] ; Clear P4
move $ccr,[reg+0x44] ; Save special register CCR
move $mof,[reg+0x46] ; P7
clear.d [reg+0x4A] ; Clear P8
move $ibr,[reg+0x4E] ; P9,
move $irp,[reg+0x52] ; P10,
move $srp,[reg+0x56] ; P11,
move $dtp0,[reg+0x5A] ; P12, register BAR, assembler might not know BAR
; P13, register DCCR already saved
;; Due to the old assembler-versions BRP might not be recognized
.word 0xE670 ; move brp,r0
;; Static (compiled) breakpoints must return to the next instruction in order
;; to avoid infinite loops. Dynamic (gdb-invoked) must restore the instruction
;; in order to execute it when execution is continued.
test.b [is_dyn_brkp] ; Is this a dynamic breakpoint?
beq is_static ; No, a static breakpoint
nop
subq 2,$r0 ; rerun the instruction the break replaced
is_static:
moveq 1,$r1
move.b $r1,[is_dyn_brkp] ; Set the state variable to dynamic breakpoint
move.d $r0,[reg+0x62] ; Save the return address in BRP
move $usp,[reg+0x66] ; USP
;;
;; Handle the communication
;;
move.d internal_stack+1020,$sp ; Use the internal stack which grows upward
moveq 5,$r10 ; SIGTRAP
jsr handle_exception ; Interactive routine
;;
;; Return to the caller
;;
move.d [reg],$r0 ; Restore R0
move.d [reg+0x04],$r1 ; Restore R1
move.d [reg+0x08],$r2 ; Restore R2
move.d [reg+0x0C],$r3 ; Restore R3
move.d [reg+0x10],$r4 ; Restore R4
move.d [reg+0x14],$r5 ; Restore R5
move.d [reg+0x18],$r6 ; Restore R6
move.d [reg+0x1C],$r7 ; Restore R7
move.d [reg+0x20],$r8 ; Restore R8
move.d [reg+0x24],$r9 ; Restore R9
move.d [reg+0x28],$r10 ; Restore R10
move.d [reg+0x2C],$r11 ; Restore R11
move.d [reg+0x30],$r12 ; Restore R12
move.d [reg+0x34],$r13 ; Restore R13
;;
;; FIXME: Which registers should be restored?
;;
move.d [reg+0x38],$sp ; Restore SP (R14)
move [reg+0x56],$srp ; Restore the subroutine return pointer.
move [reg+0x5E],$dccr ; Restore DCCR
move [reg+0x66],$usp ; Restore USP
jump [reg+0x62] ; A jump to the content in register BRP works.
nop ;
");
/* The hook for an interrupt generated by GDB. An internal stack is used
by the stub. The register image of the caller is stored in the structure
register_image. Interactive communication with the host is handled by
handle_exception and finally the register image is restored. Due to the
old assembler which does not recognise the break instruction and the
breakpoint return pointer hex-code is used. */
void kgdb_handle_serial(void);
/*
 * NOTE(review): like kgdb_handle_breakpoint, this is one top-level asm()
 * with a multi-line string-literal body; the ';' comments are assembler
 * comments inside the string and must not be edited as C comments.
 *
 * Flow: save the register image, read one character via getDebugChar and
 * fall straight through to 'goback' unless it is Ctrl-C (0x03) arriving in
 * kernel mode; in that case call handle_exception(SIGINT) on the internal
 * stack, then restore and 'reti'.
 */
asm ("
.global kgdb_handle_serial
kgdb_handle_serial:
;;
;; Response to a serial interrupt
;;
move $dccr,[reg+0x5E] ; Save the flags in DCCR
di ; Disable interrupts
move.d $r0,[reg] ; Save R0
move.d $r1,[reg+0x04] ; Save R1
move.d $r2,[reg+0x08] ; Save R2
move.d $r3,[reg+0x0C] ; Save R3
move.d $r4,[reg+0x10] ; Save R4
move.d $r5,[reg+0x14] ; Save R5
move.d $r6,[reg+0x18] ; Save R6
move.d $r7,[reg+0x1C] ; Save R7
move.d $r8,[reg+0x20] ; Save R8
move.d $r9,[reg+0x24] ; Save R9
move.d $r10,[reg+0x28] ; Save R10
move.d $r11,[reg+0x2C] ; Save R11
move.d $r12,[reg+0x30] ; Save R12
move.d $r13,[reg+0x34] ; Save R13
move.d $sp,[reg+0x38] ; Save SP (R14)
move $irp,[reg+0x3c] ; Save the address in PC (R15)
clear.b [reg+0x40] ; Clear P0
move $vr,[reg+0x41] ; Save special register P1,
clear.w [reg+0x42] ; Clear P4
move $ccr,[reg+0x44] ; Save special register CCR
move $mof,[reg+0x46] ; P7
clear.d [reg+0x4A] ; Clear P8
move $ibr,[reg+0x4E] ; P9,
move $irp,[reg+0x52] ; P10,
move $srp,[reg+0x56] ; P11,
move $dtp0,[reg+0x5A] ; P12, register BAR, assembler might not know BAR
; P13, register DCCR already saved
;; Due to the old assembler-versions BRP might not be recognized
.word 0xE670 ; move brp,r0
move.d $r0,[reg+0x62] ; Save the return address in BRP
move $usp,[reg+0x66] ; USP
;; get the serial character (from debugport.c) and check if it is a ctrl-c
jsr getDebugChar
cmp.b 3, $r10
bne goback
nop
move.d [reg+0x5E], $r10 ; Get DCCR
btstq 8, $r10 ; Test the U-flag.
bmi goback
nop
;;
;; Handle the communication
;;
move.d internal_stack+1020,$sp ; Use the internal stack
moveq 2,$r10 ; SIGINT
jsr handle_exception ; Interactive routine
goback:
;;
;; Return to the caller
;;
move.d [reg],$r0 ; Restore R0
move.d [reg+0x04],$r1 ; Restore R1
move.d [reg+0x08],$r2 ; Restore R2
move.d [reg+0x0C],$r3 ; Restore R3
move.d [reg+0x10],$r4 ; Restore R4
move.d [reg+0x14],$r5 ; Restore R5
move.d [reg+0x18],$r6 ; Restore R6
move.d [reg+0x1C],$r7 ; Restore R7
move.d [reg+0x20],$r8 ; Restore R8
move.d [reg+0x24],$r9 ; Restore R9
move.d [reg+0x28],$r10 ; Restore R10
move.d [reg+0x2C],$r11 ; Restore R11
move.d [reg+0x30],$r12 ; Restore R12
move.d [reg+0x34],$r13 ; Restore R13
;;
;; FIXME: Which registers should be restored?
;;
move.d [reg+0x38],$sp ; Restore SP (R14)
move [reg+0x56],$srp ; Restore the subroutine return pointer.
move [reg+0x5E],$dccr ; Restore DCCR
move [reg+0x66],$usp ; Restore USP
reti ; Return from the interrupt routine
nop
");
/* Use this static breakpoint in the start-up only.
 * Marks the stub as started and traps into the debugger with "break 8";
 * NOTE(review): the exception is presumably routed to
 * kgdb_handle_breakpoint (vector installed in irq.c per the note in
 * kgdb_init) -- confirm against irq.c.
 */
void
breakpoint(void)
{
kgdb_started = 1;
is_dyn_brkp = 0; /* This is a static, not a dynamic breakpoint. */
__asm__ volatile ("break 8"); /* Jump to handle_breakpoint. */
}
/* initialize kgdb. doesn't break into the debugger, but sets up irq and ports.
 * Installs kgdb_handle_serial on interrupt vector 8 and unmasks the debug
 * serial interrupt so a host-sent Ctrl-C can stop the kernel.
 */
void
kgdb_init(void)
{
/* could initialize debug port as well but it's done in head.S already... */
/* breakpoint handler is now set in irq.c */
set_int_vector(8, kgdb_handle_serial);
enableDebugIRQ();
}
/****************************** End of file **********************************/
| gpl-2.0 |
hiikezoe/android_kernel_sharp_is17sh | drivers/net/cxgb4/t4_hw.c | 7993 | 89030 | /*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/init.h>
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"
/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Polls @reg up to @attempts times until the @mask bit matches
 *	@polarity.  On success the final register value is stored in *@valp
 *	(when non-NULL) and 0 is returned; -EAGAIN is returned if the
 *	attempts are exhausted first.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	u32 regval;

	for (regval = t4_read_reg(adapter, reg);
	     !!(regval & mask) != polarity;
	     regval = t4_read_reg(adapter, reg)) {
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}

	if (valp)
		*valp = regval;
	return 0;
}
/* Convenience wrapper around t4_wait_op_done_val() for callers that do not
 * need the register's final value.
 */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity,
				   attempts, delay, NULL);
}
/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Read-modify-write: clears the bits selected by @mask, ORs in @val,
 *	writes the result back, and reads the register once more to flush
 *	the posted write.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 newval;

	newval = t4_read_reg(adapter, addr);
	newval &= ~mask;
	newval |= val;
	t4_write_reg(adapter, addr, newval);
	(void) t4_read_reg(adapter, addr);	/* flush the write */
}
/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	For each of the @nregs consecutive indices starting at @start_idx,
 *	writes the index to @addr_reg and collects the value that appears
 *	in @data_reg.
 */
static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	unsigned int i;

	for (i = 0; i < nregs; i++) {
		t4_write_reg(adap, addr_reg, start_idx + i);
		vals[i] = t4_read_reg(adap, data_reg);
	}
}
/*
 * Copy a mailbox reply into @rpl, one 64-bit flit at a time, preserving
 * the device's big-endian byte order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	int i;

	for (i = 0; i < nflit; i++)
		rpl[i] = cpu_to_be64(t4_read_reg64(adap, mbox_addr + 8 * i));
}
/*
 * Handle a FW assertion reported in a mailbox.
 *
 * Pulls the fw_debug_cmd reply out of the mailbox data registers at
 * @mbox_addr and logs the firmware's file name, line number and the two
 * FW-supplied diagnostic values.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
struct fw_debug_cmd asrt;
/* The reply occupies sizeof(asrt)/8 64-bit flits in the mailbox. */
get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
dev_alert(adap->pdev_dev,
"FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}
/* Log the eight 64-bit flits of a mailbox as a single error line.  The
 * flits are read in ascending order, exactly as before, and emitted in one
 * dev_err() call so the log format is unchanged.
 */
static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	unsigned long long flit[8];
	unsigned int i;

	for (i = 0; i < 8; i++)
		flit[i] = (unsigned long long)t4_read_reg64(adap,
							    data_reg + 8 * i);

	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		flit[0], flit[1], flit[2], flit[3],
		flit[4], flit[5], flit[6], flit[7]);
}
/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
 *	the response.  If sleeping is allowed we use progressive backoff
 *	otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
void *rpl, bool sleep_ok)
{
/* Progressive backoff schedule in ms; the last entry repeats. */
static const int delay[] = {
1, 1, 3, 5, 10, 10, 20, 50, 100, 200
};
u32 v;
u64 res;
int i, ms, delay_idx;
const __be64 *p = cmd;
u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
/* Commands must be whole flits (16-byte multiple) and fit the mailbox. */
if ((size & 15) || size > MBOX_LEN)
return -EINVAL;
/*
 * If the device is off-line, as in EEH, commands will time out.
 * Fail them early so we don't waste time waiting.
 */
if (adap->pdev->error_state != pci_channel_io_normal)
return -EIO;
/* We must own the mailbox before writing; retry the ownership read a
 * few times while it reports no owner.
 */
v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
if (v != MBOX_OWNER_DRV)
return v ? -EBUSY : -ETIMEDOUT;
/* Copy the command into the data registers, then hand the mailbox to
 * the firmware and flush the posted write.
 */
for (i = 0; i < size; i += 8)
t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
t4_read_reg(adap, ctl_reg); /* flush write */
delay_idx = 0;
ms = delay[0];
/* Poll for the FW to hand the mailbox back, up to FW_CMD_MAX_TIMEOUT ms. */
for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
if (sleep_ok) {
ms = delay[delay_idx]; /* last element may repeat */
if (delay_idx < ARRAY_SIZE(delay) - 1)
delay_idx++;
msleep(ms);
} else
mdelay(ms);
v = t4_read_reg(adap, ctl_reg);
if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
/* Ownership returned without a valid message: clear and
 * keep polling.
 */
if (!(v & MBMSGVALID)) {
t4_write_reg(adap, ctl_reg, 0);
continue;
}
res = t4_read_reg64(adap, data_reg);
/* A FW_DEBUG_CMD reply is an assertion report, not a
 * normal command reply.
 */
if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
fw_asrt(adap, data_reg);
res = FW_CMD_RETVAL(EIO);
} else if (rpl)
get_mbox_rpl(adap, rpl, size / 8, data_reg);
if (FW_CMD_RETVAL_GET((int)res))
dump_mbox(adap, mbox, data_reg);
t4_write_reg(adap, ctl_reg, 0);
return -FW_CMD_RETVAL_GET((int)res);
}
}
dump_mbox(adap, mbox, data_reg);
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
return -ETIMEDOUT;
}
/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
int i;
/* A BIST command already in flight means the engine is busy. */
if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
return -EBUSY;
/* Program a 64-byte-aligned read and kick off the BIST command. */
t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
BIST_CMD_GAP(1));
i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
if (i)
return i;
#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
/* Data registers are drained in descending index order (15..0). */
for (i = 15; i >= 0; i--)
*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
if (ecc)
*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
return 0;
}
/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
int i;
/* Each EDC's register block is EDC_STRIDE bytes apart. */
idx *= EDC_STRIDE;
if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
return -EBUSY;
/* Program a 64-byte-aligned read and kick off the BIST command. */
t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
t4_write_reg(adap, EDC_BIST_CMD + idx,
BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
if (i)
return i;
#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
/* Data registers are drained in descending index order (15..0). */
for (i = 15; i >= 0; i--)
*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
if (ecc)
*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
return 0;
}
#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE 0
#define VPD_LEN 512
/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Writes the EEPROM status word: 0xc engages write protection, 0
 *	releases it.  Returns 0 on success or the negative error from
 *	pci_write_vpd().
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int stat = enable ? 0xc : 0;
	int ret;

	ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &stat);
	if (ret < 0)
		return ret;
	return 0;
}
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters (ID, EC level, serial number) stored in the
 *	VPD EEPROM and fills in @p.  Returns 0 on success or a negative
 *	errno if the VPD is missing, malformed, or fails its checksum.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
int i, ret;
int ec, sn;
u8 vpd[VPD_LEN], csum;
unsigned int vpdr_len, kw_offset, id_len;
/* NOTE(review): assumes the VPD structure starts at offset VPD_BASE (0)
 * of the VPD space -- confirm for the boards this driver supports.
 */
ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
if (ret < 0)
return ret;
/* The VPD must open with a large-resource ID-string descriptor. */
if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
dev_err(adapter->pdev_dev, "missing VPD ID string\n");
return -EINVAL;
}
id_len = pci_vpd_lrdt_size(vpd);
if (id_len > ID_LEN)
id_len = ID_LEN;
/* Locate the read-only (VPD-R) section that holds the keywords. */
i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
if (i < 0) {
dev_err(adapter->pdev_dev, "missing VPD-R section\n");
return -EINVAL;
}
vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
if (vpdr_len + kw_offset > VPD_LEN) {
dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
return -EINVAL;
}
/* Find keyword @name and leave its data offset in @var; note the
 * embedded 'return' on failure.
 */
#define FIND_VPD_KW(var, name) do { \
var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
if (var < 0) { \
dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
return -EINVAL; \
} \
var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)
/* The RV keyword completes the checksum: summing all bytes from offset
 * 0 through the RV data byte must yield 0.
 */
FIND_VPD_KW(i, "RV");
for (csum = 0; i >= 0; i--)
csum += vpd[i];
if (csum) {
dev_err(adapter->pdev_dev,
"corrupted VPD EEPROM, actual csum %u\n", csum);
return -EINVAL;
}
FIND_VPD_KW(ec, "EC");
FIND_VPD_KW(sn, "SN");
#undef FIND_VPD_KW
/* Copy out the ID, EC and SN fields, trimming trailing whitespace. */
memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
strim(p->id);
memcpy(p->ec, vpd + ec, EC_LEN);
strim(p->ec);
i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
return 0;
}
/* serial flash and firmware constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
/* flash command opcodes */
SF_PROG_PAGE = 2, /* program page */
SF_WR_DISABLE = 4, /* disable writes */
SF_RD_STATUS = 5, /* read status register */
SF_WR_ENABLE = 6, /* enable writes */
SF_RD_DATA_FAST = 0xb, /* read flash */
SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */
FW_MAX_SIZE = 512 * 1024, /* max firmware image size in bytes */
};
/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	u32 op;
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & BUSY)
		return -EBUSY;

	/* Build the operation word: byte count plus optional chain/lock. */
	op = BYTECNT(byte_cnt - 1);
	if (cont)
		op |= SF_CONT;
	if (lock)
		op |= SF_LOCK;
	t4_write_reg(adapter, SF_OP, op);
	ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}
/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	u32 op;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & BUSY)
		return -EBUSY;

	/* Stage the data, then build and issue the write operation word. */
	op = OP_WR | BYTECNT(byte_cnt - 1);
	if (cont)
		op |= SF_CONT;
	if (lock)
		op |= SF_LOCK;
	t4_write_reg(adapter, SF_DATA, val);
	t4_write_reg(adapter, SF_OP, op);
	return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
}
/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Repeatedly issues SF_RD_STATUS and reads the status byte until the
 *	busy bit (bit 0) clears.  Returns 0 when idle, -EAGAIN after
 *	@attempts polls, or the error from the underlying SF access.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	for (;;) {
		u32 status;
		int ret;

		ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS);
		if (!ret)
			ret = sf1_read(adapter, 1, 0, 1, &status);
		if (ret)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}
/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianess.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented)
{
int ret;
/* Reject reads past the flash and unaligned start addresses. */
if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
return -EINVAL;
/* Command word: byte-swapped address with the fast-read opcode (0xb)
 * ORed into the low byte.
 */
addr = swab32(addr) | SF_RD_DATA_FAST;
if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
(ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
return ret;
/* Chained reads; the final read drops SF_CONT and then the SF lock is
 * released before checking the result.
 */
for ( ; nwords; nwords--, data++) {
ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
if (nwords == 1)
t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
if (ret)
return ret;
if (byte_oriented)
*data = htonl(*data);
}
return 0;
}
/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
unsigned int n, const u8 *data)
{
int ret;
u32 buf[64];
unsigned int i, c, left, val, offset = addr & 0xff;
/* The write must stay inside the flash and within one 256-byte page. */
if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
return -EINVAL;
/* Issue write-enable, then the page-program command with the
 * byte-swapped address.
 */
val = swab32(addr) | SF_PROG_PAGE;
if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
(ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
goto unlock;
/* Stream the payload 4 bytes at a time; the last chunk drops SF_CONT. */
for (left = n; left; left -= c) {
c = min(left, 4U);
for (val = 0, i = 0; i < c; ++i)
val = (val << 8) + *data++;
ret = sf1_write(adapter, c, c != left, 1, val);
if (ret)
goto unlock;
}
ret = flash_wait_op(adapter, 8, 1);
if (ret)
goto unlock;
t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
/* Read the page to verify the write succeeded */
ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
if (ret)
return ret;
/* @data was advanced past the payload above, so data - n is its start. */
if (memcmp(data - n, (u8 *)buf + offset, n)) {
dev_err(adapter->pdev_dev,
"failed to correctly write the flash page at %#x\n",
addr);
return -EIO;
}
return 0;
unlock:
t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
return ret;
}
/**
 *	get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the 32-bit FW version word out of the firmware header stored
 *	in flash.
 */
static int get_fw_version(struct adapter *adapter, u32 *vers)
{
	unsigned int addr = adapter->params.sf_fw_start +
			    offsetof(struct fw_hdr, fw_ver);

	return t4_read_flash(adapter, addr, 1, vers, 0);
}
/**
 *	get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the 32-bit TP microcode version word out of the firmware
 *	header stored in flash.
 */
static int get_tp_version(struct adapter *adapter, u32 *vers)
{
	unsigned int addr = adapter->params.sf_fw_start +
			    offsetof(struct fw_hdr, tp_microcode_ver);

	return t4_read_flash(adapter, addr, 1, vers, 0);
}
/**
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	on an exact match, a negative error if the version could not be read
 *	or there's a major version mismatch, and a positive value if the
 *	expected major version is found but the minor/micro version differs.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	u32 api_vers[2];
	int ret, major, minor, micro;

	/* Cache the FW, TP microcode and API versions from flash. */
	ret = get_fw_version(adapter, &adapter->params.fw_vers);
	if (ret)
		return ret;
	ret = get_tp_version(adapter, &adapter->params.tp_vers);
	if (ret)
		return ret;
	ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
			    offsetof(struct fw_hdr, intfver_nic),
			    2, api_vers, 1);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
	minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
	micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
	memcpy(adapter->params.api_vers, api_vers,
	       sizeof(adapter->params.api_vers));

	if (major != FW_VERSION_MAJOR) {	/* major mismatch - fail */
		dev_err(adapter->pdev_dev,
			"card FW has major version %u, driver wants %u\n",
			major, FW_VERSION_MAJOR);
		return -EINVAL;
	}
	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
		return 0;	/* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}
/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range, stopping at the
 *	first failure.  The SF lock is released before returning.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int sec, ret = 0;

	for (sec = start; sec <= end; sec++) {
		/* enable writes, issue the sector erase, then wait for it */
		ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE);
		if (!ret)
			ret = sf1_write(adapter, 4, 0, 1,
					SF_ERASE_SECTOR | (sec << 8));
		if (!ret)
			ret = flash_wait_op(adapter, 14, 500);
		if (ret) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				sec, ret);
			break;
		}
	}
	t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */
	return ret;
}
/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	Validates the image (size, header length field, checksum) before
 *	erasing and programming the flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	/* Basic sanity checks on the image before touching the flash. */
	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	/* the header records the image length in units of 512 bytes */
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	/* a valid image's 32-bit word sum comes out to 0xffffffff */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	/* program the remaining pages of the image */
	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* finally write the real first-page FW version to mark success */
	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}
/* link capabilities this driver will advertise/request */
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)

/**
 *	t4_link_start - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	/* translate the requested pause settings into FW capability bits */
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;
	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/* no autoneg: request only what the port supports */
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		/* autoneg forced off: request the exact speed */
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
* t4_restart_aneg - restart autonegotiation
* @adap: the adapter
* @mbox: mbox to use for the FW command
* @port: the port id
*
* Restarts autonegotiation for the selected port.
*/
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
struct fw_port_cmd c;
memset(&c, 0, sizeof(c));
c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
FW_LEN16(c));
c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/*
 * One entry of a table-driven interrupt decoder.  A table is an array of
 * these terminated by an entry whose mask is 0.
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};
/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is
 *	terminated by an entry specifying mask 0.  Returns the number of
 *	fatal interrupt conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int handled = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	while (acts->mask) {
		unsigned int hit = status & acts->mask;

		if (hit) {
			if (acts->fatal) {
				fatal++;
				dev_alert(adapter->pdev_dev, "%s (0x%x)\n",
					  acts->msg, hit);
			} else if (acts->msg && printk_ratelimit())
				dev_warn(adapter->pdev_dev, "%s (0x%x)\n",
					 acts->msg, hit);
			handled |= acts->mask;
		}
		acts++;
	}

	status &= handled;
	if (status)			/* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}
/*
 * Interrupt handler for the PCIE module.  Decodes and clears the UTL
 * system-bus-agent, UTL PCI-Express-port, and core PCIE cause registers;
 * any fatal condition escalates to t4_fatal_err().
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	/* decode table for PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS */
	static const struct intr_info sysbus_intr_info[] = {
		{ RNPP, "RXNP array parity error", -1, 1 },
		{ RPCP, "RXPC array parity error", -1, 1 },
		{ RCIP, "RXCIF array parity error", -1, 1 },
		{ RCCP, "Rx completions control array parity error", -1, 1 },
		{ RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	/* decode table for PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS */
	static const struct intr_info pcie_port_intr_info[] = {
		{ TPCP, "TXPC array parity error", -1, 1 },
		{ TNPP, "TXNP array parity error", -1, 1 },
		{ TFTP, "TXFT array parity error", -1, 1 },
		{ TCAP, "TXCA array parity error", -1, 1 },
		{ TCIP, "TXCIF array parity error", -1, 1 },
		{ RCAP, "RXCA array parity error", -1, 1 },
		{ OTDD, "outbound request TLP discarded", -1, 1 },
		{ RDPE, "Rx data parity error", -1, 1 },
		{ TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	/* decode table for PCIE_INT_CAUSE */
	static const struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ PCIESINT, "PCI core secondary fault", -1, 1 },
		{ PCIEPINT, "PCI core primary fault", -1, 1 },
		{ UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
		{ 0 }
	};
	int fat;

	/* sum of fatal conditions across all three cause registers */
	fat = t4_handle_intr_status(adapter,
				    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				    sysbus_intr_info) +
	      t4_handle_intr_status(adapter,
				    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				    pcie_port_intr_info) +
	      t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}
/*
 * TP interrupt handler.  All decoded conditions are fatal.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}
/*
 * SGE interrupt handler.  Checks the 64-bit parity-error cause spread
 * across SGE_INT_CAUSE1/2 (always fatal) and the decoded conditions in
 * SGE_INT_CAUSE3.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;

	static const struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	/* combine the two 32-bit parity cause registers into one word */
	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		/* write back exactly what was read to clear the bits */
		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
	}

	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
	    v != 0)
		t4_fatal_err(adapter);
}
/*
 * CIM interrupt handler.  Decodes both the host cause register and the
 * uP access cause register; all conditions are fatal.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	/* decode table for CIM_HOST_INT_CAUSE */
	static const struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	/* decode table for CIM_HOST_UPACC_INT_CAUSE */
	static const struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT, "CIM illegal write", -1, 1 },
		{ ILLRDINT, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT , "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT , "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT , "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT , "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT , "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	int fat;

	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
/*
 * ULP RX interrupt handler.  Both decoded conditions are fatal.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}
/*
 * ULP TX interrupt handler.  PBL out-of-bounds conditions are reported
 * but non-fatal; parity errors are fatal.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}
/*
 * PM TX interrupt handler.  All decoded conditions are fatal.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}
/*
 * PM RX interrupt handler.  All decoded conditions are fatal.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}
/*
 * CPL switch interrupt handler.  All decoded conditions are fatal.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}
/*
 * LE (lookup engine) interrupt handler.  LIP conditions are non-fatal;
 * parity and command errors are fatal.
 */
static void le_intr_handler(struct adapter *adap)
{
	static const struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
		t4_fatal_err(adap);
}
/*
 * MPS interrupt handler.  Decodes the per-subsystem MPS cause registers,
 * then clears the top-level MPS_INT_CAUSE summary bits.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
		{ BUBBLE, "MPS Tx underflow", -1, 1 },
		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};
	int fat;

	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	/* clear the summary bits in the top-level cause register */
	t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
		     RXINT | TXINT | STATINT);
	t4_read_reg(adapter, MPS_INT_CAUSE);                    /* flush */
	if (fat)
		t4_fatal_err(adapter);
}
/* interrupt conditions handled by mem_intr_handler() */
#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.  @idx selects the memory controller
 * (NOTE(review): indexing of name[] assumes MEM_EDC0/MEM_EDC1/MEM_MC map
 * to 0/1/2 — confirm against the enum definition).  Correctable ECC
 * errors are counted and reported; parity and uncorrectable ECC errors
 * are fatal.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	/* pick the cause/ECC-status registers for the selected memory */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));

		/* writing the count mask resets the correctable-error count */
		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);		/* clear the handled causes */
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}
/*
 * MA interrupt handler.  Any MA interrupt is treated as fatal.
 */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE)
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
	if (status & MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_GET(v),
			  MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	t4_write_reg(adap, MA_INT_CAUSE, status);	/* clear causes */
	t4_fatal_err(adap);
}
/*
 * SMB interrupt handler.  All decoded conditions are fatal.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static const struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}
/*
 * NC-SI interrupt handler.  All decoded conditions are fatal.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static const struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}
/*
 * XGMAC interrupt handler for the given port.  Only the Tx/Rx FIFO
 * parity conditions are handled here; both are fatal.
 */
static void xgmac_intr_handler(struct adapter *adap, int port)
{
	u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));

	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
			  port);
	if (v & RXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
			  port);
	t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
	t4_fatal_err(adap);
}
/*
 * PL interrupt handler.  All decoded conditions are fatal.
 */
static void pl_intr_handler(struct adapter *adap)
{
	static const struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
		t4_fatal_err(adap);
}
/* per-PF interrupt conditions we enable */
#define PF_INTR_MASK (PFSW)
/* global (non-PF) interrupt conditions this handler dispatches on */
#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
		CPL_SWITCH | SGE | ULP_TX)

/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 *
 *	Returns 0 if no global cause bit was set, 1 otherwise.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	/* dispatch every set cause bit to its module handler */
	if (cause & CIM)
		cim_intr_handler(adapter);
	if (cause & MPS)
		mps_intr_handler(adapter);
	if (cause & NCSI)
		ncsi_intr_handler(adapter);
	if (cause & PL)
		pl_intr_handler(adapter);
	if (cause & SMB)
		smb_intr_handler(adapter);
	if (cause & XGMAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE)
		pcie_intr_handler(adapter);
	if (cause & MC)
		mem_intr_handler(adapter, MEM_MC);
	if (cause & EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE)
		le_intr_handler(adapter);
	if (cause & TP)
		tp_intr_handler(adapter);
	if (cause & MA)
		ma_intr_handler(adapter);
	if (cause & PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & SGE)
		sge_intr_handler(adapter);
	if (cause & ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
	return 1;
}
/**
 *	t4_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable PF-specific interrupts for the calling function and the top-level
 *	interrupt concentrator for global interrupts.  Interrupts are already
 *	enabled at each module, here we just enable the roots of the interrupt
 *	hierarchies.
 *
 *	Note: this function should be called only when the driver manages
 *	non PF-specific interrupts from the various HW modules.  Only one PCI
 *	function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	/* which PF we are, per the PL_WHOAMI register */
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	/* enable the SGE error conditions we want reported */
	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
		     EGRESS_SIZE_ERR);
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
	/* route global interrupts to this PF */
	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
}
/**
 *	t4_intr_disable - disable interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrators.  The caller must be a PCI function managing global
 *	interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 whoami = t4_read_reg(adapter, PL_WHOAMI);
	u32 pf = SOURCEPF_GET(whoami);

	/* mask PF-local interrupts and drop this PF from the global map */
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
}
/**
 *	hash_mac_addr - return the hash value of a MAC address
 *	@addr: the 48-bit Ethernet MAC address
 *
 *	Hashes a MAC address according to the hash function used by HW inexact
 *	(hash) address matching.
 */
static int hash_mac_addr(const u8 *addr)
{
	u32 hi = 0, lo = 0;
	int i;

	/* fold the address into two 24-bit halves */
	for (i = 0; i < 3; i++)
		hi = (hi << 8) | addr[i];
	for (i = 3; i < 6; i++)
		lo = (lo << 8) | addr[i];

	/* mix the halves down to a 6-bit hash */
	hi ^= lo;
	hi ^= hi >> 12;
	hi ^= hi >> 6;
	return hi & 0x3f;
}
/**
 *	t4_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: virtual interface whose RSS subtable is to be written
 *	@start: start entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the response queue lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
 *	until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range allowed for
 *	@viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	/* rsp cycles through [rspq, rsp_end) so the values repeat as needed */
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE |
			       FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = htonl(FW_LEN16(cmd));

	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
	while (n > 0) {
		int nq = min(n, 32);
		__be32 *qp = &cmd.iq0_to_iq2;

		cmd.niqid = htons(nq);
		cmd.startidx = htons(start);

		start += nq;
		n -= nq;

		/* pack three queue ids per 32-bit word, wrapping over rspq */
		while (nq > 0) {
			unsigned int v;

			v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;

			*qp++ = htonl(v);
			nq -= 3;
		}

		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
/**
 *	t4_config_glbl_rss - configure the global RSS mode
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@mode: global RSS mode
 *	@flags: mode-specific flags
 *
 *	Sets the global RSS mode.  Only the manual and basic-virtual modes
 *	are supported; any other mode returns -EINVAL.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_WRITE);
	c.retval_len16 = htonl(FW_LEN16(c));

	switch (mode) {
	case FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL:
		c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		break;
	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
		c.u.basicvirtual.mode_pkd =
			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
		break;
	default:
		return -EINVAL;
	}
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}
/**
 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
 *	@adap: the adapter
 *	@v4: holds the TCP/IP counter values
 *	@v6: holds the TCP/IPv6 counter values
 *
 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	/* val[] mirrors the TP_MIB_TCP_* index range so the STAT macros
	 * below can look up a counter by name */
	u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];

#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
		v4->tcpOutRsts = STAT(OUT_RST);
		v4->tcpInSegs  = STAT64(IN_SEG);
		v4->tcpOutSegs = STAT64(OUT_SEG);
		v4->tcpRetransSegs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* same layout, read starting at the IPv6 counter block */
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
		v6->tcpOutRsts = STAT(OUT_RST);
		v6->tcpInSegs  = STAT64(IN_SEG);
		v6->tcpOutSegs = STAT64(OUT_SEG);
		v6->tcpRetransSegs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
/**
 *	t4_read_mtu_tbl - returns the values in the HW path MTU table
 *	@adap: the adapter
 *	@mtus: where to store the MTU values
 *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 *	Reads the HW path MTU table one entry at a time.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	int idx;

	for (idx = 0; idx < NMTUS; idx++) {
		u32 entry;

		/* select entry idx; the 0xff index field requests a read —
		 * NOTE(review): confirm against the TP_MTU_TABLE spec */
		t4_write_reg(adap, TP_MTU_TABLE,
			     MTUINDEX(0xff) | MTUVALUE(idx));
		entry = t4_read_reg(adap, TP_MTU_TABLE);
		mtus[idx] = MTUVALUE_GET(entry);
		if (mtu_log)
			mtu_log[idx] = MTUWIDTH_GET(entry);
	}
}
/**
 *	init_cong_ctrl - initialize congestion control parameters
 *	@a: the alpha values for congestion control
 *	@b: the beta values for congestion control
 *
 *	Fills the two 32-entry congestion control parameter arrays from
 *	fixed tables.
 */
static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	static const unsigned short alpha_tab[32] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short beta_tab[32] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	unsigned int i;

	for (i = 0; i < 32; i++) {
		a[i] = alpha_tab[i];
		b[i] = beta_tab[i];
	}
}
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U
/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* expected average packet counts, one per congestion-control window,
	 * used to scale the additive increment computed below */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};
	unsigned int i, w;
	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		/* fls() gives the 1-based position of the highest set bit;
		 * the adjustment below presumably rounds the stored width to
		 * the nearest power of two — TODO confirm rounding intent */
		unsigned int log2 = fls(mtu);
		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
			     MTUWIDTH(log2) | MTUVALUE(mtu));
		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;
			/* increment scales with payload (mtu - 40, presumably
			 * IP + TCP header overhead) but never drops below
			 * CC_MIN_INCR */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);
			t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
/**
 *	get_mps_bg_map - return the buffer groups associated with a port
 *	@adap: the adapter
 *	@idx: the port index
 *
 *	Returns a bitmap indicating which MPS buffer groups are associated
 *	with the given port.  Bit i is set if buffer group i is used by the
 *	port.
 */
static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
{
	u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));

	switch (n) {
	case 0:
		/* single-port config: port 0 owns all four buffer groups */
		return idx == 0 ? 0xf : 0;
	case 1:
		/* dual-port config: each port owns two adjacent groups */
		return idx < 2 ? 3 << (2 * idx) : 0;
	default:
		/* four-port config: one group per port */
		return 1 << idx;
	}
}
/**
 *	t4_get_port_stats - collect port statistics
 *	@adap: the adapter
 *	@idx: the port index
 *	@p: the stats structure to fill
 *
 *	Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = get_mps_bg_map(adap, idx);

/* 64-bit read of a per-port MPS counter, addressed via its low-word reg */
#define GET_STAT(name) \
	t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
/* 64-bit read of a common (adapter-wide) MPS counter */
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	/* Tx counters */
	p->tx_octets = GET_STAT(TX_PORT_BYTES);
	p->tx_frames = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64 = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop = GET_STAT(TX_PORT_DROP);
	p->tx_pause = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
	/* Rx counters */
	p->rx_octets = GET_STAT(RX_PORT_BYTES);
	p->rx_frames = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64 = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
	/* buffer-group drop/truncate counters, only for groups this port
	 * owns (see get_mps_bg_map); others are reported as zero */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
#undef GET_STAT
#undef GET_STAT_COM
}
/**
* t4_wol_magic_enable - enable/disable magic packet WoL
* @adap: the adapter
* @port: the physical port index
* @addr: MAC address expected in magic packets, %NULL to disable
*
* Enables/disables magic packet wake-on-LAN for the selected port.
*/
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
const u8 *addr)
{
if (addr) {
t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
(addr[2] << 24) | (addr[3] << 16) |
(addr[4] << 8) | addr[5]);
t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
(addr[0] << 8) | addr[1]);
}
t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
addr ? MAGICEN : 0);
}
/**
 *	t4_wol_pat_enable - enable/disable pattern-based WoL
 *	@adap: the adapter
 *	@port: the physical port index
 *	@map: bitmap of which HW pattern filters to set
 *	@mask0: byte mask for bytes 0-63 of a packet
 *	@mask1: byte mask for bytes 64-127 of a packet
 *	@crc: Ethernet CRC for selected bytes
 *	@enable: enable/disable switch
 *
 *	Sets the pattern filters indicated in @map to mask out the bytes
 *	specified in @mask0/@mask1 in received packets and compare the CRC of
 *	the resulting packet against @crc.  If @enable is %true pattern-based
 *	WoL is enabled, otherwise disabled.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
	int i;
	if (!enable) {
		/* disabling needs no programming, just clear the enable bit */
		t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
				 PATEN, 0);
		return 0;
	}
	if (map > 0xff)
		return -EINVAL;
#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
	/* upper mask words are shared by all patterns, written once;
	 * DATA0 is rewritten per pattern inside the loop below */
	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
		if (!(map & 1))
			continue;
		/* write byte masks */
		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		/* NOTE(review): BUSY is sampled once right after the flush
		 * read, with no polling loop — presumably the EPIO write
		 * completes within one register round-trip; confirm before
		 * relying on the -ETIMEDOUT path */
		if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
			return -ETIMEDOUT;

		/* write CRC */
		t4_write_reg(adap, EPIO_REG(DATA0), crc);
		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
		if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
			return -ETIMEDOUT;
	}
#undef EPIO_REG
	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
	return 0;
}
#define INIT_CMD(var, cmd, rd_wr) do { \
(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
FW_CMD_REQUEST | FW_CMD_##rd_wr); \
(var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)
/**
 *	t4_mdio_rd - read a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to read
 *	@valp: where to store the value
 *
 *	Issues a FW command through the given mailbox to read a PHY register.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 *valp)
{
	int ret;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
		FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
				   FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = htons(reg);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		*valp = ntohs(c.u.mdio.rval);
	return ret;
}

/**
 *	t4_mdio_wr - write a PHY register through MDIO
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@phy_addr: the PHY address
 *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
 *	@reg: the register to write
 *	@val: value to write
 *
 *	Issues a FW command through the given mailbox to write a PHY register.
 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
	       unsigned int mmd, unsigned int reg, u16 val)
{
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
		FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
	c.cycles_to_len16 = htonl(FW_LEN16(c));
	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
				   FW_LDST_CMD_MMD(mmd));
	c.u.mdio.raddr = htons(reg);
	c.u.mdio.rval = htons(val);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (may be %NULL)
 *
 *	Issues a command to establish communication with FW.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;

	/*
	 * Bug fix: INIT_CMD fills in only the opcode and length fields,
	 * so zero the whole command first to avoid sending uninitialized
	 * stack bytes to the firmware.
	 */
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_mbasyncnot = htonl(
		FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
		FW_HELLO_CMD_MBASYNCNOT(evt_mbox));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0 && state) {
		/* FW echoes the device state back in the same word */
		u32 v = ntohl(c.err_to_mbasyncnot);

		if (v & FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else if (v & FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else
			*state = DEV_STATE_UNINIT;
	}
	return ret;
}
/**
 *	t4_fw_bye - end communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to terminate communication with FW.
 */
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
	struct fw_bye_cmd c;

	/* Bug fix: zero the command — INIT_CMD sets only two fields,
	 * the rest would otherwise be uninitialized stack data. */
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 *	t4_early_init - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
 */
int t4_early_init(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	/* Bug fix: zero the command — INIT_CMD sets only two fields,
	 * the rest would otherwise be uninitialized stack data. */
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 *	t4_fw_reset - issue a reset to FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@reset: specifies the type of reset to perform
 *
 *	Issues a reset command of the specified type to FW.
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	/* Bug fix: zero the command — INIT_CMD sets only two fields,
	 * the rest would otherwise be uninitialized stack data. */
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	c.val = htonl(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 *	t4_query_params - query FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Reads the value of FW or device parameters.  Up to 7 parameters can be
 *	queried at once.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	struct fw_params_cmd c;
	__be32 *p;
	int i, ret;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
			    FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
			    FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));

	/* each table entry is a (mnem, val) pair of __be32s, so the
	 * cursor advances by two words per parameter */
	p = &c.param[0].mnem;
	for (i = 0; i < nparams; i++, p += 2)
		*p = htonl(params[i]);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		p = &c.param[0].val;
		for (i = 0; i < nparams; i++, p += 2)
			val[i] = ntohl(*p);
	}
	return ret;
}
/**
 *	t4_set_params - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	struct fw_params_cmd c;
	__be32 *p;
	unsigned int i;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
			    FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));

	/* fill consecutive (mnem, val) word pairs in the command body */
	p = &c.param[0].mnem;
	for (i = 0; i < nparams; i++, p += 2) {
		p[0] = htonl(params[i]);
		p[1] = htonl(val[i]);
	}
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 *	t4_cfg_pfvf - configure PF/VF resource limits
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF being configured
 *	@vf: the VF being configured
 *	@txq: the max number of egress queues
 *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
 *	@rxqi: the max number of interrupt-capable ingress queues
 *	@rxq: the max number of interruptless ingress queues
 *	@tc: the PCI traffic class
 *	@vi: the max number of virtual interfaces
 *	@cmask: the channel access rights mask for the PF/VF
 *	@pmask: the port access rights mask for the PF/VF
 *	@nexact: the maximum number of exact MPS filters
 *	@rcaps: read capabilities
 *	@wxcaps: write/execute capabilities
 *
 *	Configures resource limits and capabilities for a physical or virtual
 *	function.
 */
int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
		unsigned int rxqi, unsigned int rxq, unsigned int tc,
		unsigned int vi, unsigned int cmask, unsigned int pmask,
		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
{
	struct fw_pfvf_cmd c;
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
			    FW_PFVF_CMD_VFN(vf));
	c.retval_len16 = htonl(FW_LEN16(c));
	/* ingress queue limits: interrupt-capable vs interruptless */
	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
			       FW_PFVF_CMD_NIQ(rxq));
	/* channel/port access rights and total egress queue count */
	c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
			      FW_PFVF_CMD_PMASK(pmask) |
			      FW_PFVF_CMD_NEQ(txq));
	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
				FW_PFVF_CMD_NEXACTF(nexact));
	/* capability masks and Ethernet/control egress queue limit */
	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 *	t4_alloc_vi - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
			    FW_CMD_WRITE | FW_CMD_EXEC |
			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID(port);
	/* FW encodes the count of *extra* MACs, hence the -1 */
	c.nmac = nmac - 1;
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;
	if (mac) {
		/* primary MAC always comes back in c.mac; the fallthroughs
		 * below copy the extra addresses from highest to lowest */
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
	return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
}
/**
 *	t4_set_rxmode - set Rx properties of a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@mtu: the new MTU or -1
 *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;
	/* convert to FW values: each -1 becomes that field's all-ones
	 * "no change" mask so the FW leaves the setting untouched */
	if (mtu < 0)
		mtu = FW_RXMODE_MTU_NO_CHG;
	if (promisc < 0)
		promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
	if (all_multi < 0)
		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
	if (bcast < 0)
		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
	if (vlanex < 0)
		vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = htonl(FW_LEN16(c));
	c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
				  FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
				  FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
				  FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
				  FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.  If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int i, ret;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p;
	if (naddr > 7)
		return -EINVAL;
	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
			     FW_VI_MAC_CMD_VIID(viid));
	/* command length grows with naddr: two exact entries per 16 bytes */
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
				    FW_CMD_LEN16((naddr + 2) / 2));
	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		/* FW_VI_MAC_ADD_MAC asks FW to pick a free filter slot */
		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
					FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
	}
	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret)
		return ret;
	/* FW wrote the assigned filter index back into each entry;
	 * an index >= NEXACT_MAC means no exact filter was available */
	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
		u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
		if (idx)
			idx[i] = index >= NEXACT_MAC ? 0xffff : index;
		if (index < NEXACT_MAC)
			ret++;	/* ret counts successfully allocated filters */
		else if (hash)
			*hash |= (1ULL << hash_mac_addr(addr[i]));
	}
	return ret;
}
/**
 *	t4_change_mac - modifies the exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@add_smt: if true also add the address to the HW SMT
 *
 *	Modifies an exact-match filter and sets it to the new MAC address.
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the one
 *	being used by the old address value and allocate a new filter for the
 *	new address value.  @idx can be -1 if the address is a new addition.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	if (idx < 0)                             /* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	/* select whether FW programs the MPS TCAM only, or the SMT as well */
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
				FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		/* FW returns the filter index it actually used; anything
		 * beyond the exact-filter range means allocation failed */
		ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
		if (ret >= NEXACT_MAC)
			ret = -ENOMEM;
	}
	return ret;
}
/**
 *	t4_set_addr_hash - program the MAC inexact-match hash filter
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@ucast: whether the hash filter should also match unicast addresses
 *	@vec: the value to be written to the hash filter
 *	@sleep_ok: call is allowed to sleep
 *
 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd c;

	memset(&c, 0, sizeof(c));
	/* Consistency fix: this builds a FW_VI_MAC_CMD, so encode the VIID
	 * with the MAC command's own macro (as t4_alloc_mac_filt and
	 * t4_change_mac do), not FW_VI_ENABLE_CMD's. */
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
				    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
				    FW_CMD_LEN16(1));
	c.u.hash.hashvec = cpu_to_be64(vec);
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
/**
 *	t4_enable_vi - enable/disable a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@rx_en: 1=enable Rx, 0=disable Rx
 *	@tx_en: 1=enable Tx, 0=disable Tx
 *
 *	Enables/disables a virtual interface.
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	struct fw_vi_enable_cmd c;
	u32 enables;

	/* pack the ingress/egress enable flags first for readability */
	enables = FW_VI_ENABLE_CMD_IEN(rx_en) | FW_VI_ENABLE_CMD_EEN(tx_en);

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = htonl(enables | FW_LEN16(c));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 *	t4_identify_port - identify a VI's port by blinking its LED
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@nblinks: how many times to blink LED at 2.5 Hz
 *
 *	Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks)
{
	struct fw_vi_enable_cmd c;

	/*
	 * Bug fix: the command was previously sent with uninitialized stack
	 * contents in every field not explicitly assigned below; zero it all.
	 */
	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
	c.blinkdur = htons(nblinks);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 *	t4_iq_free - free an ingress queue and its FLs
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queues
 *	@vf: the VF owning the queues
 *	@iqtype: the ingress queue type
 *	@iqid: ingress queue id
 *	@fl0id: FL0 queue id or 0xffff if no attached FL0
 *	@fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 *	Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
			    FW_IQ_CMD_VFN(vf));
	/* FREE flag tells FW to release the queue rather than allocate one */
	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
	c.iqid = htons(iqid);
	c.fl0id = htons(fl0id);
	c.fl1id = htons(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 *	t4_eth_eq_free - free an Ethernet egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
			    FW_EQ_ETH_CMD_VFN(vf));
	/* FREE flag selects the release action of the EQ command */
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 *	t4_ctrl_eq_free - free a control egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
			    FW_EQ_CTRL_CMD_VFN(vf));
	/* FREE flag selects the release action of the EQ command */
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 *	t4_ofld_eq_free - free an offload egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
			    FW_EQ_OFLD_CMD_VFN(vf));
	/* FREE flag selects the release action of the EQ command */
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 *	t4_handle_fw_rpl - process a FW reply message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 *
 *	Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	/* first byte of every FW message is its opcode */
	u8 opcode = *(const u8 *)rpl;
	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		/* map the FW channel number to our port index */
		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
		/* decode pause settings and negotiated speed from the
		 * packed status word */
		if (stat & FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = SPEED_100;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = SPEED_1000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = SPEED_10000;
		/* only notify the OS layer when something actually changed */
		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                    /* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			t4_os_link_changed(adap, port, link_ok);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}
/* Read the negotiated PCIe link speed and width into @p.  Leaves @p
 * untouched if the device has no PCIe capability. */
static void __devinit get_pci_mode(struct adapter *adapter,
				   struct pci_params *p)
{
	u16 lnksta;
	u32 cap = pci_pcie_cap(adapter->pdev);

	if (!cap)
		return;

	pci_read_config_word(adapter->pdev, cap + PCI_EXP_LNKSTA, &lnksta);
	p->speed = lnksta & PCI_EXP_LNKSTA_CLS;
	p->width = (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
}
/**
 *	init_link_config - initialize a link's SW state
 *	@lc: structure holding the link state
 *	@caps: link capabilities
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/flow-control/autonegotiation settings.
 */
static void __devinit init_link_config(struct link_config *lc,
				       unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;

	/* without autoneg capability everything stays forced/off */
	if (!(caps & FW_PORT_CAP_ANEG)) {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
		return;
	}

	lc->advertising = caps & ADVERT_MASK;
	lc->autoneg = AUTONEG_ENABLE;
	lc->requested_fc |= PAUSE_AUTONEG;
}
/* Wait for the device to become responsive: a readable PL_WHOAMI (i.e. not
 * all-ones) means it is up.  Retries once after a 500 ms pause. */
int t4_wait_dev_ready(struct adapter *adap)
{
	if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
		return 0;

	msleep(500);		/* give the device time to finish coming up */
	if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
		return 0;
	return -EIO;
}
/* Identify the serial flash part and record its geometry (sector count,
 * total size, FW image start) in adap->params. */
static int __devinit get_flash_params(struct adapter *adap)
{
	int ret;
	u32 info;
	/* issue the JEDEC READ-ID command and pull back 3 ID bytes */
	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adap, 3, 0, 1, &info);
	t4_write_reg(adap, SF_OP, 0);                    /* unlock SF */
	if (ret)
		return ret;
	/* 0x20 is the JEDEC manufacturer ID this driver supports */
	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
		return -EINVAL;
	info >>= 16;                           /* log2 of size */
	/* sizes 1MB..8MB (2^20..2^23): one sector per 64KB (2^16);
	 * a 16MB part (2^24) reports 64 sectors; anything else unsupported */
	if (info >= 0x14 && info < 0x18)
		adap->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adap->params.sf_nsec = 64;
	else
		return -EINVAL;
	adap->params.sf_size = 1 << info;
	/* FW image lives at the boot address programmed in CIM_BOOT_CFG */
	adap->params.sf_fw_start =
		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
	return 0;
}
/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, identify the flash part, and read
 *	the VPD parameters.  (Note: this function takes no reset argument;
 *	an earlier doc line describing @reset was stale and has been removed.)
 */
int __devinit t4_prep_adapter(struct adapter *adapter)
{
	int ret;
	/* the device must be responding before we touch anything else */
	ret = t4_wait_dev_ready(adapter);
	if (ret < 0)
		return ret;
	get_pci_mode(adapter, &adapter->params.pci);
	adapter->params.rev = t4_read_reg(adapter, PL_REV);
	ret = get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;
	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	return 0;
}
/* Query FW for each physical port, allocate a VI per port, and fill in the
 * per-port SW state (MAC address, RSS slice, MDIO info, link config).
 * @mbox is the mailbox to use, @pf/@vf identify the owning function.
 * Returns 0 on success or a negative error number. */
int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;
	struct fw_rss_vi_config_cmd rvc;
	memset(&c, 0, sizeof(c));
	memset(&rvc, 0, sizeof(rvc));
	for_each_port(adap, i) {
		unsigned int rss_size;
		struct port_info *p = adap2pinfo(adap, i);
		/* advance j to the next physical port present in portvec;
		 * i indexes our ports, j the HW/FW port numbering */
		while ((adap->params.portvec & (1 << j)) == 0)
			j++;
		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = htonl(
			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;
		/* non-negative return is the VI id assigned by FW */
		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;
		p->viid = ret;
		p->tx_chan = j;
		p->lport = j;
		p->rss_size = rss_size;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
		memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
		adap->port[i]->dev_id = j;
		/* decode MDIO address and port type from the port-info reply */
		ret = ntohl(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
		p->mod_type = FW_PORT_MOD_TYPE_NA;
		/* fetch the default RSS mode for this VI */
		rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = htonl(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
		j++;
	}
	return 0;
}
| gpl-2.0 |
civato/V30B-SithLord | net/sched/sch_blackhole.c | 14137 | 1240 | /*
* net/sched/sch_blackhole.c Black hole queue
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Thomas Graf <tgraf@suug.ch>
*
* Note: Quantum tunneling is not supported.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
/*
 * Accept the packet for "transmission" and drop it on the floor.
 * qdisc_drop() frees the skb and bumps the drop statistics; we still
 * report NET_XMIT_SUCCESS so callers treat the packet as sent.
 */
static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_drop(skb, sch);
	return NET_XMIT_SUCCESS;
}
/* Nothing is ever queued, so there is never anything to dequeue. */
static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
{
	return NULL;
}
/* Qdisc registration: no private state, dequeue doubles as peek. */
static struct Qdisc_ops blackhole_qdisc_ops __read_mostly = {
	.id		= "blackhole",
	.priv_size	= 0,
	.enqueue	= blackhole_enqueue,
	.dequeue	= blackhole_dequeue,
	.peek		= blackhole_dequeue,
	.owner		= THIS_MODULE,
};
/* Register the blackhole qdisc with the packet scheduler core. */
static int __init blackhole_module_init(void)
{
	return register_qdisc(&blackhole_qdisc_ops);
}
/* Unregister the qdisc on module unload. */
static void __exit blackhole_module_exit(void)
{
	unregister_qdisc(&blackhole_qdisc_ops);
}
module_init(blackhole_module_init)
module_exit(blackhole_module_exit)
MODULE_LICENSE("GPL");
| gpl-2.0 |
Tekcafe/Test-kernel | drivers/broadcast/oneseg/tcc3530/tcc353x_hal.c | 58 | 6462 |
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include "tcpal_os.h"
#include "tcc353x_hal.h"
#ifndef _MODEL_L05E_
#define INCLUDE_LGE_SRC_EAR_ANT_SEL
#endif
#ifdef _MODEL_L05E_
#include "../../../../arch/arm/mach-msm/lge/L05E/board-L05E.h" /* PM8921_GPIO_PM_TO_SYS() */
#endif
#ifndef _MODEL_L05E_
#define ISDB_EN 85 /* GPIO 85 */
#endif
#define ISDB_INT_N 77 /* GPIO 77 */
#define ISDB_RESET_N 1 /* GPIO 1 */
#ifdef INCLUDE_LGE_SRC_EAR_ANT_SEL
#define ONESEG_EAR_ANT_SEL_P PM8921_GPIO_PM_TO_SYS(11) /* Internel/Ear antenna switch */
#endif
#ifdef _MODEL_L05E_
static struct regulator *reg_s4; //vdd 1.8V
static int pm8921_s4_mode = -1;
static struct regulator *reg_l10; //vdd 2.8V
static int pm8921_l10_mode = -1;
static struct regulator *reg_l29; //1.8v
static int pm8921_l29_mode = -1;
/*
 * Enable/disable the PM8921 S4 buck (1.8 V digital supply) for the tuner.
 * @onoff: non-zero to enable, 0 to disable.
 *
 * The regulator handle is acquired lazily on first call and cached in
 * reg_s4; pm8921_s4_mode tracks whether we currently hold an enable
 * reference (0 = enabled, -1 = not enabled), so disable is a no-op when
 * the supply was never enabled by us.  On any regulator failure the
 * handle is released and the cached state reset so the next call retries
 * from scratch.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): regulator_* symbols rely on <linux/regulator/consumer.h>
 * being pulled in indirectly (it is not included here) — verify.
 */
static int power_set_for_pm8921_s4(unsigned char onoff)
{
	int rc = -EINVAL;
	if(!reg_s4) {
		reg_s4 = regulator_get(NULL, "8921_s4");
		if (IS_ERR(reg_s4)) {
			pr_err("%s: line: %d, vreg_get failed (%ld)\n",
				__func__, __LINE__, PTR_ERR(reg_s4));
			rc = PTR_ERR(reg_s4);
			return rc;
		}
	}
	if (onoff)
	{
		rc = regulator_set_voltage(reg_s4, 1800000, 1800000);
		if (rc) {
			pr_err("%s: line: %d, unable to set pm8921_s4 voltage to 1.8 V\n",__func__,__LINE__);
			goto vreg_s4_fail;
		}
		rc = regulator_enable(reg_s4);
		if (rc) {
			pr_err("%s: line: %d, vreg_enable failed %d\n", __func__, __LINE__, rc);
			goto vreg_s4_fail;
		}
		pm8921_s4_mode = 0;
	}
	else
	{
		/* Only drop the enable reference we actually took. */
		if(pm8921_s4_mode == 0)
		{
			rc = regulator_disable(reg_s4);
			if (rc) {
				pr_err("%s: line: %d, vreg_disable failed %d\n",__func__, __LINE__, rc);
				goto vreg_s4_fail;
			}
			pm8921_s4_mode = -1;
		}
	}
	printk(KERN_INFO "%s: line: %d\n", __func__, __LINE__);
	return 0;
vreg_s4_fail:
	/* Release the handle so a later call re-acquires it cleanly. */
	regulator_put(reg_s4);
	reg_s4 = NULL;
	pm8921_s4_mode = -1;
	return rc;
}
/*
 * Enable/disable the PM8921 L10 LDO (2.8 V analog supply) for the tuner.
 * @onoff: non-zero to enable, 0 to disable.
 *
 * Same lazy-get / cached-mode pattern as power_set_for_pm8921_s4():
 * pm8921_l10_mode == 0 means we hold an enable reference.  On failure
 * the handle is released and state reset.  Returns 0 or negative errno.
 */
static int power_set_for_pm8921_l10(unsigned char onoff)
{
	int rc = -EINVAL;
	if(!reg_l10) {
		reg_l10 = regulator_get(NULL, "8921_l10");
		if (IS_ERR(reg_l10)) {
			pr_err("%s: line: %d, vreg_get failed (%ld)\n",
				__func__, __LINE__, PTR_ERR(reg_l10));
			rc = PTR_ERR(reg_l10);
			return rc;
		}
	}
	if (onoff)
	{
		rc = regulator_set_voltage(reg_l10, 2800000, 2800000);
		if (rc) {
			pr_err("%s: line: %d, unable to set pm8921_l10 voltage to 2.8 V\n",__func__,__LINE__);
			goto vreg_l10_fail;
		}
		rc = regulator_enable(reg_l10);
		if (rc) {
			pr_err("%s: line: %d, vreg_enable failed %d\n", __func__, __LINE__, rc);
			goto vreg_l10_fail;
		}
		pm8921_l10_mode = 0;
	}
	else
	{
		/* Only drop the enable reference we actually took. */
		if(pm8921_l10_mode == 0)
		{
			rc = regulator_disable(reg_l10);
			if (rc) {
				pr_err("%s: line: %d, vreg_disable failed %d\n",__func__, __LINE__, rc);
				goto vreg_l10_fail;
			}
			pm8921_l10_mode = -1;
		}
	}
	printk(KERN_INFO "%s: line: %d\n", __func__, __LINE__);
	return 0;
vreg_l10_fail:
	/* Release the handle so a later call re-acquires it cleanly. */
	regulator_put(reg_l10);
	reg_l10 = NULL;
	pm8921_l10_mode = -1;
	return rc;
}
/*
 * Enable/disable the PM8921 L29 LDO (1.8 V I/O supply) for the tuner.
 * @onoff: non-zero to enable, 0 to disable.
 *
 * Same lazy-get / cached-mode pattern as power_set_for_pm8921_s4():
 * pm8921_l29_mode == 0 means we hold an enable reference.  On failure
 * the handle is released and state reset.  Returns 0 or negative errno.
 */
static int power_set_for_pm8921_l29(unsigned char onoff)
{
	int rc = -EINVAL;
	if(!reg_l29) {
		reg_l29 = regulator_get(NULL, "8921_l29");
		if (IS_ERR(reg_l29)) {
			pr_err("%s: line: %d, vreg_get failed (%ld)\n",
				__func__, __LINE__, PTR_ERR(reg_l29));
			rc = PTR_ERR(reg_l29);
			return rc;
		}
	}
	if (onoff)
	{
		rc = regulator_set_voltage(reg_l29, 1800000, 1800000);
		if (rc) {
			pr_err("%s: line: %d, unable to set pm8921_l29 voltage to 1.8 V\n",__func__,__LINE__);
			goto vreg_l29_fail;
		}
		rc = regulator_enable(reg_l29);
		if (rc) {
			pr_err("%s: line: %d, vreg_enable failed %d\n", __func__, __LINE__, rc);
			goto vreg_l29_fail;
		}
		pm8921_l29_mode = 0;
	}
	else
	{
		/* Only drop the enable reference we actually took. */
		if(pm8921_l29_mode == 0)
		{
			rc = regulator_disable(reg_l29);
			if (rc) {
				pr_err("%s: line: %d, vreg_disable failed %d\n",__func__, __LINE__, rc);
				goto vreg_l29_fail;
			}
			pm8921_l29_mode = -1;
		}
	}
	printk(KERN_INFO "%s: line: %d\n", __func__, __LINE__);
	return 0;
vreg_l29_fail:
	/* Release the handle so a later call re-acquires it cleanly. */
	regulator_put(reg_l29);
	reg_l29 = NULL;
	pm8921_l29_mode = -1;
	return rc;
}
#endif
void TchalInit(void)
{
gpio_request(ISDB_RESET_N, "ISDB_RESET");
#ifdef _MODEL_L05E_
#else
gpio_request(ISDB_EN, "ISDB_EN");
#endif
gpio_request(ISDB_INT_N, "ISDB_INT");
#ifdef INCLUDE_LGE_SRC_EAR_ANT_SEL
/* Internel antenna:OFF, Ear antenna: ON, GPIO11:LOW (Saving power)*/
gpio_set_value_cansleep(ONESEG_EAR_ANT_SEL_P, 0); /* PMIC Extended GPIO */
#endif
gpio_direction_output(ISDB_RESET_N, false); /* output low */
#ifdef _MODEL_L05E_
power_set_for_pm8921_s4(0);
power_set_for_pm8921_l10(0);
power_set_for_pm8921_l29(0);
#else
gpio_direction_output(ISDB_EN, false); /* output low */
#endif
gpio_direction_input(ISDB_INT_N); /* input */
TcpalPrintStatus((I08S *)"[%s:%d]\n", __func__, __LINE__);
}
/*
 * Pulse the tuner's hardware reset line: high -> low -> high with 5ms
 * settle time between each edge, leaving the chip out of reset.
 */
void TchalResetDevice(void)
{
	gpio_set_value(ISDB_RESET_N, 1);	/* high ISDB_RESET_N */
	TcpalmSleep(5);
	gpio_set_value(ISDB_RESET_N, 0);	/* low ISDB_RESET_N */
	TcpalmSleep(5);
	gpio_set_value(ISDB_RESET_N, 1);	/* high ISDB_RESET_N */
	TcpalmSleep(5);
	TcpalPrintStatus((I08S *)"[%s:%d]\n", __func__, __LINE__);
}
/*
 * Power-on sequence for the tuner: select the internal antenna, bring
 * the supplies up (regulators on L05E, the enable GPIO otherwise),
 * wait 10ms for the rails to stabilize, then pulse reset and re-arm
 * the interrupt line.
 */
void TchalPowerOnDevice(void)
{
#ifdef INCLUDE_LGE_SRC_EAR_ANT_SEL
	/* Internel antenna:ON, Ear antenna: OFF, GPIO11: HIGH (Default: Use Internel Antenna )*/
	gpio_set_value_cansleep(ONESEG_EAR_ANT_SEL_P, 1); /* PMIC Extended GPIO */
#endif
#ifndef _MODEL_L05E_
	gpio_direction_output(ISDB_EN, false);	/* output low */
#endif
	gpio_direction_output(ISDB_RESET_N, false);	/* output low */
#ifdef _MODEL_L05E_
	power_set_for_pm8921_s4(1);
	power_set_for_pm8921_l10(1);
	power_set_for_pm8921_l29(1);
#else
	gpio_set_value(ISDB_EN, 1);	/* high ISDB_EN */
#endif
	TcpalmSleep(10);
	TchalResetDevice();
	TchalIrqSetup();
	TcpalPrintStatus((I08S *)"[%s:%d]\n", __func__, __LINE__);
}
/*
 * Power-off sequence: assert reset, drop the supplies (regulators on
 * L05E, the enable GPIO otherwise), and park the antenna switch in its
 * low-power ear-antenna position.
 */
void TchalPowerDownDevice(void)
{
	gpio_set_value(ISDB_RESET_N, 0);	/* low ISDB_RESET_N */
	TcpalmSleep(5);
#ifdef _MODEL_L05E_
	power_set_for_pm8921_s4(0);
	power_set_for_pm8921_l10(0);
	power_set_for_pm8921_l29(0);
#else
	gpio_set_value(ISDB_EN, 0);	/* low ISDB_EN */
#endif
#ifdef INCLUDE_LGE_SRC_EAR_ANT_SEL
	/* Internel antenna:OFF, Ear antenna: ON, GPIO11:LOW (Saving power)*/
	gpio_set_value_cansleep(ONESEG_EAR_ANT_SEL_P, 0); /* PMIC Extended GPIO */
#endif
	TcpalPrintStatus((I08S *)"[%s:%d]\n", __func__, __LINE__);
}
/* (Re)configure the tuner interrupt GPIO as an input. */
void TchalIrqSetup(void)
{
	gpio_direction_input(ISDB_INT_N);	/* input mode */
}
| gpl-2.0 |
Think-Silicon/linux-thinksilicon | drivers/infiniband/hw/qib/qib_sysfs.c | 314 | 21447 | /*
* Copyright (c) 2012 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/ctype.h>
#include "qib.h"
#include "qib_mad.h"
/* start of per-port functions */
/*
 * Report the port's current heartbeat-enable configuration as a decimal
 * value (OR of 1=enabled, 2=auto), queried from the chip-specific layer.
 */
static ssize_t show_hrtbt_enb(struct qib_pportdata *ppd, char *buf)
{
	struct qib_devdata *dd = ppd->dd;
	int enb = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_HRTBT);

	return scnprintf(buf, PAGE_SIZE, "%d\n", enb);
}
/*
 * Parse a numeric heartbeat-enable value from sysfs and hand it to the
 * chip-specific layer.  Returns the byte count consumed on success or a
 * negative errno (bad number, or the chip layer rejected the value).
 */
static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
			size_t count)
{
	struct qib_devdata *dd = ppd->dd;
	int ret;
	u16 val;
	ret = kstrtou16(buf, 0, &val);
	if (ret) {
		qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
		return ret;
	}
	/*
	 * Set the "intentional" heartbeat enable per either of
	 * "Enable" and "Auto", as these are normally set together.
	 * This bit is consulted when leaving loopback mode,
	 * because entering loopback mode overrides it and automatically
	 * disables heartbeat.
	 */
	ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
	return ret < 0 ? ret : count;
}
/*
 * Switch the IB link into (or out of) the loopback mode named in @buf;
 * the chip-specific handler parses the mode string itself.  Returns
 * @count on success, or the handler's negative error code.
 */
static ssize_t store_loopback(struct qib_pportdata *ppd, const char *buf,
			size_t count)
{
	struct qib_devdata *dd = ppd->dd;
	int r = dd->f_set_ib_loopback(ppd, buf);

	if (r < 0)
		return r;
	return count;
}
/*
 * Parse a numeric LED-override value from sysfs and apply it via
 * qib_set_led_override().  Returns the byte count consumed, or a
 * negative errno if the input is not a valid u16.
 */
static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
			size_t count)
{
	struct qib_devdata *dd = ppd->dd;
	int ret;
	u16 val;
	ret = kstrtou16(buf, 0, &val);
	if (ret) {
		qib_dev_err(dd, "attempt to set invalid LED override\n");
		return ret;
	}
	qib_set_led_override(ppd, val);
	return count;
}
/*
 * Print the raw 64-bit port status word in hex, or fail with -EINVAL
 * when the status page has not been set up for this port.
 */
static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
{
	if (!ppd->statusp)
		return -EINVAL;

	return scnprintf(buf, PAGE_SIZE, "0x%llx\n",
			 (unsigned long long) *(ppd->statusp));
}
/*
 * For userland compatibility, these offsets must remain fixed.
 * They are strings for QIB_STATUS_*
 * (index == bit position in the status word; "" = unnamed/reserved bit)
 */
static const char * const qib_status_str[] = {
	"Initted",
	"",
	"",
	"",
	"",
	"Present",
	"IB_link_up",
	"IB_configured",
	"",
	"Fatal_Hardware_Error",
	NULL,
};
/*
 * Decode the port status word into a space-separated list of the set
 * bits' names.  Stops early if the output would overflow PAGE_SIZE.
 * Returns the string length, or -EINVAL if there is no status page.
 */
static ssize_t show_status_str(struct qib_pportdata *ppd, char *buf)
{
	int i, any;
	u64 s;
	ssize_t ret;
	if (!ppd->statusp) {
		ret = -EINVAL;
		goto bail;
	}
	s = *(ppd->statusp);
	*buf = '\0';
	/* walk the status bits LSB-first, appending each named set bit */
	for (any = i = 0; s && qib_status_str[i]; i++) {
		if (s & 1) {
			/* if overflow */
			if (any && strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
				break;
			if (strlcat(buf, qib_status_str[i], PAGE_SIZE) >=
					PAGE_SIZE)
				break;
			any = 1;
		}
		s >>= 1;
	}
	if (any)
		strlcat(buf, "\n", PAGE_SIZE);
	ret = strlen(buf);
bail:
	return ret;
}
/* end of per-port functions */
/*
 * Start of per-port file structures and support code
 * Because we are fitting into other infrastructure, we have to supply the
 * full set of kobject/sysfs_ops structures and routines.
 */
/* Declare a named per-port sysfs attribute bound to show/store handlers. */
#define QIB_PORT_ATTR(name, mode, show, store) \
	static struct qib_port_attr qib_port_attr_##name = \
		__ATTR(name, mode, show, store)
/* Per-port attribute: handlers take the port data instead of a device. */
struct qib_port_attr {
	struct attribute attr;
	ssize_t (*show)(struct qib_pportdata *, char *);
	ssize_t (*store)(struct qib_pportdata *, const char *, size_t);
};
QIB_PORT_ATTR(loopback, S_IWUSR, NULL, store_loopback);
QIB_PORT_ATTR(led_override, S_IWUSR, NULL, store_led_override);
QIB_PORT_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
	      store_hrtbt_enb);
QIB_PORT_ATTR(status, S_IRUGO, show_status, NULL);
QIB_PORT_ATTR(status_str, S_IRUGO, show_status_str, NULL);
/* Attribute set installed under each port's "linkcontrol" kobject. */
static struct attribute *port_default_attributes[] = {
	&qib_port_attr_loopback.attr,
	&qib_port_attr_led_override.attr,
	&qib_port_attr_hrtbt_enable.attr,
	&qib_port_attr_status.attr,
	&qib_port_attr_status_str.attr,
	NULL
};
/*
 * Start of per-port congestion control structures and support code
 */
/*
 * Congestion control table size followed by table entries
 */
/*
 * Binary sysfs read of the shadow congestion-control table: a __be16
 * entry count followed by the table entries.  Honors partial reads via
 * @pos/@count; the shadow copy is sampled under cc_shadow_lock.
 */
static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
		struct bin_attribute *bin_attr,
		char *buf, loff_t pos, size_t count)
{
	int ret;
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_cc_kobj);
	if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
		return -EINVAL;
	/* total exported size: length prefix + all table entries */
	ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
		 + sizeof(__be16);
	if (pos > ret)
		return -EINVAL;
	if (count > ret - pos)
		count = ret - pos;
	if (!count)
		return count;
	spin_lock(&ppd->cc_shadow_lock);
	memcpy(buf, ppd->ccti_entries_shadow, count);
	spin_unlock(&ppd->cc_shadow_lock);
	return count;
}
/* kobjects here live inside qib_pportdata; nothing separate to free. */
static void qib_port_release(struct kobject *kobj)
{
	/* nothing to do since memory is freed by qib_free_devdata() */
}
static struct kobj_type qib_port_cc_ktype = {
	.release = qib_port_release,
};
static struct bin_attribute cc_table_bin_attr = {
	.attr = {.name = "cc_table_bin", .mode = 0444},
	.read = read_cc_table_bin,
	.size = PAGE_SIZE,
};
/*
 * Congestion settings: port control, control map and an array of 16
 * entries for the congestion entries - increase, timer, event log
 * trigger threshold and the minimum injection rate delay.
 */
/*
 * Binary sysfs read of the shadow congestion-settings structure, with
 * partial-read support; sampled under cc_shadow_lock like the table.
 */
static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
		struct bin_attribute *bin_attr,
		char *buf, loff_t pos, size_t count)
{
	int ret;
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_cc_kobj);
	if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
		return -EINVAL;
	ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);
	if (pos > ret)
		return -EINVAL;
	if (count > ret - pos)
		count = ret - pos;
	if (!count)
		return count;
	spin_lock(&ppd->cc_shadow_lock);
	memcpy(buf, ppd->congestion_entries_shadow, count);
	spin_unlock(&ppd->cc_shadow_lock);
	return count;
}
static struct bin_attribute cc_setting_bin_attr = {
	.attr = {.name = "cc_settings_bin", .mode = 0444},
	.read = read_cc_setting_bin,
	.size = PAGE_SIZE,
};
/* Dispatch a sysfs read to the qib_port_attr's show handler. */
static ssize_t qib_portattr_show(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct qib_port_attr *pattr =
		container_of(attr, struct qib_port_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_kobj);
	return pattr->show(ppd, buf);
}
/* Dispatch a sysfs write to the qib_port_attr's store handler. */
static ssize_t qib_portattr_store(struct kobject *kobj,
	struct attribute *attr, const char *buf, size_t len)
{
	struct qib_port_attr *pattr =
		container_of(attr, struct qib_port_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, pport_kobj);
	return pattr->store(ppd, buf, len);
}
static const struct sysfs_ops qib_port_ops = {
	.show = qib_portattr_show,
	.store = qib_portattr_store,
};
/* kobj_type for the per-port "linkcontrol" directory. */
static struct kobj_type qib_port_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_port_ops,
	.default_attrs = port_default_attributes
};
/* Start sl2vl */
/* One read-only attribute per service level, named by its SL number. */
#define QIB_SL2VL_ATTR(N) \
	static struct qib_sl2vl_attr qib_sl2vl_attr_##N = { \
		.attr  = { .name = __stringify(N), .mode = 0444 }, \
		.sl = N \
	}
struct qib_sl2vl_attr {
	struct attribute attr;
	int sl;		/* service level this attribute reports */
};
QIB_SL2VL_ATTR(0);
QIB_SL2VL_ATTR(1);
QIB_SL2VL_ATTR(2);
QIB_SL2VL_ATTR(3);
QIB_SL2VL_ATTR(4);
QIB_SL2VL_ATTR(5);
QIB_SL2VL_ATTR(6);
QIB_SL2VL_ATTR(7);
QIB_SL2VL_ATTR(8);
QIB_SL2VL_ATTR(9);
QIB_SL2VL_ATTR(10);
QIB_SL2VL_ATTR(11);
QIB_SL2VL_ATTR(12);
QIB_SL2VL_ATTR(13);
QIB_SL2VL_ATTR(14);
QIB_SL2VL_ATTR(15);
static struct attribute *sl2vl_default_attributes[] = {
	&qib_sl2vl_attr_0.attr,
	&qib_sl2vl_attr_1.attr,
	&qib_sl2vl_attr_2.attr,
	&qib_sl2vl_attr_3.attr,
	&qib_sl2vl_attr_4.attr,
	&qib_sl2vl_attr_5.attr,
	&qib_sl2vl_attr_6.attr,
	&qib_sl2vl_attr_7.attr,
	&qib_sl2vl_attr_8.attr,
	&qib_sl2vl_attr_9.attr,
	&qib_sl2vl_attr_10.attr,
	&qib_sl2vl_attr_11.attr,
	&qib_sl2vl_attr_12.attr,
	&qib_sl2vl_attr_13.attr,
	&qib_sl2vl_attr_14.attr,
	&qib_sl2vl_attr_15.attr,
	NULL
};
/* Report the virtual lane currently mapped to this attribute's SL. */
static ssize_t sl2vl_attr_show(struct kobject *kobj, struct attribute *attr,
			       char *buf)
{
	struct qib_sl2vl_attr *sattr =
		container_of(attr, struct qib_sl2vl_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, sl2vl_kobj);
	struct qib_ibport *qibp = &ppd->ibport_data;
	return sprintf(buf, "%u\n", qibp->sl_to_vl[sattr->sl]);
}
static const struct sysfs_ops qib_sl2vl_ops = {
	.show = sl2vl_attr_show,
};
/* kobj_type for the per-port "sl2vl" directory (read-only). */
static struct kobj_type qib_sl2vl_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_sl2vl_ops,
	.default_attrs = sl2vl_default_attributes
};
/* End sl2vl */
/* Start diag_counters */
/*
 * One read/write attribute per diagnostic counter; .counter stores the
 * byte offset of the u32 n_<N> field inside struct qib_ibport.
 */
#define QIB_DIAGC_ATTR(N) \
	static struct qib_diagc_attr qib_diagc_attr_##N = { \
		.attr  = { .name = __stringify(N), .mode = 0664 }, \
		.counter = offsetof(struct qib_ibport, n_##N) \
	}
struct qib_diagc_attr {
	struct attribute attr;
	size_t counter;		/* offset of the counter in qib_ibport */
};
QIB_DIAGC_ATTR(rc_resends);
QIB_DIAGC_ATTR(rc_acks);
QIB_DIAGC_ATTR(rc_qacks);
QIB_DIAGC_ATTR(rc_delayed_comp);
QIB_DIAGC_ATTR(seq_naks);
QIB_DIAGC_ATTR(rdma_seq);
QIB_DIAGC_ATTR(rnr_naks);
QIB_DIAGC_ATTR(other_naks);
QIB_DIAGC_ATTR(rc_timeouts);
QIB_DIAGC_ATTR(loop_pkts);
QIB_DIAGC_ATTR(pkt_drops);
QIB_DIAGC_ATTR(dmawait);
QIB_DIAGC_ATTR(unaligned);
QIB_DIAGC_ATTR(rc_dupreq);
QIB_DIAGC_ATTR(rc_seqnak);
static struct attribute *diagc_default_attributes[] = {
	&qib_diagc_attr_rc_resends.attr,
	&qib_diagc_attr_rc_acks.attr,
	&qib_diagc_attr_rc_qacks.attr,
	&qib_diagc_attr_rc_delayed_comp.attr,
	&qib_diagc_attr_seq_naks.attr,
	&qib_diagc_attr_rdma_seq.attr,
	&qib_diagc_attr_rnr_naks.attr,
	&qib_diagc_attr_other_naks.attr,
	&qib_diagc_attr_rc_timeouts.attr,
	&qib_diagc_attr_loop_pkts.attr,
	&qib_diagc_attr_pkt_drops.attr,
	&qib_diagc_attr_dmawait.attr,
	&qib_diagc_attr_unaligned.attr,
	&qib_diagc_attr_rc_dupreq.attr,
	&qib_diagc_attr_rc_seqnak.attr,
	NULL
};
/* Read a counter by dereferencing its recorded offset into qib_ibport. */
static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr,
			       char *buf)
{
	struct qib_diagc_attr *dattr =
		container_of(attr, struct qib_diagc_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, diagc_kobj);
	struct qib_ibport *qibp = &ppd->ibport_data;
	return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter));
}
/* Overwrite a counter with a user-supplied value (e.g. to zero it). */
static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t size)
{
	struct qib_diagc_attr *dattr =
		container_of(attr, struct qib_diagc_attr, attr);
	struct qib_pportdata *ppd =
		container_of(kobj, struct qib_pportdata, diagc_kobj);
	struct qib_ibport *qibp = &ppd->ibport_data;
	u32 val;
	int ret;
	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;
	*(u32 *)((char *) qibp + dattr->counter) = val;
	return size;
}
static const struct sysfs_ops qib_diagc_ops = {
	.show = diagc_attr_show,
	.store = diagc_attr_store,
};
/* kobj_type for the per-port "diag_counters" directory. */
static struct kobj_type qib_diagc_ktype = {
	.release = qib_port_release,
	.sysfs_ops = &qib_diagc_ops,
	.default_attrs = diagc_default_attributes
};
/* End diag_counters */
/* end of per-port file structures and support code */
/*
* Start of per-unit (or driver, in some cases, but replicated
* per unit) functions (these get a device *)
*/
/* Report the chip minor revision in hex. */
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, ibdev.dev);
	return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}
/*
 * Report the board/HCA name string, or -EINVAL if it has not been
 * discovered yet.  (Serves both the hca_type and board_id attributes.)
 */
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);

	if (!dd->boardname)
		return -EINVAL;

	return scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
}
/* Report the driver version string (shared, not per-device). */
static ssize_t show_version(struct device *device,
			    struct device_attribute *attr, char *buf)
{
	/* The string printed here is already newline-terminated. */
	return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
}
/* Report the board firmware/version string captured at init time. */
static ssize_t show_boardversion(struct device *device,
				 struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);
	/* The string printed here is already newline-terminated. */
	return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
}
/* Report the local bus (PCIe) description string. */
static ssize_t show_localbus_info(struct device *device,
				  struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);
	/* The string printed here is already newline-terminated. */
	return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
}
/* Report how many user contexts this device supports in total. */
static ssize_t show_nctxts(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);
	/* Return the number of user ports (contexts) available. */
	/* The calculation below deals with a special case where
	 * cfgctxts is set to 1 on a single-port board. */
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
			 (dd->cfgctxts - dd->first_user_ctxt));
}
/* Report how many user contexts are currently unallocated. */
static ssize_t show_nfreectxts(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);
	/* Return the number of free user ports (contexts) available. */
	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
}
/*
 * Report the board serial number.  dd->serial is a fixed-size byte
 * array that may not be NUL-terminated, so a terminator is planted one
 * past the copied bytes before appending the newline.
 */
static ssize_t show_serial(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);
	buf[sizeof dd->serial] = '\0';
	memcpy(buf, dd->serial, sizeof dd->serial);
	strcat(buf, "\n");
	return strlen(buf);
}
/*
 * Trigger a chip reset when the exact string "reset" is written, and
 * only while a diagnostic client holds the device (dd->diag_client).
 * Returns the byte count on success or a negative errno.
 */
static ssize_t store_chip_reset(struct device *device,
				struct device_attribute *attr, const char *buf,
				size_t count)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);
	int ret;
	if (count < 5 || memcmp(buf, "reset", 5) || !dd->diag_client) {
		ret = -EINVAL;
		goto bail;
	}
	ret = qib_reset_device(dd->unit);
bail:
	return ret < 0 ? ret : count;
}
/*
 * Dump the per-category error counters persisted in the EEPROM, as a
 * space-separated decimal list terminated by a newline.  The EEPROM log
 * is flushed first so the values reflect current state.
 */
static ssize_t show_logged_errs(struct device *device,
				struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);
	int idx, count;
	/* force consistency with actual EEPROM */
	if (qib_update_eeprom_log(dd) != 0)
		return -ENXIO;
	count = 0;
	for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
		count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
				   dd->eep_st_errs[idx],
				   idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
	}
	return count;
}
/*
 * Dump tempsense regs. in decimal, to ease shell-scripts.
 */
static ssize_t show_tempsense(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct qib_ibdev *dev =
		container_of(device, struct qib_ibdev, ibdev.dev);
	struct qib_devdata *dd = dd_from_dev(dev);
	int ret;
	int idx;
	u8 regvals[8];
	ret = -ENXIO;
	/* read registers 0-7; register 6 is skipped (never printed). */
	for (idx = 0; idx < 8; ++idx) {
		if (idx == 6)
			continue;
		ret = dd->f_tempsense_rd(dd, idx);
		if (ret < 0)
			break;		/* propagate the read error */
		regvals[idx] = ret;
	}
	/* idx == 8 only when every read succeeded; otherwise ret < 0. */
	if (idx == 8)
		ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
				*(signed char *)(regvals),
				*(signed char *)(regvals + 1),
				regvals[2], regvals[3],
				*(signed char *)(regvals + 5),
				*(signed char *)(regvals + 7));
	return ret;
}
/*
* end of per-unit (or driver, in some cases, but replicated
* per unit) functions
*/
/* start of per-unit file structures and support code */
/* Per-unit device attributes; hca_type and board_id share one handler. */
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
/* All per-unit attributes registered in qib_verbs_register_sysfs(). */
static struct device_attribute *qib_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id,
	&dev_attr_version,
	&dev_attr_nctxts,
	&dev_attr_nfreectxts,
	&dev_attr_serial,
	&dev_attr_boardversion,
	&dev_attr_logged_errors,
	&dev_attr_tempsense,
	&dev_attr_localbus_info,
	&dev_attr_chip_reset,
};
/*
 * qib_create_port_files - create the per-port sysfs hierarchy
 * @ibdev: the IB device
 * @port_num: 1-based port number
 * @kobj: parent kobject (the port directory under the IB class device)
 *
 * Creates the "linkcontrol", "sl2vl" and "diag_counters" kobjects, and,
 * when congestion control is configured, the "CCMgtA" kobject with its
 * two binary attribute files.  On any failure everything created so far
 * is torn down via the goto-unwind chain.  Returns 0 or negative errno.
 */
int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
			  struct kobject *kobj)
{
	struct qib_pportdata *ppd;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret;
	if (!port_num || port_num > dd->num_pports) {
		qib_dev_err(dd,
			"Skipping infiniband class with invalid port %u\n",
			port_num);
		ret = -ENODEV;
		goto bail;
	}
	ppd = &dd->pport[port_num - 1];
	ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
				   "linkcontrol");
	if (ret) {
		qib_dev_err(dd,
			"Skipping linkcontrol sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail;
	}
	kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
	ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
				   "sl2vl");
	if (ret) {
		qib_dev_err(dd,
			"Skipping sl2vl sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_link;
	}
	kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
	ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
				   "diag_counters");
	if (ret) {
		qib_dev_err(dd,
			"Skipping diag_counters sysfs info, (err %d) port %u\n",
			ret, port_num);
		goto bail_sl;
	}
	kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
	/* Congestion control files exist only when CC is configured. */
	if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
		return 0;
	ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype,
				kobj, "CCMgtA");
	if (ret) {
		qib_dev_err(dd,
		 "Skipping Congestion Control sysfs info, (err %d) port %u\n",
		 ret, port_num);
		goto bail_diagc;
	}
	kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
	ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
				&cc_setting_bin_attr);
	if (ret) {
		qib_dev_err(dd,
		 "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
		 ret, port_num);
		goto bail_cc;
	}
	ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
				&cc_table_bin_attr);
	if (ret) {
		qib_dev_err(dd,
		 "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
		 ret, port_num);
		goto bail_cc_entry_bin;
	}
	qib_devinfo(dd->pcidev,
		 "IB%u: Congestion Control Agent enabled for port %d\n",
		 dd->unit, port_num);
	return 0;
/* Unwind in reverse creation order. */
bail_cc_entry_bin:
	sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
bail_cc:
	kobject_put(&ppd->pport_cc_kobj);
bail_diagc:
	kobject_put(&ppd->diagc_kobj);
bail_sl:
	kobject_put(&ppd->sl2vl_kobj);
bail_link:
	kobject_put(&ppd->pport_kobj);
bail:
	return ret;
}
/*
 * Register and create our files in /sys/class/infiniband.
 *
 * On failure, remove any attribute files already created so nothing is
 * leaked (the original code returned immediately, leaving earlier files
 * registered with no one to remove them).
 */
int qib_verbs_register_sysfs(struct qib_devdata *dd)
{
	struct ib_device *dev = &dd->verbs_dev.ibdev;
	int i, ret;
	for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
		ret = device_create_file(&dev->dev, qib_attributes[i]);
		if (ret)
			goto bail;
	}
	return 0;
bail:
	/*
	 * device_remove_file() on an attribute that was never created is
	 * harmless, so just sweep the whole table.
	 */
	for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i)
		device_remove_file(&dev->dev, qib_attributes[i]);
	return ret;
}
/*
 * Unregister and remove our files in /sys/class/infiniband.
 * Tears down the per-port congestion-control files (when present) and
 * drops the per-port kobject references taken in qib_create_port_files().
 */
void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int i;
	for (i = 0; i < dd->num_pports; i++) {
		ppd = &dd->pport[i];
		if (qib_cc_table_size &&
			ppd->congestion_entries_shadow) {
			sysfs_remove_bin_file(&ppd->pport_cc_kobj,
				&cc_setting_bin_attr);
			sysfs_remove_bin_file(&ppd->pport_cc_kobj,
				&cc_table_bin_attr);
			kobject_put(&ppd->pport_cc_kobj);
		}
		kobject_put(&ppd->sl2vl_kobj);
		kobject_put(&ppd->pport_kobj);
	}
}
| gpl-2.0 |
evil-at-wow/cmangos-wotlk | dep/ACE_wrappers/ace/Stream.cpp | 314 | 17708 | // Stream.cpp
// $Id: Stream.cpp 90072 2010-05-04 21:34:39Z cbeaulac $
#ifndef ACE_STREAM_CPP
#define ACE_STREAM_CPP
//#include "ace/Module.h"
#include "ace/Stream.h"
#if !defined (ACE_LACKS_PRAGMA_ONCE)
# pragma once
#endif /* ACE_LACKS_PRAGMA_ONCE */
#include "ace/Stream_Modules.h"
#include "ace/OS_NS_string.h"
#if !defined (__ACE_INLINE__)
#include "ace/Stream.inl"
#endif /* __ACE_INLINE__ */
ACE_BEGIN_VERSIONED_NAMESPACE_DECL
ACE_ALLOC_HOOK_DEFINE(ACE_Stream)
// Give some idea of what the heck is going on in a stream!
// Debug dump of the whole stream: walks the module chain head-to-tail,
// then the writer-side task chain downstream and the reader-side task
// chain upstream.  When this stream is linked to another one, the walk
// also stops at the partner stream's boundary tasks.  Compiled away
// unless ACE_HAS_DUMP is defined.
template <ACE_SYNCH_DECL> void
ACE_Stream<ACE_SYNCH_USE>::dump (void) const
{
#if defined (ACE_HAS_DUMP)
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::dump");
  ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("-------- module links --------\n")));
  for (ACE_Module<ACE_SYNCH_USE> *mp = this->stream_head_;
       ;
       mp = mp->next ())
    {
      ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("module name = %s\n"), mp->name ()));
      if (mp == this->stream_tail_)
        break;
    }
  ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("-------- writer links --------\n")));
  ACE_Task<ACE_SYNCH_USE> *tp;
  for (tp = this->stream_head_->writer ();
       ;
       tp = tp->next ())
    {
      ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("writer queue name = %s\n"), tp->name ()));
      tp->dump ();
      ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("-------\n")));
      if (tp == this->stream_tail_->writer ()
          || (this->linked_us_
              && tp == this->linked_us_->stream_head_->reader ()))
        break;
    }
  ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("-------- reader links --------\n")));
  for (tp = this->stream_tail_->reader (); ; tp = tp->next ())
    {
      ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("reader queue name = %s\n"), tp->name ()));
      tp->dump ();
      ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("-------\n")));
      if (tp == this->stream_head_->reader ()
          || (this->linked_us_
              && tp == this->linked_us_->stream_head_->writer ()))
        break;
    }
#endif /* ACE_HAS_DUMP */
}
// Push a new module onto the stream, splicing it in directly beneath
// the stream head.  Returns 0 on success, -1 on failure.
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::push (ACE_Module<ACE_SYNCH_USE> *new_top)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::push");
  int const result = this->push_module (new_top,
                                        this->stream_head_->next (),
                                        this->stream_head_);
  return result == -1 ? -1 : 0;
}
// Send a message down the stream by handing it to the head module's
// writer task; an optional timeout bounds the enqueue.
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::put (ACE_Message_Block *mb, ACE_Time_Value *tv)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::put");
  return this->stream_head_->writer ()->put (mb, tv);
}
// Receive the next message that has propagated up to the head module's
// reader queue; an optional timeout bounds the dequeue.
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::get (ACE_Message_Block *&mb, ACE_Time_Value *tv)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::get");
  return this->stream_head_->reader ()->getq (mb, tv);
}
// Return the "top" ACE_Module in a ACE_Stream, skipping over the
// stream_head.
// Return (via <m>) the topmost user module, i.e. the one directly below
// the stream head.  Fails with -1 when only head and tail remain;
// <m> is left untouched in that case.
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::top (ACE_Module<ACE_SYNCH_USE> *&m)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::top");
  ACE_Module<ACE_SYNCH_USE> *const below_head = this->stream_head_->next ();
  if (below_head == this->stream_tail_)
    return -1;
  m = below_head;
  return 0;
}
// Insert <mod> directly below the module named <prev_name>.  Opens the
// new module's reader and writer tasks after linking.  Returns -1 if no
// module with that name exists, if <prev_name> names the stream tail,
// or if either task's open() fails (the module stays linked in that
// error case).
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::insert (const ACE_TCHAR *prev_name,
                                   ACE_Module<ACE_SYNCH_USE> *mod)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::insert");
  for (ACE_Module<ACE_SYNCH_USE> *prev_mod = this->stream_head_;
       prev_mod != 0;
       prev_mod = prev_mod->next ())
    if (ACE_OS::strcmp (prev_mod->name (), prev_name) == 0)
      {
        ACE_Module<ACE_SYNCH_USE> *next_mod = prev_mod->next ();
        // We can't insert a module below <stream_tail_>.
        if (next_mod == 0)
          return -1;
        // Splice <mod> between <prev_mod> and <next_mod>.
        mod->link (next_mod);
        prev_mod->link (mod);
        if (mod->reader ()->open (mod->arg ()) == -1)
          return -1;
        if (mod->writer ()->open (mod->arg ()) == -1)
          return -1;
        return 0;
      }
  // No module named <prev_name> was found.
  return -1;
}
// Replace the module named <replace_name> with <mod>, handling the
// special cases where the replaced module is the stream head or tail.
// <flags> controls whether the old module is closed and deleted
// (anything other than M_DELETE_NONE deletes it).  Returns -1 if the
// name isn't found or either of <mod>'s tasks fails to open.
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::replace (const ACE_TCHAR *replace_name,
                                    ACE_Module<ACE_SYNCH_USE> *mod,
                                    int flags)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::replace");
  ACE_Module<ACE_SYNCH_USE> *prev_mod = 0;
  for (ACE_Module<ACE_SYNCH_USE> *rep_mod = this->stream_head_;
       rep_mod != 0;
       rep_mod = rep_mod->next ())
    if (ACE_OS::strcmp (rep_mod->name (), replace_name) == 0)
      {
        ACE_Module<ACE_SYNCH_USE> *next_mod = rep_mod->next ();
        if (next_mod)
          mod->link (next_mod);
        else // In case the <next_mod> is <stream_tail_>.
          {
            // <mod> becomes the new tail: terminate both chains.
            mod->writer ()->next (0);
            mod->next (0);
            this->stream_tail_ = mod;
          }
        if (prev_mod)
          prev_mod->link (mod);
        else // In case the <rep_mod> is <stream_head_>.
          {
            // <mod> becomes the new head: terminate the reader chain.
            mod->reader ()->next (0);
            this->stream_head_ = mod;
          }
        if (mod->reader ()->open (mod->arg ()) == -1)
          return -1;
        if (mod->writer ()->open (mod->arg ()) == -1)
          return -1;
        // Optionally close and reclaim the replaced module.
        if (flags != ACE_Module<ACE_SYNCH_USE>::M_DELETE_NONE)
          {
            rep_mod->close (flags);
            delete rep_mod;
          }
        return 0;
      }
    else
      prev_mod = rep_mod;
  // No module named <replace_name> was found.
  return -1;
}
// Remove the "top" ACE_Module in a ACE_Stream, skipping over the
// stream_head.
// Remove the topmost user module (the one below the stream head).
// Returns -1 when only head and tail remain.  <flags> controls whether
// the popped module is deleted after being closed.
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::pop (int flags)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::pop");
  if (this->stream_head_->next () == this->stream_tail_)
    return -1;
  else
    {
      // Skip over the ACE_Stream head.
      ACE_Module<ACE_SYNCH_USE> *top_mod = this->stream_head_->next ();
      ACE_Module<ACE_SYNCH_USE> *new_top = top_mod->next ();
      this->stream_head_->next (new_top);
      // Close the top ACE_Module.
      top_mod->close (flags);
      // Don't delete the Module unless the flags request this.
      if (flags != ACE_Module<ACE_SYNCH_USE>::M_DELETE_NONE)
        delete top_mod;
      // Re-stitch the task chains around the removed module.
      this->stream_head_->writer ()->next (new_top->writer ());
      new_top->reader ()->next (this->stream_head_->reader ());
      return 0;
    }
}
// Remove a named ACE_Module from an arbitrary place in the
// ACE_Stream.
// Remove the module named <name> from anywhere in the stream.  <flags>
// controls whether the module is closed and deleted.  Returns -1 (with
// a warning log) if no module with that name is found.
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::remove (const ACE_TCHAR *name,
                                   int flags)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::remove");
  ACE_Module<ACE_SYNCH_USE> *prev = 0;
  for (ACE_Module<ACE_SYNCH_USE> *mod = this->stream_head_;
       mod != 0;
       mod = mod->next ())
    {
#ifndef ACE_NLOGGING
      // Trace each comparison when debugging is enabled at runtime.
      if (ACE::debug ())
        {
          ACE_DEBUG ((LM_DEBUG,
                      ACE_TEXT ("ACE_Stream::remove comparing existing module :%s: with :%s:\n"),
                      mod->name (),
                      name));
        }
#endif
      if (ACE_OS::strcmp (mod->name (), name) == 0)
        {
          if (prev == 0) // Deleting ACE_Stream Head
            this->stream_head_->link (mod->next ());
          else
            prev->link (mod->next ());
          // Don't delete the Module unless the flags request this.
          if (flags != ACE_Module<ACE_SYNCH_USE>::M_DELETE_NONE)
            {
              // Close down the module and release the memory.
              mod->close (flags);
              delete mod;
            }
          return 0;
        }
      else
        prev = mod;
    }
  ACE_DEBUG ((LM_WARNING, ACE_TEXT ("ACE_Stream::remove failed to find module with name %s to remove\n"),name));
  return -1;
}
// Locate and return the module named <name>, or 0 if no module in the
// stream has that name.
template <ACE_SYNCH_DECL> ACE_Module<ACE_SYNCH_USE> *
ACE_Stream<ACE_SYNCH_USE>::find (const ACE_TCHAR *name)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::find");
  ACE_Module<ACE_SYNCH_USE> *current = this->stream_head_;
  while (current != 0
         && ACE_OS::strcmp (current->name (), name) != 0)
    current = current->next ();
  // Either the matching module or 0 when the chain was exhausted.
  return current;
}
// Actually push a module onto the stack...
// Links <new_top> above <current_top> (which may be 0 when pushing the
// very first module) and below <head> (also may be 0), wiring both the
// reader and writer task chains, then opens the new module's tasks.
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::push_module (ACE_Module<ACE_SYNCH_USE> *new_top,
                                        ACE_Module<ACE_SYNCH_USE> *current_top,
                                        ACE_Module<ACE_SYNCH_USE> *head)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::push_module");
  ACE_Task<ACE_SYNCH_USE> *nt_reader = new_top->reader ();
  ACE_Task<ACE_SYNCH_USE> *nt_writer = new_top->writer ();
  ACE_Task<ACE_SYNCH_USE> *ct_reader = 0;
  ACE_Task<ACE_SYNCH_USE> *ct_writer = 0;
  if (current_top)
    {
      ct_reader = current_top->reader ();
      ct_writer = current_top->writer ();
      // Messages flowing upstream go from the old top to the new top.
      ct_reader->next (nt_reader);
    }
  // Messages flowing downstream continue into the old top's writer
  // (or 0 when this is the first module).
  nt_writer->next (ct_writer);
  if (head)
    {
      if (head != new_top)
        head->link (new_top);
    }
  else
    // No head yet: the new module's reader chain terminates here.
    nt_reader->next (0);
  new_top->next (current_top);
  if (nt_reader->open (new_top->arg ()) == -1)
    return -1;
  if (nt_writer->open (new_top->arg ()) == -1)
    return -1;
  return 0;
}
// Initialize the stream: create default head/tail modules when the
// caller didn't supply them, then push tail and head in place.  <a> is
// the argument handed to every module task's open().  Serialized by
// this->lock_.
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::open (void *a,
                                 ACE_Module<ACE_SYNCH_USE> *head,
                                 ACE_Module<ACE_SYNCH_USE> *tail)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::open");
  ACE_GUARD_RETURN (ACE_SYNCH_MUTEX_T, ace_mon, this->lock_, -1);
  ACE_Task<ACE_SYNCH_USE> *h1 = 0, *h2 = 0;
  ACE_Task<ACE_SYNCH_USE> *t1 = 0, *t2 = 0;
  if (head == 0)
    {
      // Build a default head module owning both of its tasks.
      ACE_NEW_RETURN (h1,
                      ACE_Stream_Head<ACE_SYNCH_USE>,
                      -1);
      ACE_NEW_RETURN (h2,
                      ACE_Stream_Head<ACE_SYNCH_USE>,
                      -1);
      ACE_NEW_RETURN (head,
                      ACE_Module<ACE_SYNCH_USE> (ACE_TEXT ("ACE_Stream_Head"),
                                                 h1, h2,
                                                 a,
                                                 M_DELETE),
                      -1);
    }
  if (tail == 0)
    {
      // Build a default tail module owning both of its tasks.
      ACE_NEW_RETURN (t1,
                      ACE_Stream_Tail<ACE_SYNCH_USE>,
                      -1);
      ACE_NEW_RETURN (t2,
                      ACE_Stream_Tail<ACE_SYNCH_USE>,
                      -1);
      ACE_NEW_RETURN (tail,
                      ACE_Module<ACE_SYNCH_USE> (ACE_TEXT ("ACE_Stream_Tail"),
                                                 t1, t2,
                                                 a,
                                                 M_DELETE),
                      -1);
    }
  // Make sure *all* the allocation succeeded!
  // NOTE(review): ACE_NEW_RETURN already returned -1 on any allocation
  // failure above, and it re-assigns head/tail when they were 0, so
  // this condition appears unreachable here - historical belt-and-braces.
  if ((head == 0 && (h1 == 0 || h2 == 0))
      || (tail == 0 && (t1 == 0 || t2 == 0)))
    {
      delete h1;
      delete h2;
      delete t1;
      delete t2;
      delete head;
      delete tail;
      errno = ENOMEM;
      return -1;
    }
  this->stream_head_ = head;
  this->stream_tail_ = tail;
  // Push the tail first (no module below it), then the head above it.
  if (this->push_module (this->stream_tail_) == -1)
    return -1;
  else if (this->push_module (this->stream_head_,
                              this->stream_tail_,
                              this->stream_head_) == -1)
    return -1;
  return 0;
}
// Tear the stream down: unlink from any peer stream, pop every
// intermediate module, close and delete head and tail, then signal
// waiters via final_close_.  Returns 0 when already closed; otherwise
// -1 if any pop/close failed along the way.  Serialized by this->lock_.
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::close (int flags)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::close");
  ACE_GUARD_RETURN (ACE_SYNCH_MUTEX_T, ace_mon, this->lock_, -1);
  if (this->stream_head_ != 0
      && this->stream_tail_ != 0)
    {
      // Don't bother checking return value here.
      this->unlink_i ();
      int result = 0;
      // Remove and cleanup all the intermediate modules.
      while (this->stream_head_->next () != this->stream_tail_)
        if (this->pop (flags) == -1)
          result = -1;
      // Clean up the head and tail of the stream.
      if (this->stream_head_->close (flags) == -1)
        result = -1;
      if (this->stream_tail_->close (flags) == -1)
        result = -1;
      // Cleanup the memory.
      delete this->stream_head_;
      delete this->stream_tail_;
      this->stream_head_ = 0;
      this->stream_tail_ = 0;
      // Tell all threads waiting on the close that we are done.
      this->final_close_.broadcast ();
      return result;
    }
  return 0;
}
// Send a control message <cmd> down the stream and wait for the reply.
// Builds an MB_IOCTL control block <cb> whose continuation is a data
// block <db> wrapping the user-supplied argument <a>, puts it to the
// head writer, reads the reply from the head reader, and returns the
// rval embedded in the reply (or -1 on failure).
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::control (ACE_IO_Cntl_Msg::ACE_IO_Cntl_Cmds cmd,
                                    void *a)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::control");
  ACE_IO_Cntl_Msg ioc (cmd);
  ACE_Message_Block *db;
  // Try to create a data block that contains the user-supplied data.
  ACE_NEW_RETURN (db,
                  ACE_Message_Block (sizeof (int),
                                     ACE_Message_Block::MB_IOCTL,
                                     0,
                                     (char *) a),
                  -1);
  // Try to create a control block <cb> that contains the control
  // field and a pointer to the data block <db> in <cb>'s continuation
  // field.
  ACE_Message_Block *cb = 0;
  ACE_NEW_RETURN (cb,
                  ACE_Message_Block (sizeof ioc,
                                     ACE_Message_Block::MB_IOCTL,
                                     db,
                                     (char *) &ioc),
                  -1);
  // @@ Michael: The old semantic assumed that cb returns == 0
  //             if no memory was available. We will now return immediately
  //             without release (errno is set to ENOMEM by the macro).
  // If we can't allocate <cb> then we need to delete db and return
  // -1.
  if (cb == 0)
    {
      db->release ();
      errno = ENOMEM;
      return -1;
    }
  int result;
  if (this->stream_head_->writer ()->put (cb) == -1)
    result = -1;
  else if (this->stream_head_->reader ()->getq (cb) == -1)
    result = -1;
  else
    // Extract the reply's return value from the control message.
    result = ((ACE_IO_Cntl_Msg *) cb->rd_ptr ())->rval ();
  // This will also release db if it's reference count == 0.
  cb->release ();
  return result;
}
// Link two streams together at their bottom-most Modules (i.e., the
// one just above the Stream tail). Note that all of this is premised
// on the fact that the Stream head and Stream tail are non-NULL...
// This must be called with locks held.
// Implementation of link(): cross-connect this stream and <us> at
// their bottom-most modules (the ones just above each stream tail).
// Must be called with this->lock_ held.  Returns -1 if either stream
// has no head.
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::link_i (ACE_Stream<ACE_SYNCH_USE> &us)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::link_i");
  this->linked_us_ = &us;
  // Make sure the other side is also linked to us!
  us.linked_us_ = this;
  ACE_Module<ACE_SYNCH_USE> *my_tail = this->stream_head_;
  if (my_tail == 0)
    return -1;
  // Locate the module just above our Stream tail.
  while (my_tail->next () != this->stream_tail_)
    my_tail = my_tail->next ();
  ACE_Module<ACE_SYNCH_USE> *other_tail = us.stream_head_;
  if (other_tail == 0)
    return -1;
  // Locate the module just above the other Stream's tail.
  while (other_tail->next () != us.stream_tail_)
    other_tail = other_tail->next ();
  // Reattach the pointers so that the two streams are linked!
  // Downstream messages in one stream now flow into the other
  // stream's upstream (reader) side.
  my_tail->writer ()->next (other_tail->reader ());
  other_tail->writer ()->next (my_tail->reader ());
  return 0;
}
// Public entry point for linking two streams: acquires this->lock_
// and delegates to link_i().
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::link (ACE_Stream<ACE_SYNCH_USE> &us)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::link");
  ACE_GUARD_RETURN (ACE_SYNCH_MUTEX_T, ace_mon, this->lock_, -1);
  return this->link_i (us);
}
// Must be called with locks held...
// Implementation of unlink(): undo link_i() by restoring both streams'
// bottom writer pointers to point at their own tails.  Must be called
// with locks held.  Returns -1 when the streams were not linked.
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::unlink_i (void)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::unlink_i");
  // Only try to unlink if we are in fact still linked!
  if (this->linked_us_ != 0)
    {
      ACE_Module<ACE_SYNCH_USE> *my_tail = this->stream_head_;
      // Only relink if we still exist!
      if (my_tail)
        {
          // Find the module that's just before our stream tail.
          while (my_tail->next () != this->stream_tail_)
            my_tail = my_tail->next ();
          // Restore the writer's next() link to our tail.
          my_tail->writer ()->next (this->stream_tail_->writer ());
        }
      ACE_Module<ACE_SYNCH_USE> *other_tail =
        this->linked_us_->stream_head_;
      // Only fiddle with the other side if it in fact still remains.
      if (other_tail != 0)
        {
          while (other_tail->next () != this->linked_us_->stream_tail_)
            other_tail = other_tail->next ();
          other_tail->writer ()->next (this->linked_us_->stream_tail_->writer ());
        }
      // Make sure the other side is also aware that it's been unlinked!
      this->linked_us_->linked_us_ = 0;
      this->linked_us_ = 0;
      return 0;
    }
  else
    return -1;
}
// Public entry point for unlinking: acquires this->lock_ and
// delegates to unlink_i().
template <ACE_SYNCH_DECL> int
ACE_Stream<ACE_SYNCH_USE>::unlink (void)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::unlink");
  ACE_GUARD_RETURN (ACE_SYNCH_MUTEX_T, ace_mon, this->lock_, -1);
  return this->unlink_i ();
}
// Constructor: open the stream immediately with the supplied argument
// and optional head/tail modules.  Failures can only be reported via
// the error log since constructors can't return a status.
//
// Fix: the original error path unconditionally dereferenced
// head->name()/tail->name(), which crashes when the caller relied on
// the default (0) arguments and open() failed.  Guard both pointers.
template <ACE_SYNCH_DECL>
ACE_Stream<ACE_SYNCH_USE>::ACE_Stream (void * a,
                                       ACE_Module<ACE_SYNCH_USE> *head,
                                       ACE_Module<ACE_SYNCH_USE> *tail)
  : linked_us_ (0),
    final_close_ (lock_)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::ACE_Stream");
  if (this->open (a, head, tail) == -1)
    ACE_ERROR ((LM_ERROR,
                ACE_TEXT ("ACE_Stream<ACE_SYNCH_USE>::open (%s, %s)\n"),
                head != 0 ? head->name () : ACE_TEXT ("(nil)"),
                tail != 0 ? tail->name () : ACE_TEXT ("(nil)")));
}
// Destructor: close the stream (with default flags) if it hasn't
// already been closed explicitly.
template <ACE_SYNCH_DECL>
ACE_Stream<ACE_SYNCH_USE>::~ACE_Stream (void)
{
  ACE_TRACE ("ACE_Stream<ACE_SYNCH_USE>::~ACE_Stream");
  if (this->stream_head_ != 0)
    this->close ();
}
// Iterator constructor: start the traversal at <sr>'s head module.
template <ACE_SYNCH_DECL>
ACE_Stream_Iterator<ACE_SYNCH_USE>::ACE_Stream_Iterator (const ACE_Stream<ACE_SYNCH_USE> &sr)
  : next_ (sr.stream_head_)
{
  ACE_TRACE ("ACE_Stream_Iterator<ACE_SYNCH_USE>::ACE_Stream_Iterator");
}
ACE_END_VERSIONED_NAMESPACE_DECL
#endif /* ACE_STREAM_CPP */
| gpl-2.0 |
Clouded/linux-rt-rpi2 | arch/powerpc/platforms/powermac/setup.c | 314 | 16058 | /*
* Powermac setup and early boot code plus other random bits.
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Adapted for Power Macintosh by Paul Mackerras
* Copyright (C) 1996 Paul Mackerras (paulus@samba.org)
*
* Derived from "arch/alpha/kernel/setup.c"
* Copyright (C) 1995 Linus Torvalds
*
* Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
/*
* bootup setup stuff..
*/
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/initrd.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/adb.h>
#include <linux/cuda.h>
#include <linux/pmu.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/bitops.h>
#include <linux/suspend.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/memblock.h>
#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/ohare.h>
#include <asm/mediabay.h>
#include <asm/machdep.h>
#include <asm/dma.h>
#include <asm/cputable.h>
#include <asm/btext.h>
#include <asm/pmac_feature.h>
#include <asm/time.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/smu.h>
#include <asm/pmc.h>
#include <asm/udbg.h>
#include "pmac.h"
#undef SHOW_GATWICK_IRQS
int ppc_override_l2cr = 0;
int ppc_override_l2cr_value;
int has_l2cache = 0;
int pmac_newworld;
static int current_root_goodness = -1;
extern struct machdep_calls pmac_md;
#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
#ifdef CONFIG_PPC64
int sccdbg;
#endif
sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
EXPORT_SYMBOL(sys_ctrler);
#ifdef CONFIG_PMAC_SMU
unsigned long smu_cmdbuf_abs;
EXPORT_SYMBOL(smu_cmdbuf_abs);
#endif
/*
 * /proc/cpuinfo callback: emit PowerMac-specific machine information -
 * model and "compatible" list from the device tree, the motherboard
 * model/flags parsed by the pmac feature code, L2 cache details, and
 * whether this is a NewWorld or OldWorld machine.
 */
static void pmac_show_cpuinfo(struct seq_file *m)
{
	struct device_node *np;
	const char *pp;
	int plen;
	int mbmodel;
	unsigned int mbflags;
	char* mbname;
	mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
				    PMAC_MB_INFO_MODEL, 0);
	mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL,
				    PMAC_MB_INFO_FLAGS, 0);
	if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME,
			      (long) &mbname) != 0)
		mbname = "Unknown";
	/* find motherboard type */
	seq_printf(m, "machine\t\t: ");
	np = of_find_node_by_path("/");
	if (np != NULL) {
		pp = of_get_property(np, "model", NULL);
		if (pp != NULL)
			seq_printf(m, "%s\n", pp);
		else
			seq_printf(m, "PowerMac\n");
		pp = of_get_property(np, "compatible", &plen);
		if (pp != NULL) {
			seq_printf(m, "motherboard\t:");
			/* "compatible" is a list of NUL-separated strings;
			 * walk it using each string's length. */
			while (plen > 0) {
				int l = strlen(pp) + 1;
				seq_printf(m, " %s", pp);
				plen -= l;
				pp += l;
			}
			seq_printf(m, "\n");
		}
		of_node_put(np);
	} else
		seq_printf(m, "PowerMac\n");
	/* print parsed model */
	seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname);
	seq_printf(m, "pmac flags\t: %08x\n", mbflags);
	/* find l2 cache info */
	np = of_find_node_by_name(NULL, "l2-cache");
	if (np == NULL)
		np = of_find_node_by_type(NULL, "cache");
	if (np != NULL) {
		const unsigned int *ic =
			of_get_property(np, "i-cache-size", NULL);
		const unsigned int *dc =
			of_get_property(np, "d-cache-size", NULL);
		seq_printf(m, "L2 cache\t:");
		/* side effect: remember the cache for ohare_init()'s message */
		has_l2cache = 1;
		if (of_get_property(np, "cache-unified", NULL) != 0 && dc) {
			seq_printf(m, " %dK unified", *dc / 1024);
		} else {
			if (ic)
				seq_printf(m, " %dK instruction", *ic / 1024);
			if (dc)
				seq_printf(m, "%s %dK data",
					   (ic? " +": ""), *dc / 1024);
		}
		pp = of_get_property(np, "ram-type", NULL);
		if (pp)
			seq_printf(m, " %s", pp);
		seq_printf(m, "\n");
		of_node_put(np);
	}
	/* Indicate newworld/oldworld */
	seq_printf(m, "pmac-generation\t: %s\n",
		   pmac_newworld ? "NewWorld" : "OldWorld");
}
#ifndef CONFIG_ADB_CUDA
/*
 * Fallback stub used when CONFIG_ADB_CUDA is not set: detect a
 * via-cuda node in the device tree and warn the user that the kernel
 * lacks the matching driver.  Always returns 0 (no controller bound).
 */
int find_via_cuda(void)
{
	struct device_node *dn = of_find_node_by_name(NULL, "via-cuda");
	if (!dn)
		return 0;
	of_node_put(dn);
	printk("WARNING ! Your machine is CUDA-based but your kernel\n");
	printk("          wasn't compiled with CONFIG_ADB_CUDA option !\n");
	return 0;
}
#endif
#ifndef CONFIG_ADB_PMU
/*
 * Fallback stub used when CONFIG_ADB_PMU is not set: detect a via-pmu
 * node in the device tree and warn that the matching driver is missing.
 * Always returns 0 (no controller bound).
 */
int find_via_pmu(void)
{
	struct device_node *dn = of_find_node_by_name(NULL, "via-pmu");
	if (!dn)
		return 0;
	of_node_put(dn);
	printk("WARNING ! Your machine is PMU-based but your kernel\n");
	printk("          wasn't compiled with CONFIG_ADB_PMU option !\n");
	return 0;
}
#endif
#ifndef CONFIG_PMAC_SMU
/* Fallback stub used when CONFIG_PMAC_SMU is not set. */
int smu_init(void)
{
	/* should check and warn if SMU is present */
	return 0;
}
#endif
#ifdef CONFIG_PPC32
static volatile u32 *sysctrl_regs;
/*
 * Map the system-control register area and, on machines with an ohare
 * I/O controller (which implies a PSX memory controller), enable the
 * L2 cache via the PSX control registers.
 *
 * Fix: ioremap() can fail and the original code dereferenced the
 * result unconditionally; bail out with a warning instead of oopsing.
 */
static void __init ohare_init(void)
{
	struct device_node *dn;

	/* this area has the CPU identification register
	   and some registers used by smp boards */
	sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000);
	if (sysctrl_regs == NULL) {
		printk(KERN_WARNING
		       "ohare_init: unable to map sysctrl registers\n");
		return;
	}

	/*
	 * Turn on the L2 cache.
	 * We assume that we have a PSX memory controller iff
	 * we have an ohare I/O controller.
	 */
	dn = of_find_node_by_name(NULL, "ohare");
	if (dn) {
		of_node_put(dn);
		/* PSX revision >= 3 supports the L2 enable bits */
		if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) {
			if (sysctrl_regs[4] & 0x10)
				sysctrl_regs[4] |= 0x04000020;
			else
				sysctrl_regs[4] |= 0x04000000;
			if(has_l2cache)
				printk(KERN_INFO "Level 2 cache enabled\n");
		}
	}
}
/*
 * If the firmware provides an "l2cr-value" property on the cpu node
 * and the CPU has an L2CR register, override the backside L2 cache
 * configuration with that value (clearing L2CR first).
 */
static void __init l2cr_init(void)
{
	/* Checks "l2cr-value" property in the registry */
	if (cpu_has_feature(CPU_FTR_L2CR)) {
		struct device_node *np = of_find_node_by_name(NULL, "cpus");
		if (np == 0)
			np = of_find_node_by_type(NULL, "cpu");
		if (np != 0) {
			const unsigned int *l2cr =
				of_get_property(np, "l2cr-value", NULL);
			if (l2cr != 0) {
				/* Remember the override for /proc reporting
				 * and apply it: disable, then program. */
				ppc_override_l2cr = 1;
				ppc_override_l2cr_value = *l2cr;
				_set_L2CR(0);
				_set_L2CR(ppc_override_l2cr_value);
			}
			of_node_put(np);
		}
	}
	if (ppc_override_l2cr)
		printk(KERN_INFO "L2CR overridden (0x%x), "
		       "backside cache is %s\n",
		       ppc_override_l2cr_value,
		       (ppc_override_l2cr_value & 0x80000000)
				? "enabled" : "disabled");
}
#endif
/*
 * Main PowerMac arch setup: estimate loops_per_jiffy from the CPU
 * clock, detect NewWorld vs OldWorld, probe PCI, set up cache/system
 * controllers, find the ADB/PMU/SMU system controller, initialize
 * NVRAM and pick a default root device.
 */
static void __init pmac_setup_arch(void)
{
	struct device_node *cpu, *ic;
	const int *fp;
	unsigned long pvr;
	pvr = PVR_VER(mfspr(SPRN_PVR));
	/* Set loops_per_jiffy to a half-way reasonable value,
	   for use until calibrate_delay gets called. */
	loops_per_jiffy = 50000000 / HZ;
	cpu = of_find_node_by_type(NULL, "cpu");
	if (cpu != NULL) {
		fp = of_get_property(cpu, "clock-frequency", NULL);
		if (fp != NULL) {
			/* Divisor depends on how many clocks one loop
			 * iteration costs on this CPU family. */
			if (pvr >= 0x30 && pvr < 0x80)
				/* PPC970 etc. */
				loops_per_jiffy = *fp / (3 * HZ);
			else if (pvr == 4 || pvr >= 8)
				/* 604, G3, G4 etc. */
				loops_per_jiffy = *fp / HZ;
			else
				/* 601, 603, etc. */
				loops_per_jiffy = *fp / (2 * HZ);
		}
		of_node_put(cpu);
	}
	/* See if newworld or oldworld */
	ic = of_find_node_with_property(NULL, "interrupt-controller");
	if (ic) {
		pmac_newworld = 1;
		of_node_put(ic);
	}
	/* Lookup PCI hosts */
	pmac_pci_init();
#ifdef CONFIG_PPC32
	ohare_init();
	l2cr_init();
#endif /* CONFIG_PPC32 */
	/* Probe for the system controller; only one of these will match. */
	find_via_cuda();
	find_via_pmu();
	smu_init();
#if defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) || \
    defined(CONFIG_PPC64)
	pmac_nvram_init();
#endif
#ifdef CONFIG_PPC32
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		ROOT_DEV = Root_RAM0;
	else
#endif
		ROOT_DEV = DEFAULT_ROOT_DEVICE;
#endif
#ifdef CONFIG_ADB
	/* "adb_sync" on the command line forces synchronous ADB probing. */
	if (strstr(boot_command_line, "adb_sync")) {
		extern int __adb_probe_sync;
		__adb_probe_sync = 1;
	}
#endif /* CONFIG_ADB */
}
#ifdef CONFIG_SCSI
/* Kept as an empty exported hook for SCSI drivers that still call it. */
void note_scsi_host(struct device_node *node, void *host)
{
}
EXPORT_SYMBOL(note_scsi_host);
#endif
static int initializing = 1;
/* Late initcall: mark boot-time initialization as finished so that
 * note_bootable_part() stops adjusting ROOT_DEV. */
static int pmac_late_init(void)
{
	initializing = 0;
	return 0;
}
machine_late_initcall(powermac, pmac_late_init);
/*
* This is __init_refok because we check for "initializing" before
* touching any of the __init sensitive things and "initializing"
* will be false after __init time. This can't be __init because it
* can be called whenever a disk is first accessed.
*/
/*
 * Called when a disk partition is first seen during boot; picks the
 * "best" bootable partition as the default root device unless the user
 * already specified root= on the command line.  Higher <goodness> wins.
 */
void __init_refok note_bootable_part(dev_t dev, int part, int goodness)
{
	char *p;
	/* Only act while the kernel is still initializing (see
	 * pmac_late_init). */
	if (!initializing)
		return;
	if ((goodness <= current_root_goodness) &&
	    ROOT_DEV != DEFAULT_ROOT_DEVICE)
		return;
	/* Respect an explicit root= option (must be at the start of the
	 * command line or preceded by a space). */
	p = strstr(boot_command_line, "root=");
	if (p != NULL && (p == boot_command_line || p[-1] == ' '))
		return;
	ROOT_DEV = dev + part;
	current_root_goodness = goodness;
}
#ifdef CONFIG_ADB_CUDA
/* Ask the CUDA controller to reset the system; poll forever since the
 * machine is expected to reboot underneath us. */
static void cuda_restart(void)
{
	struct adb_request req;
	cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM);
	for (;;)
		cuda_poll();
}
/* Ask the CUDA controller to power the machine down; poll forever
 * since power is expected to drop underneath us. */
static void cuda_shutdown(void)
{
	struct adb_request req;
	cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN);
	for (;;)
		cuda_poll();
}
#else
#define cuda_restart()
#define cuda_shutdown()
#endif
#ifndef CONFIG_ADB_PMU
#define pmu_restart()
#define pmu_shutdown()
#endif
#ifndef CONFIG_PMAC_SMU
#define smu_restart()
#define smu_shutdown()
#endif
/*
 * Machine restart hook: dispatch to whichever system controller
 * (CUDA, PMU or SMU) this machine was detected to use.  Unknown
 * controllers are silently ignored.  The per-controller helpers are
 * no-op macros when the matching driver is not configured.
 */
static void pmac_restart(char *cmd)
{
	if (sys_ctrler == SYS_CTRLER_CUDA)
		cuda_restart();
	else if (sys_ctrler == SYS_CTRLER_PMU)
		pmu_restart();
	else if (sys_ctrler == SYS_CTRLER_SMU)
		smu_restart();
}
/*
 * Machine power-off hook: dispatch to whichever system controller
 * (CUDA, PMU or SMU) this machine was detected to use.  Unknown
 * controllers are silently ignored.
 */
static void pmac_power_off(void)
{
	if (sys_ctrler == SYS_CTRLER_CUDA)
		cuda_shutdown();
	else if (sys_ctrler == SYS_CTRLER_PMU)
		pmu_shutdown();
	else if (sys_ctrler == SYS_CTRLER_SMU)
		smu_shutdown();
}
/* Halt hook: PowerMacs have no distinct halt state, so power off. */
static void
pmac_halt(void)
{
	pmac_power_off();
}
/*
* Early initialization.
*/
/*
 * Early initialization: set up early debug consoles, probe the
 * motherboard chipset, initialize the DART iommu (ppc64) and do the
 * early SMP setup.  Runs before most of the kernel is up.
 */
static void __init pmac_init_early(void)
{
	/* Enable early btext debug if requested */
	if (strstr(boot_command_line, "btextdbg")) {
		udbg_adb_init_early();
		register_early_udbg_console();
	}
	/* Probe motherboard chipset */
	pmac_feature_init();
	/* Initialize debug stuff */
	udbg_scc_init(!!strstr(boot_command_line, "sccdbg"));
	udbg_adb_init(!!strstr(boot_command_line, "btextdbg"));
#ifdef CONFIG_PPC64
	iommu_init_early_dart();
#endif
	/* SMP Init has to be done early as we need to patch up
	 * cpu_possible_mask before interrupt stacks are allocated
	 * or kaboom...
	 */
#ifdef CONFIG_SMP
	pmac_setup_smp();
#endif
}
/*
 * Device initcall: create platform devices for the on-board framebuffer
 * chips (valkyrie, platinum), the SMU and the fan/thermal controller
 * (fcu), based on the device tree.  Bails out on CHRP machines, where
 * this code may also be compiled in.
 */
static int __init pmac_declare_of_platform_devices(void)
{
	struct device_node *np;
	if (machine_is(chrp))
		return -1;
	np = of_find_node_by_name(NULL, "valkyrie");
	if (np) {
		of_platform_device_create(np, "valkyrie", NULL);
		of_node_put(np);
	}
	np = of_find_node_by_name(NULL, "platinum");
	if (np) {
		of_platform_device_create(np, "platinum", NULL);
		of_node_put(np);
	}
	np = of_find_node_by_type(NULL, "smu");
	if (np) {
		of_platform_device_create(np, "smu", NULL);
		of_node_put(np);
	}
	np = of_find_node_by_type(NULL, "fcu");
	if (np == NULL) {
		/* Some machines have strangely broken device-tree */
		np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
	}
	if (np) {
		of_platform_device_create(np, "temperature", NULL);
		of_node_put(np);
	}
	return 0;
}
machine_device_initcall(powermac, pmac_declare_of_platform_devices);
#ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE
/*
* This is called very early, as part of console_init() (typically just after
* time_init()). This function is respondible for trying to find a good
* default console on serial ports. It tries to match the open firmware
* default output with one of the available serial console drivers.
*/
/*
 * This is called very early, as part of console_init() (typically just after
 * time_init()).  This function is respondible for trying to find a good
 * default console on serial ports.  It tries to match the open firmware
 * default output with one of the available serial console drivers.
 *
 * Returns 0/positive from add_preferred_console() on success, -EBUSY if
 * the user already specified console=, or -ENODEV when no suitable OF
 * stdout is found.
 */
static int __init check_pmac_serial_console(void)
{
	struct device_node *prom_stdout = NULL;
	int offset = 0;
	const char *name;
#ifdef CONFIG_SERIAL_PMACZILOG_TTYS
	char *devname = "ttyS";
#else
	char *devname = "ttyPZ";
#endif
	pr_debug(" -> check_pmac_serial_console()\n");
	/* The user has requested a console so this is already set up. */
	if (strstr(boot_command_line, "console=")) {
		pr_debug(" console was specified !\n");
		return -EBUSY;
	}
	if (!of_chosen) {
		pr_debug(" of_chosen is NULL !\n");
		return -ENODEV;
	}
	/* We are getting a weird phandle from OF ... */
	/* ... So use the full path instead */
	name = of_get_property(of_chosen, "linux,stdout-path", NULL);
	if (name == NULL) {
		pr_debug(" no linux,stdout-path !\n");
		return -ENODEV;
	}
	prom_stdout = of_find_node_by_path(name);
	if (!prom_stdout) {
		pr_debug(" can't find stdout package %s !\n", name);
		return -ENODEV;
	}
	pr_debug("stdout is %s\n", prom_stdout->full_name);
	name = of_get_property(prom_stdout, "name", NULL);
	if (!name) {
		pr_debug(" stdout package has no name !\n");
		goto not_found;
	}
	/* Map the OF SCC channel name to a tty index. */
	if (strcmp(name, "ch-a") == 0)
		offset = 0;
	else if (strcmp(name, "ch-b") == 0)
		offset = 1;
	else
		goto not_found;
	of_node_put(prom_stdout);
	pr_debug("Found serial console at %s%d\n", devname, offset);
	return add_preferred_console(devname, offset, NULL);
 not_found:
	pr_debug("No preferred console found !\n");
	of_node_put(prom_stdout);
	return -ENODEV;
}
console_initcall(check_pmac_serial_console);
#endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
/*
 * Called very early, MMU is off, device-tree isn't unflattened.
 * Identify PowerMac-compatible machines and do the earliest allocations
 * that must happen before hash-table setup (DART table on ppc64, SMU
 * command buffer below 2GB).  Returns 1 when this platform matches.
 */
static int __init pmac_probe(void)
{
	unsigned long root = of_get_flat_dt_root();
	if (!of_flat_dt_is_compatible(root, "Power Macintosh") &&
	    !of_flat_dt_is_compatible(root, "MacRISC"))
		return 0;
#ifdef CONFIG_PPC64
	/*
	 * On U3, the DART (iommu) must be allocated now since it
	 * has an impact on htab_initialize (due to the large page it
	 * occupies having to be broken up so the DART itself is not
	 * part of the cacheable linar mapping
	 */
	alloc_dart_table();
	hpte_init_native();
#endif
#ifdef CONFIG_PPC32
	/* isa_io_base gets set in pmac_pci_init */
	ISA_DMA_THRESHOLD = ~0L;
	DMA_MODE_READ = 1;
	DMA_MODE_WRITE = 2;
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PMAC_SMU
	/*
	 * SMU based G5s need some memory below 2Gb, at least the current
	 * driver needs that. We have to allocate it now. We allocate 4k
	 * (1 small page) for now.
	 */
	smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL);
#endif /* CONFIG_PMAC_SMU */
	return 1;
}
#ifdef CONFIG_PPC64
/* Move that to pci.c */
/* Move that to pci.c */
/*
 * Decide how a PCI bus should be probed: host bridges whose children
 * aren't fully described in the device tree need normal config-space
 * probing; everything else is probed from the device tree.
 */
static int pmac_pci_probe_mode(struct pci_bus *bus)
{
	struct device_node *node = pci_bus_to_OF_node(bus);
	/* We need to use normal PCI probing for the AGP bus,
	 * since the device for the AGP bridge isn't in the tree.
	 * Same for the PCIe host on U4 and the HT host bridge.
	 */
	if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") ||
				  of_device_is_compatible(node, "u4-pcie") ||
				  of_device_is_compatible(node, "u3-ht")))
		return PCI_PROBE_NORMAL;
	return PCI_PROBE_DEVTREE;
}
#endif /* CONFIG_PPC64 */
/* Machine description: wires the PowerMac platform callbacks into the
 * generic powerpc machine framework. */
define_machine(powermac) {
	.name			= "PowerMac",
	.probe			= pmac_probe,
	.setup_arch		= pmac_setup_arch,
	.init_early		= pmac_init_early,
	.show_cpuinfo		= pmac_show_cpuinfo,
	.init_IRQ		= pmac_pic_init,
	.get_irq		= NULL,	/* changed later */
	.pci_irq_fixup		= pmac_pci_irq_fixup,
	.restart		= pmac_restart,
	.power_off		= pmac_power_off,
	.halt			= pmac_halt,
	.time_init		= pmac_time_init,
	.get_boot_time		= pmac_get_boot_time,
	.set_rtc_time		= pmac_set_rtc_time,
	.get_rtc_time		= pmac_get_rtc_time,
	.calibrate_decr		= pmac_calibrate_decr,
	.feature_call		= pmac_do_feature_call,
	.progress		= udbg_progress,
#ifdef CONFIG_PPC64
	.pci_probe_mode		= pmac_pci_probe_mode,
	.power_save		= power4_idle,
	.enable_pmcs		= power4_enable_pmcs,
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
	.pcibios_enable_device_hook = pmac_pci_enable_device_hook,
	.pcibios_after_init	= pmac_pcibios_after_init,
	.phys_mem_access_prot	= pci_phys_mem_access_prot,
#endif
};
| gpl-2.0 |
cattleprod/XCeLL-X69 | drivers/gpio/cs5535-gpio.c | 314 | 9787 | /*
* AMD CS5535/CS5536 GPIO driver
* Copyright (C) 2006 Advanced Micro Devices, Inc.
* Copyright (C) 2007-2009 Andres Salomon <dilinger@collabora.co.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/cs5535.h>
#define DRV_NAME "cs5535-gpio"
#define GPIO_BAR 1
/*
* Some GPIO pins
* 31-29,23 : reserved (always mask out)
* 28 : Power Button
* 26 : PME#
* 22-16 : LPC
* 14,15 : SMBus
* 9,8 : UART1
* 7 : PCI INTB
* 3,4 : UART2/DDC
* 2 : IDE_IRQ0
* 1 : AC_BEEP
* 0 : PCI INTA
*
* If a mask was not specified, allow all except
* reserved and Power Button
*/
#define GPIO_DEFAULT_MASK 0x0F7FFFFF
static ulong mask = GPIO_DEFAULT_MASK;
module_param_named(mask, mask, ulong, 0444);
MODULE_PARM_DESC(mask, "GPIO channel mask.");
static struct cs5535_gpio_chip {
struct gpio_chip chip;
resource_size_t base;
struct pci_dev *pdev;
spinlock_t lock;
} cs5535_gpio_chip;
/*
* The CS5535/CS5536 GPIOs support a number of extra features not defined
* by the gpio_chip API, so these are exported. For a full list of the
* registers, see include/linux/cs5535.h.
*/
/*
 * Write <val> to a high-bank GPIO register at <reg>, applying the
 * CS5536 erratum #36 read-modify-write workaround so that bits for
 * non-selected GPIOs are not clobbered after suspend.
 */
static void errata_outl(struct cs5535_gpio_chip *chip, u32 val,
		unsigned int reg)
{
	unsigned long addr = chip->base + 0x80 + reg;
	/*
	 * According to the CS5536 errata (#36), after suspend
	 * a write to the high bank GPIO register will clear all
	 * non-selected bits; the recommended workaround is a
	 * read-modify-write operation.
	 *
	 * Don't apply this errata to the edge status GPIOs, as writing
	 * to their lower bits will clear them.
	 */
	if (reg != GPIO_POSITIVE_EDGE_STS && reg != GPIO_NEGATIVE_EDGE_STS) {
		if (val & 0xffff)
			val |= (inl(addr) & 0xffff); /* ignore the high bits */
		else
			val |= (inl(addr) ^ (val >> 16));
	}
	outl(val, addr);
}
/* Set the bit for GPIO <offset> in register <reg>.  Offsets 0-15 live
 * in the low bank; 16-31 in the high bank, which needs the erratum
 * workaround.  Caller must hold chip->lock. */
static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset,
		unsigned int reg)
{
	if (offset < 16)
		/* low bank register */
		outl(1 << offset, chip->base + reg);
	else
		/* high bank register */
		errata_outl(chip, 1 << (offset - 16), reg);
}
/* Exported: set the bit for GPIO <offset> in register <reg>, taking
 * the chip lock around the hardware access. */
void cs5535_gpio_set(unsigned offset, unsigned int reg)
{
	struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
	unsigned long flags;
	spin_lock_irqsave(&chip->lock, flags);
	__cs5535_gpio_set(chip, offset, reg);
	spin_unlock_irqrestore(&chip->lock, flags);
}
/* Clear the bit for GPIO <offset> in register <reg>.  The hardware
 * uses the upper 16 bits of each bank register as "clear" bits, hence
 * the +16 shift for the low bank.  Caller must hold chip->lock. */
static void __cs5535_gpio_clear(struct cs5535_gpio_chip *chip, unsigned offset,
		unsigned int reg)
{
	if (offset < 16)
		/* low bank register */
		outl(1 << (offset + 16), chip->base + reg);
	else
		/* high bank register */
		errata_outl(chip, 1 << offset, reg);
}
/* Exported: clear the bit for GPIO <offset> in register <reg>, taking
 * the chip lock around the hardware access. */
void cs5535_gpio_clear(unsigned offset, unsigned int reg)
{
	struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
	unsigned long flags;
	spin_lock_irqsave(&chip->lock, flags);
	__cs5535_gpio_clear(chip, offset, reg);
	spin_unlock_irqrestore(&chip->lock, flags);
}
/* Exported: return 1 if GPIO <offset>'s bit is set in register <reg>,
 * 0 otherwise.  Picks the low or high bank and adjusts the bit index
 * for the high bank. */
int cs5535_gpio_isset(unsigned offset, unsigned int reg)
{
	struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
	unsigned long flags;
	long val;
	spin_lock_irqsave(&chip->lock, flags);
	if (offset < 16)
		/* low bank register */
		val = inl(chip->base + reg);
	else {
		/* high bank register */
		val = inl(chip->base + 0x80 + reg);
		offset -= 16;
	}
	spin_unlock_irqrestore(&chip->lock, flags);
	return (val & (1 << offset)) ? 1 : 0;
}
/*
* Generic gpio_chip API support.
*/
/*
 * gpio_chip .request callback: reject pins excluded by the module's
 * "mask" parameter, and detach the pin from its auxiliary input/output
 * functions so it behaves as a plain GPIO.
 */
static int chip_gpio_request(struct gpio_chip *c, unsigned offset)
{
	/* relies on chip being the first member of cs5535_gpio_chip */
	struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
	unsigned long flags;
	spin_lock_irqsave(&chip->lock, flags);
	/* check if this pin is available */
	if ((mask & (1 << offset)) == 0) {
		dev_info(&chip->pdev->dev,
			"pin %u is not available (check mask)\n", offset);
		spin_unlock_irqrestore(&chip->lock, flags);
		return -EINVAL;
	}
	/* disable output aux 1 & 2 on this pin */
	__cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_AUX1);
	__cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_AUX2);
	/* disable input aux 1 on this pin */
	__cs5535_gpio_clear(chip, offset, GPIO_INPUT_AUX1);
	spin_unlock_irqrestore(&chip->lock, flags);
	return 0;
}
/* gpio_chip .get callback: read the current level of GPIO <offset>. */
static int chip_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	return cs5535_gpio_isset(offset, GPIO_READ_BACK);
}
static void chip_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
{
if (val)
cs5535_gpio_set(offset, GPIO_OUTPUT_VAL);
else
cs5535_gpio_clear(offset, GPIO_OUTPUT_VAL);
}
/* gpio_chip .direction_input callback: enable the input buffer and
 * disable the output driver for GPIO <offset>. */
static int chip_direction_input(struct gpio_chip *c, unsigned offset)
{
	struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
	unsigned long flags;
	spin_lock_irqsave(&chip->lock, flags);
	__cs5535_gpio_set(chip, offset, GPIO_INPUT_ENABLE);
	__cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_ENABLE);
	spin_unlock_irqrestore(&chip->lock, flags);
	return 0;
}
static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
{
struct cs5535_gpio_chip *chip = (struct cs5535_gpio_chip *) c;
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
__cs5535_gpio_set(chip, offset, GPIO_INPUT_ENABLE);
__cs5535_gpio_set(chip, offset, GPIO_OUTPUT_ENABLE);
if (val)
__cs5535_gpio_set(chip, offset, GPIO_OUTPUT_VAL);
else
__cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_VAL);
spin_unlock_irqrestore(&chip->lock, flags);
return 0;
}
/*
 * Pin names exposed through gpiolib.  NULL entries (23, 29-31) mark
 * pins that do not exist / are not usable on this chip.
 */
static const char * const cs5535_gpio_names[] = {
        "GPIO0", "GPIO1", "GPIO2", "GPIO3",
        "GPIO4", "GPIO5", "GPIO6", "GPIO7",
        "GPIO8", "GPIO9", "GPIO10", "GPIO11",
        "GPIO12", "GPIO13", "GPIO14", "GPIO15",
        "GPIO16", "GPIO17", "GPIO18", "GPIO19",
        "GPIO20", "GPIO21", "GPIO22", NULL,
        "GPIO24", "GPIO25", "GPIO26", "GPIO27",
        "GPIO28", NULL, NULL, NULL,
};

/*
 * Singleton driver state.  'base' and 'pdev' are filled in at probe
 * time; the embedded gpio_chip is registered with gpiolib.
 */
static struct cs5535_gpio_chip cs5535_gpio_chip = {
        .chip = {
                .owner = THIS_MODULE,
                .label = DRV_NAME,

                .base = 0,
                .ngpio = 32,
                .names = cs5535_gpio_names,
                .request = chip_gpio_request,

                .get = chip_gpio_get,
                .set = chip_gpio_set,

                .direction_input = chip_direction_input,
                .direction_output = chip_direction_output,
        },
};
/*
 * Probe one CS5535/CS5536 ISA bridge: enable I/O, claim the GPIO BAR,
 * record the I/O base, sanitize the pin mask, and register with gpiolib.
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources are released via the goto chain.
 */
static int __init cs5535_gpio_probe(struct pci_dev *pdev,
                const struct pci_device_id *pci_id)
{
        int err;
        ulong mask_orig = mask;

        /* There are two ways to get the GPIO base address; one is by
         * fetching it from MSR_LBAR_GPIO, the other is by reading the
         * PCI BAR info.  The latter method is easier (especially across
         * different architectures), so we'll stick with that for now.  If
         * it turns out to be unreliable in the face of crappy BIOSes, we
         * can always go back to using MSRs.. */

        err = pci_enable_device_io(pdev);
        if (err) {
                dev_err(&pdev->dev, "can't enable device IO\n");
                goto done;
        }

        err = pci_request_region(pdev, GPIO_BAR, DRV_NAME);
        if (err) {
                dev_err(&pdev->dev, "can't alloc PCI BAR #%d\n", GPIO_BAR);
                goto done;
        }

        /* set up the driver-specific struct */
        cs5535_gpio_chip.base = pci_resource_start(pdev, GPIO_BAR);
        cs5535_gpio_chip.pdev = pdev;
        spin_lock_init(&cs5535_gpio_chip.lock);

        dev_info(&pdev->dev, "allocated PCI BAR #%d: base 0x%llx\n", GPIO_BAR,
                        (unsigned long long) cs5535_gpio_chip.base);

        /* mask out reserved pins */
        mask &= 0x1F7FFFFF;

        /* do not allow pin 28, Power Button, as there's special handling
         * in the PMC needed. (note 12, p. 48) */
        mask &= ~(1 << 28);

        if (mask_orig != mask)
                dev_info(&pdev->dev, "mask changed from 0x%08lX to 0x%08lX\n",
                                mask_orig, mask);

        /* finally, register with the generic GPIO API */
        err = gpiochip_add(&cs5535_gpio_chip.chip);
        if (err)
                goto release_region;

        dev_info(&pdev->dev, DRV_NAME ": GPIO support successfully loaded.\n");
        return 0;

release_region:
        pci_release_region(pdev, GPIO_BAR);
done:
        return err;
}
/*
 * Teardown counterpart of the probe: unregister from gpiolib and
 * release the GPIO BAR.  (gpiochip_remove() returned an int in this
 * kernel generation; failure here is only logged, since there is no
 * sane recovery at module unload.)
 */
static void __exit cs5535_gpio_remove(struct pci_dev *pdev)
{
        int err;

        err = gpiochip_remove(&cs5535_gpio_chip.chip);
        if (err) {
                /* uhh? */
                dev_err(&pdev->dev, "unable to remove gpio_chip?\n");
        }
        pci_release_region(pdev, GPIO_BAR);
}
/* ISA-bridge IDs under which the GPIO block appears (CS5535 and CS5536). */
static struct pci_device_id cs5535_gpio_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
        { 0, },
};
MODULE_DEVICE_TABLE(pci, cs5535_gpio_pci_tbl);
/*
 * We can't use the standard PCI driver registration stuff here, since
 * that allows only one driver to bind to each PCI device (and we want
 * multiple drivers to be able to bind to the device).  Instead, manually
 * scan for the PCI device, request a single region, and keep track of the
 * devices that we're using.
 */

/*
 * Find the first matching southbridge and probe it.
 *
 * On successful probe the pci_dev reference taken by pci_get_device()
 * is deliberately kept (stored in cs5535_gpio_chip.pdev) and dropped
 * later by cs5535_gpio_free_pci(); on probe failure it is dropped here.
 * Returns 0 on success, -ENODEV if no device was found, or the probe
 * error.
 */
static int __init cs5535_gpio_scan_pci(void)
{
        struct pci_dev *pdev;
        int err = -ENODEV;
        int i;

        for (i = 0; i < ARRAY_SIZE(cs5535_gpio_pci_tbl); i++) {
                pdev = pci_get_device(cs5535_gpio_pci_tbl[i].vendor,
                                cs5535_gpio_pci_tbl[i].device, NULL);
                if (pdev) {
                        err = cs5535_gpio_probe(pdev, &cs5535_gpio_pci_tbl[i]);
                        if (err)
                                pci_dev_put(pdev);

                        /* we only support a single CS5535/6 southbridge */
                        break;
                }
        }

        return err;
}
/* Undo the probe done at init time and drop the held device reference. */
static void __exit cs5535_gpio_free_pci(void)
{
        cs5535_gpio_remove(cs5535_gpio_chip.pdev);
        pci_dev_put(cs5535_gpio_chip.pdev);
}

static int __init cs5535_gpio_init(void)
{
        return cs5535_gpio_scan_pci();
}

static void __exit cs5535_gpio_exit(void)
{
        cs5535_gpio_free_pci();
}

module_init(cs5535_gpio_init);
module_exit(cs5535_gpio_exit);

MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
embeddedarm/linux-2.6.34-ts471x | arch/x86/kernel/ds_selftest.c | 570 | 9393 | /*
* Debug Store support - selftest
*
*
* Copyright (C) 2009 Intel Corporation.
* Markus Metzger <markus.t.metzger@intel.com>, 2009
*/
#include "ds_selftest.h"
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <asm/ds.h>
#define BUFFER_SIZE 521	/* Intentionally chose an odd size. */
#define SMALL_BUFFER_SIZE 24	/* A single bts entry. */

/*
 * Per-run selftest state.  'suspend'/'resume' are indirected so the
 * same test body can exercise both the plain and the _noirq variants
 * of the BTS suspend/resume API.
 */
struct ds_selftest_bts_conf {
	struct bts_tracer *tracer;	/* tracer under test, or NULL on init failure */
	int error;			/* <0 on failure, 0 on success */
	int (*suspend)(struct bts_tracer *);
	int (*resume)(struct bts_tracer *);
};
/*
 * Sanity-check a BTS trace configuration: non-NULL, readable, and with
 * a buffer layout (begin/end/top, entry count/size) that is internally
 * consistent.  Returns 0 if OK, -1 otherwise (all findings are logged,
 * checking continues past the first failure where possible).
 */
static int ds_selftest_bts_consistency(const struct bts_trace *trace)
{
	int error = 0;

	if (!trace) {
		printk(KERN_CONT "failed to access trace...");
		/* Bail out. Other tests are pointless. */
		return -1;
	}

	if (!trace->read) {
		printk(KERN_CONT "bts read not available...");
		error = -1;
	}

	/* Do some sanity checks on the trace configuration. */
	if (!trace->ds.n) {
		printk(KERN_CONT "empty bts buffer...");
		error = -1;
	}
	if (!trace->ds.size) {
		printk(KERN_CONT "bad bts trace setup...");
		error = -1;
	}
	if (trace->ds.end !=
	    (char *)trace->ds.begin + (trace->ds.n * trace->ds.size)) {
		printk(KERN_CONT "bad bts buffer setup...");
		error = -1;
	}
	/*
	 * We allow top in [begin; end], since its not clear when the
	 * overflow adjustment happens: after the increment or before the
	 * write.
	 */
	if ((trace->ds.top < trace->ds.begin) ||
	    (trace->ds.end < trace->ds.top)) {
		printk(KERN_CONT "bts top out of bounds...");
		error = -1;
	}

	return error;
}
/*
 * Walk the trace buffer from @from to @to (exclusive), decoding each
 * entry via trace->read() and verifying that every entry is a
 * BTS_BRANCH record at a properly aligned index.
 *
 * Returns 0 on success, -1 (or the read error) on any malformed entry
 * or bad argument.
 */
static int ds_selftest_bts_read(struct bts_tracer *tracer,
				const struct bts_trace *trace,
				const void *from, const void *to)
{
	const unsigned char *at;

	/*
	 * Check a few things which do not belong to this test.
	 * They should be covered by other tests.
	 */
	if (!trace)
		return -1;

	if (!trace->read)
		return -1;

	if (to < from)
		return -1;

	if (from < trace->ds.begin)
		return -1;

	if (trace->ds.end < to)
		return -1;

	if (!trace->ds.size)
		return -1;

	/* Now to the test itself. */
	for (at = from; (void *)at < to; at += trace->ds.size) {
		struct bts_struct bts;
		unsigned long index;
		int error;

		/* Entries must start on a multiple of the entry size. */
		if (((void *)at - trace->ds.begin) % trace->ds.size) {
			printk(KERN_CONT
			       "read from non-integer index...");
			return -1;
		}
		index = ((void *)at - trace->ds.begin) / trace->ds.size;

		memset(&bts, 0, sizeof(bts));
		error = trace->read(tracer, at, &bts);
		if (error < 0) {
			printk(KERN_CONT
			       "error reading bts trace at [%lu] (0x%p)...",
			       index, at);
			return error;
		}

		switch (bts.qualifier) {
		case BTS_BRANCH:
			break;
		default:
			printk(KERN_CONT
			       "unexpected bts entry %llu at [%lu] (0x%p)...",
			       bts.qualifier, index, at);
			return -1;
		}
	}

	return 0;
}
/*
 * Core of the BTS selftest, run with an already-requested tracer in
 * conf->tracer (possibly on a remote cpu via smp_call_function_single).
 *
 * Verifies, in order: tracer initialization, suspend, trace
 * consistency and readability, that suspend really stops collection
 * (top does not move), and that resume collects more trace.  The
 * result is reported in conf->error (0 = pass, <0 = fail); the exact
 * statement order matters and must not be rearranged.
 */
static void ds_selftest_bts_cpu(void *arg)
{
	struct ds_selftest_bts_conf *conf = arg;
	const struct bts_trace *trace;
	void *top;

	if (IS_ERR(conf->tracer)) {
		conf->error = PTR_ERR(conf->tracer);
		conf->tracer = NULL;
		printk(KERN_CONT
		       "initialization failed (err: %d)...", conf->error);
		return;
	}

	/* We should meanwhile have enough trace. */
	conf->error = conf->suspend(conf->tracer);
	if (conf->error < 0)
		return;

	/* Let's see if we can access the trace. */
	trace = ds_read_bts(conf->tracer);

	conf->error = ds_selftest_bts_consistency(trace);
	if (conf->error < 0)
		return;

	/* If everything went well, we should have a few trace entries. */
	if (trace->ds.top == trace->ds.begin) {
		/*
		 * It is possible but highly unlikely that we got a
		 * buffer overflow and end up at exactly the same
		 * position we started from.
		 * Let's issue a warning, but continue.
		 */
		printk(KERN_CONT "no trace/overflow...");
	}

	/* Let's try to read the trace we collected. */
	conf->error =
		ds_selftest_bts_read(conf->tracer, trace,
				     trace->ds.begin, trace->ds.top);
	if (conf->error < 0)
		return;

	/*
	 * Let's read the trace again.
	 * Since we suspended tracing, we should get the same result.
	 */
	top = trace->ds.top;

	trace = ds_read_bts(conf->tracer);
	conf->error = ds_selftest_bts_consistency(trace);
	if (conf->error < 0)
		return;

	if (top != trace->ds.top) {
		printk(KERN_CONT "suspend not working...");
		conf->error = -1;
		return;
	}

	/* Let's collect some more trace - see if resume is working. */
	conf->error = conf->resume(conf->tracer);
	if (conf->error < 0)
		return;

	conf->error = conf->suspend(conf->tracer);
	if (conf->error < 0)
		return;

	trace = ds_read_bts(conf->tracer);

	conf->error = ds_selftest_bts_consistency(trace);
	if (conf->error < 0)
		return;

	if (trace->ds.top == top) {
		/*
		 * It is possible but highly unlikely that we got a
		 * buffer overflow and end up at exactly the same
		 * position we started from.
		 * Let's issue a warning and check the full trace.
		 */
		printk(KERN_CONT
		       "no resume progress/overflow...");

		conf->error =
			ds_selftest_bts_read(conf->tracer, trace,
					     trace->ds.begin, trace->ds.end);
	} else if (trace->ds.top < top) {
		/*
		 * We had a buffer overflow - the entire buffer should
		 * contain trace records.
		 */
		conf->error =
			ds_selftest_bts_read(conf->tracer, trace,
					     trace->ds.begin, trace->ds.end);
	} else {
		/*
		 * It is quite likely that the buffer did not overflow.
		 * Let's just check the delta trace.
		 */
		conf->error =
			ds_selftest_bts_read(conf->tracer, trace, top,
					     trace->ds.top);
	}
	if (conf->error < 0)
		return;

	conf->error = 0;
}
/*
 * Thin adapters: the void-returning ds_suspend_bts()/ds_resume_bts()
 * are wrapped to match the int-returning suspend/resume slots in
 * struct ds_selftest_bts_conf (they always report success).
 */
static int ds_suspend_bts_wrap(struct bts_tracer *tracer)
{
	ds_suspend_bts(tracer);
	return 0;
}

static int ds_resume_bts_wrap(struct bts_tracer *tracer)
{
	ds_resume_bts(tracer);
	return 0;
}

/* void(void *) adapter so release_noirq can run via smp_call_function. */
static void ds_release_bts_noirq_wrap(void *tracer)
{
	(void)ds_release_bts_noirq(tracer);
}
/*
 * Negative test: releasing a _noirq tracer from a different cpu than
 * the one it was requested on must fail with -EPERM.
 *
 * Returns 0 if the API rejected the call as expected, -1 otherwise.
 * If the current cpu happens to be @cpu, the call is skipped and the
 * preset -EPERM keeps the test passing.
 */
static int ds_selftest_bts_bad_release_noirq(int cpu,
					     struct bts_tracer *tracer)
{
	int error = -EPERM;

	/* Try to release the tracer on the wrong cpu. */
	get_cpu();
	if (cpu != smp_processor_id()) {
		error = ds_release_bts_noirq(tracer);
		if (error != -EPERM)
			printk(KERN_CONT "release on wrong cpu...");
	}
	put_cpu();

	return error ? 0 : -1;
}
/*
 * Negative test: requesting cpu tracing while task tracing is active
 * must fail with -EPERM.  Returns 0 if it failed as expected, -1 if
 * the request unexpectedly succeeded (in which case the tracer is
 * released again to avoid leaking it).
 */
static int ds_selftest_bts_bad_request_cpu(int cpu, void *buffer)
{
	struct bts_tracer *tracer;
	int error;

	/* Try to request cpu tracing while task tracing is active. */
	tracer = ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE, NULL,
				    (size_t)-1, BTS_KERNEL);
	error = PTR_ERR(tracer);
	if (!IS_ERR(tracer)) {
		ds_release_bts(tracer);
		error = 0;
	}

	if (error != -EPERM)
		printk(KERN_CONT "cpu/task tracing overlap...");

	return error ? 0 : -1;
}
/*
 * Negative test: requesting task tracing while cpu tracing is active
 * must fail with -EPERM.  Returns 0 if it failed as expected, -1 if
 * the request unexpectedly succeeded (the tracer is then released).
 */
static int ds_selftest_bts_bad_request_task(void *buffer)
{
	struct bts_tracer *tracer;
	int error;

	/* Try to request task tracing while cpu tracing is active. */
	tracer = ds_request_bts_task(current, buffer, BUFFER_SIZE, NULL,
				     (size_t)-1, BTS_KERNEL);
	error = PTR_ERR(tracer);
	if (!IS_ERR(tracer)) {
		error = 0;
		ds_release_bts(tracer);
	}

	if (error != -EPERM)
		printk(KERN_CONT "task/cpu tracing overlap...");

	return error ? 0 : -1;
}
/*
 * BTS selftest driver.
 *
 * For every online cpu it runs the core test (ds_selftest_bts_cpu)
 * twice - once with the normal suspend/resume API and once with the
 * _noirq variants executed on that cpu - interleaving the negative
 * cpu/task overlap tests.  It then repeats both variants for task
 * tracing of 'current' (the _noirq pass with irqs disabled and an
 * 8-byte-aligned single-entry buffer).
 *
 * Returns 0 if the whole selftest passed, negative otherwise.
 */
int ds_selftest_bts(void)
{
	struct ds_selftest_bts_conf conf;
	unsigned char buffer[BUFFER_SIZE], *small_buffer;
	unsigned long irq;
	int cpu;

	printk(KERN_INFO "[ds] bts selftest...");
	conf.error = 0;

	/* align to 8 and step past it so the buffer holds one full entry */
	small_buffer = (unsigned char *)ALIGN((unsigned long)buffer, 8) + 8;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		conf.suspend = ds_suspend_bts_wrap;
		conf.resume = ds_resume_bts_wrap;
		conf.tracer =
			ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE,
					   NULL, (size_t)-1, BTS_KERNEL);
		ds_selftest_bts_cpu(&conf);
		if (conf.error >= 0)
			conf.error = ds_selftest_bts_bad_request_task(buffer);
		ds_release_bts(conf.tracer);
		if (conf.error < 0)
			goto out;

		conf.suspend = ds_suspend_bts_noirq;
		conf.resume = ds_resume_bts_noirq;
		conf.tracer =
			ds_request_bts_cpu(cpu, buffer, BUFFER_SIZE,
					   NULL, (size_t)-1, BTS_KERNEL);
		smp_call_function_single(cpu, ds_selftest_bts_cpu, &conf, 1);
		if (conf.error >= 0) {
			conf.error =
				ds_selftest_bts_bad_release_noirq(cpu,
								  conf.tracer);
			/* We must not release the tracer twice. */
			if (conf.error < 0)
				conf.tracer = NULL;
		}
		if (conf.error >= 0)
			conf.error = ds_selftest_bts_bad_request_task(buffer);
		smp_call_function_single(cpu, ds_release_bts_noirq_wrap,
					 conf.tracer, 1);
		if (conf.error < 0)
			goto out;
	}

	conf.suspend = ds_suspend_bts_wrap;
	conf.resume = ds_resume_bts_wrap;
	conf.tracer =
		ds_request_bts_task(current, buffer, BUFFER_SIZE,
				    NULL, (size_t)-1, BTS_KERNEL);
	ds_selftest_bts_cpu(&conf);
	if (conf.error >= 0)
		conf.error = ds_selftest_bts_bad_request_cpu(0, buffer);
	ds_release_bts(conf.tracer);
	if (conf.error < 0)
		goto out;

	conf.suspend = ds_suspend_bts_noirq;
	conf.resume = ds_resume_bts_noirq;
	conf.tracer =
		ds_request_bts_task(current, small_buffer, SMALL_BUFFER_SIZE,
				   NULL, (size_t)-1, BTS_KERNEL);
	local_irq_save(irq);
	ds_selftest_bts_cpu(&conf);
	if (conf.error >= 0)
		conf.error = ds_selftest_bts_bad_request_cpu(0, buffer);
	ds_release_bts_noirq(conf.tracer);
	local_irq_restore(irq);
	if (conf.error < 0)
		goto out;

	conf.error = 0;
 out:
	put_online_cpus();
	printk(KERN_CONT "%s.\n", (conf.error ? "failed" : "passed"));

	return conf.error;
}
/* PEBS selftest: not implemented yet - always reports success. */
int ds_selftest_pebs(void)
{
	return 0;
}
| gpl-2.0 |
ozone999/at91sam | arch/arm/kernel/armksyms.c | 1338 | 4032 | /*
* linux/arch/arm/kernel/armksyms.c
*
* Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/cryptohash.h>
#include <linux/delay.h>
#include <linux/in6.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/checksum.h>
#include <asm/system.h>
#include <asm/ftrace.h>
/*
 * libgcc functions - functions that are used internally by the
 * compiler... (prototypes are not correct though, but that
 * doesn't really matter since they're not versioned).
 *
 * These helpers are implemented in assembly (arch/arm/lib); the
 * void(void) prototypes exist only so their addresses can be exported
 * below - they must never be called through these declarations.
 */
extern void __ashldi3(void);
extern void __ashrdi3(void);
extern void __divsi3(void);
extern void __lshrdi3(void);
extern void __modsi3(void);
extern void __muldi3(void);
extern void __ucmpdi2(void);
extern void __udivsi3(void);
extern void __umodsi3(void);
extern void __do_div64(void);

extern void __aeabi_idiv(void);
extern void __aeabi_idivmod(void);
extern void __aeabi_lasr(void);
extern void __aeabi_llsl(void);
extern void __aeabi_llsr(void);
extern void __aeabi_lmul(void);
extern void __aeabi_uidiv(void);
extern void __aeabi_uidivmod(void);
extern void __aeabi_ulcmp(void);

extern void fpundefinstr(void);
/*
 * Symbol exports for modules, grouped by subsystem.  Conditional
 * groups mirror the config options under which the corresponding
 * implementations are built.
 */
EXPORT_SYMBOL(__backtrace);

	/* platform dependent support */
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__const_udelay);

	/* networking */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_from_user);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_ipv6_magic);

	/* io */
#ifndef __raw_readsb
EXPORT_SYMBOL(__raw_readsb);
#endif
#ifndef __raw_readsw
EXPORT_SYMBOL(__raw_readsw);
#endif
#ifndef __raw_readsl
EXPORT_SYMBOL(__raw_readsl);
#endif
#ifndef __raw_writesb
EXPORT_SYMBOL(__raw_writesb);
#endif
#ifndef __raw_writesw
EXPORT_SYMBOL(__raw_writesw);
#endif
#ifndef __raw_writesl
EXPORT_SYMBOL(__raw_writesl);
#endif

	/* string / mem functions */
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);

	/* user mem (segment) */
EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(__strncpy_from_user);

#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);

EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);

EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
EXPORT_SYMBOL(__get_user_4);

EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
#endif

	/* crypto hash */
EXPORT_SYMBOL(sha_transform);

	/* gcc lib functions */
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__ucmpdi2);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__do_div64);

#ifdef CONFIG_AEABI
EXPORT_SYMBOL(__aeabi_idiv);
EXPORT_SYMBOL(__aeabi_idivmod);
EXPORT_SYMBOL(__aeabi_lasr);
EXPORT_SYMBOL(__aeabi_llsl);
EXPORT_SYMBOL(__aeabi_llsr);
EXPORT_SYMBOL(__aeabi_lmul);
EXPORT_SYMBOL(__aeabi_uidiv);
EXPORT_SYMBOL(__aeabi_uidivmod);
EXPORT_SYMBOL(__aeabi_ulcmp);
#endif

	/* bitops */
EXPORT_SYMBOL(_set_bit);
EXPORT_SYMBOL(_test_and_set_bit);
EXPORT_SYMBOL(_clear_bit);
EXPORT_SYMBOL(_test_and_clear_bit);
EXPORT_SYMBOL(_change_bit);
EXPORT_SYMBOL(_test_and_change_bit);
EXPORT_SYMBOL(_find_first_zero_bit_le);
EXPORT_SYMBOL(_find_next_zero_bit_le);
EXPORT_SYMBOL(_find_first_bit_le);
EXPORT_SYMBOL(_find_next_bit_le);

#ifdef __ARMEB__
EXPORT_SYMBOL(_find_first_zero_bit_be);
EXPORT_SYMBOL(_find_next_zero_bit_be);
EXPORT_SYMBOL(_find_first_bit_be);
EXPORT_SYMBOL(_find_next_bit_be);
#endif

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_OLD_MCOUNT
EXPORT_SYMBOL(mcount);
#endif
EXPORT_SYMBOL(__gnu_mcount_nc);
#endif

#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
EXPORT_SYMBOL(__pv_phys_offset);
#endif
| gpl-2.0 |
mericon/Xp_Kernel_LGH850 | drivers/usb/dwc2/hcd_ddma.c | 1594 | 34092 | /*
* hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
*
* Copyright (C) 2004-2013 Synopsys, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The names of the above-listed copyright holders may not be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") as published by the Free Software
* Foundation; either version 2 of the License, or (at your option) any
* later version.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* This file contains the Descriptor DMA implementation for Host mode
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>
#include "core.h"
#include "hcd.h"
/* Map a frame number into the 64-entry frame list (power-of-2 wrap). */
static u16 dwc2_frame_list_idx(u16 frame)
{
	return frame & (FRLISTEN_64_SIZE - 1);
}

/* Advance a descriptor-list index by @inc, wrapping at the list size
 * for the device speed (HS isoc lists are larger). */
static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
	return (idx + inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
					    MAX_DMA_DESC_NUM_GENERIC) - 1);
}

/* Step a descriptor-list index back by @inc, with the same wrapping. */
static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
	return (idx - inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
					    MAX_DMA_DESC_NUM_GENERIC) - 1);
}

/* Descriptor-list length for this QH: larger only for HS isochronous. */
static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
		qh->dev_speed == USB_SPEED_HIGH) ?
		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}

/* Frame-list increment for this QH's service interval; HS intervals
 * are in uframes, so round up to whole frames (8 uframes/frame). */
static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
	return qh->dev_speed == USB_SPEED_HIGH ?
	       (qh->interval + 8 - 1) / 8 : qh->interval;
}
/*
 * Allocate a QH's DMA descriptor list and the parallel per-descriptor
 * byte-count array.
 *
 * Returns 0 on success or -ENOMEM; on failure nothing is left
 * allocated (the descriptor list is freed again if only the byte
 * array allocation fails).
 */
static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				gfp_t flags)
{
	qh->desc_list = dma_alloc_coherent(hsotg->dev,
					   sizeof(struct dwc2_hcd_dma_desc) *
					   dwc2_max_desc_num(qh),
					   &qh->desc_list_dma, flags);
	if (!qh->desc_list)
		return -ENOMEM;

	/* Hardware parses this list in place - start all-inactive */
	memset(qh->desc_list, 0,
	       sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh));

	/* kcalloc() zeroes and is overflow-checked, unlike kzalloc(a * b) */
	qh->n_bytes = kcalloc(dwc2_max_desc_num(qh), sizeof(u32), flags);
	if (!qh->n_bytes) {
		dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc)
				  * dwc2_max_desc_num(qh), qh->desc_list,
				  qh->desc_list_dma);
		qh->desc_list = NULL;
		return -ENOMEM;
	}

	return 0;
}
/*
 * Free a QH's descriptor list and byte-count array; safe to call when
 * they were never allocated (kfree(NULL) is a no-op, desc_list guarded).
 */
static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	if (qh->desc_list) {
		dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc)
				  * dwc2_max_desc_num(qh), qh->desc_list,
				  qh->desc_list_dma);
		qh->desc_list = NULL;
	}

	kfree(qh->n_bytes);
	qh->n_bytes = NULL;
}
/*
 * Allocate the shared 64-entry periodic frame list (one u32 bitmap of
 * active channels per frame).  Idempotent: returns 0 if it already
 * exists.
 *
 * NOTE(review): the hsotg->frame_list existence check is done without
 * holding hsotg->lock here - presumably callers serialize QH init;
 * confirm against the callers before relying on it.
 */
static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
	if (hsotg->frame_list)
		return 0;

	hsotg->frame_list = dma_alloc_coherent(hsotg->dev,
					       4 * FRLISTEN_64_SIZE,
					       &hsotg->frame_list_dma,
					       mem_flags);
	if (!hsotg->frame_list)
		return -ENOMEM;

	memset(hsotg->frame_list, 0, 4 * FRLISTEN_64_SIZE);
	return 0;
}

/*
 * Free the periodic frame list.  The pointer is detached under the
 * lock (so concurrent users see NULL) and the memory released after
 * the lock is dropped, since dma_free_coherent() may not be called
 * with interrupts disabled.
 */
static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
	u32 *frame_list;
	dma_addr_t frame_list_dma;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);
	if (!hsotg->frame_list) {
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	frame_list = hsotg->frame_list;
	frame_list_dma = hsotg->frame_list_dma;
	hsotg->frame_list = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	dma_free_coherent(hsotg->dev, 4 * FRLISTEN_64_SIZE, frame_list,
			  frame_list_dma);
}
/*
 * Enable the controller's periodic schedule: program the frame list
 * base address and set the requested frame-list length together with
 * HCFG_PERSCHEDENA.  No-op if already enabled.
 */
static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = readl(hsotg->regs + HCFG);
	if (hcfg & HCFG_PERSCHEDENA) {
		/* already enabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	writel(hsotg->frame_list_dma, hsotg->regs + HFLBADDR);

	hcfg &= ~HCFG_FRLISTEN_MASK;
	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
	writel(hcfg, hsotg->regs + HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Disable the controller's periodic schedule by clearing
 * HCFG_PERSCHEDENA.  No-op if already disabled.
 */
static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = readl(hsotg->regs + HCFG);
	if (!(hcfg & HCFG_PERSCHEDENA)) {
		/* already disabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	hcfg &= ~HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
	writel(hcfg, hsotg->regs + HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}
/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   int enable)
{
	struct dwc2_host_chan *chan;
	u16 i, j, inc;

	if (!hsotg) {
		pr_err("hsotg = %p\n", hsotg);
		return;
	}

	if (!qh->channel) {
		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
		return;
	}

	if (!hsotg->frame_list) {
		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
			hsotg->frame_list);
		return;
	}

	chan = qh->channel;
	inc = dwc2_frame_incr_val(qh);
	/* Isoc starts at its scheduled frame; interrupt starts at slot 0 */
	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
		i = dwc2_frame_list_idx(qh->sched_frame);
	else
		i = 0;

	/* Set/clear this channel's bit in every frame it services */
	j = i;
	do {
		if (enable)
			hsotg->frame_list[j] |= 1 << chan->hc_num;
		else
			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
	} while (j != i);

	if (!enable)
		return;

	/* Build the per-uframe schedule mask for the channel */
	chan->schinfo = 0;
	if (chan->speed == USB_SPEED_HIGH && qh->interval) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->interval - 1) / qh->interval;
		for (i = 0; i < inc; i++) {
			chan->schinfo |= j;
			j = j << qh->interval;
		}
	} else {
		chan->schinfo = 0xff;
	}
}
/*
 * Return a QH's host channel to the free pool: update the channel
 * accounting (or clear its frame-list bits for periodic QHs), clean up
 * the channel, and wipe the QH's descriptor list so no stale
 * descriptors remain active.
 */
static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan = qh->channel;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->core_params->uframe_sched > 0)
			hsotg->available_host_channels++;
		else
			hsotg->non_periodic_channels--;
	} else {
		dwc2_update_frame_list(hsotg, qh, 0);
	}

	/*
	 * The condition is added to prevent double cleanup try in case of
	 * device disconnect. See channel cleanup in dwc2_hcd_disconnect().
	 */
	if (chan->qh) {
		if (!list_empty(&chan->hc_list_entry))
			list_del(&chan->hc_list_entry);
		dwc2_hc_cleanup(hsotg, chan);
		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
		chan->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
		       dwc2_max_desc_num(qh));
}
/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg:     The HCD state structure for the DWC OTG controller
 * @qh:        The QH to init
 * @mem_flags: Allocation flags (gfp_t) for the descriptor/frame lists
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 *
 * Split transactions are rejected: they are not supported in
 * descriptor-DMA mode.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags)
{
	int retval;

	if (qh->do_split) {
		dev_err(hsotg->dev,
			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
		retval = -EINVAL;
		goto err0;
	}

	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
	if (retval)
		goto err0;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
		if (!hsotg->frame_list) {
			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
			if (retval)
				goto err1;
			/* Enable periodic schedule on first periodic QH */
			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
		}
	}

	qh->ntd = 0;
	return 0;

err1:
	dwc2_desc_list_free(hsotg, qh);
err0:
	return retval;
}
/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to free
 *
 * Frees descriptor list memory associated with the QH. If QH is periodic and
 * the last, frees FrameList memory and disables periodic scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	dwc2_desc_list_free(hsotg, qh);

	/*
	 * Channel still assigned due to some reasons.
	 * Seen on Isoc URB dequeue. Channel halted but no subsequent
	 * ChHalted interrupt to release the channel. Afterwards
	 * when it comes here from endpoint disable routine
	 * channel remains assigned.
	 */
	if (qh->channel)
		dwc2_release_channel_ddma(hsotg, qh);

	/* Last periodic QH gone: tear down the shared periodic schedule */
	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
	    (hsotg->core_params->uframe_sched > 0 ||
	     !hsotg->periodic_channels) && hsotg->frame_list) {
		dwc2_per_sched_disable(hsotg);
		dwc2_frame_list_free(hsotg);
	}
}
/*
 * Map a frame index to the QH's starting descriptor index.  For HS
 * each frame owns a set of 8 descriptors (one per uframe), so the
 * result is 8-aligned; for FS/LS it is one descriptor per frame.
 */
static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
	if (qh->dev_speed == USB_SPEED_HIGH)
		/* Descriptor set (8 descriptors) index which is 8-aligned */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	else
		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}
/*
 * Determine starting frame for Isochronous transfer.
 * Few frames skipped to prevent race condition with HC.
 *
 * On return, *skip_frames holds the number of (u)frames that must not
 * be programmed with active descriptors; the returned value is the
 * frame number to start scheduling at.
 */
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 *skip_frames)
{
	u16 frame;

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	/* sched_frame is always frame number (not uFrame) both in FS and HS! */

	/*
	 * skip_frames is used to limit activated descriptors number
	 * to avoid the situation when HC services the last activated
	 * descriptor firstly.
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since HC always fetches
	 * the descriptor corresponding to curr_frame+1, the descriptor
	 * corresponding to frame 2 will be fetched. If the number of
	 * descriptors is max=64 (or greather) the list will be fully programmed
	 * with Active descriptors and it is possible case (rare) that the
	 * latest descriptor(considering rollback) corresponding to frame 2 will
	 * be serviced first. HS case is more probable because, in fact, up to
	 * 11 uframes (16 in the code) may be skipped.
	 */
	if (qh->dev_speed == USB_SPEED_HIGH) {
		/*
		 * Consider uframe counter also, to start xfer asap. If half of
		 * the frame elapsed skip 2 frames otherwise just 1 frame.
		 * Starting descriptor index must be 8-aligned, so if the
		 * current frame is near to complete the next one is skipped as
		 * well.
		 */
		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		}

		frame = dwc2_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see example above.
		 */
		*skip_frames = 1;
		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
	}

	return frame;
}
/*
 * Calculate initial descriptor index for isochronous transfer based on
 * scheduled frame
 *
 * Sets qh->td_first/td_last to the computed descriptor index and
 * returns the skip_frames value from dwc2_calc_starting_frame().
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	u16 frame, fr_idx, fr_idx_tmp, skip_frames;

	/*
	 * With current ISOC processing algorithm the channel is being released
	 * when no more QTDs in the list (qh->ntd == 0). Thus this function is
	 * called only when qh->ntd == 0 and qh->channel == 0.
	 *
	 * So qh->channel != NULL branch is not used and just not removed from
	 * the source file. It is required for another possible approach which
	 * is, do not disable and release the channel when ISOC session
	 * completed, just move QH to inactive schedule until new QTD arrives.
	 * On new QTD, the QH moved back to 'ready' schedule, starting frame and
	 * therefore starting desc_index are recalculated. In this case channel
	 * is released only on ep_disable.
	 */

	/*
	 * Calculate starting descriptor index. For INTERRUPT endpoint it is
	 * always 0.
	 */
	if (qh->channel) {
		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on FrameList current
		 * bitmap and servicing period
		 */
		fr_idx_tmp = dwc2_frame_list_idx(frame);
		fr_idx = (FRLISTEN_64_SIZE +
			  dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
			 % dwc2_frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
	} else {
		qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
							   &skip_frames);
		fr_idx = dwc2_frame_list_idx(qh->sched_frame);
	}

	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}
/* Give isoc URBs back as soon as their last frame's descriptor completes */
#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023	/* max isoc payload per FS frame */
#define MAX_ISOC_XFER_SIZE_HS	3072	/* max isoc payload per HS uframe */
#define DESCNUM_THRESHOLD	4	/* min descriptors before moving IOC up */
/*
 * Program one isochronous DMA descriptor at @idx for the next frame of
 * @qtd's URB, clamping the length to @max_xfer_size, and advance the
 * QH's active-descriptor count and the QTD's frame index.
 */
static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					 struct dwc2_qtd *qtd,
					 struct dwc2_qh *qh, u32 max_xfer_size,
					 u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	memset(dma_desc, 0, sizeof(*dma_desc));
	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

	/* Clamp to what the controller can move in a single (u)frame */
	if (frame_desc->length > max_xfer_size)
		qh->n_bytes[idx] = max_xfer_size;
	else
		qh->n_bytes[idx] = frame_desc->length;

	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
			   HOST_DMA_ISOC_NBYTES_MASK;

	qh->ntd++;
	qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/*
	 * Set IOC for each descriptor corresponding to last frame of URB.
	 *
	 * The frame index must be incremented *before* this comparison:
	 * the caller only invokes us while isoc_frame_index_last <
	 * packet_count, so checking before the increment (as the old code
	 * did) could never match and the IOC bit was never set for the
	 * URB's final frame.
	 */
	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
		dma_desc->status |= HOST_DMA_IOC;
#endif
}
/*
 * Build isochronous DMA descriptors from the QTDs queued on @qh, starting
 * at ring index qh->td_last and stepping by the endpoint's interval.
 *
 * ntd_max limits how many descriptors may be outstanding: the descriptor
 * list size divided by the servicing interval, minus any frames already
 * skipped when the channel is brand new.  Descriptors are activated
 * (HOST_DMA_A) in a trailing fashion, with the first and last descriptors
 * armed only at the very end, so the core never sees a half-built chain.
 *
 * NOTE(review): the loop arms qh->desc_list[n_desc - 1] while descriptors
 * are filled at the wrapping ring index 'idx'; the two agree only when
 * td_last starts at 0 -- confirm this is intended for mid-ring restarts.
 */
static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 skip_frames)
{
	struct dwc2_qtd *qtd;
	u32 max_xfer_size;
	u16 idx, inc, n_desc, ntd_max = 0;

	idx = qh->td_last;
	inc = qh->interval;
	n_desc = 0;

	if (qh->interval) {
		/* At most one descriptor per service interval slot */
		ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
				qh->interval;
		if (skip_frames && !qh->channel)
			ntd_max -= skip_frames / qh->interval;
	}

	/* Per-speed cap on the bytes a single descriptor may carry */
	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
						qtd->urb->packet_count) {
			/* Activate the previous descriptor, never the first */
			if (n_desc > 1)
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
						     max_xfer_size, idx);
			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for last descriptor if descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status |= HOST_DMA_IOC;
	}
#else
	/*
	 * Set IOC bit only for one descriptor. Always try to be ahead of HW
	 * processing, i.e. on IOC generation driver activates next descriptor
	 * but core continues to process descriptors following the one with IOC
	 * set.
	 */
	if (n_desc > DESCNUM_THRESHOLD)
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * but during the activation it was only one queued.
		 * Actually more than one QTD might be in the list if this
		 * function called from XferCompletion - QTDs was queued during
		 * HW processing of the previous descriptor chunk.
		 */
		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
					    qh->dev_speed);
	else
		/*
		 * Set the IOC for the latest descriptor if either number of
		 * descriptors is not greater than threshold or no more new
		 * descriptors activated
		 */
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

	qh->desc_list[idx].status |= HOST_DMA_IOC;
#endif

	if (n_desc) {
		/* Arm the last descriptor, then the first to start the chain */
		qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
		if (n_desc > 1)
			qh->desc_list[0].status |= HOST_DMA_A;
	}
}
/*
 * Program one generic (non-isochronous) DMA descriptor from the channel's
 * current transfer state and advance chan->xfer_dma/xfer_len past the
 * bytes consumed by this descriptor.
 */
static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
				    int n_desc)
{
	struct dwc2_hcd_dma_desc *desc = &qh->desc_list[n_desc];
	int limit = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);
	int nbytes = chan->xfer_len;

	/* One descriptor carries at most 'limit' bytes */
	if (nbytes > limit)
		nbytes = limit;

	if (chan->ep_is_in) {
		/*
		 * IN transfers must be programmed as a whole number of
		 * packets; a zero-length transfer still needs one packet.
		 */
		int pkts = 1;

		if (nbytes > 0 && chan->max_packet)
			pkts = (nbytes + chan->max_packet - 1) /
			       chan->max_packet;
		nbytes = pkts * chan->max_packet;
	}

	desc->status = nbytes << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
	qh->n_bytes[n_desc] = nbytes;

	/* Flag the SETUP stage descriptor of a control transfer */
	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
	    qtd->control_phase == DWC2_CONTROL_SETUP)
		desc->status |= HOST_DMA_SUP;

	desc->buf = (u32)chan->xfer_dma;

	/*
	 * The rounded-up IN length may exceed what is actually left; that
	 * marks the last (or only) descriptor of the transfer.
	 */
	if (nbytes > chan->xfer_len) {
		chan->xfer_len = 0;
	} else {
		chan->xfer_dma += nbytes;
		chan->xfer_len -= nbytes;
	}
}
/*
 * Build the descriptor chain for a Control/Bulk/Interrupt transfer from
 * the QTDs queued on @qh, up to MAX_DMA_DESC_NUM_GENERIC descriptors.
 * Intermediate descriptors are activated as their successors are filled;
 * the last descriptor gets IOC|EOL|A and the first is armed at the end,
 * so the core only ever sees a fully built chain.
 */
static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan = qh->channel;
	int n_desc = 0;

	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
		 (unsigned long)chan->xfer_dma, chan->xfer_len);

	/*
	 * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
	 * if SG transfer consists of multiple URBs, this pointer is re-assigned
	 * to the buffer of the currently processed QTD. For non-SG request
	 * there is always one QTD active.
	 */
	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);
		if (n_desc) {
			/* SG request - more than 1 QTD */
			chan->xfer_dma = qtd->urb->dma +
					qtd->urb->actual_length;
			chan->xfer_len = qtd->urb->length -
					qtd->urb->actual_length;
			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
				 (unsigned long)chan->xfer_dma, chan->xfer_len);
		}

		qtd->n_desc = 0;
		do {
			/* Activate the previous descriptor, never the first */
			if (n_desc > 1) {
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
				dev_vdbg(hsotg->dev,
					 "set A bit in desc %d (%p)\n",
					 n_desc - 1,
					 &qh->desc_list[n_desc - 1]);
			}
			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
			dev_vdbg(hsotg->dev,
				 "desc %d (%p) buf=%08x status=%08x\n",
				 n_desc, &qh->desc_list[n_desc],
				 qh->desc_list[n_desc].buf,
				 qh->desc_list[n_desc].status);
			qtd->n_desc++;
			n_desc++;
		} while (chan->xfer_len > 0 &&
			 n_desc != MAX_DMA_DESC_NUM_GENERIC);

		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
		qtd->in_process = 1;

		/* Control transfers are built one stage (QTD) at a time */
		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
			break;

		/* Descriptor list exhausted */
		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |=
				HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
			 n_desc - 1, &qh->desc_list[n_desc - 1]);
		if (n_desc > 1) {
			/* Arm the first descriptor last to start the chain */
			qh->desc_list[0].status |= HOST_DMA_A;
			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
				 &qh->desc_list[0]);
		}
		chan->ntd = n_desc;
	}
}
/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init; its channel must already be assigned
 *
 * For Control and Bulk endpoints, initializes the descriptor list and starts
 * the transfer. For Interrupt and Isochronous endpoints, initializes the
 * descriptor list, then updates the FrameList, marking appropriate entries
 * as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. The channel is enabled only once per session and is not halted on
 * XferComplete, so it remains assigned to the endpoint (QH) until the session
 * is done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Channel is already assigned */
	struct dwc2_host_chan *chan = qh->channel;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		u16 skip_frames = 0;

		/* Recalculate the starting index only at session start */
		if (!qh->ntd)
			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

		if (!chan->xfer_started) {
			dwc2_update_frame_list(hsotg, qh, 1);

			/*
			 * Always set to max, instead of actual size. Otherwise
			 * ntd will be changed with channel being enabled. Not
			 * recommended.
			 */
			chan->ntd = dwc2_max_desc_num(qh);

			/* Enable channel only once for ISOC */
			dwc2_hc_start_transfer_ddma(hsotg, chan);
		}
		return;
	}

	if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL &&
	    chan->ep_type != USB_ENDPOINT_XFER_BULK &&
	    chan->ep_type != USB_ENDPOINT_XFER_INT)
		return;

	dwc2_init_non_isoc_dma_desc(hsotg, qh);

	/* Interrupt endpoints are additionally scheduled via the FrameList */
	if (chan->ep_type == USB_ENDPOINT_XFER_INT)
		dwc2_update_frame_list(hsotg, qh, 1);

	dwc2_hc_start_transfer_ddma(hsotg, chan);
}
#define DWC2_CMPL_DONE 1
#define DWC2_CMPL_STOP 2
/*
 * Complete a single isochronous descriptor: update the corresponding
 * iso frame descriptor's status and actual_length, give the URB back
 * when its last frame has been processed, and drop qh->ntd.
 *
 * Return:
 *  < 0              - abort scanning (no URB, or the URB was dequeued
 *                     from inside the completion callback)
 *  DWC2_CMPL_DONE   - current QTD completed and freed, move to the next
 *  DWC2_CMPL_STOP   - a descriptor with IOC set was reached, stop scan
 *  0                - keep scanning this QTD
 *
 * NOTE(review): frame_desc is indexed with isoc_frame_index_last while
 * the completion counter advanced below is isoc_frame_index -- confirm
 * this asymmetry is intended for the partial-frame (max_xfer_size) case.
 */
static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan,
					struct dwc2_qtd *qtd,
					struct dwc2_qh *qh, u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u16 remain = 0;
	int rc = 0;

	if (!qtd->urb)
		return -EINVAL;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);

	/* For IN, the core leaves the untransferred byte count in NBYTES */
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
			 HOST_DMA_ISOC_NBYTES_SHIFT;

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		/*
		 * XactError, or unable to complete all the transactions
		 * in the scheduled micro-frame/frame, both indicated by
		 * HOST_DMA_STS_PKTERR
		 */
		qtd->urb->error_count++;
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = -EPROTO;
	} else {
		/* Success */
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = 0;
	}

	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers here. The
		 * individual frame_desc status are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/*
		 * This check is necessary because urb_dequeue can be called
		 * from urb complete callback (sound driver for example). All
		 * pending URBs are dequeued there, so no need for further
		 * processing.
		 */
		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
			return -1;
		rc = DWC2_CMPL_DONE;
	}

	qh->ntd--;

	/* Stop if IOC requested descriptor reached */
	if (dma_desc->status & HOST_DMA_IOC)
		rc = DWC2_CMPL_STOP;

	return rc;
}
/*
 * Scan the isochronous descriptor ring from qh->td_first, completing
 * descriptors (and URBs) until an IOC descriptor is reached, the ring
 * wraps, or a QTD not yet handed to hardware is found.  Serious channel
 * errors (AHB/Babble) fail every queued URB wholesale instead.
 */
static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					 struct dwc2_host_chan *chan,
					 enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	u16 idx;
	int rc;

	qh = chan->qh;
	idx = qh->td_first;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		/* Dequeue path: just detach QTDs from hardware processing */
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
		/*
		 * Channel is halted in these error cases, considered as serious
		 * issues.
		 * Complete all URBs marking all frames as failed, irrespective
		 * whether some of the descriptors (frames) succeeded or not.
		 * Pass error code to completion routine as well, to update
		 * urb->status, some of class drivers might use it to stop
		 * queing transfer requests.
		 */
		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
			  -EIO : -EOVERFLOW;

		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry) {
			if (qtd->urb) {
				for (idx = 0; idx < qtd->urb->packet_count;
				     idx++) {
					frame_desc = &qtd->urb->iso_descs[idx];
					frame_desc->status = err;
				}
				dwc2_host_complete(hsotg, qtd, err);
			}
			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}
		return;
	}

	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
		/* QTDs past this one were never handed to the hardware */
		if (!qtd->in_process)
			break;
		do {
			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
							  idx);
			if (rc < 0)
				return;
			idx = dwc2_desclist_idx_inc(idx, qh->interval,
						    chan->speed);
			if (rc == DWC2_CMPL_STOP)
				goto stop_scan;
			if (rc == DWC2_CMPL_DONE)
				break;
		} while (idx != qh->td_first);
	}

stop_scan:
	/* Resume the next scan where this one stopped */
	qh->td_first = idx;
}
/*
 * Update urb->status and urb->actual_length from one completed non-isoc
 * descriptor.  Sets *xfer_done when the URB's transfer (or control data
 * stage) is finished.
 *
 * Return: 1 when the URB failed (AHB error or packet-level error mapped
 * from @halt_status) and must be completed with that error; 0 otherwise,
 * including the still-active-descriptor case.
 */
static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
					       struct dwc2_host_chan *chan,
					       struct dwc2_qtd *qtd,
					       struct dwc2_hcd_dma_desc *dma_desc,
					       enum dwc2_halt_status halt_status,
					       u32 n_bytes, int *xfer_done)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	u16 remain = 0;

	/* For IN, the core leaves the untransferred byte count in NBYTES */
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
			 HOST_DMA_NBYTES_SHIFT;

	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
		dev_err(hsotg->dev, "EIO\n");
		urb->status = -EIO;
		return 1;
	}

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		/* Map the halt reason to the URB error code */
		switch (halt_status) {
		case DWC2_HC_XFER_STALL:
			dev_vdbg(hsotg->dev, "Stall\n");
			urb->status = -EPIPE;
			break;
		case DWC2_HC_XFER_BABBLE_ERR:
			dev_err(hsotg->dev, "Babble\n");
			urb->status = -EOVERFLOW;
			break;
		case DWC2_HC_XFER_XACT_ERR:
			dev_err(hsotg->dev, "XactErr\n");
			urb->status = -EPROTO;
			break;
		default:
			dev_err(hsotg->dev,
				"%s: Unhandled descriptor error status (%d)\n",
				__func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status & HOST_DMA_A) {
		/* Hardware has not processed this descriptor yet */
		dev_vdbg(hsotg->dev,
			 "Active descriptor encountered on channel %d\n",
			 chan->hc_num);
		return 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		if (qtd->control_phase == DWC2_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			/* Short packet or full length ends the data stage */
			if (remain || urb->actual_length >= urb->length) {
				/*
				 * For Control Data stage do not set urb->status
				 * to 0, to prevent URB callback. Set it when
				 * Status phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
			 urb->actual_length);
		if (remain || urb->actual_length >= urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}
/*
 * Process one completed non-isochronous descriptor: update the URB state,
 * complete and free the QTD when the URB is done or failed, and advance
 * the control transfer state machine (SETUP -> DATA -> STATUS).
 *
 * Return: non-zero when the URB failed; -EINVAL when the QTD has no URB;
 * 0 otherwise.  Note the QTD may have been freed on a 0 return if the
 * transfer completed successfully (*xfer_done set and status updated).
 */
static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan,
				      int chnum, struct dwc2_qtd *qtd,
				      int desc_num,
				      enum dwc2_halt_status halt_status,
				      int *xfer_done)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_hcd_dma_desc *dma_desc;
	u32 n_bytes;
	int failed;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!urb)
		return -EINVAL;

	dma_desc = &qh->desc_list[desc_num];
	n_bytes = qh->n_bytes[desc_num];

	dev_vdbg(hsotg->dev,
		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
		 qtd, urb, desc_num, dma_desc, n_bytes);

	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
						     halt_status, n_bytes,
						     xfer_done);
	if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
		/* URB finished (error or success) - give it back and free */
		dwc2_host_complete(hsotg, qtd, urb->status);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
			 failed, *xfer_done, urb->status);
		return failed;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			/* Zero-length control transfers skip the data stage */
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 "  Control setup transaction done\n");
			break;
		case DWC2_CONTROL_DATA:
			if (*xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 "  Control data transfer done\n");
			} else if (desc_num + 1 == qtd->n_desc) {
				/*
				 * Last descriptor for Control data stage which
				 * is not completed yet
				 */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			break;
		default:
			break;
		}
	}

	return 0;
}
/*
 * Walk the QTD list of a halted non-isochronous channel, completing each
 * QTD's descriptors via dwc2_process_non_isoc_desc(), then fix up the
 * data toggle and PING state for the next transfer.
 */
static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					     struct dwc2_host_chan *chan,
					     int chnum,
					     enum dwc2_halt_status halt_status)
{
	struct list_head *qtd_item, *qtd_tmp;
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_qtd *qtd = NULL;
	int xfer_done;
	int desc_num = 0;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		/* Dequeue path: just detach QTDs from hardware processing */
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
		int i;
		int qtd_desc_count;

		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
		xfer_done = 0;

		/*
		 * dwc2_process_non_isoc_desc() can complete and free the QTD
		 * (returning 0 when the URB finished successfully), so the
		 * descriptor count must be cached before the loop -- reading
		 * qtd->n_desc in the loop condition would be a use-after-free.
		 */
		qtd_desc_count = qtd->n_desc;

		for (i = 0; i < qtd_desc_count; i++) {
			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
						       desc_num, halt_status,
						       &xfer_done)) {
				qtd = NULL;
				break;
			}
			desc_num++;
		}
	}

	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 *
		 * NOTE(review): if the final QTD completed successfully and
		 * was freed (zero return from dwc2_process_non_isoc_desc()),
		 * qtd is stale here -- confirm the save_data_toggle path.
		 */
		if (halt_status == DWC2_HC_XFER_STALL)
			qh->data_toggle = DWC2_HC_PID_DATA0;
		else if (qtd)
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
	}

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Got a NYET on the last transaction of the transfer.
			 * It means that the endpoint should be in the PING
			 * state at the beginning of the next transfer.
			 */
			qh->ping_state = 1;
		}
	}
}
/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
 * status and calls completion routine for the URB if it's done. Called from
 * interrupt handlers.
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 *               for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * In case of Isochronous endpoint the channel is not halted until the end of
 * the session, i.e. QTD list is empty.
 * If periodic channel released the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	enum dwc2_transaction_type tr_type;
	int continue_isoc_xfer = 0;

	if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
		/*
		 * Scan descriptor list to complete the URB(s), then release
		 * the channel
		 */
		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
						 halt_status);
		dwc2_release_channel_ddma(hsotg, qh);
		dwc2_hcd_qh_unlink(hsotg, qh);

		/*
		 * Add back to inactive non-periodic schedule on normal
		 * completion if work remains queued
		 */
		if (!list_empty(&qh->qtd_list))
			dwc2_hcd_qh_add(hsotg, qh);
	} else {
		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

		if (halt_status == DWC2_HC_XFER_COMPLETE &&
		    !list_empty(&qh->qtd_list)) {
			/* Keep in assigned schedule to continue transfer */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_assigned);
			continue_isoc_xfer = 1;
		} else {
			/* Halt the channel if session completed */
			if (halt_status == DWC2_HC_XFER_COMPLETE)
				dwc2_hc_halt(hsotg, chan, halt_status);
			dwc2_release_channel_ddma(hsotg, qh);
			dwc2_hcd_qh_unlink(hsotg, qh);
		}

		/*
		 * Todo: Consider the case when period exceeds FrameList size.
		 * Frame Rollover interrupt should be used.
		 */
	}

	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (continue_isoc_xfer || tr_type != DWC2_TRANSACTION_NONE) {
		/* A continuing ISOC session forces periodic processing */
		if (continue_isoc_xfer) {
			if (tr_type == DWC2_TRANSACTION_NONE)
				tr_type = DWC2_TRANSACTION_PERIODIC;
			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
				tr_type = DWC2_TRANSACTION_ALL;
		}
		dwc2_hcd_queue_transactions(hsotg, tr_type);
	}
}
| gpl-2.0 |
NamelessRom/android_kernel_samsung_aries | fs/xfs/xfs_attr_leaf.c | 1850 | 86141 | /*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
/*
* xfs_attr_leaf.c
*
* Routines to implement leaf blocks of attributes as Btrees of hashed names.
*/
/*========================================================================
* Function prototypes for the kernel.
*========================================================================*/
/*
* Routines used for growing the Btree.
*/
STATIC int xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t which_block,
xfs_dabuf_t **bpp);
STATIC int xfs_attr_leaf_add_work(xfs_dabuf_t *leaf_buffer, xfs_da_args_t *args,
int freemap_index);
STATIC void xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *leaf_buffer);
STATIC void xfs_attr_leaf_rebalance(xfs_da_state_t *state,
xfs_da_state_blk_t *blk1,
xfs_da_state_blk_t *blk2);
STATIC int xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
xfs_da_state_blk_t *leaf_blk_1,
xfs_da_state_blk_t *leaf_blk_2,
int *number_entries_in_blk1,
int *number_usedbytes_in_blk1);
/*
* Routines used for shrinking the Btree.
*/
STATIC int xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
xfs_dabuf_t *bp, int level);
STATIC int xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp,
xfs_dabuf_t *bp);
STATIC int xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
xfs_dablk_t blkno, int blkcnt);
/*
* Utility routines.
*/
STATIC void xfs_attr_leaf_moveents(xfs_attr_leafblock_t *src_leaf,
int src_start,
xfs_attr_leafblock_t *dst_leaf,
int dst_start, int move_count,
xfs_mount_t *mp);
STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);
/*========================================================================
* Namespace helper routines
*========================================================================*/
/*
 * Check whether the namespace of a lookup request matches the namespace
 * of an on-disk attribute entry.
 *
 * Returns 1 when they agree, 0 otherwise.
 */
STATIC int
xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
{
	int wanted = XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);

	return XFS_ATTR_NSP_ONDISK(ondisk_flags) == wanted;
}
/*========================================================================
* External routines when attribute fork size < XFS_LITINO(mp).
*========================================================================*/
/*
* Query whether the requested number of additional bytes of extended
* attribute space will be able to fit inline.
*
* Returns zero if not, else the di_forkoff fork offset to be used in the
* literal area for attribute data once the new bytes have been added.
*
* di_forkoff must be 8 byte aligned, hence is stored as a >>3 value;
* special case for dev/uuid inodes, they have fixed size data forks.
*/
int
xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
{
	int offset;
	int minforkoff;	/* lower limit on valid forkoff locations */
	int maxforkoff;	/* upper limit on valid forkoff locations */
	int dsize;
	xfs_mount_t *mp = dp->i_mount;

	/* Largest forkoff that still leaves 'bytes' of attr space */
	offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */

	switch (dp->i_d.di_format) {
	/* dev/uuid inodes: data fork is fixed size, forkoff follows it */
	case XFS_DINODE_FMT_DEV:
		minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		return (offset >= minforkoff) ? minforkoff : 0;
	case XFS_DINODE_FMT_UUID:
		minforkoff = roundup(sizeof(uuid_t), 8) >> 3;
		return (offset >= minforkoff) ? minforkoff : 0;
	}

	/*
	 * If the requested numbers of bytes is smaller or equal to the
	 * current attribute fork size we can always proceed.
	 *
	 * Note that if_bytes in the data fork might actually be larger than
	 * the current data fork size is due to delalloc extents. In that
	 * case either the extent count will go down when they are converted
	 * to real extents, or the delalloc conversion will take care of the
	 * literal area rebalancing.
	 */
	if (bytes <= XFS_IFORK_ASIZE(dp))
		return dp->i_d.di_forkoff;

	/*
	 * For attr2 we can try to move the forkoff if there is space in the
	 * literal area, but for the old format we are done if there is no
	 * space in the fixed attribute fork.
	 */
	if (!(mp->m_flags & XFS_MOUNT_ATTR2))
		return 0;

	dsize = dp->i_df.if_bytes;

	switch (dp->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/*
		 * If there is no attr fork and the data fork is extents,
		 * determine if creating the default attr fork will result
		 * in the extents form migrating to btree. If so, the
		 * minimum offset only needs to be the space required for
		 * the btree root.
		 */
		if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
		    xfs_default_attroffset(dp))
			dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
		break;
	case XFS_DINODE_FMT_BTREE:
		/*
		 * If we have a data btree then keep forkoff if we have one,
		 * otherwise we are adding a new attr, so then we set
		 * minforkoff to where the btree root can finish so we have
		 * plenty of room for attrs
		 */
		if (dp->i_d.di_forkoff) {
			if (offset < dp->i_d.di_forkoff)
				return 0;
			return dp->i_d.di_forkoff;
		}
		dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
		break;
	}

	/*
	 * A data fork btree root must have space for at least
	 * MINDBTPTRS key/ptr pairs if the data fork is small or empty.
	 */
	minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
	minforkoff = roundup(minforkoff, 8) >> 3;

	/* attr fork btree root can have at least this many key/ptr pairs */
	maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
	maxforkoff = maxforkoff >> 3;	/* rounded down */

	if (offset >= maxforkoff)
		return maxforkoff;
	if (offset >= minforkoff)
		return offset;
	return 0;
}
/*
 * Switch on the ATTR2 superblock bit (implies also FEATURES2).
 *
 * The feature bit is re-checked under m_sb_lock so only one thread
 * ever logs the superblock change.
 */
STATIC void
xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
{
	if (!(mp->m_flags & XFS_MOUNT_ATTR2))
		return;
	if (xfs_sb_version_hasattr2(&mp->m_sb))
		return;

	spin_lock(&mp->m_sb_lock);
	if (xfs_sb_version_hasattr2(&mp->m_sb)) {
		/* Someone else set the bit while we were unlocked */
		spin_unlock(&mp->m_sb_lock);
		return;
	}
	xfs_sb_version_addattr2(&mp->m_sb);
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
}
/*
 * Create the initial contents of a shortform attribute list: make sure
 * the attr fork is in local (inline) format, allocate space for an empty
 * header, and log the change.
 */
void
xfs_attr_shortform_create(xfs_da_args_t *args)
{
	xfs_inode_t *ip = args->dp;
	xfs_ifork_t *afp;
	xfs_attr_sf_hdr_t *sfhdr;

	ASSERT(ip != NULL);
	afp = ip->i_afp;
	ASSERT(afp != NULL);
	ASSERT(afp->if_bytes == 0);

	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		ASSERT(afp->if_flags & XFS_IFINLINE);
	} else {
		/* Fresh attr fork: switch it to inline format */
		afp->if_flags &= ~XFS_IFEXTENTS;	/* just in case */
		ip->i_d.di_aformat = XFS_DINODE_FMT_LOCAL;
		afp->if_flags |= XFS_IFINLINE;
	}

	xfs_idata_realloc(ip, sizeof(*sfhdr), XFS_ATTR_FORK);
	sfhdr = (xfs_attr_sf_hdr_t *)afp->if_u1.if_data;
	sfhdr->count = 0;
	sfhdr->totsize = cpu_to_be16(sizeof(*sfhdr));
	xfs_trans_log_inode(args->trans, ip, XFS_ILOG_CORE | XFS_ILOG_ADATA);
}
/*
 * Add a name/value pair to the shortform attribute list.
 * Overflow from the inode has already been checked for.
 */
void
xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
{
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	int i, offset, size;
	xfs_mount_t *mp;
	xfs_inode_t *dp;
	xfs_ifork_t *ifp;

	dp = args->dp;
	mp = dp->i_mount;
	dp->i_d.di_forkoff = forkoff;
	/* Both forks' inline extent capacities depend on the new forkoff */
	dp->i_df.if_ext_max =
		XFS_IFORK_DSIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
	dp->i_afp->if_ext_max =
		XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);

	ifp = dp->i_afp;
	ASSERT(ifp->if_flags & XFS_IFINLINE);
	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
	sfe = &sf->list[0];
	/*
	 * Walk to the end of the list; in DEBUG builds assert that the
	 * name is not already present (callers guarantee this).
	 */
	for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
#ifdef DEBUG
		if (sfe->namelen != args->namelen)
			continue;
		if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, sfe->flags))
			continue;
		ASSERT(0);
#endif
	}

	/*
	 * Remember the entry position as a byte offset: the realloc below
	 * may move the inline buffer, invalidating sf/sfe pointers.
	 */
	offset = (char *)sfe - (char *)sf;
	size = XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen);
	xfs_idata_realloc(dp, size, XFS_ATTR_FORK);
	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
	sfe = (xfs_attr_sf_entry_t *)((char *)sf + offset);

	/* Fill in the new entry at the end of the list */
	sfe->namelen = args->namelen;
	sfe->valuelen = args->valuelen;
	sfe->flags = XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
	memcpy(sfe->nameval, args->name, args->namelen);
	memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
	sf->hdr.count++;
	be16_add_cpu(&sf->hdr.totsize, size);
	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);

	xfs_sbversion_add_attr2(mp, args->trans);
}
/*
 * After the last attribute is removed revert to the original inode
 * format, making all literal area available to the data fork once more.
 */
STATIC void
xfs_attr_fork_reset(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp)
{
	/* Free the in-core attribute fork */
	xfs_idestroy_fork(ip, XFS_ATTR_FORK);
	ASSERT(ip->i_afp == NULL);
	ASSERT(ip->i_d.di_anextents == 0);

	ip->i_d.di_forkoff = 0;
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;

	/* Data fork may now use the whole literal area */
	ip->i_df.if_ext_max =
		XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t);

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
/*
 * Remove an attribute from the shortform attribute list structure.
 *
 * Returns 0 on success, ENOATTR if the name is not present.  If the
 * list becomes empty (and attr2 allows it) the attribute fork itself
 * is torn down; otherwise the fork is shrunk and forkoff recomputed.
 */
int
xfs_attr_shortform_remove(xfs_da_args_t *args)
{
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	int base, size=0, end, totsize, i;
	xfs_mount_t *mp;
	xfs_inode_t *dp;

	dp = args->dp;
	mp = dp->i_mount;
	base = sizeof(xfs_attr_sf_hdr_t);
	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
	sfe = &sf->list[0];
	end = sf->hdr.count;
	/* Find the matching entry; 'base' tracks its byte offset */
	for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe),
					base += size, i++) {
		size = XFS_ATTR_SF_ENTSIZE(sfe);
		if (sfe->namelen != args->namelen)
			continue;
		if (memcmp(sfe->nameval, args->name, args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, sfe->flags))
			continue;
		break;
	}
	if (i == end)
		return(XFS_ERROR(ENOATTR));

	/*
	 * Fix up the attribute fork data, covering the hole
	 */
	end = base + size;
	totsize = be16_to_cpu(sf->hdr.totsize);
	if (end != totsize)
		memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end);
	sf->hdr.count--;
	be16_add_cpu(&sf->hdr.totsize, -size);

	/*
	 * Fix up the start offset of the attribute fork
	 */
	totsize -= size;
	if (totsize == sizeof(xfs_attr_sf_hdr_t) &&
	    (mp->m_flags & XFS_MOUNT_ATTR2) &&
	    (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
	    !(args->op_flags & XFS_DA_OP_ADDNAME)) {
		/* Last attribute removed: drop the attr fork entirely */
		xfs_attr_fork_reset(dp, args->trans);
	} else {
		xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
		dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
		ASSERT(dp->i_d.di_forkoff);
		ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) ||
				(args->op_flags & XFS_DA_OP_ADDNAME) ||
				!(mp->m_flags & XFS_MOUNT_ATTR2) ||
				dp->i_d.di_format == XFS_DINODE_FMT_BTREE);
		/* Recompute inline extent capacities for the new forkoff */
		dp->i_afp->if_ext_max =
			XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
		dp->i_df.if_ext_max =
			XFS_IFORK_DSIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
		xfs_trans_log_inode(args->trans, dp,
					XFS_ILOG_CORE | XFS_ILOG_ADATA);
	}

	xfs_sbversion_add_attr2(mp, args->trans);

	return(0);
}
/*
 * Look up a name in a shortform attribute list structure.
 *
 * Returns EEXIST if the name is present, ENOATTR otherwise.
 */
/*ARGSUSED*/
int
xfs_attr_shortform_lookup(xfs_da_args_t *args)
{
	xfs_ifork_t *ifp = args->dp->i_afp;
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	int i;

	ASSERT(ifp->if_flags & XFS_IFINLINE);
	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;

	for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count;
	     sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
		/* An entry matches on length, name bytes, and namespace */
		if (sfe->namelen == args->namelen &&
		    memcmp(args->name, sfe->nameval, args->namelen) == 0 &&
		    xfs_attr_namesp_match(args->flags, sfe->flags))
			return(XFS_ERROR(EEXIST));
	}
	return(XFS_ERROR(ENOATTR));
}
/*
 * Look up a name in a shortform attribute list structure and copy out
 * its value.
 *
 * Returns EEXIST when the name is found (args->valuelen set; value
 * copied unless ATTR_KERNOVAL), ERANGE when the caller's buffer is too
 * small, and ENOATTR when the name is not present.
 */
/*ARGSUSED*/
int
xfs_attr_shortform_getvalue(xfs_da_args_t *args)
{
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	int i;

	/*
	 * The attr fork must be in local (inline) format.  The old assert
	 * compared di_aformat against XFS_IFINLINE, which is an in-core
	 * fork flag rather than an on-disk format value, and only worked
	 * by numeric coincidence; use the format constant, as
	 * xfs_attr_shortform_create() does.
	 */
	ASSERT(args->dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL);
	sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data;
	sfe = &sf->list[0];
	for (i = 0; i < sf->hdr.count;
				sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
		if (sfe->namelen != args->namelen)
			continue;
		if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, sfe->flags))
			continue;
		if (args->flags & ATTR_KERNOVAL) {
			/* Caller only wants the value length */
			args->valuelen = sfe->valuelen;
			return(XFS_ERROR(EEXIST));
		}
		if (args->valuelen < sfe->valuelen) {
			/* Buffer too small: report the required size */
			args->valuelen = sfe->valuelen;
			return(XFS_ERROR(ERANGE));
		}
		args->valuelen = sfe->valuelen;
		memcpy(args->value, &sfe->nameval[args->namelen],
						    args->valuelen);
		return(XFS_ERROR(EEXIST));
	}
	return(XFS_ERROR(ENOATTR));
}
/*
 * Convert from using the shortform to the leaf.
 *
 * Copies the inline list to a temporary buffer, frees the inline space,
 * allocates the first attr block, and re-adds every entry into the new
 * leaf.  On allocation failure the inline data is restored from the
 * temporary copy.
 */
int
xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
{
	xfs_inode_t *dp;
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	xfs_da_args_t nargs;
	char *tmpbuffer;
	int error, i, size;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;
	xfs_ifork_t *ifp;

	dp = args->dp;
	ifp = dp->i_afp;
	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
	size = be16_to_cpu(sf->hdr.totsize);
	/* Snapshot the inline list so it can be restored on failure */
	tmpbuffer = kmem_alloc(size, KM_SLEEP);
	ASSERT(tmpbuffer != NULL);
	memcpy(tmpbuffer, ifp->if_u1.if_data, size);
	sf = (xfs_attr_shortform_t *)tmpbuffer;

	xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
	bp = NULL;
	error = xfs_da_grow_inode(args, &blkno);
	if (error) {
		/*
		 * If we hit an IO error middle of the transaction inside
		 * grow_inode(), we may have inconsistent data. Bail out.
		 */
		if (error == EIO)
			goto out;
		xfs_idata_realloc(dp, size, XFS_ATTR_FORK);	/* try to put */
		memcpy(ifp->if_u1.if_data, tmpbuffer, size);	/* it back */
		goto out;
	}

	ASSERT(blkno == 0);
	error = xfs_attr_leaf_create(args, blkno, &bp);
	if (error) {
		/* Undo the block allocation and restore the inline data */
		error = xfs_da_shrink_inode(args, 0, bp);
		bp = NULL;
		if (error)
			goto out;
		xfs_idata_realloc(dp, size, XFS_ATTR_FORK);	/* try to put */
		memcpy(ifp->if_u1.if_data, tmpbuffer, size);	/* it back */
		goto out;
	}

	/* Re-add every shortform entry into the new leaf block */
	memset((char *)&nargs, 0, sizeof(nargs));
	nargs.dp = dp;
	nargs.firstblock = args->firstblock;
	nargs.flist = args->flist;
	nargs.total = args->total;
	nargs.whichfork = XFS_ATTR_FORK;
	nargs.trans = args->trans;
	nargs.op_flags = XFS_DA_OP_OKNOENT;

	sfe = &sf->list[0];
	for (i = 0; i < sf->hdr.count; i++) {
		nargs.name = sfe->nameval;
		nargs.namelen = sfe->namelen;
		nargs.value = &sfe->nameval[nargs.namelen];
		nargs.valuelen = sfe->valuelen;
		nargs.hashval = xfs_da_hashname(sfe->nameval,
						sfe->namelen);
		nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags);
		error = xfs_attr_leaf_lookup_int(bp, &nargs); /* set a->index */
		ASSERT(error == ENOATTR);
		error = xfs_attr_leaf_add(bp, &nargs);
		ASSERT(error != ENOSPC);
		if (error)
			goto out;
		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
	}
	error = 0;

out:
	if(bp)
		xfs_da_buf_done(bp);
	kmem_free(tmpbuffer);
	return(error);
}
/*
 * qsort comparison routine for shortform attribute sort entries:
 * order by hash value, breaking ties by original entry number.
 */
STATIC int
xfs_attr_shortform_compare(const void *a, const void *b)
{
	const xfs_attr_sf_sort_t *sa = a;
	const xfs_attr_sf_sort_t *sb = b;

	if (sa->hash != sb->hash)
		return (sa->hash < sb->hash) ? -1 : 1;
	return sa->entno - sb->entno;
}
/*
 * True if the attr-list cursor has never been advanced: every field is
 * still zero, so the listing should start from the very beginning.
 */
#define XFS_ISRESET_CURSOR(cursor) \
	(!((cursor)->initted) && !((cursor)->hashval) && \
	 !((cursor)->blkno) && !((cursor)->offset))
/*
 * Copy out entries of shortform attribute lists for attr_list().
 * Shortform attribute lists are not stored in hashval sorted order.
 * If the output buffer is not large enough to hold them all, then we
 * we have to calculate each entries' hashvalue and sort them before
 * we can begin returning them to the user.
 *
 * Returns 0 on success, EFSCORRUPTED if the shortform list is
 * malformed, or whatever error the put_listent callback reports.
 */
/*ARGSUSED*/
int
xfs_attr_shortform_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t *cursor;
	xfs_attr_sf_sort_t *sbuf, *sbp;
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	xfs_inode_t *dp;
	int sbsize, nsbuf, count, i;
	int error;

	ASSERT(context != NULL);
	dp = context->dp;
	ASSERT(dp != NULL);
	ASSERT(dp->i_afp != NULL);
	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
	ASSERT(sf != NULL);
	if (!sf->hdr.count)
		return(0);		/* empty list: nothing to report */
	cursor = context->cursor;
	ASSERT(cursor != NULL);

	trace_xfs_attr_list_sf(context);

	/*
	 * If the buffer is large enough and the cursor is at the start,
	 * do not bother with sorting since we will return everything in
	 * one buffer and another call using the cursor won't need to be
	 * made.
	 * Note the generous fudge factor of 16 overhead bytes per entry.
	 * If bufsize is zero then put_listent must be a search function
	 * and can just scan through what we have.
	 */
	if (context->bufsize == 0 ||
	    (XFS_ISRESET_CURSOR(cursor) &&
	     (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
		for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
			error = context->put_listent(context,
					sfe->flags,
					sfe->nameval,
					(int)sfe->namelen,
					(int)sfe->valuelen,
					&sfe->nameval[sfe->namelen]);

			/*
			 * Either search callback finished early or
			 * didn't fit it all in the buffer after all.
			 */
			if (context->seen_enough)
				break;

			if (error)
				return error;
			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		}
		trace_xfs_attr_list_sf_all(context);
		return(0);
	}

	/*
	 * Do no more for a search callback.  (Unreachable in practice:
	 * bufsize == 0 was already handled by the branch above.)
	 */
	if (context->bufsize == 0)
		return 0;

	/*
	 * It didn't all fit, so we have to sort everything on hashval.
	 */
	sbsize = sf->hdr.count * sizeof(*sbuf);
	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);

	/*
	 * Scan the attribute list for the rest of the entries, storing
	 * the relevant info from only those that match into a buffer.
	 */
	nsbuf = 0;
	for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
		if (unlikely(
		    ((char *)sfe < (char *)sf) ||
		    ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
			XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
					     XFS_ERRLEVEL_LOW,
					     context->dp->i_mount, sfe);
			kmem_free(sbuf);
			return XFS_ERROR(EFSCORRUPTED);
		}

		sbp->entno = i;
		sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
		sbp->name = sfe->nameval;
		sbp->namelen = sfe->namelen;
		/* These are bytes, and both on-disk, don't endian-flip */
		sbp->valuelen = sfe->valuelen;
		sbp->flags = sfe->flags;
		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		sbp++;
		nsbuf++;
	}

	/*
	 * Sort the entries on hash then entno.
	 */
	xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);

	/*
	 * Re-find our place IN THE SORTED LIST.
	 */
	count = 0;
	cursor->initted = 1;
	cursor->blkno = 0;
	for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
		if (sbp->hash == cursor->hashval) {
			if (cursor->offset == count) {
				break;
			}
			count++;
		} else if (sbp->hash > cursor->hashval) {
			break;
		}
	}
	if (i == nsbuf) {
		kmem_free(sbuf);
		return(0);
	}

	/*
	 * Loop putting entries into the user buffer.
	 */
	for ( ; i < nsbuf; i++, sbp++) {
		if (cursor->hashval != sbp->hash) {
			cursor->hashval = sbp->hash;
			cursor->offset = 0;
		}
		error = context->put_listent(context,
					sbp->flags,
					sbp->name,
					sbp->namelen,
					sbp->valuelen,
					&sbp->name[sbp->namelen]);
		if (error) {
			/* don't leak the sort buffer on callback error */
			kmem_free(sbuf);
			return error;
		}
		if (context->seen_enough)
			break;
		cursor->offset++;
	}

	kmem_free(sbuf);
	return(0);
}
/*
 * Check a leaf attribute block to see if all the entries would fit into
 * a shortform attribute list.
 *
 * bp - buffer holding the leaf block
 * dp - inode that owns the attribute fork
 *
 * Returns 0 if the entries cannot be shrunk to shortform, -1 if the
 * attribute fork itself can be removed (ATTR2 mount with nothing left
 * to copy), otherwise the result of xfs_attr_shortform_bytesfit()
 * (presumably the new fork offset if @bytes fit — confirm in helper).
 */
int
xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
{
	xfs_attr_leafblock_t *leaf;
	xfs_attr_leaf_entry_t *entry;
	xfs_attr_leaf_name_local_t *name_loc;
	int bytes, i;

	leaf = bp->data;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);

	/* Total up the shortform size every surviving entry would need. */
	entry = &leaf->entries[0];
	bytes = sizeof(struct xfs_attr_sf_hdr);
	for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;		/* don't copy partial entries */
		if (!(entry->flags & XFS_ATTR_LOCAL))
			return(0);	/* remote values never fit shortform */
		name_loc = xfs_attr_leaf_name_local(leaf, i);
		if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX)
			return(0);
		if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
			return(0);
		/* -1: sf entry struct already includes one nameval byte */
		bytes += sizeof(struct xfs_attr_sf_entry)-1
				+ name_loc->namelen
				+ be16_to_cpu(name_loc->valuelen);
	}
	if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
	    (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
	    (bytes == sizeof(struct xfs_attr_sf_hdr)))
		return(-1);	/* header only: fork can go away entirely */
	return(xfs_attr_shortform_bytesfit(dp, bytes));
}
/*
 * Convert a leaf attribute list to shortform attribute list
 *
 * bp	   - buffer holding the leaf block to convert
 * args	   - operation context; args->dp is the inode
 * forkoff - new attribute fork offset, or -1 to remove the attribute
 *	     fork entirely (ATTR2 mounts only)
 *
 * Works on a private copy of the leaf so the original block can be
 * freed before the entries are re-added in shortform.  Returns 0 on
 * success or a positive XFS error code from shrinking the inode.
 */
int
xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
{
	xfs_attr_leafblock_t *leaf;
	xfs_attr_leaf_entry_t *entry;
	xfs_attr_leaf_name_local_t *name_loc;
	xfs_da_args_t nargs;
	xfs_inode_t *dp;
	char *tmpbuffer;
	int error, i;

	dp = args->dp;
	/* Snapshot the leaf; the live block is zeroed and freed below. */
	tmpbuffer = kmem_alloc(XFS_LBSIZE(dp->i_mount), KM_SLEEP);
	ASSERT(tmpbuffer != NULL);

	ASSERT(bp != NULL);
	memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount));
	leaf = (xfs_attr_leafblock_t *)tmpbuffer;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	memset(bp->data, 0, XFS_LBSIZE(dp->i_mount));

	/*
	 * Clean out the prior contents of the attribute list.
	 */
	error = xfs_da_shrink_inode(args, 0, bp);
	if (error)
		goto out;

	if (forkoff == -1) {
		/* Nothing left to copy back: drop the attr fork itself. */
		ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
		ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
		xfs_attr_fork_reset(dp, args->trans);
		goto out;
	}

	xfs_attr_shortform_create(args);

	/*
	 * Copy the attributes
	 */
	memset((char *)&nargs, 0, sizeof(nargs));
	nargs.dp = dp;
	nargs.firstblock = args->firstblock;
	nargs.flist = args->flist;
	nargs.total = args->total;
	nargs.whichfork = XFS_ATTR_FORK;
	nargs.trans = args->trans;
	nargs.op_flags = XFS_DA_OP_OKNOENT;
	entry = &leaf->entries[0];
	for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;	/* don't copy partial entries */
		if (!entry->nameidx)
			continue;	/* skip empty slots */
		ASSERT(entry->flags & XFS_ATTR_LOCAL);
		name_loc = xfs_attr_leaf_name_local(leaf, i);
		nargs.name = name_loc->nameval;
		nargs.namelen = name_loc->namelen;
		nargs.value = &name_loc->nameval[nargs.namelen];
		nargs.valuelen = be16_to_cpu(name_loc->valuelen);
		nargs.hashval = be32_to_cpu(entry->hashval);
		nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(entry->flags);
		xfs_attr_shortform_add(&nargs, forkoff);
	}
	error = 0;

out:
	kmem_free(tmpbuffer);
	return(error);
}
/*
 * Convert from using a single leaf to a root node and a leaf.
 *
 * The existing leaf at attr-fork block 0 is copied into a newly
 * allocated block, and block 0 is rewritten as a one-entry B-tree
 * root pointing at the copy.
 *
 * Returns 0 on success or a positive XFS error code.
 */
int
xfs_attr_leaf_to_node(xfs_da_args_t *args)
{
	xfs_attr_leafblock_t *leaf;
	xfs_da_intnode_t *node;
	xfs_inode_t *dp;
	xfs_dabuf_t *bp1, *bp2;
	xfs_dablk_t blkno;
	int error;

	dp = args->dp;
	bp1 = bp2 = NULL;
	/* Allocate the block that will receive the relocated leaf. */
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		goto out;
	error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp1,
					     XFS_ATTR_FORK);
	if (error)
		goto out;
	ASSERT(bp1 != NULL);
	bp2 = NULL;
	error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp2,
					    XFS_ATTR_FORK);
	if (error)
		goto out;
	ASSERT(bp2 != NULL);
	memcpy(bp2->data, bp1->data, XFS_LBSIZE(dp->i_mount));
	xfs_da_buf_done(bp1);
	bp1 = NULL;
	xfs_da_log_buf(args->trans, bp2, 0, XFS_LBSIZE(dp->i_mount) - 1);

	/*
	 * Set up the new root node.
	 */
	error = xfs_da_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK);
	if (error)
		goto out;
	node = bp1->data;
	leaf = bp2->data;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	/* both on-disk, don't endian-flip twice */
	node->btree[0].hashval =
		leaf->entries[be16_to_cpu(leaf->hdr.count)-1].hashval;
	node->btree[0].before = cpu_to_be32(blkno);
	node->hdr.count = cpu_to_be16(1);
	xfs_da_log_buf(args->trans, bp1, 0, XFS_LBSIZE(dp->i_mount) - 1);
	error = 0;
out:
	if (bp1)
		xfs_da_buf_done(bp1);
	if (bp2)
		xfs_da_buf_done(bp2);
	return(error);
}
/*========================================================================
* Routines used for growing the Btree.
*========================================================================*/
/*
 * Create the initial contents of a leaf attribute list
 * or a leaf in a node attribute list.
 *
 * args  - args->dp/args->trans supply the inode and transaction
 * blkno - attribute-fork block number to initialize
 * bpp   - on success, set to the buffer holding the new (logged) leaf
 *
 * Returns 0 on success or a positive XFS error code.
 */
STATIC int
xfs_attr_leaf_create(xfs_da_args_t *args, xfs_dablk_t blkno, xfs_dabuf_t **bpp)
{
	xfs_attr_leafblock_t *leaf;
	xfs_attr_leaf_hdr_t *hdr;
	xfs_inode_t *dp;
	xfs_dabuf_t *bp;
	int error;

	dp = args->dp;
	ASSERT(dp != NULL);
	error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp,
					    XFS_ATTR_FORK);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	leaf = bp->data;
	memset((char *)leaf, 0, XFS_LBSIZE(dp->i_mount));
	hdr = &leaf->hdr;
	hdr->info.magic = cpu_to_be16(XFS_ATTR_LEAF_MAGIC);
	/* name/value pairs grow downward from the end of the block */
	hdr->firstused = cpu_to_be16(XFS_LBSIZE(dp->i_mount));
	if (!hdr->firstused) {
		/* 16-bit field wrapped to 0 (e.g. 64k block); back off */
		hdr->firstused = cpu_to_be16(
			XFS_LBSIZE(dp->i_mount) - XFS_ATTR_LEAF_NAME_ALIGN);
	}

	/* one free region: everything between the header and firstused */
	hdr->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t));
	hdr->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr->firstused) -
					   sizeof(xfs_attr_leaf_hdr_t));

	xfs_da_log_buf(args->trans, bp, 0, XFS_LBSIZE(dp->i_mount) - 1);

	*bpp = bp;
	return(0);
}
/*
 * Split the leaf node, rebalance, then add the new entry.
 *
 * state  - da btree cursor state; state->args carries the new attr
 * oldblk - the full leaf being split (XFS_ATTR_LEAF_MAGIC)
 * newblk - filled in with the newly allocated sibling leaf
 *
 * Returns 0 on success or a positive XFS error code; on success the
 * new entry has been inserted into whichever block rebalance chose
 * (state->inleaf) and both blocks' hashvals are refreshed.
 */
int
xfs_attr_leaf_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
				   xfs_da_state_blk_t *newblk)
{
	xfs_dablk_t blkno;
	int error;

	/*
	 * Allocate space for a new leaf node.
	 */
	ASSERT(oldblk->magic == XFS_ATTR_LEAF_MAGIC);
	error = xfs_da_grow_inode(state->args, &blkno);
	if (error)
		return(error);
	error = xfs_attr_leaf_create(state->args, blkno, &newblk->bp);
	if (error)
		return(error);
	newblk->blkno = blkno;
	newblk->magic = XFS_ATTR_LEAF_MAGIC;

	/*
	 * Rebalance the entries across the two leaves.
	 * NOTE: rebalance() currently depends on the 2nd block being empty.
	 */
	xfs_attr_leaf_rebalance(state, oldblk, newblk);
	error = xfs_da_blk_link(state, oldblk, newblk);
	if (error)
		return(error);

	/*
	 * Save info on "old" attribute for "atomic rename" ops, leaf_add()
	 * modifies the index/blkno/rmtblk/rmtblkcnt fields to show the
	 * "new" attrs info.  Will need the "old" info to remove it later.
	 *
	 * Insert the "new" entry in the correct block.
	 */
	if (state->inleaf)
		error = xfs_attr_leaf_add(oldblk->bp, state->args);
	else
		error = xfs_attr_leaf_add(newblk->bp, state->args);

	/*
	 * Update last hashval in each block since we added the name.
	 */
	oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL);
	newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL);
	return(error);
}
/*
 * Add a name to the leaf attribute list structure.
 *
 * bp   - buffer holding the target leaf block
 * args - describes the attribute; args->index is the insertion slot
 *
 * First tries a first-fit search of the freemap regions; if none is
 * big enough, compacts the block and retries with the single merged
 * free region.  Returns 0 on success or ENOSPC when the entry cannot
 * fit even after compaction.
 */
int
xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args)
{
	xfs_attr_leafblock_t *leaf;
	xfs_attr_leaf_hdr_t *hdr;
	xfs_attr_leaf_map_t *map;
	int tablesize, entsize, sum, tmp, i;

	leaf = bp->data;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	ASSERT((args->index >= 0)
		&& (args->index <= be16_to_cpu(leaf->hdr.count)));
	hdr = &leaf->hdr;
	entsize = xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
			   args->trans->t_mountp->m_sb.sb_blocksize, NULL);

	/*
	 * Search through freemap for first-fit on new name length.
	 * (may need to figure in size of entry struct too)
	 */
	tablesize = (be16_to_cpu(hdr->count) + 1)
					* sizeof(xfs_attr_leaf_entry_t)
					+ sizeof(xfs_attr_leaf_hdr_t);
	map = &hdr->freemap[XFS_ATTR_LEAF_MAPSIZE-1];
	for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE-1; i >= 0; map--, i--) {
		if (tablesize > be16_to_cpu(hdr->firstused)) {
			/* entry table would overlap names: count as used */
			sum += be16_to_cpu(map->size);
			continue;
		}
		if (!map->size)
			continue;	/* no space in this map */
		tmp = entsize;
		/* regions below firstused also absorb the new table slot */
		if (be16_to_cpu(map->base) < be16_to_cpu(hdr->firstused))
			tmp += sizeof(xfs_attr_leaf_entry_t);
		if (be16_to_cpu(map->size) >= tmp) {
			tmp = xfs_attr_leaf_add_work(bp, args, i);
			return(tmp);
		}
		sum += be16_to_cpu(map->size);
	}

	/*
	 * If there are no holes in the address space of the block,
	 * and we don't have enough freespace, then compaction will do us
	 * no good and we should just give up.
	 */
	if (!hdr->holes && (sum < entsize))
		return(XFS_ERROR(ENOSPC));

	/*
	 * Compact the entries to coalesce free space.
	 * This may change the hdr->count via dropping INCOMPLETE entries.
	 */
	xfs_attr_leaf_compact(args->trans, bp);

	/*
	 * After compaction, the block is guaranteed to have only one
	 * free region, in freemap[0].  If it is not big enough, give up.
	 */
	if (be16_to_cpu(hdr->freemap[0].size)
		< (entsize + sizeof(xfs_attr_leaf_entry_t)))
		return(XFS_ERROR(ENOSPC));

	return(xfs_attr_leaf_add_work(bp, args, 0));
}
/*
 * Add a name to a leaf attribute list structure.
 *
 * bp       - buffer holding the leaf block
 * args     - describes the attribute; args->index is the insertion slot
 * mapindex - freemap region (verified big enough by the caller) that
 *	      supplies the name/value space
 *
 * Opens a slot in the entry table, carves the name/value space off the
 * high end of the chosen free region, copies the data in (or records a
 * pending remote allocation), then fixes up firstused, the freemaps
 * bordering the entry table, and usedbytes.  Always returns 0.
 */
STATIC int
xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
{
	xfs_attr_leafblock_t *leaf;
	xfs_attr_leaf_hdr_t *hdr;
	xfs_attr_leaf_entry_t *entry;
	xfs_attr_leaf_name_local_t *name_loc;
	xfs_attr_leaf_name_remote_t *name_rmt;
	xfs_attr_leaf_map_t *map;
	xfs_mount_t *mp;
	int tmp, i;

	leaf = bp->data;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	hdr = &leaf->hdr;
	ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE));
	ASSERT((args->index >= 0) && (args->index <= be16_to_cpu(hdr->count)));

	/*
	 * Force open some space in the entry array and fill it in.
	 */
	entry = &leaf->entries[args->index];
	if (args->index < be16_to_cpu(hdr->count)) {
		/* shift later entries up by one slot */
		tmp  = be16_to_cpu(hdr->count) - args->index;
		tmp *= sizeof(xfs_attr_leaf_entry_t);
		memmove((char *)(entry+1), (char *)entry, tmp);
		xfs_da_log_buf(args->trans, bp,
		    XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
	}
	be16_add_cpu(&hdr->count, 1);

	/*
	 * Allocate space for the new string (at the end of the run).
	 */
	map = &hdr->freemap[mapindex];
	mp = args->trans->t_mountp;
	ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp));
	ASSERT((be16_to_cpu(map->base) & 0x3) == 0);
	ASSERT(be16_to_cpu(map->size) >=
		xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
					 mp->m_sb.sb_blocksize, NULL));
	ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp));
	ASSERT((be16_to_cpu(map->size) & 0x3) == 0);
	be16_add_cpu(&map->size,
		-xfs_attr_leaf_newentsize(args->namelen, args->valuelen,
					  mp->m_sb.sb_blocksize, &tmp));
	entry->nameidx = cpu_to_be16(be16_to_cpu(map->base) +
				     be16_to_cpu(map->size));
	entry->hashval = cpu_to_be32(args->hashval);
	/* tmp was set by newentsize(): nonzero means value fits locally */
	entry->flags = tmp ? XFS_ATTR_LOCAL : 0;
	entry->flags |= XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
	if (args->op_flags & XFS_DA_OP_RENAME) {
		entry->flags |= XFS_ATTR_INCOMPLETE;
		/* our insertion shifted the "old" entry up by one */
		if ((args->blkno2 == args->blkno) &&
		    (args->index2 <= args->index)) {
			args->index2++;
		}
	}
	xfs_da_log_buf(args->trans, bp,
			  XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
	ASSERT((args->index == 0) ||
	       (be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval)));
	ASSERT((args->index == be16_to_cpu(hdr->count)-1) ||
	       (be32_to_cpu(entry->hashval) <= be32_to_cpu((entry+1)->hashval)));

	/*
	 * Copy the attribute name and value into the new space.
	 *
	 * For "remote" attribute values, simply note that we need to
	 * allocate space for the "remote" value.  We can't actually
	 * allocate the extents in this transaction, and we can't decide
	 * which blocks they should be as we might allocate more blocks
	 * as part of this transaction (a split operation for example).
	 */
	if (entry->flags & XFS_ATTR_LOCAL) {
		name_loc = xfs_attr_leaf_name_local(leaf, args->index);
		name_loc->namelen = args->namelen;
		name_loc->valuelen = cpu_to_be16(args->valuelen);
		memcpy((char *)name_loc->nameval, args->name, args->namelen);
		memcpy((char *)&name_loc->nameval[args->namelen], args->value,
				   be16_to_cpu(name_loc->valuelen));
	} else {
		name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
		name_rmt->namelen = args->namelen;
		memcpy((char *)name_rmt->name, args->name, args->namelen);
		entry->flags |= XFS_ATTR_INCOMPLETE;
		/* just in case */
		name_rmt->valuelen = 0;
		name_rmt->valueblk = 0;
		args->rmtblkno = 1;
		args->rmtblkcnt = XFS_B_TO_FSB(mp, args->valuelen);
	}
	xfs_da_log_buf(args->trans, bp,
	     XFS_DA_LOGRANGE(leaf, xfs_attr_leaf_name(leaf, args->index),
				   xfs_attr_leaf_entsize(leaf, args->index)));

	/*
	 * Update the control info for this leaf node
	 */
	if (be16_to_cpu(entry->nameidx) < be16_to_cpu(hdr->firstused)) {
		/* both on-disk, don't endian-flip twice */
		hdr->firstused = entry->nameidx;
	}
	ASSERT(be16_to_cpu(hdr->firstused) >=
	       ((be16_to_cpu(hdr->count) * sizeof(*entry)) + sizeof(*hdr)));
	/* any freemap that abutted the old end of the entry table shrinks */
	tmp = (be16_to_cpu(hdr->count)-1) * sizeof(xfs_attr_leaf_entry_t)
					+ sizeof(xfs_attr_leaf_hdr_t);
	map = &hdr->freemap[0];
	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) {
		if (be16_to_cpu(map->base) == tmp) {
			be16_add_cpu(&map->base, sizeof(xfs_attr_leaf_entry_t));
			be16_add_cpu(&map->size,
				 -((int)sizeof(xfs_attr_leaf_entry_t)));
		}
	}
	be16_add_cpu(&hdr->usedbytes, xfs_attr_leaf_entsize(leaf, args->index));
	xfs_da_log_buf(args->trans, bp,
		XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));
	return(0);
}
/*
 * Garbage collect a leaf attribute list block by copying it to a new buffer.
 *
 * trans - transaction to log the rewritten block against
 * bp    - buffer holding the leaf to compact, rewritten in place
 *
 * Copies the block aside, reinitializes it with a single free region,
 * then moves every entry back in packed order.  Afterwards all free
 * space is coalesced into freemap[0] and hdr->holes is clear.
 */
STATIC void
xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp)
{
	xfs_attr_leafblock_t *leaf_s, *leaf_d;
	xfs_attr_leaf_hdr_t *hdr_s, *hdr_d;
	xfs_mount_t *mp;
	char *tmpbuffer;

	mp = trans->t_mountp;
	tmpbuffer = kmem_alloc(XFS_LBSIZE(mp), KM_SLEEP);
	ASSERT(tmpbuffer != NULL);
	memcpy(tmpbuffer, bp->data, XFS_LBSIZE(mp));
	memset(bp->data, 0, XFS_LBSIZE(mp));

	/*
	 * Copy basic information
	 */
	leaf_s = (xfs_attr_leafblock_t *)tmpbuffer;
	leaf_d = bp->data;
	hdr_s = &leaf_s->hdr;
	hdr_d = &leaf_d->hdr;
	hdr_d->info = hdr_s->info;	/* struct copy */
	hdr_d->firstused = cpu_to_be16(XFS_LBSIZE(mp));
	/* handle truncation gracefully */
	if (!hdr_d->firstused) {
		/* 16-bit firstused wrapped to 0; back off by the alignment */
		hdr_d->firstused = cpu_to_be16(
				XFS_LBSIZE(mp) - XFS_ATTR_LEAF_NAME_ALIGN);
	}
	hdr_d->usedbytes = 0;
	hdr_d->count = 0;
	hdr_d->holes = 0;
	hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t));
	hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused) -
					     sizeof(xfs_attr_leaf_hdr_t));

	/*
	 * Copy all entry's in the same (sorted) order,
	 * but allocate name/value pairs packed and in sequence.
	 */
	xfs_attr_leaf_moveents(leaf_s, 0, leaf_d, 0,
				be16_to_cpu(hdr_s->count), mp);
	xfs_da_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1);

	kmem_free(tmpbuffer);
}
/*
 * Redistribute the attribute list entries between two leaf nodes,
 * taking into account the size of the new entry.
 *
 * NOTE: if new block is empty, then it will get the upper half of the
 * old block.  At present, all (one) callers pass in an empty second block.
 *
 * This code adjusts the args->index/blkno and args->index2/blkno2 fields
 * to match what it is doing in splitting the attribute leaf block.  Those
 * values are used in "atomic rename" operations on attributes.  Note that
 * the "new" and "old" values can end up in different blocks.
 *
 * state      - btree cursor; state->args describes the entry being added
 * blk1, blk2 - the two leaf blocks (both XFS_ATTR_LEAF_MAGIC); on
 *		return their ->hashval fields hold each block's last hash
 */
STATIC void
xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				       xfs_da_state_blk_t *blk2)
{
	xfs_da_args_t *args;
	xfs_da_state_blk_t *tmp_blk;
	xfs_attr_leafblock_t *leaf1, *leaf2;
	xfs_attr_leaf_hdr_t *hdr1, *hdr2;
	int count, totallen, max, space, swap;

	/*
	 * Set up environment.
	 */
	ASSERT(blk1->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
	leaf1 = blk1->bp->data;
	leaf2 = blk2->bp->data;
	ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	args = state->args;

	/*
	 * Check ordering of blocks, reverse if it makes things simpler.
	 *
	 * NOTE: Given that all (current) callers pass in an empty
	 * second block, this code should never set "swap".
	 */
	swap = 0;
	if (xfs_attr_leaf_order(blk1->bp, blk2->bp)) {
		tmp_blk = blk1;
		blk1 = blk2;
		blk2 = tmp_blk;
		leaf1 = blk1->bp->data;
		leaf2 = blk2->bp->data;
		swap = 1;
	}
	hdr1 = &leaf1->hdr;
	hdr2 = &leaf2->hdr;

	/*
	 * Examine entries until we reduce the absolute difference in
	 * byte usage between the two blocks to a minimum.  Then get
	 * the direction to copy and the number of elements to move.
	 *
	 * "inleaf" is true if the new entry should be inserted into blk1.
	 * If "swap" is also true, then reverse the sense of "inleaf".
	 */
	state->inleaf = xfs_attr_leaf_figure_balance(state, blk1, blk2,
							    &count, &totallen);
	if (swap)
		state->inleaf = !state->inleaf;

	/*
	 * Move any entries required from leaf to leaf:
	 */
	if (count < be16_to_cpu(hdr1->count)) {
		/*
		 * Figure the total bytes to be added to the destination leaf.
		 */
		/* number entries being moved */
		count = be16_to_cpu(hdr1->count) - count;
		space  = be16_to_cpu(hdr1->usedbytes) - totallen;
		space += count * sizeof(xfs_attr_leaf_entry_t);

		/*
		 * leaf2 is the destination, compact it if it looks tight.
		 */
		max  = be16_to_cpu(hdr2->firstused)
						- sizeof(xfs_attr_leaf_hdr_t);
		max -= be16_to_cpu(hdr2->count) * sizeof(xfs_attr_leaf_entry_t);
		if (space > max) {
			xfs_attr_leaf_compact(args->trans, blk2->bp);
		}

		/*
		 * Move high entries from leaf1 to low end of leaf2.
		 */
		xfs_attr_leaf_moveents(leaf1, be16_to_cpu(hdr1->count) - count,
				leaf2, 0, count, state->mp);

		xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
		xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
	} else if (count > be16_to_cpu(hdr1->count)) {
		/*
		 * I assert that since all callers pass in an empty
		 * second buffer, this code should never execute.
		 */

		/*
		 * Figure the total bytes to be added to the destination leaf.
		 */
		/* number entries being moved */
		count -= be16_to_cpu(hdr1->count);
		space  = totallen - be16_to_cpu(hdr1->usedbytes);
		space += count * sizeof(xfs_attr_leaf_entry_t);

		/*
		 * leaf1 is the destination, compact it if it looks tight.
		 */
		max  = be16_to_cpu(hdr1->firstused)
						- sizeof(xfs_attr_leaf_hdr_t);
		max -= be16_to_cpu(hdr1->count) * sizeof(xfs_attr_leaf_entry_t);
		if (space > max) {
			xfs_attr_leaf_compact(args->trans, blk1->bp);
		}

		/*
		 * Move low entries from leaf2 to high end of leaf1.
		 */
		xfs_attr_leaf_moveents(leaf2, 0, leaf1,
				be16_to_cpu(hdr1->count), count, state->mp);

		xfs_da_log_buf(args->trans, blk1->bp, 0, state->blocksize-1);
		xfs_da_log_buf(args->trans, blk2->bp, 0, state->blocksize-1);
	}

	/*
	 * Copy out last hashval in each block for B-tree code.
	 */
	blk1->hashval = be32_to_cpu(
		leaf1->entries[be16_to_cpu(leaf1->hdr.count)-1].hashval);
	blk2->hashval = be32_to_cpu(
		leaf2->entries[be16_to_cpu(leaf2->hdr.count)-1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 * NOTE: this code depends on the (current) situation that the
	 * second block was originally empty.
	 *
	 * If the insertion point moved to the 2nd block, we must adjust
	 * the index.  We must also track the entry just following the
	 * new entry for use in an "atomic rename" operation, that entry
	 * is always the "old" entry and the "new" entry is what we are
	 * inserting.  The index/blkno fields refer to the "old" entry,
	 * while the index2/blkno2 fields refer to the "new" entry.
	 */
	if (blk1->index > be16_to_cpu(leaf1->hdr.count)) {
		ASSERT(state->inleaf == 0);
		blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count);
		args->index = args->index2 = blk2->index;
		args->blkno = args->blkno2 = blk2->blkno;
	} else if (blk1->index == be16_to_cpu(leaf1->hdr.count)) {
		if (state->inleaf) {
			args->index = blk1->index;
			args->blkno = blk1->blkno;
			args->index2 = 0;
			args->blkno2 = blk2->blkno;
		} else {
			blk2->index = blk1->index
				    - be16_to_cpu(leaf1->hdr.count);
			args->index = args->index2 = blk2->index;
			args->blkno = args->blkno2 = blk2->blkno;
		}
	} else {
		ASSERT(state->inleaf == 1);
		args->index = args->index2 = blk1->index;
		args->blkno = args->blkno2 = blk1->blkno;
	}
}
/*
 * Examine entries until we reduce the absolute difference in
 * byte usage between the two blocks to a minimum.
 * GROT: Is this really necessary?  With other than a 512 byte blocksize,
 * GROT: there will always be enough room in either block for a new entry.
 * GROT: Do a double-split for this case?
 *
 * state        - state->args describes the entry about to be inserted
 * blk1, blk2   - the two leaves; entries are counted across both as one
 *		  logical sequence, including the pending new entry
 * countarg     - out: number of entries that belong in the lower block
 * usedbytesarg - out: name/value bytes those entries occupy
 *
 * Returns nonzero if the new entry landed in the lower block ("foundit").
 */
STATIC int
xfs_attr_leaf_figure_balance(xfs_da_state_t *state,
				    xfs_da_state_blk_t *blk1,
				    xfs_da_state_blk_t *blk2,
				    int *countarg, int *usedbytesarg)
{
	xfs_attr_leafblock_t *leaf1, *leaf2;
	xfs_attr_leaf_hdr_t *hdr1, *hdr2;
	xfs_attr_leaf_entry_t *entry;
	int count, max, index, totallen, half;
	int lastdelta, foundit, tmp;

	/*
	 * Set up environment.
	 */
	leaf1 = blk1->bp->data;
	leaf2 = blk2->bp->data;
	hdr1 = &leaf1->hdr;
	hdr2 = &leaf2->hdr;
	foundit = 0;
	totallen = 0;

	/*
	 * Examine entries until we reduce the absolute difference in
	 * byte usage between the two blocks to a minimum.
	 */
	max = be16_to_cpu(hdr1->count) + be16_to_cpu(hdr2->count);
	/* "half" = half the combined size including the pending new entry */
	half  = (max+1) * sizeof(*entry);
	half += be16_to_cpu(hdr1->usedbytes) +
		be16_to_cpu(hdr2->usedbytes) +
		xfs_attr_leaf_newentsize(
				state->args->namelen,
				state->args->valuelen,
				state->blocksize, NULL);
	half /= 2;
	lastdelta = state->blocksize;
	entry = &leaf1->entries[0];
	for (count = index = 0; count < max; entry++, index++, count++) {

#define XFS_ATTR_ABS(A)	(((A) < 0) ? -(A) : (A))
		/*
		 * The new entry is in the first block, account for it.
		 */
		if (count == blk1->index) {
			tmp = totallen + sizeof(*entry) +
				xfs_attr_leaf_newentsize(
						state->args->namelen,
						state->args->valuelen,
						state->blocksize, NULL);
			/* stop once adding more only increases the skew */
			if (XFS_ATTR_ABS(half - tmp) > lastdelta)
				break;
			lastdelta = XFS_ATTR_ABS(half - tmp);
			totallen = tmp;
			foundit = 1;
		}

		/*
		 * Wrap around into the second block if necessary.
		 */
		if (count == be16_to_cpu(hdr1->count)) {
			leaf1 = leaf2;
			entry = &leaf1->entries[0];
			index = 0;
		}

		/*
		 * Figure out if next leaf entry would be too much.
		 */
		tmp = totallen + sizeof(*entry) + xfs_attr_leaf_entsize(leaf1,
									index);
		if (XFS_ATTR_ABS(half - tmp) > lastdelta)
			break;
		lastdelta = XFS_ATTR_ABS(half - tmp);
		totallen = tmp;
#undef XFS_ATTR_ABS
	}

	/*
	 * Calculate the number of usedbytes that will end up in lower block.
	 * If new entry not in lower block, fix up the count.
	 */
	totallen -= count * sizeof(*entry);
	if (foundit) {
		totallen -= sizeof(*entry) +
				xfs_attr_leaf_newentsize(
						state->args->namelen,
						state->args->valuelen,
						state->blocksize, NULL);
	}

	*countarg = count;
	*usedbytesarg = totallen;
	return(foundit);
}
/*========================================================================
* Routines used for shrinking the Btree.
*========================================================================*/
/*
 * Check a leaf block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 *
 * GROT: allow for INCOMPLETE entries in calculation.
 *
 * state  - path state; the leaf examined is the last block on the path
 * action - out: 0 = leave alone, 1 = join with sibling, 2 = just delete
 *
 * Returns 0 or a positive XFS error code from reading/shifting blocks.
 */
int
xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
{
	xfs_attr_leafblock_t *leaf;
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	int count, bytes, forward, error, retval, i;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->data;
	ASSERT(be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC);
	leaf = (xfs_attr_leafblock_t *)info;
	count = be16_to_cpu(leaf->hdr.count);
	bytes = sizeof(xfs_attr_leaf_hdr_t) +
		count * sizeof(xfs_attr_leaf_entry_t) +
		be16_to_cpu(leaf->hdr.usedbytes);
	if (bytes > (state->blocksize >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return(0);
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink an attribute list over time.
	 */
	/* start with smaller blk num */
	forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
	for (i = 0; i < 2; forward = !forward, i++) {
		if (forward)
			blkno = be32_to_cpu(info->forw);
		else
			blkno = be32_to_cpu(info->back);
		if (blkno == 0)
			continue;	/* no sibling on this side */
		error = xfs_da_read_buf(state->args->trans, state->args->dp,
					blkno, -1, &bp, XFS_ATTR_FORK);
		if (error)
			return(error);
		ASSERT(bp != NULL);

		/* start from 75% of a block, subtract both blocks' usage */
		leaf = (xfs_attr_leafblock_t *)info;
		count = be16_to_cpu(leaf->hdr.count);
		bytes = state->blocksize - (state->blocksize>>2);
		bytes -= be16_to_cpu(leaf->hdr.usedbytes);
		leaf = bp->data;
		ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
		count += be16_to_cpu(leaf->hdr.count);
		bytes -= be16_to_cpu(leaf->hdr.usedbytes);
		bytes -= count * sizeof(xfs_attr_leaf_entry_t);
		bytes -= sizeof(xfs_attr_leaf_hdr_t);
		xfs_da_brelse(state->args->trans, bp);
		if (bytes >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;	/* neither sibling has enough room */
		return(0);
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
	} else {
		error = xfs_da_path_shift(state, &state->path, forward,
						 0, &retval);
	}
	if (error)
		return(error);
	if (retval) {
		*action = 0;
	} else {
		*action = 1;
	}
	return(0);
}
/*
 * Remove a name from the leaf attribute list structure.
 *
 * Unlinks the entry at args->index from the leaf in bp: updates the
 * freemap to absorb the freed name/value space, zeroes the removed
 * name area, compresses the entry table, and recomputes firstused if
 * the removed entry owned the lowest name offset.
 *
 * Return 1 if leaf is less than 37% full, 0 if >= 37% full.
 * If two leaves are 37% full, when combined they will leave 25% free.
 */
int
xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
{
	xfs_attr_leafblock_t *leaf;
	xfs_attr_leaf_hdr_t *hdr;
	xfs_attr_leaf_map_t *map;
	xfs_attr_leaf_entry_t *entry;
	int before, after, smallest, entsize;
	int tablesize, tmp, i;
	xfs_mount_t *mp;

	leaf = bp->data;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	hdr = &leaf->hdr;
	mp = args->trans->t_mountp;
	ASSERT((be16_to_cpu(hdr->count) > 0)
		&& (be16_to_cpu(hdr->count) < (XFS_LBSIZE(mp)/8)));
	ASSERT((args->index >= 0)
		&& (args->index < be16_to_cpu(hdr->count)));
	ASSERT(be16_to_cpu(hdr->firstused) >=
	       ((be16_to_cpu(hdr->count) * sizeof(*entry)) + sizeof(*hdr)));
	entry = &leaf->entries[args->index];
	ASSERT(be16_to_cpu(entry->nameidx) >= be16_to_cpu(hdr->firstused));
	ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp));

	/*
	 * Scan through free region table:
	 *   check for adjacency of free'd entry with an existing one,
	 *   find smallest free region in case we need to replace it,
	 *   adjust any map that borders the entry table,
	 */
	tablesize = be16_to_cpu(hdr->count) * sizeof(xfs_attr_leaf_entry_t)
			+ sizeof(xfs_attr_leaf_hdr_t);
	map = &hdr->freemap[0];
	tmp = be16_to_cpu(map->size);
	before = after = -1;
	smallest = XFS_ATTR_LEAF_MAPSIZE - 1;
	entsize = xfs_attr_leaf_entsize(leaf, args->index);
	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; map++, i++) {
		ASSERT(be16_to_cpu(map->base) < XFS_LBSIZE(mp));
		ASSERT(be16_to_cpu(map->size) < XFS_LBSIZE(mp));
		/*
		 * A map that starts right at the end of the entry table
		 * grows downward by one entry slot, since the table is
		 * about to shrink by one entry.
		 */
		if (be16_to_cpu(map->base) == tablesize) {
			be16_add_cpu(&map->base,
					-((int)sizeof(xfs_attr_leaf_entry_t)));
			be16_add_cpu(&map->size, sizeof(xfs_attr_leaf_entry_t));
		}

		if ((be16_to_cpu(map->base) + be16_to_cpu(map->size))
				== be16_to_cpu(entry->nameidx)) {
			before = i;
		} else if (be16_to_cpu(map->base)
			== (be16_to_cpu(entry->nameidx) + entsize)) {
			after = i;
		} else if (be16_to_cpu(map->size) < tmp) {
			tmp = be16_to_cpu(map->size);
			smallest = i;
		}
	}

	/*
	 * Coalesce adjacent freemap regions,
	 * or replace the smallest region.
	 */
	if ((before >= 0) || (after >= 0)) {
		if ((before >= 0) && (after >= 0)) {
			/* freed space bridges two regions: merge all three */
			map = &hdr->freemap[before];
			be16_add_cpu(&map->size, entsize);
			be16_add_cpu(&map->size,
				     be16_to_cpu(hdr->freemap[after].size));
			hdr->freemap[after].base = 0;
			hdr->freemap[after].size = 0;
		} else if (before >= 0) {
			map = &hdr->freemap[before];
			be16_add_cpu(&map->size, entsize);
		} else {
			map = &hdr->freemap[after];
			/* both on-disk, don't endian flip twice */
			map->base = entry->nameidx;
			be16_add_cpu(&map->size, entsize);
		}
	} else {
		/*
		 * Replace smallest region (if it is smaller than free'd entry)
		 */
		map = &hdr->freemap[smallest];
		if (be16_to_cpu(map->size) < entsize) {
			map->base = cpu_to_be16(be16_to_cpu(entry->nameidx));
			map->size = cpu_to_be16(entsize);
		}
	}

	/*
	 * Did we remove the first entry?  (reusing "smallest" as a flag)
	 */
	if (be16_to_cpu(entry->nameidx) == be16_to_cpu(hdr->firstused))
		smallest = 1;
	else
		smallest = 0;

	/*
	 * Compress the remaining entries and zero out the removed stuff.
	 */
	memset(xfs_attr_leaf_name(leaf, args->index), 0, entsize);
	be16_add_cpu(&hdr->usedbytes, -entsize);
	xfs_da_log_buf(args->trans, bp,
	     XFS_DA_LOGRANGE(leaf, xfs_attr_leaf_name(leaf, args->index),
				   entsize));

	/* slide the entries above args->index down by one slot */
	tmp = (be16_to_cpu(hdr->count) - args->index)
					* sizeof(xfs_attr_leaf_entry_t);
	memmove((char *)entry, (char *)(entry+1), tmp);
	be16_add_cpu(&hdr->count, -1);
	xfs_da_log_buf(args->trans, bp,
	    XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
	entry = &leaf->entries[be16_to_cpu(hdr->count)];
	memset((char *)entry, 0, sizeof(xfs_attr_leaf_entry_t));

	/*
	 * If we removed the first entry, re-find the first used byte
	 * in the name area.  Note that if the entry was the "firstused",
	 * then we don't have a "hole" in our block resulting from
	 * removing the name.
	 */
	if (smallest) {
		tmp = XFS_LBSIZE(mp);
		entry = &leaf->entries[0];
		for (i = be16_to_cpu(hdr->count)-1; i >= 0; entry++, i--) {
			ASSERT(be16_to_cpu(entry->nameidx) >=
			       be16_to_cpu(hdr->firstused));
			ASSERT(be16_to_cpu(entry->nameidx) < XFS_LBSIZE(mp));

			if (be16_to_cpu(entry->nameidx) < tmp)
				tmp = be16_to_cpu(entry->nameidx);
		}
		hdr->firstused = cpu_to_be16(tmp);
		/* guard against a 64KB blocksize wrapping the be16 to 0 */
		if (!hdr->firstused) {
			hdr->firstused = cpu_to_be16(
					tmp - XFS_ATTR_LEAF_NAME_ALIGN);
		}
	} else {
		hdr->holes = 1;		/* mark as needing compaction */
	}
	xfs_da_log_buf(args->trans, bp,
			  XFS_DA_LOGRANGE(leaf, hdr, sizeof(*hdr)));

	/*
	 * Check if leaf is less than 50% full, caller may want to
	 * "join" the leaf with a sibling if so.
	 */
	tmp = sizeof(xfs_attr_leaf_hdr_t);
	tmp += be16_to_cpu(leaf->hdr.count) * sizeof(xfs_attr_leaf_entry_t);
	tmp += be16_to_cpu(leaf->hdr.usedbytes);
	return(tmp < mp->m_attr_magicpct); /* leaf is < 37% full */
}
/*
 * Move all the attribute list entries from drop_leaf into save_leaf.
 *
 * Called when a B-tree join empties one leaf: the surviving (save)
 * block absorbs the entries of the dying (drop) block.  If the save
 * leaf contains holes the merge goes through a zeroed scratch buffer
 * so the result is compact.
 */
void
xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				       xfs_da_state_blk_t *save_blk)
{
	xfs_attr_leafblock_t *drop_leaf, *save_leaf, *tmp_leaf;
	xfs_attr_leaf_hdr_t *drop_hdr, *save_hdr, *tmp_hdr;
	xfs_mount_t *mp;
	char *tmpbuffer;

	/*
	 * Set up environment.
	 */
	mp = state->mp;
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	drop_leaf = drop_blk->bp->data;
	save_leaf = save_blk->bp->data;
	ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	drop_hdr = &drop_leaf->hdr;
	save_hdr = &save_leaf->hdr;

	/*
	 * Save last hashval from dying block for later Btree fixup.
	 */
	drop_blk->hashval = be32_to_cpu(
		drop_leaf->entries[be16_to_cpu(drop_leaf->hdr.count)-1].hashval);

	/*
	 * Check if we need a temp buffer, or can we do it in place.
	 * Note that we don't check "leaf" for holes because we will
	 * always be dropping it, toosmall() decided that for us already.
	 */
	if (save_hdr->holes == 0) {
		/*
		 * dest leaf has no holes, so we add there.  May need
		 * to make some room in the entry array.
		 */
		if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) {
			/* drop block sorts first: insert at the front */
			xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf, 0,
			     be16_to_cpu(drop_hdr->count), mp);
		} else {
			/* drop block sorts last: append at the end */
			xfs_attr_leaf_moveents(drop_leaf, 0, save_leaf,
				  be16_to_cpu(save_hdr->count),
				  be16_to_cpu(drop_hdr->count), mp);
		}
	} else {
		/*
		 * Destination has holes, so we make a temporary copy
		 * of the leaf and add them both to that.
		 */
		tmpbuffer = kmem_alloc(state->blocksize, KM_SLEEP);
		ASSERT(tmpbuffer != NULL);
		memset(tmpbuffer, 0, state->blocksize);
		tmp_leaf = (xfs_attr_leafblock_t *)tmpbuffer;
		tmp_hdr = &tmp_leaf->hdr;
		tmp_hdr->info = save_hdr->info;	/* struct copy */
		tmp_hdr->count = 0;
		tmp_hdr->firstused = cpu_to_be16(state->blocksize);
		/* guard against a 64KB blocksize wrapping the be16 to 0 */
		if (!tmp_hdr->firstused) {
			tmp_hdr->firstused = cpu_to_be16(
				state->blocksize - XFS_ATTR_LEAF_NAME_ALIGN);
		}
		tmp_hdr->usedbytes = 0;
		/* move both leaves into the scratch leaf in hash order */
		if (xfs_attr_leaf_order(save_blk->bp, drop_blk->bp)) {
			xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf, 0,
				be16_to_cpu(drop_hdr->count), mp);
			xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf,
				  be16_to_cpu(tmp_leaf->hdr.count),
				  be16_to_cpu(save_hdr->count), mp);
		} else {
			xfs_attr_leaf_moveents(save_leaf, 0, tmp_leaf, 0,
				be16_to_cpu(save_hdr->count), mp);
			xfs_attr_leaf_moveents(drop_leaf, 0, tmp_leaf,
				  be16_to_cpu(tmp_leaf->hdr.count),
				  be16_to_cpu(drop_hdr->count), mp);
		}
		memcpy((char *)save_leaf, (char *)tmp_leaf, state->blocksize);
		kmem_free(tmpbuffer);
	}

	xfs_da_log_buf(state->args->trans, save_blk->bp, 0,
					   state->blocksize - 1);

	/*
	 * Copy out last hashval in each block for B-tree code.
	 */
	save_blk->hashval = be32_to_cpu(
		save_leaf->entries[be16_to_cpu(save_leaf->hdr.count)-1].hashval);
}
/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Look up a name in a leaf attribute list structure.
 * This is the internal routine, it uses the caller's buffer.
 *
 * Note that duplicate keys are allowed, but only check within the
 * current leaf node.  The Btree code must check in adjacent leaf nodes.
 *
 * Return in args->index the index into the entry[] array of either
 * the found entry, or where the entry should have been (insert before
 * that entry).
 *
 * Returns EEXIST when the name is found (and for a remote attribute
 * also fills in args->rmtblkno/rmtblkcnt), ENOATTR when it is not.
 *
 * Don't change the args->value unless we find the attribute.
 */
int
xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
{
	xfs_attr_leafblock_t *leaf;
	xfs_attr_leaf_entry_t *entry;
	xfs_attr_leaf_name_local_t *name_loc;
	xfs_attr_leaf_name_remote_t *name_rmt;
	int probe, span;
	xfs_dahash_t hashval;

	leaf = bp->data;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	ASSERT(be16_to_cpu(leaf->hdr.count)
					< (XFS_LBSIZE(args->dp->i_mount)/8));

	/*
	 * Binary search.  (note: small blocks will skip this loop)
	 */
	hashval = args->hashval;
	probe = span = be16_to_cpu(leaf->hdr.count) / 2;
	for (entry = &leaf->entries[probe]; span > 4;
		   entry = &leaf->entries[probe]) {
		span /= 2;
		if (be32_to_cpu(entry->hashval) < hashval)
			probe += span;
		else if (be32_to_cpu(entry->hashval) > hashval)
			probe -= span;
		else
			break;
	}
	ASSERT((probe >= 0) &&
	       (!leaf->hdr.count
	       || (probe < be16_to_cpu(leaf->hdr.count))));
	ASSERT((span <= 4) || (be32_to_cpu(entry->hashval) == hashval));

	/*
	 * Since we may have duplicate hashval's, find the first matching
	 * hashval in the leaf.
	 */
	/* back up over any entries with a >= hashval ... */
	while ((probe > 0) && (be32_to_cpu(entry->hashval) >= hashval)) {
		entry--;
		probe--;
	}
	/* ... then walk forward to the first entry with our hashval */
	while ((probe < be16_to_cpu(leaf->hdr.count)) &&
	       (be32_to_cpu(entry->hashval) < hashval)) {
		entry++;
		probe++;
	}
	if ((probe == be16_to_cpu(leaf->hdr.count)) ||
	    (be32_to_cpu(entry->hashval) != hashval)) {
		args->index = probe;
		return(XFS_ERROR(ENOATTR));
	}

	/*
	 * Duplicate keys may be present, so search all of them for a match.
	 */
	for (  ; (probe < be16_to_cpu(leaf->hdr.count)) &&
			(be32_to_cpu(entry->hashval) == hashval);
			entry++, probe++) {
/*
 * GROT: Add code to remove incomplete entries.
 */
		/*
		 * If we are looking for INCOMPLETE entries, show only those.
		 * If we are looking for complete entries, show only those.
		 */
		if ((args->flags & XFS_ATTR_INCOMPLETE) !=
		    (entry->flags & XFS_ATTR_INCOMPLETE)) {
			continue;
		}
		if (entry->flags & XFS_ATTR_LOCAL) {
			name_loc = xfs_attr_leaf_name_local(leaf, probe);
			if (name_loc->namelen != args->namelen)
				continue;
			if (memcmp(args->name, (char *)name_loc->nameval, args->namelen) != 0)
				continue;
			if (!xfs_attr_namesp_match(args->flags, entry->flags))
				continue;
			args->index = probe;
			return(XFS_ERROR(EEXIST));
		} else {
			name_rmt = xfs_attr_leaf_name_remote(leaf, probe);
			if (name_rmt->namelen != args->namelen)
				continue;
			if (memcmp(args->name, (char *)name_rmt->name,
					args->namelen) != 0)
				continue;
			if (!xfs_attr_namesp_match(args->flags, entry->flags))
				continue;
			args->index = probe;
			/* tell the caller where the remote value lives */
			args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
			args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount,
						   be32_to_cpu(name_rmt->valuelen));
			return(XFS_ERROR(EEXIST));
		}
	}
	args->index = probe;
	return(XFS_ERROR(ENOATTR));
}
/*
 * Get the value associated with an attribute name from a leaf attribute
 * list structure.
 *
 * For a local attribute the value bytes are copied into args->value
 * (unless ATTR_KERNOVAL asks for the length only); for a remote
 * attribute only args->rmtblkno/rmtblkcnt are filled in and the caller
 * must fetch the value blocks itself.  Returns ERANGE when the caller's
 * buffer (args->valuelen on entry) is too small; args->valuelen is
 * always updated to the actual value length.
 */
int
xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args)
{
	int valuelen;
	xfs_attr_leafblock_t *leaf;
	xfs_attr_leaf_entry_t *entry;
	xfs_attr_leaf_name_local_t *name_loc;
	xfs_attr_leaf_name_remote_t *name_rmt;

	leaf = bp->data;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	ASSERT(be16_to_cpu(leaf->hdr.count)
					< (XFS_LBSIZE(args->dp->i_mount)/8));
	ASSERT(args->index < be16_to_cpu(leaf->hdr.count));

	entry = &leaf->entries[args->index];
	if (entry->flags & XFS_ATTR_LOCAL) {
		name_loc = xfs_attr_leaf_name_local(leaf, args->index);
		ASSERT(name_loc->namelen == args->namelen);
		ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0);
		valuelen = be16_to_cpu(name_loc->valuelen);
		if (args->flags & ATTR_KERNOVAL) {
			/* caller only wants the length */
			args->valuelen = valuelen;
			return(0);
		}
		if (args->valuelen < valuelen) {
			args->valuelen = valuelen;
			return(XFS_ERROR(ERANGE));
		}
		args->valuelen = valuelen;
		/* value is stored right after the name in the same block */
		memcpy(args->value, &name_loc->nameval[args->namelen], valuelen);
	} else {
		name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
		ASSERT(name_rmt->namelen == args->namelen);
		ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
		valuelen = be32_to_cpu(name_rmt->valuelen);
		args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
		args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount, valuelen);
		if (args->flags & ATTR_KERNOVAL) {
			args->valuelen = valuelen;
			return(0);
		}
		if (args->valuelen < valuelen) {
			args->valuelen = valuelen;
			return(XFS_ERROR(ERANGE));
		}
		args->valuelen = valuelen;
	}
	return(0);
}
/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Move the indicated entries from one leaf to another.
 * NOTE: this routine modifies both source and destination leaves.
 *
 * "count" entries starting at index start_s in leaf_s are moved to
 * index start_d in leaf_d.  Entry slots are kept sorted; name/value
 * data is re-packed downward from hdr_d->firstused in the destination.
 * The source's vacated name area is zeroed and the source is marked
 * as containing holes.
 */
/*ARGSUSED*/
STATIC void
xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
			xfs_attr_leafblock_t *leaf_d, int start_d,
			int count, xfs_mount_t *mp)
{
	xfs_attr_leaf_hdr_t *hdr_s, *hdr_d;
	xfs_attr_leaf_entry_t *entry_s, *entry_d;
	int desti, tmp, i;

	/*
	 * Check for nothing to do.
	 */
	if (count == 0)
		return;

	/*
	 * Set up environment.
	 */
	ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	hdr_s = &leaf_s->hdr;
	hdr_d = &leaf_d->hdr;
	ASSERT((be16_to_cpu(hdr_s->count) > 0) &&
	       (be16_to_cpu(hdr_s->count) < (XFS_LBSIZE(mp)/8)));
	ASSERT(be16_to_cpu(hdr_s->firstused) >=
		((be16_to_cpu(hdr_s->count)
					* sizeof(*entry_s))+sizeof(*hdr_s)));
	ASSERT(be16_to_cpu(hdr_d->count) < (XFS_LBSIZE(mp)/8));
	ASSERT(be16_to_cpu(hdr_d->firstused) >=
		((be16_to_cpu(hdr_d->count)
					* sizeof(*entry_d))+sizeof(*hdr_d)));

	ASSERT(start_s < be16_to_cpu(hdr_s->count));
	ASSERT(start_d <= be16_to_cpu(hdr_d->count));
	ASSERT(count <= be16_to_cpu(hdr_s->count));

	/*
	 * Move the entries in the destination leaf up to make a hole?
	 */
	if (start_d < be16_to_cpu(hdr_d->count)) {
		tmp  = be16_to_cpu(hdr_d->count) - start_d;
		tmp *= sizeof(xfs_attr_leaf_entry_t);
		entry_s = &leaf_d->entries[start_d];
		entry_d = &leaf_d->entries[start_d + count];
		memmove((char *)entry_d, (char *)entry_s, tmp);
	}

	/*
	 * Copy all entry's in the same (sorted) order,
	 * but allocate attribute info packed and in sequence.
	 */
	entry_s = &leaf_s->entries[start_s];
	entry_d = &leaf_d->entries[start_d];
	desti = start_d;
	for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) {
		ASSERT(be16_to_cpu(entry_s->nameidx)
				>= be16_to_cpu(hdr_s->firstused));
		tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i);
#ifdef GROT
		/*
		 * Code to drop INCOMPLETE entries.  Difficult to use as we
		 * may also need to change the insertion index.  Code turned
		 * off for 6.2, should be revisited later.
		 */
		if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
			memset(xfs_attr_leaf_name(leaf_s, start_s + i), 0, tmp);
			be16_add_cpu(&hdr_s->usedbytes, -tmp);
			be16_add_cpu(&hdr_s->count, -1);
			entry_d--;	/* to compensate for ++ in loop hdr */
			desti--;
			if ((start_s + i) < offset)
				result++;	/* insertion index adjustment */
		} else {
#endif /* GROT */
		/* reserve space for the name/value at the bottom of dest */
		be16_add_cpu(&hdr_d->firstused, -tmp);
		/* both on-disk, don't endian flip twice */
		entry_d->hashval = entry_s->hashval;
		/* both on-disk, don't endian flip twice */
		entry_d->nameidx = hdr_d->firstused;
		entry_d->flags = entry_s->flags;
		ASSERT(be16_to_cpu(entry_d->nameidx) + tmp
							<= XFS_LBSIZE(mp));
		memmove(xfs_attr_leaf_name(leaf_d, desti),
			xfs_attr_leaf_name(leaf_s, start_s + i), tmp);
		ASSERT(be16_to_cpu(entry_s->nameidx) + tmp
							<= XFS_LBSIZE(mp));
		memset(xfs_attr_leaf_name(leaf_s, start_s + i), 0, tmp);
		be16_add_cpu(&hdr_s->usedbytes, -tmp);
		be16_add_cpu(&hdr_d->usedbytes, tmp);
		be16_add_cpu(&hdr_s->count, -1);
		be16_add_cpu(&hdr_d->count, 1);
		tmp = be16_to_cpu(hdr_d->count)
					* sizeof(xfs_attr_leaf_entry_t)
					+ sizeof(xfs_attr_leaf_hdr_t);
		ASSERT(be16_to_cpu(hdr_d->firstused) >= tmp);
#ifdef GROT
		}
#endif /* GROT */
	}

	/*
	 * Zero out the entries we just copied.
	 */
	if (start_s == be16_to_cpu(hdr_s->count)) {
		/* we moved the tail of the source: just clear the slots */
		tmp = count * sizeof(xfs_attr_leaf_entry_t);
		entry_s = &leaf_s->entries[start_s];
		ASSERT(((char *)entry_s + tmp) <=
		       ((char *)leaf_s + XFS_LBSIZE(mp)));
		memset((char *)entry_s, 0, tmp);
	} else {
		/*
		 * Move the remaining entries down to fill the hole,
		 * then zero the entries at the top.
		 */
		tmp  = be16_to_cpu(hdr_s->count) - count;
		tmp *= sizeof(xfs_attr_leaf_entry_t);
		entry_s = &leaf_s->entries[start_s + count];
		entry_d = &leaf_s->entries[start_s];
		memmove((char *)entry_d, (char *)entry_s, tmp);

		tmp = count * sizeof(xfs_attr_leaf_entry_t);
		entry_s = &leaf_s->entries[be16_to_cpu(hdr_s->count)];
		ASSERT(((char *)entry_s + tmp) <=
		       ((char *)leaf_s + XFS_LBSIZE(mp)));
		memset((char *)entry_s, 0, tmp);
	}

	/*
	 * Fill in the freemap information
	 */
	hdr_d->freemap[0].base = cpu_to_be16(sizeof(xfs_attr_leaf_hdr_t));
	be16_add_cpu(&hdr_d->freemap[0].base, be16_to_cpu(hdr_d->count) *
			sizeof(xfs_attr_leaf_entry_t));
	hdr_d->freemap[0].size = cpu_to_be16(be16_to_cpu(hdr_d->firstused)
			      - be16_to_cpu(hdr_d->freemap[0].base));
	hdr_d->freemap[1].base = 0;
	hdr_d->freemap[2].base = 0;
	hdr_d->freemap[1].size = 0;
	hdr_d->freemap[2].size = 0;
	hdr_s->holes = 1;	/* leaf may not be compact */
}
/*
 * Compare two leaf blocks "order".
 * Return 0 unless leaf2 should go before leaf1.
 *
 * leaf2 sorts first when both leaves are non-empty and either its
 * lowest or its highest hashval is below the corresponding hashval
 * in leaf1.
 */
int
xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp)
{
	xfs_attr_leafblock_t *leaf1 = leaf1_bp->data;
	xfs_attr_leafblock_t *leaf2 = leaf2_bp->data;
	int nent1, nent2;

	ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC) &&
	       (be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC));

	nent1 = be16_to_cpu(leaf1->hdr.count);
	nent2 = be16_to_cpu(leaf2->hdr.count);

	/* an empty leaf never forces a reordering */
	if (nent1 == 0 || nent2 == 0)
		return(0);

	/* leaf2's first entry hashes below leaf1's first entry? */
	if (be32_to_cpu(leaf2->entries[0].hashval) <
	    be32_to_cpu(leaf1->entries[0].hashval))
		return(1);

	/* leaf2's last entry hashes below leaf1's last entry? */
	if (be32_to_cpu(leaf2->entries[nent2 - 1].hashval) <
	    be32_to_cpu(leaf1->entries[nent1 - 1].hashval))
		return(1);

	return(0);
}
/*
 * Pick up the last hashvalue from a leaf block.
 *
 * Optionally reports the number of entries through *count.  Returns 0
 * for an empty leaf.
 */
xfs_dahash_t
xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count)
{
	xfs_attr_leafblock_t *leaf = bp->data;
	int nentries;

	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);

	nentries = be16_to_cpu(leaf->hdr.count);
	if (count)
		*count = nentries;
	if (nentries == 0)
		return(0);
	return be32_to_cpu(leaf->entries[nentries - 1].hashval);
}
/*
 * Calculate the number of bytes used to store the indicated attribute
 * (whether local or remote only calculate bytes in this block).
 */
STATIC int
xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index)
{
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);

	if (leaf->entries[index].flags & XFS_ATTR_LOCAL) {
		/* name and value both live in this block */
		xfs_attr_leaf_name_local_t *name_loc;

		name_loc = xfs_attr_leaf_name_local(leaf, index);
		return xfs_attr_leaf_entsize_local(name_loc->namelen,
					be16_to_cpu(name_loc->valuelen));
	} else {
		/* only the name and remote-value pointer live here */
		xfs_attr_leaf_name_remote_t *name_rmt;

		name_rmt = xfs_attr_leaf_name_remote(leaf, index);
		return xfs_attr_leaf_entsize_remote(name_rmt->namelen);
	}
}
/*
 * Calculate the number of bytes that would be required to store the new
 * attribute (whether local or remote only calculate bytes in this block).
 * This routine decides as a side effect whether the attribute will be
 * a "local" or a "remote" attribute; the decision is reported through
 * *local (1 = local, 0 = remote) when the pointer is non-NULL.
 */
int
xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize, int *local)
{
	int size = xfs_attr_leaf_entsize_local(namelen, valuelen);
	int is_local = (size < xfs_attr_leaf_entsize_local_max(blocksize));

	/* too big to inline the value: only name + pointer stay here */
	if (!is_local)
		size = xfs_attr_leaf_entsize_remote(namelen);
	if (local)
		*local = is_local;
	return(size);
}
/*
 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
 *
 * Resumes from the cursor position when context->resynch is set, then
 * feeds each (complete) entry to context->put_listent(), fetching
 * remote values when context->put_value is set.  Returns the first
 * non-zero value from put_listent()/xfs_attr_rmtval_get(), else 0.
 */
int
xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t *cursor;
	xfs_attr_leafblock_t *leaf;
	xfs_attr_leaf_entry_t *entry;
	int retval, i;

	ASSERT(bp != NULL);
	leaf = bp->data;
	cursor = context->cursor;
	cursor->initted = 1;

	trace_xfs_attr_list_leaf(context);

	/*
	 * Re-find our place in the leaf block if this is a new syscall.
	 */
	if (context->resynch) {
		entry = &leaf->entries[0];
		for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
			if (be32_to_cpu(entry->hashval) == cursor->hashval) {
				/*
				 * Several entries may share a hashval;
				 * cursor->offset says how many of them
				 * were already returned.
				 */
				if (cursor->offset == context->dupcnt) {
					context->dupcnt = 0;
					break;
				}
				context->dupcnt++;
			} else if (be32_to_cpu(entry->hashval) >
					cursor->hashval) {
				context->dupcnt = 0;
				break;
			}
		}
		if (i == be16_to_cpu(leaf->hdr.count)) {
			trace_xfs_attr_list_notfound(context);
			return(0);
		}
	} else {
		entry = &leaf->entries[0];
		i = 0;
	}
	context->resynch = 0;

	/*
	 * We have found our place, start copying out the new attributes.
	 */
	retval = 0;
	for (  ; (i < be16_to_cpu(leaf->hdr.count)); entry++, i++) {
		if (be32_to_cpu(entry->hashval) != cursor->hashval) {
			cursor->hashval = be32_to_cpu(entry->hashval);
			cursor->offset = 0;
		}

		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;		/* skip incomplete entries */

		if (entry->flags & XFS_ATTR_LOCAL) {
			xfs_attr_leaf_name_local_t *name_loc =
				xfs_attr_leaf_name_local(leaf, i);

			/* value follows the name within this block */
			retval = context->put_listent(context,
						entry->flags,
						name_loc->nameval,
						(int)name_loc->namelen,
						be16_to_cpu(name_loc->valuelen),
						&name_loc->nameval[name_loc->namelen]);
			if (retval)
				return retval;
		} else {
			xfs_attr_leaf_name_remote_t *name_rmt =
				xfs_attr_leaf_name_remote(leaf, i);

			int valuelen = be32_to_cpu(name_rmt->valuelen);

			if (context->put_value) {
				xfs_da_args_t args;

				/* read the remote value blocks for the caller */
				memset((char *)&args, 0, sizeof(args));
				args.dp = context->dp;
				args.whichfork = XFS_ATTR_FORK;
				args.valuelen = valuelen;
				args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
				args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
				args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen);
				retval = xfs_attr_rmtval_get(&args);
				if (retval)
					return retval;
				retval = context->put_listent(context,
						entry->flags,
						name_rmt->name,
						(int)name_rmt->namelen,
						valuelen,
						args.value);
				kmem_free(args.value);
			} else {
				retval = context->put_listent(context,
						entry->flags,
						name_rmt->name,
						(int)name_rmt->namelen,
						valuelen,
						NULL);
			}
			if (retval)
				return retval;
		}
		if (context->seen_enough)
			break;
		cursor->offset++;
	}
	trace_xfs_attr_list_leaf_end(context);
	return(retval);
}
/*========================================================================
 * Manage the INCOMPLETE flag in a leaf entry
 *========================================================================*/

/*
 * Clear the INCOMPLETE flag on an entry in a leaf block.
 *
 * Re-reads the leaf at args->blkno, clears the flag on the entry at
 * args->index and, for a remote attribute, also records the final
 * value block/length.  Commits via xfs_trans_roll() so the flag flip
 * is its own transaction.
 */
int
xfs_attr_leaf_clearflag(xfs_da_args_t *args)
{
	xfs_attr_leafblock_t *leaf;
	xfs_attr_leaf_entry_t *entry;
	xfs_attr_leaf_name_remote_t *name_rmt;
	xfs_dabuf_t *bp;
	int error;
#ifdef DEBUG
	xfs_attr_leaf_name_local_t *name_loc;
	int namelen;
	char *name;
#endif /* DEBUG */

	/*
	 * Set up the operation.
	 */
	error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
					     XFS_ATTR_FORK);
	if (error) {
		return(error);
	}
	ASSERT(bp != NULL);

	leaf = bp->data;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
	ASSERT(args->index >= 0);
	entry = &leaf->entries[ args->index ];
	ASSERT(entry->flags & XFS_ATTR_INCOMPLETE);

#ifdef DEBUG
	/* sanity check: the entry really is the attribute we expect */
	if (entry->flags & XFS_ATTR_LOCAL) {
		name_loc = xfs_attr_leaf_name_local(leaf, args->index);
		namelen = name_loc->namelen;
		name = (char *)name_loc->nameval;
	} else {
		name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
		namelen = name_rmt->namelen;
		name = (char *)name_rmt->name;
	}
	ASSERT(be32_to_cpu(entry->hashval) == args->hashval);
	ASSERT(namelen == args->namelen);
	ASSERT(memcmp(name, args->name, namelen) == 0);
#endif /* DEBUG */

	entry->flags &= ~XFS_ATTR_INCOMPLETE;
	xfs_da_log_buf(args->trans, bp,
			 XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));

	if (args->rmtblkno) {
		/* record where the remote value finally landed */
		ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
		name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
		name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
		name_rmt->valuelen = cpu_to_be32(args->valuelen);
		xfs_da_log_buf(args->trans, bp,
			 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
	}
	xfs_da_buf_done(bp);

	/*
	 * Commit the flag value change and start the next trans in series.
	 */
	return xfs_trans_roll(&args->trans, args->dp);
}
/*
 * Set the INCOMPLETE flag on an entry in a leaf block.
 *
 * Marks the entry at args->blkno/args->index as in-flux; for a remote
 * attribute the value pointer is also zeroed so a crash cannot leave a
 * stale pointer behind.  Commits via xfs_trans_roll().
 */
int
xfs_attr_leaf_setflag(xfs_da_args_t *args)
{
	xfs_attr_leafblock_t *leaf;
	xfs_attr_leaf_entry_t *entry;
	xfs_attr_leaf_name_remote_t *name_rmt;
	xfs_dabuf_t *bp;
	int error;

	/*
	 * Set up the operation.
	 */
	error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp,
					     XFS_ATTR_FORK);
	if (error) {
		return(error);
	}
	ASSERT(bp != NULL);

	leaf = bp->data;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
	ASSERT(args->index >= 0);
	entry = &leaf->entries[ args->index ];

	ASSERT((entry->flags & XFS_ATTR_INCOMPLETE) == 0);
	entry->flags |= XFS_ATTR_INCOMPLETE;
	xfs_da_log_buf(args->trans, bp,
			XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
	if ((entry->flags & XFS_ATTR_LOCAL) == 0) {
		/* invalidate the remote value pointer while incomplete */
		name_rmt = xfs_attr_leaf_name_remote(leaf, args->index);
		name_rmt->valueblk = 0;
		name_rmt->valuelen = 0;
		xfs_da_log_buf(args->trans, bp,
			 XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
	}
	xfs_da_buf_done(bp);

	/*
	 * Commit the flag value change and start the next trans in series.
	 */
	return xfs_trans_roll(&args->trans, args->dp);
}
/*
 * In a single transaction, clear the INCOMPLETE flag on the leaf entry
 * given by args->blkno/index and set the INCOMPLETE flag on the leaf
 * entry given by args->blkno2/index2.
 *
 * Note that they could be in different blocks, or in the same block.
 *
 * Used during a rename: the "new" attr becomes live at the same instant
 * the "old" attr is marked dead, so a crash sees exactly one of them.
 */
int
xfs_attr_leaf_flipflags(xfs_da_args_t *args)
{
	xfs_attr_leafblock_t *leaf1, *leaf2;
	xfs_attr_leaf_entry_t *entry1, *entry2;
	xfs_attr_leaf_name_remote_t *name_rmt;
	xfs_dabuf_t *bp1, *bp2;
	int error;
#ifdef DEBUG
	xfs_attr_leaf_name_local_t *name_loc;
	int namelen1, namelen2;
	char *name1, *name2;
#endif /* DEBUG */

	/*
	 * Read the block containing the "old" attr
	 */
	error = xfs_da_read_buf(args->trans, args->dp, args->blkno, -1, &bp1,
					     XFS_ATTR_FORK);
	if (error) {
		return(error);
	}
	ASSERT(bp1 != NULL);

	/*
	 * Read the block containing the "new" attr, if it is different
	 */
	if (args->blkno2 != args->blkno) {
		error = xfs_da_read_buf(args->trans, args->dp, args->blkno2,
					-1, &bp2, XFS_ATTR_FORK);
		if (error) {
			return(error);
		}
		ASSERT(bp2 != NULL);
	} else {
		bp2 = bp1;
	}

	leaf1 = bp1->data;
	ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	ASSERT(args->index < be16_to_cpu(leaf1->hdr.count));
	ASSERT(args->index >= 0);
	entry1 = &leaf1->entries[ args->index ];

	leaf2 = bp2->data;
	ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
	ASSERT(args->index2 < be16_to_cpu(leaf2->hdr.count));
	ASSERT(args->index2 >= 0);
	entry2 = &leaf2->entries[ args->index2 ];

#ifdef DEBUG
	/* sanity check: both entries must name the same attribute */
	if (entry1->flags & XFS_ATTR_LOCAL) {
		name_loc = xfs_attr_leaf_name_local(leaf1, args->index);
		namelen1 = name_loc->namelen;
		name1 = (char *)name_loc->nameval;
	} else {
		name_rmt = xfs_attr_leaf_name_remote(leaf1, args->index);
		namelen1 = name_rmt->namelen;
		name1 = (char *)name_rmt->name;
	}
	if (entry2->flags & XFS_ATTR_LOCAL) {
		name_loc = xfs_attr_leaf_name_local(leaf2, args->index2);
		namelen2 = name_loc->namelen;
		name2 = (char *)name_loc->nameval;
	} else {
		name_rmt = xfs_attr_leaf_name_remote(leaf2, args->index2);
		namelen2 = name_rmt->namelen;
		name2 = (char *)name_rmt->name;
	}
	ASSERT(be32_to_cpu(entry1->hashval) == be32_to_cpu(entry2->hashval));
	ASSERT(namelen1 == namelen2);
	ASSERT(memcmp(name1, name2, namelen1) == 0);
#endif /* DEBUG */

	ASSERT(entry1->flags & XFS_ATTR_INCOMPLETE);
	ASSERT((entry2->flags & XFS_ATTR_INCOMPLETE) == 0);

	entry1->flags &= ~XFS_ATTR_INCOMPLETE;
	xfs_da_log_buf(args->trans, bp1,
			  XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1)));
	if (args->rmtblkno) {
		/* the "new" attr's remote value is live now: record it */
		ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
		name_rmt = xfs_attr_leaf_name_remote(leaf1, args->index);
		name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
		name_rmt->valuelen = cpu_to_be32(args->valuelen);
		xfs_da_log_buf(args->trans, bp1,
			 XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
	}

	entry2->flags |= XFS_ATTR_INCOMPLETE;
	xfs_da_log_buf(args->trans, bp2,
			  XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2)));
	if ((entry2->flags & XFS_ATTR_LOCAL) == 0) {
		/* invalidate the dying attr's remote value pointer */
		name_rmt = xfs_attr_leaf_name_remote(leaf2, args->index2);
		name_rmt->valueblk = 0;
		name_rmt->valuelen = 0;
		xfs_da_log_buf(args->trans, bp2,
			 XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt)));
	}
	xfs_da_buf_done(bp1);
	if (bp1 != bp2)
		xfs_da_buf_done(bp2);

	/*
	 * Commit the flag value change and start the next trans in series.
	 */
	error = xfs_trans_roll(&args->trans, args->dp);

	return(error);
}
/*========================================================================
 * Indiscriminately delete the entire attribute fork
 *========================================================================*/

/*
 * Recurse (gasp!) through the attribute nodes until we find leaves.
 * We're doing a depth-first traversal in order to invalidate everything.
 *
 * Entry point: reads the root (block 0) of the attribute fork, walks
 * and invalidates the whole tree, then invalidates the root's incore
 * buffer itself and rolls the transaction.
 */
int
xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
{
	xfs_da_blkinfo_t *info;
	xfs_daddr_t blkno;
	xfs_dabuf_t *bp;
	int error;

	/*
	 * Read block 0 to see what we have to work with.
	 * We only get here if we have extents, since we remove
	 * the extents in reverse order the extent containing
	 * block 0 must still be there.
	 */
	error = xfs_da_read_buf(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK);
	if (error)
		return(error);
	blkno = xfs_da_blkno(bp);	/* daddr saved for the re-get below */

	/*
	 * Invalidate the tree, even if the "tree" is only a single leaf block.
	 * This is a depth-first traversal!
	 */
	info = bp->data;
	if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) {
		error = xfs_attr_node_inactive(trans, dp, bp, 1);
	} else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) {
		error = xfs_attr_leaf_inactive(trans, dp, bp);
	} else {
		/* unrecognized block: corrupt fork */
		error = XFS_ERROR(EIO);
		xfs_da_brelse(*trans, bp);
	}
	if (error)
		return(error);

	/*
	 * Invalidate the incore copy of the root block.
	 */
	error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK);
	if (error)
		return(error);
	xfs_da_binval(*trans, bp);	/* remove from cache */
	/*
	 * Commit the invalidate and start the next transaction.
	 */
	error = xfs_trans_roll(trans, dp);

	return (error);
}
/*
 * Recurse (gasp!) through the attribute nodes until we find leaves.
 * We're doing a depth-first traversal in order to invalidate everything.
 *
 * @trans: rolling transaction pointer (updated as we commit each step)
 * @dp:    inode owning the attribute fork
 * @bp:    buffer for this interior node (always released before return)
 * @level: current tree depth, used to bound the recursion
 *
 * Returns 0 on success, an errno on failure.  EIO is returned for an
 * over-deep tree or an unrecognized child block (corruption).
 */
STATIC int
xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
				   int level)
{
	xfs_da_blkinfo_t *info;
	xfs_da_intnode_t *node;
	xfs_dablk_t child_fsb;
	xfs_daddr_t parent_blkno, child_blkno;
	int error, count, i;
	xfs_dabuf_t *child_bp;

	/*
	 * Since this code is recursive (gasp!) we must protect ourselves.
	 */
	if (level > XFS_DA_NODE_MAXDEPTH) {
		xfs_da_brelse(*trans, bp);	/* no locks for later trans */
		return(XFS_ERROR(EIO));
	}

	node = bp->data;
	ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
	parent_blkno = xfs_da_blkno(bp);	/* save for re-read later */
	count = be16_to_cpu(node->hdr.count);
	if (!count) {
		xfs_da_brelse(*trans, bp);
		return(0);
	}
	child_fsb = be32_to_cpu(node->btree[0].before);
	xfs_da_brelse(*trans, bp);	/* no locks for later trans */

	/*
	 * If this is the node level just above the leaves, simply loop
	 * over the leaves removing all of them.  If this is higher up
	 * in the tree, recurse downward.
	 */
	for (i = 0; i < count; i++) {
		/*
		 * Read the subsidiary block to see what we have to work with.
		 * Don't do this in a transaction.  This is a depth-first
		 * traversal of the tree so we may deal with many blocks
		 * before we come back to this one.
		 */
		error = xfs_da_read_buf(*trans, dp, child_fsb, -2, &child_bp,
						XFS_ATTR_FORK);
		if (error)
			return(error);
		if (child_bp) {
						/* save for re-read later */
			child_blkno = xfs_da_blkno(child_bp);

			/*
			 * Invalidate the subtree, however we have to.
			 */
			info = child_bp->data;
			if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) {
				error = xfs_attr_node_inactive(trans, dp,
						child_bp, level+1);
			} else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) {
				error = xfs_attr_leaf_inactive(trans, dp,
						child_bp);
			} else {
				/* neither node nor leaf: corruption */
				error = XFS_ERROR(EIO);
				xfs_da_brelse(*trans, child_bp);
			}
			if (error)
				return(error);

			/*
			 * Remove the subsidiary block from the cache
			 * and from the log.
			 */
			error = xfs_da_get_buf(*trans, dp, 0, child_blkno,
				&child_bp, XFS_ATTR_FORK);
			if (error)
				return(error);
			xfs_da_binval(*trans, child_bp);
		}

		/*
		 * If we're not done, re-read the parent to get the next
		 * child block number.
		 */
		if ((i+1) < count) {
			error = xfs_da_read_buf(*trans, dp, 0, parent_blkno,
				&bp, XFS_ATTR_FORK);
			if (error)
				return(error);
			/*
			 * Fix: "node" still pointed into the buffer that was
			 * released by xfs_da_brelse() above the loop; its
			 * memory may have been recycled.  Re-derive it from
			 * the freshly read parent buffer before using it.
			 */
			node = bp->data;
			child_fsb = be32_to_cpu(node->btree[i+1].before);
			xfs_da_brelse(*trans, bp);
		}
		/*
		 * Atomically commit the whole invalidate stuff.
		 */
		error = xfs_trans_roll(trans, dp);
		if (error)
			return (error);
	}

	return(0);
}
/*
 * Invalidate all of the "remote" value regions pointed to by a particular
 * leaf block.
 * Note that we must release the lock on the buffer so that we are not
 * caught holding something that the logging code wants to flush to disk.
 *
 * Builds a list of (valueblk, valuelen) pairs from the leaf, drops the
 * leaf buffer, then frees/invalidates each remote extent.  Returns the
 * first error encountered (but continues through the whole list).
 */
STATIC int
xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
{
	xfs_attr_leafblock_t *leaf;
	xfs_attr_leaf_entry_t *entry;
	xfs_attr_leaf_name_remote_t *name_rmt;
	xfs_attr_inactive_list_t *list, *lp;
	int error, count, size, tmp, i;

	leaf = bp->data;
	ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);

	/*
	 * Count the number of "remote" value extents.
	 */
	count = 0;
	entry = &leaf->entries[0];
	for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
		if (be16_to_cpu(entry->nameidx) &&
		    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
			name_rmt = xfs_attr_leaf_name_remote(leaf, i);
			/* an incomplete attr may have a zero valueblk */
			if (name_rmt->valueblk)
				count++;
		}
	}

	/*
	 * If there are no "remote" values, we're done.
	 */
	if (count == 0) {
		xfs_da_brelse(*trans, bp);
		return(0);
	}

	/*
	 * Allocate storage for a list of all the "remote" value extents.
	 */
	size = count * sizeof(xfs_attr_inactive_list_t);
	list = (xfs_attr_inactive_list_t *)kmem_alloc(size, KM_SLEEP);

	/*
	 * Identify each of the "remote" value extents.
	 */
	lp = list;
	entry = &leaf->entries[0];
	for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
		if (be16_to_cpu(entry->nameidx) &&
		    ((entry->flags & XFS_ATTR_LOCAL) == 0)) {
			name_rmt = xfs_attr_leaf_name_remote(leaf, i);
			if (name_rmt->valueblk) {
				lp->valueblk = be32_to_cpu(name_rmt->valueblk);
				lp->valuelen = XFS_B_TO_FSB(dp->i_mount,
						    be32_to_cpu(name_rmt->valuelen));
				lp++;
			}
		}
	}
	xfs_da_brelse(*trans, bp);	/* unlock for trans. in freextent() */

	/*
	 * Invalidate each of the "remote" value extents.
	 */
	error = 0;
	for (lp = list, i = 0; i < count; i++, lp++) {
		tmp = xfs_attr_leaf_freextent(trans, dp,
				lp->valueblk, lp->valuelen);

		if (error == 0)
			error = tmp;	/* save only the 1st errno */
	}

	kmem_free((xfs_caddr_t)list);
	return(error);
}
/*
* Look at all the extents for this logical region,
* invalidate any buffers that are incore/in transactions.
*/
STATIC int
xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
		xfs_dablk_t blkno, int blkcnt)
{
	xfs_bmbt_irec_t map;
	xfs_dablk_t tblkno;
	int tblkcnt, dblkcnt, nmap, error;
	xfs_daddr_t dblkno;
	xfs_buf_t *bp;
	/*
	 * Roll through the "value", invalidating the attribute value's
	 * blocks.  Each iteration maps one contiguous extent of the
	 * logical region [blkno, blkno + blkcnt) in the attr fork.
	 */
	tblkno = blkno;
	tblkcnt = blkcnt;
	while (tblkcnt > 0) {
		/*
		 * Try to remember where we decided to put the value.
		 * Read-only bmapi lookup: no allocation arguments passed.
		 */
		nmap = 1;
		error = xfs_bmapi(*trans, dp, (xfs_fileoff_t)tblkno, tblkcnt,
					XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
					NULL, 0, &map, &nmap, NULL);
		if (error) {
			return(error);
		}
		ASSERT(nmap == 1);
		ASSERT(map.br_startblock != DELAYSTARTBLOCK);
		/*
		 * If it's a hole, these are already unmapped
		 * so there's nothing to invalidate.
		 */
		if (map.br_startblock != HOLESTARTBLOCK) {
			dblkno = XFS_FSB_TO_DADDR(dp->i_mount,
						  map.br_startblock);
			dblkcnt = XFS_FSB_TO_BB(dp->i_mount,
						map.br_blockcount);
			/* Grab the buffer only to mark it stale in the log */
			bp = xfs_trans_get_buf(*trans,
					dp->i_mount->m_ddev_targp,
					dblkno, dblkcnt, XBF_LOCK);
			xfs_trans_binval(*trans, bp);
			/*
			 * Roll to next transaction so we never pin too
			 * much in a single transaction.
			 */
			error = xfs_trans_roll(trans, dp);
			if (error)
				return (error);
		}
		/* Advance past the extent just processed (hole or not) */
		tblkno += map.br_blockcount;
		tblkcnt -= map.br_blockcount;
	}
	return(0);
}
| gpl-2.0 |
pbhide/net-next-rocker | arch/arm/mach-imx/irq-common.c | 2106 | 1227 | /*
* Copyright (C) BitBox Ltd 2010
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/platform_data/asoc-imx-ssi.h>
#include "irq-common.h"
/*
 * Route (or unroute) the given interrupt to the FIQ line, if the
 * interrupt controller behind it provides a set_irq_fiq hook.
 * Returns the hook's result, or -ENOSYS when no hook is available.
 */
int mxc_set_irq_fiq(unsigned int irq, unsigned int type)
{
	struct irq_chip_generic *gc = irq_get_chip_data(irq);
	struct mxc_extra_irq *extra;

	if (!gc || !gc->private)
		return -ENOSYS;

	extra = gc->private;
	if (!extra->set_irq_fiq)
		return -ENOSYS;

	return extra->set_irq_fiq(irq, type);
}
EXPORT_SYMBOL(mxc_set_irq_fiq);
| gpl-2.0 |
ddilber/telegrauq7_linux | drivers/tty/serial/mcf.c | 2106 | 19838 | /****************************************************************************/
/*
* mcf.c -- Freescale ColdFire UART driver
*
* (C) Copyright 2003-2007, Greg Ungerer <gerg@snapgear.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
/****************************************************************************/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
#include <asm/nettel.h>
/****************************************************************************/
/*
* Some boards implement the DTR/DCD lines using GPIO lines, most
* don't. Dummy out the access macros for those that don't. Those
 * that do should define these macros somewhere in their board
 * specific include files.
*/
#if !defined(mcf_getppdcd)
#define mcf_getppdcd(p) (1)
#endif
#if !defined(mcf_getppdtr)
#define mcf_getppdtr(p) (1)
#endif
#if !defined(mcf_setppdtr)
#define mcf_setppdtr(p, v) do { } while (0)
#endif
/****************************************************************************/
/*
* Local per-uart structure.
*/
struct mcf_uart {
struct uart_port port;
unsigned int sigs; /* Local copy of line sigs */
unsigned char imr; /* Local IMR mirror */
struct serial_rs485 rs485; /* RS485 settings */
};
/****************************************************************************/
/* Tell the serial core whether the transmitter has fully drained. */
static unsigned int mcf_tx_empty(struct uart_port *port)
{
	unsigned char usr = readb(port->membase + MCFUART_USR);

	if (usr & MCFUART_USR_TXEMPTY)
		return TIOCSER_TEMT;
	return 0;
}
/****************************************************************************/
/*
 * Read the modem control lines.  CTS comes from the UART input port
 * register (active low); RTS is mirrored in software; DCD/DTR go
 * through the board-specific GPIO accessors (default to asserted).
 */
static unsigned int mcf_get_mctrl(struct uart_port *port)
{
	struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
	unsigned int state = 0;

	if (!(readb(port->membase + MCFUART_UIPR) & MCFUART_UIPR_CTS))
		state |= TIOCM_CTS;
	state |= pp->sigs & TIOCM_RTS;
	if (mcf_getppdcd(port->line))
		state |= TIOCM_CD;
	if (mcf_getppdtr(port->line))
		state |= TIOCM_DTR;
	return state;
}
/****************************************************************************/
/*
 * Drive the modem control outputs.  RTS is set via the UART output
 * port registers (UOP1 asserts, UOP0 negates); DTR goes through the
 * board-specific GPIO accessor.  The requested state is cached so
 * mcf_get_mctrl() can report RTS back.
 */
static void mcf_set_mctrl(struct uart_port *port, unsigned int sigs)
{
	struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
	unsigned int uop = (sigs & TIOCM_RTS) ? MCFUART_UOP1 : MCFUART_UOP0;

	pp->sigs = sigs;
	mcf_setppdtr(port->line, sigs & TIOCM_DTR);
	writeb(MCFUART_UOP_RTS, port->membase + uop);
}
/****************************************************************************/
/*
 * Kick off transmission: in RS485 mode first re-enable the transmitter
 * (it is disabled at end-of-transmit to drop RTS) and assert RTS, then
 * unmask the TX-ready interrupt so mcf_tx_chars() starts draining the
 * circular buffer.
 */
static void mcf_start_tx(struct uart_port *port)
{
	struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
	if (pp->rs485.flags & SER_RS485_ENABLED) {
		/* Enable Transmitter */
		writeb(MCFUART_UCR_TXENABLE, port->membase + MCFUART_UCR);
		/* Manually assert RTS */
		writeb(MCFUART_UOP_RTS, port->membase + MCFUART_UOP1);
	}
	/* Unmask TX-ready in the soft IMR mirror and push it to hardware */
	pp->imr |= MCFUART_UIR_TXREADY;
	writeb(pp->imr, port->membase + MCFUART_UIMR);
}
/****************************************************************************/
/* Mask the TX-ready interrupt so no further transmit work is triggered. */
static void mcf_stop_tx(struct uart_port *port)
{
	struct mcf_uart *uartp = container_of(port, struct mcf_uart, port);
	unsigned char imr = uartp->imr & ~MCFUART_UIR_TXREADY;

	uartp->imr = imr;
	writeb(imr, port->membase + MCFUART_UIMR);
}
/****************************************************************************/
/* Mask the RX-ready interrupt; received characters are left in the FIFO. */
static void mcf_stop_rx(struct uart_port *port)
{
	struct mcf_uart *uartp = container_of(port, struct mcf_uart, port);
	unsigned char imr = uartp->imr & ~MCFUART_UIR_RXREADY;

	uartp->imr = imr;
	writeb(imr, port->membase + MCFUART_UIMR);
}
/****************************************************************************/
/*
 * Start (break_state == -1) or stop (anything else) sending a line
 * break, by issuing the corresponding command to the UART command
 * register under the port lock.
 */
static void mcf_break_ctl(struct uart_port *port, int break_state)
{
	unsigned char cmd = (break_state == -1) ?
		MCFUART_UCR_CMDBREAKSTART : MCFUART_UCR_CMDBREAKSTOP;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	writeb(cmd, port->membase + MCFUART_UCR);
	spin_unlock_irqrestore(&port->lock, flags);
}
/****************************************************************************/
/*
 * Intentionally empty: no modem-status change interrupt is wired up in
 * this driver, so there is nothing to enable for the serial core.
 */
static void mcf_enable_ms(struct uart_port *port)
{
}
/****************************************************************************/
/*
 * Open-time initialization: reset the UART into a known state, enable
 * both directions, and unmask the receive interrupt.  The TX interrupt
 * stays masked until mcf_start_tx() has data to send.  Always succeeds.
 */
static int mcf_startup(struct uart_port *port)
{
	struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
	unsigned long flags;
	spin_lock_irqsave(&port->lock, flags);
	/* Reset UART, get it into known state... */
	writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
	writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
	/* Enable the UART transmitter and receiver */
	writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
		port->membase + MCFUART_UCR);
	/* Enable RX interrupts now */
	pp->imr = MCFUART_UIR_RXREADY;
	writeb(pp->imr, port->membase + MCFUART_UIMR);
	spin_unlock_irqrestore(&port->lock, flags);
	return 0;
}
/****************************************************************************/
/*
 * Close-time teardown: mask all UART interrupts first (so no handler
 * runs against a half-disabled port), then reset both the receiver and
 * the transmitter.
 */
static void mcf_shutdown(struct uart_port *port)
{
	struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
	unsigned long flags;
	spin_lock_irqsave(&port->lock, flags);
	/* Disable all interrupts now */
	pp->imr = 0;
	writeb(pp->imr, port->membase + MCFUART_UIMR);
	/* Disable UART transmitter and receiver */
	writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
	writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
	spin_unlock_irqrestore(&port->lock, flags);
}
/****************************************************************************/
/*
 * Apply new line settings from termios: baud rate, character size,
 * parity, stop bits and hardware flow control.  The mode-register
 * values (MR1/MR2) are computed first, then the port is reset and
 * reprogrammed under the port lock — the writeb sequence is
 * order-sensitive (reset, reset MR pointer, MR1 then MR2, dividers,
 * clock select, re-enable).
 */
static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
	struct ktermios *old)
{
	struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
	unsigned long flags;
	unsigned int baud, baudclk;
#if defined(CONFIG_M5272)
	/* The 5272 has an extra fractional-precision divider register */
	unsigned int baudfr;
#endif
	unsigned char mr1, mr2;
	baud = uart_get_baud_rate(port, termios, old, 0, 230400);
#if defined(CONFIG_M5272)
	baudclk = (MCF_BUSCLK / baud) / 32;
	baudfr = (((MCF_BUSCLK / baud) + 1) / 2) % 16;
#else
	/* +16 rounds the 32x-oversampled divisor to the nearest value */
	baudclk = ((MCF_BUSCLK / baud) + 16) / 32;
#endif
	mr1 = MCFUART_MR1_RXIRQRDY | MCFUART_MR1_RXERRCHAR;
	mr2 = 0;
	switch (termios->c_cflag & CSIZE) {
	case CS5: mr1 |= MCFUART_MR1_CS5; break;
	case CS6: mr1 |= MCFUART_MR1_CS6; break;
	case CS7: mr1 |= MCFUART_MR1_CS7; break;
	case CS8:
	default: mr1 |= MCFUART_MR1_CS8; break;
	}
	if (termios->c_cflag & PARENB) {
		if (termios->c_cflag & CMSPAR) {
			/* Stick (mark/space) parity */
			if (termios->c_cflag & PARODD)
				mr1 |= MCFUART_MR1_PARITYMARK;
			else
				mr1 |= MCFUART_MR1_PARITYSPACE;
		} else {
			if (termios->c_cflag & PARODD)
				mr1 |= MCFUART_MR1_PARITYODD;
			else
				mr1 |= MCFUART_MR1_PARITYEVEN;
		}
	} else {
		mr1 |= MCFUART_MR1_PARITYNONE;
	}
	if (termios->c_cflag & CSTOPB)
		mr2 |= MCFUART_MR2_STOP2;
	else
		mr2 |= MCFUART_MR2_STOP1;
	if (termios->c_cflag & CRTSCTS) {
		mr1 |= MCFUART_MR1_RXRTS;
		mr2 |= MCFUART_MR2_TXCTS;
	}
	if (pp->rs485.flags & SER_RS485_ENABLED) {
		dev_dbg(port->dev, "Setting UART to RS485\n");
		/* Auto-negate RTS when the transmitter goes idle */
		mr2 |= MCFUART_MR2_TXRTS;
	}
	spin_lock_irqsave(&port->lock, flags);
	uart_update_timeout(port, termios->c_cflag, baud);
	writeb(MCFUART_UCR_CMDRESETRX, port->membase + MCFUART_UCR);
	writeb(MCFUART_UCR_CMDRESETTX, port->membase + MCFUART_UCR);
	/* Reset the MR pointer so the next two UMR writes hit MR1, MR2 */
	writeb(MCFUART_UCR_CMDRESETMRPTR, port->membase + MCFUART_UCR);
	writeb(mr1, port->membase + MCFUART_UMR);
	writeb(mr2, port->membase + MCFUART_UMR);
	writeb((baudclk & 0xff00) >> 8, port->membase + MCFUART_UBG1);
	writeb((baudclk & 0xff), port->membase + MCFUART_UBG2);
#if defined(CONFIG_M5272)
	writeb((baudfr & 0x0f), port->membase + MCFUART_UFPD);
#endif
	writeb(MCFUART_UCSR_RXCLKTIMER | MCFUART_UCSR_TXCLKTIMER,
		port->membase + MCFUART_UCSR);
	writeb(MCFUART_UCR_RXENABLE | MCFUART_UCR_TXENABLE,
		port->membase + MCFUART_UCR);
	spin_unlock_irqrestore(&port->lock, flags);
}
/****************************************************************************/
/*
 * Drain the receive FIFO into the TTY layer.  For each character the
 * status register is sampled; error conditions update the icount
 * statistics, are masked against read_status_mask, and map to the
 * corresponding TTY_* flag before the character is inserted.
 */
static void mcf_rx_chars(struct mcf_uart *pp)
{
	struct uart_port *port = &pp->port;
	unsigned char status, ch, flag;
	while ((status = readb(port->membase + MCFUART_USR)) & MCFUART_USR_RXREADY) {
		ch = readb(port->membase + MCFUART_URB);
		flag = TTY_NORMAL;
		port->icount.rx++;
		if (status & MCFUART_USR_RXERR) {
			/* Clear the latched error condition in hardware */
			writeb(MCFUART_UCR_CMDRESETERR,
				port->membase + MCFUART_UCR);
			if (status & MCFUART_USR_RXBREAK) {
				port->icount.brk++;
				if (uart_handle_break(port))
					continue;
			} else if (status & MCFUART_USR_RXPARITY) {
				port->icount.parity++;
			} else if (status & MCFUART_USR_RXOVERRUN) {
				port->icount.overrun++;
			} else if (status & MCFUART_USR_RXFRAMING) {
				port->icount.frame++;
			}
			/* Only report error conditions the user asked for */
			status &= port->read_status_mask;
			if (status & MCFUART_USR_RXBREAK)
				flag = TTY_BREAK;
			else if (status & MCFUART_USR_RXPARITY)
				flag = TTY_PARITY;
			else if (status & MCFUART_USR_RXFRAMING)
				flag = TTY_FRAME;
		}
		if (uart_handle_sysrq_char(port, ch))
			continue;
		uart_insert_char(port, status, MCFUART_USR_RXOVERRUN, ch, flag);
	}
	tty_flip_buffer_push(&port->state->port);
}
/****************************************************************************/
/*
 * Feed the transmitter from the port's circular buffer.  An x_char
 * (XON/XOFF) always takes priority and is sent alone.  When the buffer
 * drains completely the TX interrupt is masked, and in RS485 mode the
 * transmitter is disabled so the hardware negates RTS automatically.
 */
static void mcf_tx_chars(struct mcf_uart *pp)
{
	struct uart_port *port = &pp->port;
	struct circ_buf *xmit = &port->state->xmit;

	if (port->x_char) {
		/* Send special char - probably flow control */
		writeb(port->x_char, port->membase + MCFUART_UTB);
		port->x_char = 0;
		port->icount.tx++;
		return;
	}

	/* Refill the transmitter while it has room and data is pending */
	while (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY) {
		if (uart_circ_empty(xmit))
			break;
		writeb(xmit->buf[xmit->tail], port->membase + MCFUART_UTB);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit)) {
		/* Nothing left: stop TX interrupts until more data queues */
		pp->imr &= ~MCFUART_UIR_TXREADY;
		writeb(pp->imr, port->membase + MCFUART_UIMR);
		/* Disable TX to negate RTS automatically */
		if (pp->rs485.flags & SER_RS485_ENABLED)
			writeb(MCFUART_UCR_TXDISABLE,
				port->membase + MCFUART_UCR);
	}
}
/****************************************************************************/
/*
 * UART interrupt handler: the pending sources are ANDed with our IMR
 * mirror (so masked sources are ignored even if latched) and RX/TX
 * work is dispatched under the port lock.
 *
 * NOTE(review): the UISR read happens before the lock is taken —
 * appears benign since pp->imr only changes under the same lock, but
 * worth confirming.
 */
static irqreturn_t mcf_interrupt(int irq, void *data)
{
	struct uart_port *port = data;
	struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
	unsigned int isr;
	irqreturn_t ret = IRQ_NONE;
	isr = readb(port->membase + MCFUART_UISR) & pp->imr;
	spin_lock(&port->lock);
	if (isr & MCFUART_UIR_RXREADY) {
		mcf_rx_chars(pp);
		ret = IRQ_HANDLED;
	}
	if (isr & MCFUART_UIR_TXREADY) {
		mcf_tx_chars(pp);
		ret = IRQ_HANDLED;
	}
	spin_unlock(&port->lock);
	return ret;
}
/****************************************************************************/
/*
 * Autoconfigure the port: fixed type/FIFO size, mask all interrupts,
 * then attach the handler.  A request_irq() failure is only logged —
 * config_port returns void so the error cannot be propagated.
 */
static void mcf_config_port(struct uart_port *port, int flags)
{
	port->type = PORT_MCF;
	port->fifosize = MCFUART_TXFIFOSIZE;
	/* Clear mask, so no surprise interrupts. */
	writeb(0, port->membase + MCFUART_UIMR);
	if (request_irq(port->irq, mcf_interrupt, 0, "UART", port))
		printk(KERN_ERR "MCF: unable to attach ColdFire UART %d "
			"interrupt vector=%d\n", port->line, port->irq);
}
/****************************************************************************/
/* Human-readable port type string, or NULL for a foreign port type. */
static const char *mcf_type(struct uart_port *port)
{
	if (port->type == PORT_MCF)
		return "ColdFire UART";
	return NULL;
}
/****************************************************************************/
/* On-chip UARTs are always present; no resources need claiming. */
static int mcf_request_port(struct uart_port *port)
{
	return 0;
}
/****************************************************************************/
/* Counterpart of mcf_request_port(): nothing was claimed, so no-op. */
static void mcf_release_port(struct uart_port *port)
{
}
/****************************************************************************/
/* Accept only "unknown" or our own port type from userspace. */
static int mcf_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	if (ser->type == PORT_UNKNOWN || ser->type == PORT_MCF)
		return 0;
	return -EINVAL;
}
/****************************************************************************/
/*
 * Enable or disable the RS485 support.  In RS485 mode MR2's TXRTS bit
 * makes the hardware negate RTS automatically when transmission ends.
 *
 * NOTE(review): the two UMR reads rely on the UART's auto-incrementing
 * mode-register pointer being positioned at MR1; no CMDRESETMRPTR is
 * issued first (unlike mcf_set_termios), so mr1/mr2 may not be read
 * from the registers the names suggest — confirm against the ColdFire
 * UART documentation.
 */
static void mcf_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
{
	struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
	unsigned long flags;
	unsigned char mr1, mr2;
	spin_lock_irqsave(&port->lock, flags);
	/* Get mode registers */
	mr1 = readb(port->membase + MCFUART_UMR);
	mr2 = readb(port->membase + MCFUART_UMR);
	if (rs485->flags & SER_RS485_ENABLED) {
		dev_dbg(port->dev, "Setting UART to RS485\n");
		/* Automatically negate RTS after TX completes */
		mr2 |= MCFUART_MR2_TXRTS;
	} else {
		dev_dbg(port->dev, "Setting UART to RS232\n");
		mr2 &= ~MCFUART_MR2_TXRTS;
	}
	writeb(mr1, port->membase + MCFUART_UMR);
	writeb(mr2, port->membase + MCFUART_UMR);
	/* Remember the settings so TIOCGRS485 can report them back */
	pp->rs485 = *rs485;
	spin_unlock_irqrestore(&port->lock, flags);
}
/*
 * Port-specific ioctls: TIOCSRS485 copies new RS485 settings from
 * userspace and applies them; TIOCGRS485 returns the cached settings.
 * Anything else falls through to the serial core (-ENOIOCTLCMD).
 *
 * NOTE(review): the userspace pointer casts lack the __user sparse
 * annotation ((struct serial_rs485 __user *)arg) — confirm whether
 * this build runs sparse checks.
 */
static int mcf_ioctl(struct uart_port *port, unsigned int cmd,
		unsigned long arg)
{
	switch (cmd) {
	case TIOCSRS485: {
		struct serial_rs485 rs485;
		if (copy_from_user(&rs485, (struct serial_rs485 *)arg,
				sizeof(struct serial_rs485)))
			return -EFAULT;
		mcf_config_rs485(port, &rs485);
		break;
	}
	case TIOCGRS485: {
		struct mcf_uart *pp = container_of(port, struct mcf_uart, port);
		if (copy_to_user((struct serial_rs485 *)arg, &pp->rs485,
				sizeof(struct serial_rs485)))
			return -EFAULT;
		break;
	}
	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}
/****************************************************************************/
/*
* Define the basic serial functions we support.
*/
/* Operations table handed to the serial core for every mcf port. */
static const struct uart_ops mcf_uart_ops = {
	.tx_empty	= mcf_tx_empty,
	.get_mctrl	= mcf_get_mctrl,
	.set_mctrl	= mcf_set_mctrl,
	.start_tx	= mcf_start_tx,
	.stop_tx	= mcf_stop_tx,
	.stop_rx	= mcf_stop_rx,
	.enable_ms	= mcf_enable_ms,
	.break_ctl	= mcf_break_ctl,
	.startup	= mcf_startup,
	.shutdown	= mcf_shutdown,
	.set_termios	= mcf_set_termios,
	.type		= mcf_type,
	.request_port	= mcf_request_port,
	.release_port	= mcf_release_port,
	.config_port	= mcf_config_port,
	.verify_port	= mcf_verify_port,
	/* RS485 get/set is handled via the ioctl hook */
	.ioctl		= mcf_ioctl,
};
/* Static port table; sized for the maximum number of on-chip UARTs */
static struct mcf_uart mcf_ports[4];
#define	MCF_MAXPORTS	ARRAY_SIZE(mcf_ports)
/****************************************************************************/
#if defined(CONFIG_SERIAL_MCF_CONSOLE)
/****************************************************************************/
/*
 * Pre-console initialization of the port table from platform data, so
 * the console can work before mcf_probe() runs.  The platp array is
 * terminated by a zero mapbase entry.  Always returns 0.
 */
int __init early_mcf_setup(struct mcf_platform_uart *platp)
{
	struct uart_port *port;
	int i;
	for (i = 0; ((i < MCF_MAXPORTS) && (platp[i].mapbase)); i++) {
		port = &mcf_ports[i].port;
		port->line = i;
		port->type = PORT_MCF;
		port->mapbase = platp[i].mapbase;
		/* Fall back to the physical base when no mapping is given */
		port->membase = (platp[i].membase) ? platp[i].membase :
			(unsigned char __iomem *) port->mapbase;
		port->iotype = SERIAL_IO_MEM;
		port->irq = platp[i].irq;
		port->uartclk = MCF_BUSCLK;
		port->flags = ASYNC_BOOT_AUTOCONF;
		port->ops = &mcf_uart_ops;
	}
	return 0;
}
/****************************************************************************/
/*
 * Emit one character on the console port by polling.  Both waits are
 * bounded (0x10000 polls) so a dead transmitter cannot hang the
 * console path.
 */
static void mcf_console_putc(struct console *co, const char c)
{
	struct uart_port *port = &mcf_ports[co->index].port;
	int limit;

	/* Wait for room in the transmitter */
	limit = 0x10000;
	while (limit-- > 0) {
		if (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY)
			break;
	}
	writeb(c, port->membase + MCFUART_UTB);
	/* Wait again so the caller sees the character accepted */
	limit = 0x10000;
	while (limit-- > 0) {
		if (readb(port->membase + MCFUART_USR) & MCFUART_USR_TXREADY)
			break;
	}
}
/****************************************************************************/
/* Write a console string, appending a CR after every LF. */
static void mcf_console_write(struct console *co, const char *s, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		mcf_console_putc(co, s[i]);
		if (s[i] == '\n')
			mcf_console_putc(co, '\r');
	}
}
/****************************************************************************/
/*
 * Console setup hook: pick the requested port (clamping a bad index to
 * 0), refuse ports that were never initialized, parse any
 * "console=ttySn,..." options and apply them.
 */
static int __init mcf_console_setup(struct console *co, char *options)
{
	struct uart_port *port;
	int baud = CONFIG_SERIAL_MCF_BAUDRATE;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if ((co->index < 0) || (co->index >= MCF_MAXPORTS))
		co->index = 0;
	port = &mcf_ports[co->index].port;
	/* No membase means early_mcf_setup()/mcf_probe() never ran here */
	if (!port->membase)
		return -ENODEV;
	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	return uart_set_options(port, co, baud, parity, bits, flow);
}
/****************************************************************************/
/* Forward declaration: the console structure references the driver */
static struct uart_driver mcf_driver;
/* Boot console bound to the first configured ttyS port */
static struct console mcf_console = {
	.name		= "ttyS",
	.write		= mcf_console_write,
	.device		= uart_console_device,
	.setup		= mcf_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,	/* pick the first available port */
	.data		= &mcf_driver,
};
/* Register the boot console as early as console_initcall allows. */
static int __init mcf_console_init(void)
{
	register_console(&mcf_console);
	return 0;
}
console_initcall(mcf_console_init);
#define	MCF_CONSOLE	&mcf_console
/****************************************************************************/
#else
/****************************************************************************/
#define MCF_CONSOLE NULL
/****************************************************************************/
#endif /* CONFIG_MCF_CONSOLE */
/****************************************************************************/
/*
* Define the mcf UART driver structure.
*/
/*
 * Define the mcf UART driver structure: standard ttyS major/minor
 * namespace, one line per entry in mcf_ports.
 */
static struct uart_driver mcf_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "mcf",
	.dev_name	= "ttyS",
	.major		= TTY_MAJOR,
	.minor		= 64,
	.nr		= MCF_MAXPORTS,
	.cons		= MCF_CONSOLE,	/* NULL when console support is off */
};
/****************************************************************************/
static int mcf_probe(struct platform_device *pdev)
{
struct mcf_platform_uart *platp = pdev->dev.platform_data;
struct uart_port *port;
int i;
for (i = 0; ((i < MCF_MAXPORTS) && (platp[i].mapbase)); i++) {
port = &mcf_ports[i].port;
port->line = i;
port->type = PORT_MCF;
port->mapbase = platp[i].mapbase;
port->membase = (platp[i].membase) ? platp[i].membase :
(unsigned char __iomem *) platp[i].mapbase;
port->iotype = SERIAL_IO_MEM;
port->irq = platp[i].irq;
port->uartclk = MCF_BUSCLK;
port->ops = &mcf_uart_ops;
port->flags = ASYNC_BOOT_AUTOCONF;
uart_add_one_port(&mcf_driver, port);
}
return 0;
}
/****************************************************************************/
/*
 * Platform remove: unregister every port in the static table.
 * NOTE(review): `port` is the address of an array element and is never
 * NULL, so the `if (port)` check is always true; ports that were never
 * added in mcf_probe() are still passed to uart_remove_one_port().
 */
static int mcf_remove(struct platform_device *pdev)
{
	struct uart_port *port;
	int i;
	for (i = 0; (i < MCF_MAXPORTS); i++) {
		port = &mcf_ports[i].port;
		if (port)
			uart_remove_one_port(&mcf_driver, port);
	}
	return 0;
}
/****************************************************************************/
/* Platform glue binding devices named "mcfuart" to this driver */
static struct platform_driver mcf_platform_driver = {
	.probe		= mcf_probe,
	.remove		= mcf_remove,
	.driver		= {
		.name	= "mcfuart",
		.owner	= THIS_MODULE,
	},
};
/****************************************************************************/
/*
 * Module init: register the serial core driver first, then the
 * platform driver that binds the actual ports; unwind the first
 * registration if the second fails.
 */
static int __init mcf_init(void)
{
	int rc;

	/* Give the banner an explicit log level */
	printk(KERN_INFO "ColdFire internal UART serial driver\n");
	rc = uart_register_driver(&mcf_driver);
	if (rc)
		return rc;
	rc = platform_driver_register(&mcf_platform_driver);
	if (rc) {
		uart_unregister_driver(&mcf_driver);
		return rc;
	}
	return 0;
}
/****************************************************************************/
/* Module exit: tear down in reverse order of registration. */
static void __exit mcf_exit(void)
{
	platform_driver_unregister(&mcf_platform_driver);
	uart_unregister_driver(&mcf_driver);
}
/****************************************************************************/
/* Standard module plumbing and metadata */
module_init(mcf_init);
module_exit(mcf_exit);
MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
MODULE_DESCRIPTION("Freescale ColdFire UART driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mcfuart");
/****************************************************************************/
| gpl-2.0 |
markyzq/kernel-next | arch/x86/kernel/cpu/mtrr/cleanup.c | 3386 | 25307 | /*
* MTRR (Memory Type Range Register) cleanup
*
* Copyright (C) 2009 Yinghai Lu
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/kvm_para.h>
#include <linux/range.h>
#include <asm/processor.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include "mtrr.h"
/* Snapshot of one variable-range MTRR, kept in page-frame units */
struct var_mtrr_range_state {
	unsigned long	base_pfn;	/* start of the range, in pages */
	unsigned long	size_pfn;	/* length of the range, in pages */
	mtrr_type	type;		/* memory type (WB, UC, ...) */
};
/* Running state while laying out replacement variable MTRRs (KB units) */
struct var_mtrr_state {
	unsigned long	range_startk;	/* pending WB range start, in KB */
	unsigned long	range_sizek;	/* pending WB range length, in KB */
	unsigned long	chunk_sizek;	/* minimum chunk that may hold a hole */
	unsigned long	gran_sizek;	/* layout granularity */
	unsigned int	reg;		/* next MTRR register to fill */
};
/* Should be related to MTRR_VAR_RANGES nums */
#define RANGE_NUM	256
/* Scratch list of memory ranges assembled from the variable MTRRs */
static struct range __initdata		range[RANGE_NUM];
static int __initdata				nr_range;
/* Proposed replacement MTRR layout, applied by set_var_mtrr_all() */
static struct var_mtrr_range_state __initdata	range_state[RANGE_NUM];
/* Set by the mtrr_cleanup_debug early param; gates Dprintk() output */
static int __initdata debug_print;
#define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0)
#define BIOS_BUG_MSG KERN_WARNING \
	"WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
/*
 * Build a sorted list of write-back memory ranges from the variable
 * MTRRs: merge in every WB range, then punch out every UC/WRPROT
 * range plus the caller-supplied extra region, and finally sort.
 * Returns the number of resulting ranges.
 */
static int __init
x86_get_mtrr_mem_range(struct range *range, int nr_range,
		       unsigned long extra_remove_base,
		       unsigned long extra_remove_size)
{
	unsigned long base, size;
	mtrr_type type;
	int i;
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type != MTRR_TYPE_WRBACK)
			continue;
		base = range_state[i].base_pfn;
		size = range_state[i].size_pfn;
		nr_range = add_range_with_merge(range, RANGE_NUM, nr_range,
						base, base + size);
	}
	if (debug_print) {
		printk(KERN_DEBUG "After WB checking\n");
		for (i = 0; i < nr_range; i++)
			printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
				 range[i].start, range[i].end);
	}
	/* Take out UC ranges: */
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type != MTRR_TYPE_UNCACHABLE &&
		    type != MTRR_TYPE_WRPROT)
			continue;
		size = range_state[i].size_pfn;
		if (!size)
			continue;
		base = range_state[i].base_pfn;
		if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed &&
		    (mtrr_state.enabled & 1)) {
			/* Var MTRR contains UC entry below 1M? Skip it: */
			printk(BIOS_BUG_MSG, i);
			if (base + size <= (1<<(20-PAGE_SHIFT)))
				continue;
			/* Clip the UC entry so it starts at the 1M mark */
			size -= (1<<(20-PAGE_SHIFT)) - base;
			base = 1<<(20-PAGE_SHIFT);
		}
		subtract_range(range, RANGE_NUM, base, base + size);
	}
	if (extra_remove_size)
		subtract_range(range, RANGE_NUM, extra_remove_base,
				 extra_remove_base + extra_remove_size);
	if (debug_print) {
		printk(KERN_DEBUG "After UC checking\n");
		for (i = 0; i < RANGE_NUM; i++) {
			if (!range[i].end)
				continue;
			printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
				 range[i].start, range[i].end);
		}
	}
	/* sort the ranges */
	nr_range = clean_sort_range(range, RANGE_NUM);
	if (debug_print) {
		printk(KERN_DEBUG "After sorting\n");
		for (i = 0; i < nr_range; i++)
			printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
				 range[i].start, range[i].end);
	}
	return nr_range;
}
#ifdef CONFIG_MTRR_SANITIZER
/* Total length covered by the given list of ranges. */
static unsigned long __init sum_ranges(struct range *range, int nr_range)
{
	unsigned long total = 0;
	int i;

	for (i = 0; i < nr_range; i++)
		total += range[i].end - range[i].start;
	return total;
}
/* Whether to run the cleanup at boot; flipped by the params below */
static int enable_mtrr_cleanup __initdata =
	CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT;
/* "disable_mtrr_cleanup" boot parameter: force the cleanup off */
static int __init disable_mtrr_cleanup_setup(char *str)
{
	enable_mtrr_cleanup = 0;
	return 0;
}
early_param("disable_mtrr_cleanup", disable_mtrr_cleanup_setup);
/* "enable_mtrr_cleanup" boot parameter: force the cleanup on */
static int __init enable_mtrr_cleanup_setup(char *str)
{
	enable_mtrr_cleanup = 1;
	return 0;
}
early_param("enable_mtrr_cleanup", enable_mtrr_cleanup_setup);
/* "mtrr_cleanup_debug" boot parameter: enable Dprintk() tracing */
static int __init mtrr_cleanup_debug_setup(char *str)
{
	debug_print = 1;
	return 0;
}
early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
/*
 * Program one variable MTRR from a base/size pair given in KB.  A zero
 * size clears the register.  The base/mask pair is split into the
 * low/high 32-bit halves expected by fill_mtrr_var_range().
 */
static void __init
set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
	     unsigned char type, unsigned int address_bits)
{
	u32 base_lo, base_hi, mask_lo, mask_hi;
	u64 base, mask;
	if (!sizek) {
		fill_mtrr_var_range(reg, 0, 0, 0, 0);
		return;
	}
	/* Mask covers the physical address width minus the range size */
	mask = (1ULL << address_bits) - 1;
	mask &= ~((((u64)sizek) << 10) - 1);
	base = ((u64)basek) << 10;
	base |= type;
	/* 0x800: the Valid bit (bit 11) of the MTRR physmask register */
	mask |= 0x800;
	base_lo = base & ((1ULL<<32) - 1);
	base_hi = base >> 32;
	mask_lo = mask & ((1ULL<<32) - 1);
	mask_hi = mask >> 32;
	fill_mtrr_var_range(reg, base_lo, base_hi, mask_lo, mask_hi);
}
/*
 * Record one planned variable MTRR (given in KB) into range_state[],
 * converting to page-frame units; applied later by set_var_mtrr_all().
 */
static void __init
save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
	      unsigned char type)
{
	range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10);
	range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10);
	range_state[reg].type = type;
}
/* Flush the whole planned layout in range_state[] into the hardware. */
static void __init set_var_mtrr_all(unsigned int address_bits)
{
	unsigned long basek, sizek;
	unsigned char type;
	unsigned int reg;
	for (reg = 0; reg < num_var_ranges; reg++) {
		/* Convert back from pages to the KB units set_var_mtrr takes */
		basek = range_state[reg].base_pfn << (PAGE_SHIFT - 10);
		sizek = range_state[reg].size_pfn << (PAGE_SHIFT - 10);
		type = range_state[reg].type;
		set_var_mtrr(reg, basek, sizek, type, address_bits);
	}
}
/*
 * Scale a size in KB down to the largest whole unit (K, M or G) and
 * report the unit letter through *factorp.  Returns the scaled value.
 */
static unsigned long to_size_factor(unsigned long sizek, char *factorp)
{
	unsigned long val = sizek;
	char unit;

	if (val & ((1<<10) - 1)) {
		/* Not MB-aligned: leave it in KB */
		unit = 'K';
	} else if (val & ((1<<20) - 1)) {
		/* MB-aligned but not GB-aligned */
		unit = 'M';
		val >>= 10;
	} else {
		unit = 'G';
		val >>= 20;
	}
	*factorp = unit;
	return val;
}
/*
 * Cover [range_startk, range_startk + range_sizek) (KB units) with as
 * few MTRRs of the given type as possible, starting at register `reg`.
 * Each step uses the largest power-of-two size allowed by both the
 * current base alignment and the remaining length.  Returns the next
 * free register index.
 */
static unsigned int __init
range_to_mtrr(unsigned int reg, unsigned long range_startk,
	      unsigned long range_sizek, unsigned char type)
{
	if (!range_sizek || (reg >= num_var_ranges))
		return reg;
	while (range_sizek) {
		unsigned long max_align, align;
		unsigned long sizek;
		/* Compute the maximum size with which we can make a range: */
		if (range_startk)
			max_align = __ffs(range_startk);
		else
			max_align = BITS_PER_LONG - 1;
		align = __fls(range_sizek);
		if (align > max_align)
			align = max_align;
		sizek = 1UL << align;
		if (debug_print) {
			char start_factor = 'K', size_factor = 'K';
			unsigned long start_base, size_base;
			start_base = to_size_factor(range_startk, &start_factor);
			size_base = to_size_factor(sizek, &size_factor);
			Dprintk("Setting variable MTRR %d, "
				"base: %ld%cB, range: %ld%cB, type %s\n",
				reg, start_base, start_factor,
				size_base, size_factor,
				(type == MTRR_TYPE_UNCACHABLE) ? "UC" :
				   ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other")
				);
		}
		save_var_mtrr(reg++, range_startk, sizek, type);
		range_startk += sizek;
		range_sizek -= sizek;
		/* Out of registers: silently drop the rest of the range */
		if (reg >= num_var_ranges)
			break;
	}
	return reg;
}
/*
 * Lay out the pending WB range in `state`, optionally rounding it up
 * to a chunk boundary and covering the overshoot with a UC "hole"
 * MTRR when that uses fewer registers.  `basek`/`sizek` describe the
 * next range (0/0 for the last one) so the layout can avoid spilling
 * into it.  Returns the size (KB) borrowed from the next range, which
 * the caller must skip.
 */
static unsigned __init
range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
			unsigned long sizek)
{
	unsigned long hole_basek, hole_sizek;
	unsigned long second_basek, second_sizek;
	unsigned long range0_basek, range0_sizek;
	unsigned long range_basek, range_sizek;
	unsigned long chunk_sizek;
	unsigned long gran_sizek;
	hole_basek = 0;
	hole_sizek = 0;
	second_basek = 0;
	second_sizek = 0;
	chunk_sizek = state->chunk_sizek;
	gran_sizek = state->gran_sizek;
	/* Align with gran size, prevent small block used up MTRRs: */
	range_basek = ALIGN(state->range_startk, gran_sizek);
	if ((range_basek > basek) && basek)
		return second_sizek;
	state->range_sizek -= (range_basek - state->range_startk);
	/* Round the length down to a granularity multiple */
	range_sizek = ALIGN(state->range_sizek, gran_sizek);
	while (range_sizek > state->range_sizek) {
		range_sizek -= gran_sizek;
		if (!range_sizek)
			return 0;
	}
	state->range_sizek = range_sizek;
	/* Try to append some small hole: */
	range0_basek = state->range_startk;
	range0_sizek = ALIGN(state->range_sizek, chunk_sizek);
	/* No increase: */
	if (range0_sizek == state->range_sizek) {
		Dprintk("rangeX: %016lx - %016lx\n",
			range0_basek<<10,
			(range0_basek + state->range_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, range0_basek,
				state->range_sizek, MTRR_TYPE_WRBACK);
		return 0;
	}
	/* Only cut back when it is not the last: */
	if (sizek) {
		while (range0_basek + range0_sizek > (basek + sizek)) {
			if (range0_sizek >= chunk_sizek)
				range0_sizek -= chunk_sizek;
			else
				range0_sizek = 0;
			if (!range0_sizek)
				break;
		}
	}
second_try:
	range_basek = range0_basek + range0_sizek;
	/* One hole in the middle: */
	if (range_basek > basek && range_basek <= (basek + sizek))
		second_sizek = range_basek - basek;
	if (range0_sizek > state->range_sizek) {
		/* One hole in middle or at the end: */
		hole_sizek = range0_sizek - state->range_sizek - second_sizek;
		/* Hole size should be less than half of range0 size: */
		if (hole_sizek >= (range0_sizek >> 1) &&
		    range0_sizek >= chunk_sizek) {
			/* Hole too big: retry with a smaller chunked range */
			range0_sizek -= chunk_sizek;
			second_sizek = 0;
			hole_sizek = 0;
			goto second_try;
		}
	}
	if (range0_sizek) {
		Dprintk("range0: %016lx - %016lx\n",
			range0_basek<<10,
			(range0_basek + range0_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, range0_basek,
				range0_sizek, MTRR_TYPE_WRBACK);
	}
	if (range0_sizek < state->range_sizek) {
		/* Need to handle left over range: */
		range_sizek = state->range_sizek - range0_sizek;
		Dprintk("range: %016lx - %016lx\n",
			 range_basek<<10,
			 (range_basek + range_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, range_basek,
				 range_sizek, MTRR_TYPE_WRBACK);
	}
	if (hole_sizek) {
		hole_basek = range_basek - hole_sizek - second_sizek;
		Dprintk("hole: %016lx - %016lx\n",
			 hole_basek<<10,
			 (hole_basek + hole_sizek)<<10);
		state->reg = range_to_mtrr(state->reg, hole_basek,
				 hole_sizek, MTRR_TYPE_UNCACHABLE);
	}
	return second_sizek;
}
/*
 * Fold one memory range (in pages) into the variable-MTRR layout being
 * accumulated in @state.  A range that is contiguous with the pending
 * range (or starts at/below 1M) is merged into it; otherwise the pending
 * range is converted to MTRR register state and a new pending range is
 * started at @base_pfn.
 */
static void __init
set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
		   unsigned long size_pfn)
{
	unsigned long basek, sizek;
	unsigned long second_sizek = 0;

	/* All variable MTRR registers already consumed - nothing to do. */
	if (state->reg >= num_var_ranges)
		return;

	/* Convert page frame numbers to KiB units. */
	basek = base_pfn << (PAGE_SHIFT - 10);
	sizek = size_pfn << (PAGE_SHIFT - 10);

	/* See if I can merge with the last range: */
	if ((basek <= 1024) ||
	    (state->range_startk + state->range_sizek == basek)) {
		unsigned long endk = basek + sizek;
		/* NOTE(review): for the basek <= 1M case this assumes the
		 * pending range starts at or below basek - confirm callers. */
		state->range_sizek = endk - state->range_startk;
		return;
	}

	/* Write the range mtrrs: */
	if (state->range_sizek != 0)
		second_sizek = range_to_mtrr_with_hole(state, basek, sizek);

	/* Allocate an msr: start a fresh pending range, skipping any part
	 * already covered by the "second" range emitted above. */
	state->range_startk = basek + second_sizek;
	state->range_sizek = sizek - second_sizek;
}
/* Mininum size of mtrr block that can take hole: */
static u64 mtrr_chunk_size __initdata = (256ULL<<20);
/* Handle the "mtrr_chunk_size=" early parameter (memparse suffixes OK). */
static int __init parse_mtrr_chunk_size_opt(char *p)
{
	if (p == NULL)
		return -EINVAL;

	mtrr_chunk_size = memparse(p, &p);

	return 0;
}
early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt);
/* Granularity of mtrr of block: */
static u64 mtrr_gran_size __initdata;
/* Handle the "mtrr_gran_size=" early parameter (memparse suffixes OK). */
static int __init parse_mtrr_gran_size_opt(char *p)
{
	if (p == NULL)
		return -EINVAL;

	mtrr_gran_size = memparse(p, &p);

	return 0;
}
early_param("mtrr_gran_size", parse_mtrr_gran_size_opt);
static unsigned long nr_mtrr_spare_reg __initdata =
CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT;
/* Handle the "mtrr_spare_reg_nr=" early parameter (plain integer). */
static int __init parse_mtrr_spare_reg(char *arg)
{
	if (arg != NULL)
		nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0);

	return 0;
}
early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);
/*
 * Convert the @nr_range memory ranges into variable-MTRR register state
 * (stored in range_state[]) using the given chunk and granularity sizes.
 *
 * Returns the number of variable MTRR registers consumed; any remaining
 * registers are cleared to zero.
 */
static int __init
x86_setup_var_mtrrs(struct range *range, int nr_range,
		    u64 chunk_size, u64 gran_size)
{
	struct var_mtrr_state var_state;
	int num_reg;
	int i;

	var_state.range_startk = 0;
	var_state.range_sizek = 0;
	var_state.reg = 0;
	/* chunk/gran are byte sizes; internal bookkeeping is in KiB. */
	var_state.chunk_sizek = chunk_size >> 10;
	var_state.gran_sizek = gran_size >> 10;

	memset(range_state, 0, sizeof(range_state));

	/* Write the range: */
	for (i = 0; i < nr_range; i++) {
		set_var_mtrr_range(&var_state, range[i].start,
				   range[i].end - range[i].start);
	}

	/* Write the last range: flush whatever is still pending. */
	if (var_state.range_sizek != 0)
		range_to_mtrr_with_hole(&var_state, 0, 0);

	num_reg = var_state.reg;

	/* Clear out the extra MTRR's: */
	while (var_state.reg < num_var_ranges) {
		save_var_mtrr(var_state.reg, 0, 0, 0);
		var_state.reg++;
	}

	return num_reg;
}
/*
 * Bookkeeping for one (gran_size, chunk_size) candidate layout tried by
 * mtrr_cleanup(): how many registers it uses and how much RAM coverage
 * it loses compared to the BIOS setup.
 */
struct mtrr_cleanup_result {
	unsigned long gran_sizek;	/* granularity tried, in KiB */
	unsigned long chunk_sizek;	/* chunk size tried, in KiB */
	unsigned long lose_cover_sizek;	/* RAM coverage lost, in KiB */
	unsigned int num_reg;		/* variable MTRRs consumed */
	int bad;			/* candidate produced wrong coverage */
};
/*
* gran_size: 64K, 128K, 256K, 512K, 1M, 2M, ..., 2G
* chunk size: gran_size, ..., 2G
* so we need (1+16)*8
*/
#define NUM_RESULT 136
#define PSHIFT (PAGE_SHIFT - 10)
static struct mtrr_cleanup_result __initdata result[NUM_RESULT];
static unsigned long __initdata min_loss_pfn[RANGE_NUM];
/*
 * Dump the current variable-MTRR layout held in range_state[] to the
 * kernel log, one line per non-empty register, with human-readable
 * base/size units and memory type.
 */
static void __init print_out_mtrr_range_state(void)
{
	char start_factor = 'K', size_factor = 'K';
	unsigned long start_base, size_base;
	mtrr_type type;
	int i;

	for (i = 0; i < num_var_ranges; i++) {
		size_base = range_state[i].size_pfn << (PAGE_SHIFT - 10);
		if (!size_base)
			continue;

		/* Pre-existing quirk: these statements end with the comma
		 * operator rather than ';' - harmless but easy to misread. */
		size_base = to_size_factor(size_base, &size_factor),
		start_base = range_state[i].base_pfn << (PAGE_SHIFT - 10);
		start_base = to_size_factor(start_base, &start_factor),
		type = range_state[i].type;

		printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
			i, start_base, start_factor,
			size_base, size_factor,
			(type == MTRR_TYPE_UNCACHABLE) ? "UC" :
			    ((type == MTRR_TYPE_WRPROT) ? "WP" :
			     ((type == MTRR_TYPE_WRBACK) ? "WB" : "Other"))
			);
	}
}
/*
 * Decide whether the BIOS variable-MTRR layout is a candidate for
 * cleanup.  Returns 1 only if at least one UC entry exists and every
 * non-empty entry is either WB or UC - the only layouts the rewriting
 * code below knows how to handle.
 */
static int __init mtrr_need_cleanup(void)
{
	int i;
	mtrr_type type;
	unsigned long size;
	/* Extra one for all 0: slot MTRR_NUM_TYPES counts empty entries. */
	int num[MTRR_NUM_TYPES + 1];

	/* Check entries number: */
	memset(num, 0, sizeof(num));
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		size = range_state[i].size_pfn;
		if (type >= MTRR_NUM_TYPES)
			continue;
		if (!size)
			type = MTRR_NUM_TYPES;
		num[type]++;
	}

	/* Check if we got UC entries: */
	if (!num[MTRR_TYPE_UNCACHABLE])
		return 0;

	/* Check if we only had WB and UC */
	if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
	    num_var_ranges - num[MTRR_NUM_TYPES])
		return 0;

	return 1;
}
static unsigned long __initdata range_sums;
/*
 * Simulate one (chunk_size, gran_size) candidate: build the MTRR register
 * state for it, re-derive the memory ranges that state would cover, and
 * record register usage / lost coverage into result[@i].  Also tracks the
 * best (minimum) loss seen for each register count in min_loss_pfn[].
 */
static void __init
mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
		      unsigned long x_remove_base,
		      unsigned long x_remove_size, int i)
{
	static struct range range_new[RANGE_NUM];
	unsigned long range_sums_new;
	static int nr_range_new;
	int num_reg;

	/* Convert ranges to var ranges state: */
	num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);

	/* We got new setting in range_state, check it: */
	memset(range_new, 0, sizeof(range_new));
	nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
					      x_remove_base, x_remove_size);
	range_sums_new = sum_ranges(range_new, nr_range_new);

	result[i].chunk_sizek = chunk_size >> 10;
	result[i].gran_sizek = gran_size >> 10;
	result[i].num_reg = num_reg;

	if (range_sums < range_sums_new) {
		/* Candidate covers MORE than the original: that is wrong. */
		result[i].lose_cover_sizek = (range_sums_new - range_sums) << PSHIFT;
		result[i].bad = 1;
	} else {
		result[i].lose_cover_sizek = (range_sums - range_sums_new) << PSHIFT;
	}

	/* Double check it: same totals must also mean identical ranges. */
	if (!result[i].bad && !result[i].lose_cover_sizek) {
		if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range)))
			result[i].bad = 1;
	}

	if (!result[i].bad && (range_sums - range_sums_new < min_loss_pfn[num_reg]))
		min_loss_pfn[num_reg] = range_sums - range_sums_new;
}
/*
 * Print one candidate result[] entry: granularity, chunk size, register
 * count and lost coverage, flagging bad candidates with "*BAD*".
 */
static void __init mtrr_print_out_one_result(int i)
{
	unsigned long gran_base, chunk_base, lose_base;
	char gran_factor, chunk_factor, lose_factor;

	gran_base = to_size_factor(result[i].gran_sizek, &gran_factor);
	chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor);
	lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor);

	pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t",
		result[i].bad ? "*BAD*" : " ",
		gran_base, gran_factor, chunk_base, chunk_factor);
	pr_cont("num_reg: %d \tlose cover RAM: %s%ld%c\n",
		result[i].num_reg, result[i].bad ? "-" : "",
		lose_base, lose_factor);
}
/*
 * Pick the best candidate: the one using the fewest registers (leaving
 * at least nr_mtrr_spare_reg spare) with zero lost coverage.  Returns
 * the index into result[], or -1 if no loss-free candidate exists.
 */
static int __init mtrr_search_optimal_index(void)
{
	int num_reg_good;
	int index_good;
	int i;

	if (nr_mtrr_spare_reg >= num_var_ranges)
		nr_mtrr_spare_reg = num_var_ranges - 1;

	/* Descend so the final assignment is the SMALLEST register count
	 * that achieved zero loss. */
	num_reg_good = -1;
	for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
		if (!min_loss_pfn[i])
			num_reg_good = i;
	}

	/* Find the first recorded candidate that hits that register count. */
	index_good = -1;
	if (num_reg_good != -1) {
		for (i = 0; i < NUM_RESULT; i++) {
			if (!result[i].bad &&
			    result[i].num_reg == num_reg_good &&
			    !result[i].lose_cover_sizek) {
				index_good = i;
				break;
			}
		}
	}

	return index_good;
}
/*
 * mtrr_cleanup - try to rewrite the BIOS variable-MTRR layout.
 *
 * Searches (gran_size, chunk_size) combinations (or uses the user's
 * mtrr_gran_size/mtrr_chunk_size if both were given) for an equivalent
 * layout that covers the same RAM while freeing up registers.
 *
 * Returns 1 if a new layout was applied, 0 otherwise.
 */
int __init mtrr_cleanup(unsigned address_bits)
{
	unsigned long x_remove_base, x_remove_size;
	unsigned long base, size, def, dummy;
	u64 chunk_size, gran_size;
	mtrr_type type;
	int index_good;
	int i;

	/* Only Intel-style MTRRs, and only if cleanup was requested. */
	if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
		return 0;

	/* The rewrite only makes sense with a UC default type. */
	rdmsr(MSR_MTRRdefType, def, dummy);
	def &= 0xff;
	if (def != MTRR_TYPE_UNCACHABLE)
		return 0;

	/* Get it and store it aside: */
	memset(range_state, 0, sizeof(range_state));
	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i, &base, &size, &type);
		range_state[i].base_pfn = base;
		range_state[i].size_pfn = size;
		range_state[i].type = type;
	}

	/* Check if we need handle it and can handle it: */
	if (!mtrr_need_cleanup())
		return 0;

	/* Print original var MTRRs at first, for debugging: */
	printk(KERN_DEBUG "original variable MTRRs\n");
	print_out_mtrr_range_state();

	memset(range, 0, sizeof(range));
	/* Exclude the [4G, tom2) window the hardware forces to WB. */
	x_remove_size = 0;
	x_remove_base = 1 << (32 - PAGE_SHIFT);
	if (mtrr_tom2)
		x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;

	/*
	 * [0, 1M) should always be covered by var mtrr with WB
	 * and fixed mtrrs should take effect before var mtrr for it:
	 */
	nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
					1ULL<<(20 - PAGE_SHIFT));
	/* add from var mtrr at last */
	nr_range = x86_get_mtrr_mem_range(range, nr_range,
					  x_remove_base, x_remove_size);

	/* Baseline coverage the candidates must match. */
	range_sums = sum_ranges(range, nr_range);
	printk(KERN_INFO "total RAM covered: %ldM\n",
	       range_sums >> (20 - PAGE_SHIFT));

	/* User supplied both knobs: try that single combination first. */
	if (mtrr_chunk_size && mtrr_gran_size) {
		i = 0;
		mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size,
				      x_remove_base, x_remove_size, i);

		mtrr_print_out_one_result(i);

		if (!result[i].bad) {
			set_var_mtrr_all(address_bits);
			printk(KERN_DEBUG "New variable MTRRs\n");
			print_out_mtrr_range_state();
			return 1;
		}
		printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
		       "will find optimal one\n");
	}

	/* Exhaustive search over gran 64K..2G, chunk gran..2G. */
	i = 0;
	memset(min_loss_pfn, 0xff, sizeof(min_loss_pfn));
	memset(result, 0, sizeof(result));
	for (gran_size = (1ULL<<16); gran_size < (1ULL<<32); gran_size <<= 1) {

		for (chunk_size = gran_size; chunk_size < (1ULL<<32);
		     chunk_size <<= 1) {

			if (i >= NUM_RESULT)
				continue;

			mtrr_calc_range_state(chunk_size, gran_size,
					      x_remove_base, x_remove_size, i);
			if (debug_print) {
				mtrr_print_out_one_result(i);
				printk(KERN_INFO "\n");
			}

			i++;
		}
	}

	/* Try to find the optimal index: */
	index_good = mtrr_search_optimal_index();

	if (index_good != -1) {
		printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
		i = index_good;
		mtrr_print_out_one_result(i);

		/* Convert ranges to var ranges state: */
		chunk_size = result[i].chunk_sizek;
		chunk_size <<= 10;
		gran_size = result[i].gran_sizek;
		gran_size <<= 10;
		x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
		set_var_mtrr_all(address_bits);
		printk(KERN_DEBUG "New variable MTRRs\n");
		print_out_mtrr_range_state();
		return 1;
	} else {
		/* print out all */
		for (i = 0; i < NUM_RESULT; i++)
			mtrr_print_out_one_result(i);
	}

	printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n");
	printk(KERN_INFO "please specify mtrr_gran_size/mtrr_chunk_size\n");

	return 0;
}
#else
/* MTRR cleanup support compiled out: report "nothing changed". */
int __init mtrr_cleanup(unsigned address_bits)
{
	return 0;
}
#endif
static int disable_mtrr_trim;
/* Handle the "disable_mtrr_trim" early parameter: its mere presence
 * disables MTRR-based memory trimming; the argument text is ignored. */
static int __init disable_mtrr_trim_setup(char *arg)
{
	disable_mtrr_trim = 1;

	return 0;
}
early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
/*
* Newer AMD K8s and later CPUs have a special magic MSR way to force WB
* for memory >4GB. Check for that here.
* Note this won't check if the MTRRs < 4GB where the magic bit doesn't
* apply to are wrong, but so far we don't know of any such case in the wild.
*/
#define Tom2Enabled (1U << 21)
#define Tom2ForceMemTypeWB (1U << 22)
/*
 * Detect the AMD K8+ SYSCFG "Tom2" feature: when both Tom2Enabled and
 * Tom2ForceMemTypeWB are set, memory between 4GB and top-of-memory-2 is
 * forced write-back regardless of MTRRs.  Returns 1 when active.
 */
int __init amd_special_default_mtrr(void)
{
	u32 l, h;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return 0;
	/* Feature exists only on K8 (family 0xf) and later. */
	if (boot_cpu_data.x86 < 0xf)
		return 0;
	/* In case some hypervisor doesn't pass SYSCFG through: */
	if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
		return 0;
	/*
	 * Memory between 4GB and top of mem is forced WB by this magic bit.
	 * Reserved before K8RevF, but should be zero there.
	 */
	if ((l & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
	    (Tom2Enabled | Tom2ForceMemTypeWB))
		return 1;

	return 0;
}
/*
 * Reclassify the page range [start_pfn, limit_pfn) from E820_RAM to
 * E820_RESERVED.  Returns the number of bytes actually updated, as
 * reported by e820_update_range().
 */
static u64 __init
real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn)
{
	u64 trim_start = (u64)start_pfn << PAGE_SHIFT;
	u64 trim_end = (u64)limit_pfn << PAGE_SHIFT;

	return e820_update_range(trim_start, trim_end - trim_start,
				 E820_RAM, E820_RESERVED);
}
/**
 * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
 * @end_pfn: ending page frame number
 *
 * Some buggy BIOSes don't setup the MTRRs properly for systems with certain
 * memory configurations.  This routine checks that the highest MTRR matches
 * the end of memory, to make sure the MTRRs having a write back type cover
 * all of the memory the kernel is intending to use.  If not, it'll trim any
 * memory off the end by adjusting end_pfn, removing it from the kernel's
 * allocation pools, warning the user with an obnoxious message.
 *
 * Returns 1 if the e820 map was changed (caller must re-process it),
 * 0 if nothing was trimmed.
 */
int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
{
	unsigned long i, base, size, highest_pfn = 0, def, dummy;
	mtrr_type type;
	u64 total_trim_size;
	/* extra one for all 0 */
	int num[MTRR_NUM_TYPES + 1];

	/*
	 * Make sure we only trim uncachable memory on machines that
	 * support the Intel MTRR architecture:
	 */
	if (!is_cpu(INTEL) || disable_mtrr_trim)
		return 0;

	/* Trimming only makes sense with a UC default type. */
	rdmsr(MSR_MTRRdefType, def, dummy);
	def &= 0xff;
	if (def != MTRR_TYPE_UNCACHABLE)
		return 0;

	/* Get it and store it aside: */
	memset(range_state, 0, sizeof(range_state));
	for (i = 0; i < num_var_ranges; i++) {
		mtrr_if->get(i, &base, &size, &type);
		range_state[i].base_pfn = base;
		range_state[i].size_pfn = size;
		range_state[i].type = type;
	}

	/* Find highest cached pfn: */
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type != MTRR_TYPE_WRBACK)
			continue;
		base = range_state[i].base_pfn;
		size = range_state[i].size_pfn;
		if (highest_pfn < base + size)
			highest_pfn = base + size;
	}

	/* kvm/qemu doesn't have mtrr set right, don't trim them all: */
	if (!highest_pfn) {
		printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
		return 0;
	}

	/* Check entries number: */
	memset(num, 0, sizeof(num));
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		if (type >= MTRR_NUM_TYPES)
			continue;
		size = range_state[i].size_pfn;
		if (!size)
			type = MTRR_NUM_TYPES;
		num[type]++;
	}

	/* No entry for WB? */
	if (!num[MTRR_TYPE_WRBACK])
		return 0;

	/* Check if we only had WB and UC: */
	if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
	    num_var_ranges - num[MTRR_NUM_TYPES])
		return 0;

	memset(range, 0, sizeof(range));
	nr_range = 0;
	/* The [4G, tom2) window is implicitly WB - count it as covered. */
	if (mtrr_tom2) {
		range[nr_range].start = (1ULL<<(32 - PAGE_SHIFT));
		range[nr_range].end = mtrr_tom2 >> PAGE_SHIFT;
		if (highest_pfn < range[nr_range].end)
			highest_pfn = range[nr_range].end;
		nr_range++;
	}
	nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);

	/* Check the head: */
	total_trim_size = 0;
	if (range[0].start)
		total_trim_size += real_trim_memory(0, range[0].start);

	/* Check the holes: */
	for (i = 0; i < nr_range - 1; i++) {
		if (range[i].end < range[i+1].start)
			total_trim_size += real_trim_memory(range[i].end,
							    range[i+1].start);
	}

	/* Check the top: */
	i = nr_range - 1;
	if (range[i].end < end_pfn)
		total_trim_size += real_trim_memory(range[i].end,
						    end_pfn);

	if (total_trim_size) {
		pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20);

		/* Only warn loudly if this wasn't our own cleanup's doing. */
		if (!changed_by_mtrr_cleanup)
			WARN_ON(1);

		pr_info("update e820 for mtrr\n");
		update_e820();

		return 1;
	}

	return 0;
}
| gpl-2.0 |
UberSlim/KernelSanders_L90 | drivers/edac/e752x_edac.c | 3898 | 40640 | /*
* Intel e752x Memory Controller kernel module
* (C) 2004 Linux Networx (http://lnxi.com)
* This file may be distributed under the terms of the
* GNU General Public License.
*
* See "enum e752x_chips" below for supported chipsets
*
* Written by Tom Zimmerman
*
* Contributors:
* Thayne Harbaugh at realmsys.com (?)
* Wang Zhenyu at intel.com
* Dave Jiang at mvista.com
*
* $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_core.h"
#define E752X_REVISION " Ver: 2.0.2"
#define EDAC_MOD_STR "e752x_edac"
static int report_non_memory_errors;
static int force_function_unhide;
static int sysbus_parity = -1;
static struct edac_pci_ctl_info *e752x_pci;
#define e752x_printk(level, fmt, arg...) \
edac_printk(level, "e752x", fmt, ##arg)
#define e752x_mc_printk(mci, level, fmt, arg...) \
edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
#ifndef PCI_DEVICE_ID_INTEL_7520_0
#define PCI_DEVICE_ID_INTEL_7520_0 0x3590
#endif /* PCI_DEVICE_ID_INTEL_7520_0 */
#ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
#define PCI_DEVICE_ID_INTEL_7520_1_ERR 0x3591
#endif /* PCI_DEVICE_ID_INTEL_7520_1_ERR */
#ifndef PCI_DEVICE_ID_INTEL_7525_0
#define PCI_DEVICE_ID_INTEL_7525_0 0x359E
#endif /* PCI_DEVICE_ID_INTEL_7525_0 */
#ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
#define PCI_DEVICE_ID_INTEL_7525_1_ERR 0x3593
#endif /* PCI_DEVICE_ID_INTEL_7525_1_ERR */
#ifndef PCI_DEVICE_ID_INTEL_7320_0
#define PCI_DEVICE_ID_INTEL_7320_0 0x3592
#endif /* PCI_DEVICE_ID_INTEL_7320_0 */
#ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
#define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
#endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
#ifndef PCI_DEVICE_ID_INTEL_3100_0
#define PCI_DEVICE_ID_INTEL_3100_0 0x35B0
#endif /* PCI_DEVICE_ID_INTEL_3100_0 */
#ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
#define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1
#endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */
#define E752X_NR_CSROWS 8 /* number of csrows */
/* E752X register addresses - device 0 function 0 */
#define E752X_MCHSCRB 0x52 /* Memory Scrub register (16b) */
/*
* 6:5 Scrub Completion Count
* 3:2 Scrub Rate (i3100 only)
* 01=fast 10=normal
* 1:0 Scrub Mode enable
* 00=off 10=on
*/
#define E752X_DRB 0x60 /* DRAM row boundary register (8b) */
#define E752X_DRA 0x70 /* DRAM row attribute register (8b) */
/*
* 31:30 Device width row 7
* 01=x8 10=x4 11=x8 DDR2
* 27:26 Device width row 6
* 23:22 Device width row 5
* 19:20 Device width row 4
* 15:14 Device width row 3
* 11:10 Device width row 2
* 7:6 Device width row 1
* 3:2 Device width row 0
*/
#define E752X_DRC 0x7C /* DRAM controller mode reg (32b) */
/* FIXME:IS THIS RIGHT? */
/*
* 22 Number channels 0=1,1=2
* 19:18 DRB Granularity 32/64MB
*/
#define E752X_DRM 0x80 /* Dimm mapping register */
#define E752X_DDRCSR 0x9A /* DDR control and status reg (16b) */
/*
* 14:12 1 single A, 2 single B, 3 dual
*/
#define E752X_TOLM 0xC4 /* DRAM top of low memory reg (16b) */
#define E752X_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */
#define E752X_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */
#define E752X_REMAPOFFSET 0xCA /* DRAM remap limit offset reg (16b) */
/* E752X register addresses - device 0 function 1 */
#define E752X_FERR_GLOBAL 0x40 /* Global first error register (32b) */
#define E752X_NERR_GLOBAL 0x44 /* Global next error register (32b) */
#define E752X_HI_FERR 0x50 /* Hub interface first error reg (8b) */
#define E752X_HI_NERR 0x52 /* Hub interface next error reg (8b) */
#define E752X_HI_ERRMASK 0x54 /* Hub interface error mask reg (8b) */
#define E752X_HI_SMICMD 0x5A /* Hub interface SMI command reg (8b) */
#define E752X_SYSBUS_FERR 0x60 /* System buss first error reg (16b) */
#define E752X_SYSBUS_NERR 0x62 /* System buss next error reg (16b) */
#define E752X_SYSBUS_ERRMASK 0x64 /* System buss error mask reg (16b) */
#define E752X_SYSBUS_SMICMD 0x6A /* System buss SMI command reg (16b) */
#define E752X_BUF_FERR 0x70 /* Memory buffer first error reg (8b) */
#define E752X_BUF_NERR 0x72 /* Memory buffer next error reg (8b) */
#define E752X_BUF_ERRMASK 0x74 /* Memory buffer error mask reg (8b) */
#define E752X_BUF_SMICMD 0x7A /* Memory buffer SMI cmd reg (8b) */
#define E752X_DRAM_FERR 0x80 /* DRAM first error register (16b) */
#define E752X_DRAM_NERR 0x82 /* DRAM next error register (16b) */
#define E752X_DRAM_ERRMASK 0x84 /* DRAM error mask register (8b) */
#define E752X_DRAM_SMICMD 0x8A /* DRAM SMI command register (8b) */
#define E752X_DRAM_RETR_ADD 0xAC /* DRAM Retry address register (32b) */
#define E752X_DRAM_SEC1_ADD 0xA0 /* DRAM first correctable memory */
/* error address register (32b) */
/*
* 31 Reserved
* 30:2 CE address (64 byte block 34:6
* 1 Reserved
* 0 HiLoCS
*/
#define E752X_DRAM_SEC2_ADD 0xC8 /* DRAM first correctable memory */
/* error address register (32b) */
/*
* 31 Reserved
* 30:2 CE address (64 byte block 34:6)
* 1 Reserved
* 0 HiLoCS
*/
#define E752X_DRAM_DED_ADD 0xA4 /* DRAM first uncorrectable memory */
/* error address register (32b) */
/*
* 31 Reserved
* 30:2 CE address (64 byte block 34:6)
* 1 Reserved
* 0 HiLoCS
*/
#define E752X_DRAM_SCRB_ADD 0xA8 /* DRAM 1st uncorrectable scrub mem */
/* error address register (32b) */
/*
* 31 Reserved
* 30:2 CE address (64 byte block 34:6
* 1 Reserved
* 0 HiLoCS
*/
#define E752X_DRAM_SEC1_SYNDROME 0xC4 /* DRAM first correctable memory */
/* error syndrome register (16b) */
#define E752X_DRAM_SEC2_SYNDROME 0xC6 /* DRAM second correctable memory */
/* error syndrome register (16b) */
#define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */
/* 3100 IMCH specific register addresses - device 0 function 1 */
#define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */
#define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */
#define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */
#define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */
/* ICH5R register addresses - device 30 function 0 */
#define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */
#define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */
#define ICH5R_PCI_BRIDGE_CTL 0x3E /* PCI bridge control register (16b) */
enum e752x_chips {
E7520 = 0,
E7525 = 1,
E7320 = 2,
I3100 = 3
};
/* Per-memory-controller private driver state (hangs off mci->pvt_info). */
struct e752x_pvt {
	struct pci_dev *bridge_ck;	/* error-reporting PCI device - TODO confirm */
	struct pci_dev *dev_d0f0;	/* device 0 function 0 */
	struct pci_dev *dev_d0f1;	/* device 0 function 1 */
	u32 tolm;			/* top of low memory (4k-page units, per ctl_page_to_phys) */
	u32 remapbase;			/* remap window base (4k-page units) */
	u32 remaplimit;			/* remap window limit (4k-page units) */
	int mc_symmetric;		/* nonzero: derive csrow from address bits instead of lookup */
	u8 map[8];			/* csrow remap table used in symmetric mode */
	int map_type;
	const struct e752x_dev_info *dev_info;	/* chipset-specific PCI IDs/name */
};
struct e752x_dev_info {
u16 err_dev;
u16 ctl_dev;
const char *ctl_name;
};
struct e752x_error_info {
u32 ferr_global;
u32 nerr_global;
u32 nsi_ferr; /* 3100 only */
u32 nsi_nerr; /* 3100 only */
u8 hi_ferr; /* all but 3100 */
u8 hi_nerr; /* all but 3100 */
u16 sysbus_ferr;
u16 sysbus_nerr;
u8 buf_ferr;
u8 buf_nerr;
u16 dram_ferr;
u16 dram_nerr;
u32 dram_sec1_add;
u32 dram_sec2_add;
u16 dram_sec1_syndrome;
u16 dram_sec2_syndrome;
u32 dram_ded_add;
u32 dram_scrb_add;
u32 dram_retr_add;
};
static const struct e752x_dev_info e752x_devs[] = {
[E7520] = {
.err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
.ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
.ctl_name = "E7520"},
[E7525] = {
.err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
.ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
.ctl_name = "E7525"},
[E7320] = {
.err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
.ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
.ctl_name = "E7320"},
[I3100] = {
.err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
.ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
.ctl_name = "3100"},
};
/* Valid scrub rates for the e752x/3100 hardware memory scrubber. We
* map the scrubbing bandwidth to a hardware register value. The 'set'
* operation finds the 'matching or higher value'. Note that scrubbing
* on the e752x can only be enabled/disabled. The 3100 supports
* a normal and fast mode.
*/
#define SDRATE_EOT 0xFFFFFFFF
struct scrubrate {
u32 bandwidth; /* bandwidth consumed by scrubbing in bytes/sec */
u16 scrubval; /* register value for scrub rate */
};
/* Rate below assumes same performance as i3100 using PC3200 DDR2 in
* normal mode. e752x bridges don't support choosing normal or fast mode,
* so the scrubbing bandwidth value isn't all that important - scrubbing is
* either on or off.
*/
static const struct scrubrate scrubrates_e752x[] = {
{0, 0x00}, /* Scrubbing Off */
{500000, 0x02}, /* Scrubbing On */
{SDRATE_EOT, 0x00} /* End of Table */
};
/* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s
* Normal mode: 125 (32000 / 256) times slower than fast mode.
*/
static const struct scrubrate scrubrates_i3100[] = {
{0, 0x00}, /* Scrubbing Off */
{500000, 0x0a}, /* Normal mode - 32k clocks */
{62500000, 0x06}, /* Fast mode - 256 clocks */
{SDRATE_EOT, 0x00} /* End of Table */
};
/*
 * Translate a controller-reported page number to a physical page,
 * undoing the chipset's memory remap window.  Pages below top-of-low-
 * memory, and pages between 4GB and the remap base, map 1:1; pages in
 * the remap window are shifted by (remapbase - tolm).
 */
static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
				unsigned long page)
{
	u32 remap;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	debugf3("%s()\n", __func__);

	/* Below top-of-low-memory: identity mapped. */
	if (page < pvt->tolm)
		return page;

	/* Between 4GB (0x100000 4k-pages) and the remap base: identity. */
	if ((page >= 0x100000) && (page < pvt->remapbase))
		return page;

	/* Inside the remap window: apply the offset. */
	remap = (page - pvt->tolm) + pvt->remapbase;

	if (remap < pvt->remaplimit)
		return remap;

	/* Out of every known region - report and clamp to a safe page. */
	e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
	return pvt->tolm - 1;
}
/*
 * Decode and report one correctable error: derive the 4k page from the
 * controller's CE address register, resolve the csrow (directly in
 * symmetric mode, via EDAC lookup otherwise) and hand the event to the
 * EDAC core.
 */
static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
			u32 sec1_add, u16 sec1_syndrome)
{
	u32 page;
	int row;
	int channel;
	int i;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	debugf3("%s()\n", __func__);

	/* convert the addr to 4k page */
	page = sec1_add >> (PAGE_SHIFT - 4);

	/* FIXME - check for -1 */
	if (pvt->mc_symmetric) {
		/* chip select are bits 14 & 13 */
		row = ((page >> 1) & 3);
		e752x_printk(KERN_WARNING,
			"Test row %d Table %d %d %d %d %d %d %d %d\n", row,
			pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
			pvt->map[4], pvt->map[5], pvt->map[6],
			pvt->map[7]);

		/* test for channel remapping */
		for (i = 0; i < 8; i++) {
			if (pvt->map[i] == row)
				break;
		}

		e752x_printk(KERN_WARNING, "Test computed row %d\n", i);

		if (i < 8)
			row = i;
		else
			e752x_mc_printk(mci, KERN_WARNING,
					"row %d not found in remap table\n",
					row);
	} else
		row = edac_mc_find_csrow_by_page(mci, page);

	/* 0 = channel A, 1 = channel B */
	channel = !(error_one & 1);

	/* e752x mc reads 34:6 of the DRAM linear address */
	edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
			sec1_syndrome, row, channel, "e752x CE");
}
/* Latch that a CE occurred; decode and report it only when asked to. */
static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
			u32 sec1_add, u16 sec1_syndrome, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
}
/*
 * Report one uncorrectable error address to the EDAC core: derive the
 * 4k page, resolve the csrow (symmetric mode decodes chip-select bits
 * 14 & 13 directly; otherwise use the EDAC page lookup) and log it.
 * Extracted helper - the read-UE and scrubber-UE paths below were
 * previously duplicated verbatim.
 */
static void e752x_report_ue_addr(struct mem_ctl_info *mci,
				struct e752x_pvt *pvt, u32 err_add,
				const char *msg)
{
	u32 block_page;
	int row;

	/* convert to 4k address */
	block_page = err_add >> (PAGE_SHIFT - 4);

	row = pvt->mc_symmetric ?
		/* chip select are bits 14 & 13 */
		((block_page >> 1) & 3) :
		edac_mc_find_csrow_by_page(mci, block_page);

	/* e752x mc reads 34:6 of the DRAM linear address */
	edac_mc_handle_ue(mci, block_page,
			offset_in_page(err_add << 4), row, msg);
}

/*
 * Decode and report uncorrectable errors.  Bits 0x0202 in @error_one
 * flag a UE detected on a read (address in @ded_add); bits 0x0404 flag
 * a UE found by the hardware scrubber (address in @scrb_add).
 */
static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
			u32 ded_add, u32 scrb_add)
{
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	debugf3("%s()\n", __func__);

	if (error_one & 0x0202)
		e752x_report_ue_addr(mci, pvt, ded_add,
				"e752x UE from Read");

	if (error_one & 0x0404)
		e752x_report_ue_addr(mci, pvt, scrb_add,
				"e752x UE from Scruber");
}
/* Latch that a UE occurred; decode and report it only when asked to. */
static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
			u32 ded_add, u32 scrb_add, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	do_process_ue(mci, error_one, ded_add, scrb_add);
}
/* Latch a write-path UE for which no address information is logged. */
static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
					int *error_found, int handle_error)
{
	*error_found = 1;

	if (handle_error) {
		debugf3("%s()\n", __func__);
		edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
	}
}
/*
 * Log a memory read retry event (correctable): derive the 4k page and
 * csrow from the retry address register and print a warning.  No EDAC
 * counter is bumped - this is informational only.
 *
 * NOTE(review): @error is currently unused; kept for signature symmetry
 * with the other do_process_*() handlers.
 */
static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
				u32 retry_add)
{
	u32 error_1b, page;
	int row;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	error_1b = retry_add;

	/* convert the addr to 4k page */
	page = error_1b >> (PAGE_SHIFT - 4);

	/* chip select are bits 14 & 13 */
	row = pvt->mc_symmetric ? ((page >> 1) & 3) :
		edac_mc_find_csrow_by_page(mci, page);

	e752x_mc_printk(mci, KERN_WARNING,
			"CE page 0x%lx, row %d : Memory read retry\n",
			(long unsigned int)page, row);
}
/* Latch a read-retry event; decode and log it only when asked to. */
static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
				u32 retry_add, int *error_found,
				int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	do_process_ded_retry(mci, error, retry_add);
}
/* Latch a CE-threshold-crossed event; log it only when asked to. */
static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
				int *error_found, int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
}
static char *global_message[11] = {
"PCI Express C1",
"PCI Express C",
"PCI Express B1",
"PCI Express B",
"PCI Express A1",
"PCI Express A",
"DMA Controller",
"HUB or NS Interface",
"System Bus",
"DRAM Controller", /* 9th entry */
"Internal Buffer"
};
#define DRAM_ENTRY 9
static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
/*
 * Log each set bit of the global error register.  Non-memory sources
 * are only reported when the report_non_memory_errors module parameter
 * is set; the DRAM controller bit is always reported.
 */
static void do_global_error(int fatal, u32 errors)
{
	int bit;

	for (bit = 0; bit < 11; bit++) {
		if (!(errors & (1 << bit)))
			continue;

		/* Skip non-DRAM sources unless full reporting is enabled. */
		if (bit != DRAM_ENTRY && !report_non_memory_errors)
			continue;

		e752x_printk(KERN_WARNING, "%sError %s\n",
			fatal_message[fatal], global_message[bit]);
	}
}
/* Latch a global error; decode and log it only when asked to. */
static inline void global_error(int fatal, u32 errors, int *error_found,
				int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	do_global_error(fatal, errors);
}
static char *hub_message[7] = {
"HI Address or Command Parity", "HI Illegal Access",
"HI Internal Parity", "Out of Range Access",
"HI Data Parity", "Enhanced Config Access",
"Hub Interface Target Abort"
};
/* Log each set bit of a hub-interface error register. */
static void do_hub_error(int fatal, u8 errors)
{
	int bit;

	for (bit = 0; bit < 7; bit++) {
		if (!(errors & (1 << bit)))
			continue;

		e752x_printk(KERN_WARNING, "%sError %s\n",
			fatal_message[fatal], hub_message[bit]);
	}
}
/* Latch a hub-interface error; decode and log it only when asked to. */
static inline void hub_error(int fatal, u8 errors, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	do_hub_error(fatal, errors);
}
#define NSI_FATAL_MASK 0x0c080081
#define NSI_NON_FATAL_MASK 0x23a0ba64
#define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
static char *nsi_message[30] = {
"NSI Link Down", /* NSI_FERR/NSI_NERR bit 0, fatal error */
"", /* reserved */
"NSI Parity Error", /* bit 2, non-fatal */
"", /* reserved */
"", /* reserved */
"Correctable Error Message", /* bit 5, non-fatal */
"Non-Fatal Error Message", /* bit 6, non-fatal */
"Fatal Error Message", /* bit 7, fatal */
"", /* reserved */
"Receiver Error", /* bit 9, non-fatal */
"", /* reserved */
"Bad TLP", /* bit 11, non-fatal */
"Bad DLLP", /* bit 12, non-fatal */
"REPLAY_NUM Rollover", /* bit 13, non-fatal */
"", /* reserved */
"Replay Timer Timeout", /* bit 15, non-fatal */
"", /* reserved */
"", /* reserved */
"", /* reserved */
"Data Link Protocol Error", /* bit 19, fatal */
"", /* reserved */
"Poisoned TLP", /* bit 21, non-fatal */
"", /* reserved */
"Completion Timeout", /* bit 23, non-fatal */
"Completer Abort", /* bit 24, non-fatal */
"Unexpected Completion", /* bit 25, non-fatal */
"Receiver Overflow", /* bit 26, fatal */
"Malformed TLP", /* bit 27, fatal */
"", /* reserved */
"Unsupported Request" /* bit 29, non-fatal */
};
/*
 * Log each set bit of an NSI (i3100) error register.
 *
 * Fix: use e752x_printk() like every other do_*_error() helper in this
 * file, so NSI messages carry the same module prefix instead of going
 * out through a bare printk.
 */
static void do_nsi_error(int fatal, u32 errors)
{
	int i;

	for (i = 0; i < 30; i++) {
		if (errors & (1 << i))
			e752x_printk(KERN_WARNING, "%sError %s\n",
				fatal_message[fatal], nsi_message[i]);
	}
}
/* Latch an NSI error; decode and log it only when asked to. */
static inline void nsi_error(int fatal, u32 errors, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	do_nsi_error(fatal, errors);
}
static char *membuf_message[4] = {
"Internal PMWB to DRAM parity",
"Internal PMWB to System Bus Parity",
"Internal System Bus or IO to PMWB Parity",
"Internal DRAM to PMWB Parity"
};
/* Log each set bit of a memory-buffer error register (all non-fatal). */
static void do_membuf_error(u8 errors)
{
	int bit;

	for (bit = 0; bit < 4; bit++) {
		if (!(errors & (1 << bit)))
			continue;

		e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
			membuf_message[bit]);
	}
}
/* Latch a memory-buffer error; decode and log it only when asked to. */
static inline void membuf_error(u8 errors, int *error_found, int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	do_membuf_error(errors);
}
static char *sysbus_message[10] = {
"Addr or Request Parity",
"Data Strobe Glitch",
"Addr Strobe Glitch",
"Data Parity",
"Addr Above TOM",
"Non DRAM Lock Error",
"MCERR", "BINIT",
"Memory Parity",
"IO Subsystem Parity"
};
/* Log each set bit of a system-bus error register. */
static void do_sysbus_error(int fatal, u32 errors)
{
	int bit;

	for (bit = 0; bit < 10; bit++) {
		if (!(errors & (1 << bit)))
			continue;

		e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
			fatal_message[fatal], sysbus_message[bit]);
	}
}
/* Latch a system-bus error; decode and log it only when asked to. */
static inline void sysbus_error(int fatal, u32 errors, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	do_sysbus_error(fatal, errors);
}
/*
 * Process the hub-interface first-error and next-error registers.
 * Both registers are decoded identically: bits 0x2b are fatal, bits
 * 0x54 are non-fatal.
 */
static void e752x_check_hub_interface(struct e752x_error_info *info,
				int *error_found, int handle_error)
{
	const u8 regs[2] = { info->hi_ferr, info->hi_nerr };
	int i;

	for (i = 0; i < 2; i++) {
		u8 stat8 = regs[i] & 0x7f;

		if (!stat8)
			continue;

		if (stat8 & 0x2b)
			hub_error(1, stat8 & 0x2b, error_found, handle_error);

		if (stat8 & 0x54)
			hub_error(0, stat8 & 0x54, error_found, handle_error);
	}
}
/*
 * Process the NSI (i3100) first-error and next-error registers.  Both
 * are decoded identically against the fatal and non-fatal bit masks.
 */
static void e752x_check_ns_interface(struct e752x_error_info *info,
				int *error_found, int handle_error)
{
	const u32 regs[2] = { info->nsi_ferr, info->nsi_nerr };
	int i;

	for (i = 0; i < 2; i++) {
		u32 stat32 = regs[i];

		if (!(stat32 & NSI_ERR_MASK))
			continue;

		if (stat32 & NSI_FATAL_MASK)
			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
				handle_error);

		if (stat32 & NSI_NON_FATAL_MASK)
			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
				handle_error);
	}
}
/* Decode the system bus first/next error registers. The two 16-bit
 * registers are packed into one 32-bit word (FERR in the low half,
 * NERR in the high half). Within each half, the bits selected by
 * 0x087 are reported as fatal and those selected by 0x378 as
 * non-fatal (bit meanings per sysbus_message[]). */
static void e752x_check_sysbus(struct e752x_error_info *info,
			int *error_found, int handle_error)
{
	u32 stat32, error32;

	/* was: pci_read_config_dword(dev, E752X_SYSBUS_FERR, &stat32); */
	stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);

	if (stat32 == 0)
		return;		/* no errors */

	error32 = (stat32 >> 16) & 0x3ff;	/* next-error (NERR) bits */
	stat32 = stat32 & 0x3ff;		/* first-error (FERR) bits */

	if (stat32 & 0x087)
		sysbus_error(1, stat32 & 0x087, error_found, handle_error);

	if (stat32 & 0x378)
		sysbus_error(0, stat32 & 0x378, error_found, handle_error);

	if (error32 & 0x087)
		sysbus_error(1, error32 & 0x087, error_found, handle_error);

	if (error32 & 0x378)
		sysbus_error(0, error32 & 0x378, error_found, handle_error);
}
/*
 * Decode the memory buffer first-error (buf_ferr) and next-error
 * (buf_nerr) status bytes; only the low four bits are meaningful.
 */
static void e752x_check_membuf(struct e752x_error_info *info,
			int *error_found, int handle_error)
{
	u8 first = info->buf_ferr & 0x0f;
	u8 next = info->buf_nerr & 0x0f;

	if (first)
		membuf_error(first, error_found, handle_error);

	if (next)
		membuf_error(next, error_found, handle_error);
}
/* Decode the DRAM first-error (FERR) and next-error (NERR) registers.
 * Each mask below covers the same flag in both bytes of the 16-bit
 * register (e.g. 0x0101 = bit 0 + bit 8); which class it is follows
 * from the handler it dispatches to:
 *   0x0101 -> process_ce()             correctable error
 *   0x4040 -> process_ue_no_info_wr()  UE with no address info
 *   0x2020 -> process_ded_retry()      DED error on a retried access
 *   0x0808 -> process_threshold_ce()   CE threshold reached
 *   0x0606 -> process_ue()             uncorrectable error
 */
static void e752x_check_dram(struct mem_ctl_info *mci,
			struct e752x_error_info *info, int *error_found,
			int handle_error)
{
	u16 error_one, error_next;

	error_one = info->dram_ferr;
	error_next = info->dram_nerr;

	/* decode and report errors */
	if (error_one & 0x0101)	/* check first error correctable */
		process_ce(mci, error_one, info->dram_sec1_add,
			   info->dram_sec1_syndrome, error_found, handle_error);

	if (error_next & 0x0101)	/* check next error correctable */
		process_ce(mci, error_next, info->dram_sec2_add,
			   info->dram_sec2_syndrome, error_found, handle_error);

	if (error_one & 0x4040)
		process_ue_no_info_wr(mci, error_found, handle_error);

	if (error_next & 0x4040)
		process_ue_no_info_wr(mci, error_found, handle_error);

	if (error_one & 0x2020)
		process_ded_retry(mci, error_one, info->dram_retr_add,
				  error_found, handle_error);

	if (error_next & 0x2020)
		process_ded_retry(mci, error_next, info->dram_retr_add,
				  error_found, handle_error);

	if (error_one & 0x0808)
		process_threshold_ce(mci, error_one, error_found, handle_error);

	if (error_next & 0x0808)
		process_threshold_ce(mci, error_next, error_found,
				     handle_error);

	if (error_one & 0x0606)
		process_ue(mci, error_one, info->dram_ded_add,
			   info->dram_scrb_add, error_found, handle_error);

	if (error_next & 0x0606)
		process_ue(mci, error_next, info->dram_ded_add,
			   info->dram_scrb_add, error_found, handle_error);
}
/* Snapshot every error status register from device 0 function 1 into
 * *info, then write the captured bits back to the hardware.
 * NOTE(review): the write-back of the value just read appears intended
 * to clear the logged errors (write-1-to-clear semantics) - confirm
 * against the chipset datasheet. */
static void e752x_get_error_info(struct mem_ctl_info *mci,
				struct e752x_error_info *info)
{
	struct pci_dev *dev;
	struct e752x_pvt *pvt;

	memset(info, 0, sizeof(*info));
	pvt = (struct e752x_pvt *)mci->pvt_info;
	dev = pvt->dev_d0f1;
	pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);

	/* Only read the detail registers when the global register shows
	 * a first error was logged. */
	if (info->ferr_global) {
		/* The 3100 has an NSI register where the others have a
		 * hub interface register; only one of the two is valid. */
		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
			pci_read_config_dword(dev, I3100_NSI_FERR,
					      &info->nsi_ferr);
			info->hi_ferr = 0;
		} else {
			pci_read_config_byte(dev, E752X_HI_FERR,
					     &info->hi_ferr);
			info->nsi_ferr = 0;
		}
		pci_read_config_word(dev, E752X_SYSBUS_FERR,
				     &info->sysbus_ferr);
		pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
		pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
		pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
				      &info->dram_sec1_add);
		pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
				     &info->dram_sec1_syndrome);
		pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
				      &info->dram_ded_add);
		pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
				      &info->dram_scrb_add);
		pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
				      &info->dram_retr_add);

		/* ignore the reserved bits just in case */
		if (info->hi_ferr & 0x7f)
			pci_write_config_byte(dev, E752X_HI_FERR,
					      info->hi_ferr);

		if (info->nsi_ferr & NSI_ERR_MASK)
			pci_write_config_dword(dev, I3100_NSI_FERR,
					       info->nsi_ferr);

		if (info->sysbus_ferr)
			pci_write_config_word(dev, E752X_SYSBUS_FERR,
					      info->sysbus_ferr);

		if (info->buf_ferr & 0x0f)
			pci_write_config_byte(dev, E752X_BUF_FERR,
					      info->buf_ferr);

		/* DRAM FERR lives on the bridge device, not dev_d0f1 */
		if (info->dram_ferr)
			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_FERR,
					 info->dram_ferr, info->dram_ferr);

		pci_write_config_dword(dev, E752X_FERR_GLOBAL,
				       info->ferr_global);
	}

	pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);

	/* Repeat the same capture/write-back for the "next error" set */
	if (info->nerr_global) {
		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
			pci_read_config_dword(dev, I3100_NSI_NERR,
					      &info->nsi_nerr);
			info->hi_nerr = 0;
		} else {
			pci_read_config_byte(dev, E752X_HI_NERR,
					     &info->hi_nerr);
			info->nsi_nerr = 0;
		}
		pci_read_config_word(dev, E752X_SYSBUS_NERR,
				     &info->sysbus_nerr);
		pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
		pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
		pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
				      &info->dram_sec2_add);
		pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
				     &info->dram_sec2_syndrome);

		if (info->hi_nerr & 0x7f)
			pci_write_config_byte(dev, E752X_HI_NERR,
					      info->hi_nerr);

		if (info->nsi_nerr & NSI_ERR_MASK)
			pci_write_config_dword(dev, I3100_NSI_NERR,
					       info->nsi_nerr);

		if (info->sysbus_nerr)
			pci_write_config_word(dev, E752X_SYSBUS_NERR,
					      info->sysbus_nerr);

		if (info->buf_nerr & 0x0f)
			pci_write_config_byte(dev, E752X_BUF_NERR,
					      info->buf_nerr);

		if (info->dram_nerr)
			pci_write_bits16(pvt->bridge_ck, E752X_DRAM_NERR,
					 info->dram_nerr, info->dram_nerr);

		pci_write_config_dword(dev, E752X_NERR_GLOBAL,
				       info->nerr_global);
	}
}
/* Decode the captured global error registers and dispatch to the
 * per-subsystem checkers. Bits 18+ of ferr/nerr_global are reported
 * as fatal, bits 4..14 as non-fatal (see the first argument passed to
 * global_error()). Returns nonzero if any error was found. */
static int e752x_process_error_info(struct mem_ctl_info *mci,
				struct e752x_error_info *info,
				int handle_errors)
{
	u32 error32, stat32;
	int error_found;

	error_found = 0;

	/* First-error (FERR) global status */
	error32 = (info->ferr_global >> 18) & 0x3ff;
	stat32 = (info->ferr_global >> 4) & 0x7ff;

	if (error32)
		global_error(1, error32, &error_found, handle_errors);

	if (stat32)
		global_error(0, stat32, &error_found, handle_errors);

	/* Next-error (NERR) global status */
	error32 = (info->nerr_global >> 18) & 0x3ff;
	stat32 = (info->nerr_global >> 4) & 0x7ff;

	if (error32)
		global_error(1, error32, &error_found, handle_errors);

	if (stat32)
		global_error(0, stat32, &error_found, handle_errors);

	/* Per-subsystem decoding of the detail registers */
	e752x_check_hub_interface(info, &error_found, handle_errors);
	e752x_check_ns_interface(info, &error_found, handle_errors);
	e752x_check_sysbus(info, &error_found, handle_errors);
	e752x_check_membuf(info, &error_found, handle_errors);
	e752x_check_dram(mci, info, &error_found, handle_errors);
	return error_found;
}
/* Polling entry point (installed as mci->edac_check in e752x_probe1):
 * snapshot the error registers and report anything found. */
static void e752x_check(struct mem_ctl_info *mci)
{
	struct e752x_error_info info;

	debugf3("%s()\n", __func__);
	e752x_get_error_info(mci, &info);
	e752x_process_error_info(mci, &info, 1);
}
/* Program byte/sec bandwidth scrub rate to hardware.
 * Picks the smallest supported rate that is >= new_bw and returns the
 * bandwidth actually programmed, or -1 if new_bw exceeds every table
 * entry. */
static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
{
	const struct scrubrate *scrubrates;
	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
	struct pci_dev *pdev = pvt->dev_d0f0;
	int i;

	/* The i3100 has its own rate table */
	if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
		scrubrates = scrubrates_i3100;
	else
		scrubrates = scrubrates_e752x;

	/* Translate the desired scrub rate to a e752x/3100 register value.
	 * Search for the bandwidth that is equal or greater than the
	 * desired rate and program the corresponding register value.
	 */
	for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
		if (scrubrates[i].bandwidth >= new_bw)
			break;

	if (scrubrates[i].bandwidth == SDRATE_EOT)
		return -1;

	pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);

	return scrubrates[i].bandwidth;
}
/* Convert current scrub rate value into byte/sec bandwidth.
 * Returns -1 (with a warning) if the register holds a value that is
 * not in the rate table. */
static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
{
	const struct scrubrate *scrubrates;
	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
	struct pci_dev *pdev = pvt->dev_d0f0;
	u16 scrubval;
	int i;

	/* The i3100 has its own rate table */
	if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
		scrubrates = scrubrates_i3100;
	else
		scrubrates = scrubrates_e752x;

	/* Find the bandwidth matching the memory scrubber configuration */
	pci_read_config_word(pdev, E752X_MCHSCRB, &scrubval);
	scrubval = scrubval & 0x0f;	/* only the low nibble selects the rate */

	for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
		if (scrubrates[i].scrubval == scrubval)
			break;

	if (scrubrates[i].bandwidth == SDRATE_EOT) {
		e752x_printk(KERN_WARNING,
			"Invalid sdram scrub control value: 0x%x\n", scrubval);
		return -1;
	}

	return scrubrates[i].bandwidth;
}
/* Return 1 if dual channel mode is active (DDRCSR bits 13:12 both set).
 * Else return 0. */
static inline int dual_channel_active(u16 ddrcsr)
{
	return (ddrcsr & 0x3000) == 0x3000;
}
/* Remap csrow index numbers if map_type is "reverse" (map_type == 0):
 * index i becomes 7 - i. With normal mapping the index is returned
 * unchanged. */
static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
{
	struct e752x_pvt *pvt = mci->pvt_info;

	if (!pvt->map_type)
		return (7 - index);

	return (index);
}
/* Walk the DRAM row boundary (DRB) registers and populate the EDAC
 * csrow table: page ranges, device width (x4/x8) and ECC mode per row. */
static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
			u16 ddrcsr)
{
	struct csrow_info *csrow;
	unsigned long last_cumul_size;
	int index, mem_dev, drc_chan;
	int drc_drbg;		/* DRB granularity 0=64mb, 1=128mb */
	int drc_ddim;		/* DRAM Data Integrity Mode 0=none, 2=edac */
	u8 value;
	u32 dra, drc, cumul_size;

	/* Assemble the four 8-bit DRA registers into one 32-bit word */
	dra = 0;
	for (index = 0; index < 4; index++) {
		u8 dra_reg;
		pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
		dra |= dra_reg << (index * 8);
	}
	pci_read_config_dword(pdev, E752X_DRC, &drc);
	drc_chan = dual_channel_active(ddrcsr);
	drc_drbg = drc_chan + 1;	/* 128 in dual mode, 64 in single */
	drc_ddim = (drc >> 20) & 0x3;

	/* The dram row boundary (DRB) reg values are boundary address for
	 * each DRAM row with a granularity of 64 or 128MB (single/dual
	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
	 * contain the total memory contained in all eight rows.
	 */
	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
		/* mem_dev 0=x8, 1=x4 */
		mem_dev = (dra >> (index * 4 + 2)) & 0x3;
		csrow = &mci->csrows[remap_csrow_index(mci, index)];

		mem_dev = (mem_dev == 2);
		pci_read_config_byte(pdev, E752X_DRB + index, &value);
		/* convert a 128 or 64 MiB DRB to a page size. */
		cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
		debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
			cumul_size);
		if (cumul_size == last_cumul_size)
			continue;	/* not populated */

		csrow->first_page = last_cumul_size;
		csrow->last_page = cumul_size - 1;
		csrow->nr_pages = cumul_size - last_cumul_size;
		last_cumul_size = cumul_size;
		csrow->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
		csrow->mtype = MEM_RDDR;	/* only one type supported */
		csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;

		/*
		 * if single channel or x8 devices then SECDED
		 * if dual channel and x4 then S4ECD4ED
		 */
		if (drc_ddim) {
			if (drc_chan && mem_dev) {
				csrow->edac_mode = EDAC_S4ECD4ED;
				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
			} else {
				csrow->edac_mode = EDAC_SECDED;
				mci->edac_cap |= EDAC_FLAG_SECDED;
			}
		} else
			csrow->edac_mode = EDAC_NONE;
	}
}
/* Build pvt->map[]: for each of the 8 csrow slots record which logical
 * row number occupies it, or 0xff if the slot is empty. DRB values are
 * cumulative, so a DRB equal to the previous one means the slot added
 * no memory. */
static void e752x_init_mem_map_table(struct pci_dev *pdev,
				struct e752x_pvt *pvt)
{
	int index;
	u8 value, last, row;

	last = 0;
	row = 0;

	for (index = 0; index < 8; index += 2) {
		pci_read_config_byte(pdev, E752X_DRB + index, &value);
		/* test if there is a dimm in this slot */
		if (value == last) {
			/* no dimm in the slot, so flag it as empty */
			pvt->map[index] = 0xff;
			pvt->map[index + 1] = 0xff;
		} else {	/* there is a dimm in the slot */
			pvt->map[index] = row;
			row++;
			last = value;
			/* test the next value to see if the dimm is double
			 * sided
			 */
			pci_read_config_byte(pdev, E752X_DRB + index + 1,
					&value);

			/* If the next DRB equals this one the dimm is single
			 * sided: mark the odd slot empty (0xff); otherwise it
			 * is double sided and the odd slot gets the next row
			 * number. */
			pvt->map[index + 1] = (value == last) ? 0xff : row;
			row++;
			last = value;
		}
	}
}
/* Look up the companion PCI devices: the error-reporting device
 * (device 0 function 1, which a BIOS may hide) and the control device.
 * On success pvt->bridge_ck, pvt->dev_d0f0 and pvt->dev_d0f1 each hold
 * a reference; on failure all references taken here are dropped.
 * Return 0 on success or 1 on failure. */
static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
			struct e752x_pvt *pvt)
{
	struct pci_dev *dev;

	pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
					pvt->dev_info->err_dev, pvt->bridge_ck);

	/* Not enumerated (hidden by the BIOS?): scan Dev0:Fun1 directly */
	if (pvt->bridge_ck == NULL)
		pvt->bridge_ck = pci_scan_single_device(pdev->bus,
							PCI_DEVFN(0, 1));

	if (pvt->bridge_ck == NULL) {
		e752x_printk(KERN_ERR, "error reporting device not found:"
			"vendor %x device 0x%x (broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
		return 1;
	}

	dev = pci_get_device(PCI_VENDOR_ID_INTEL,
			     e752x_devs[dev_idx].ctl_dev,
			     NULL);

	if (dev == NULL)
		goto fail;

	pvt->dev_d0f0 = dev;
	/* Extra reference: dev_d0f1 and bridge_ck are put independently */
	pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck);

	return 0;

fail:
	pci_dev_put(pvt->bridge_ck);
	return 1;
}
/* Setup system bus parity mask register.
 * Sysbus parity supported on:
 *   e7320/e7520/e7525 + Xeon
 *
 * The "sysbus_parity" module parameter (-1 = auto) overrides the
 * CPU-brand-string based auto-detection below.
 */
static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
{
	char *cpu_id = cpu_data(0).x86_model_id;
	struct pci_dev *dev = pvt->dev_d0f1;
	int enable = 1;

	/* Allow module parameter override, else see if CPU supports parity */
	if (sysbus_parity != -1) {
		enable = sysbus_parity;
	} else if (cpu_id[0] && !strstr(cpu_id, "Xeon")) {
		e752x_printk(KERN_INFO, "System Bus Parity not "
			     "supported by CPU, disabling\n");
		enable = 0;
	}

	/* 0x0000 unmasks everything; NOTE(review): 0x0309 presumably masks
	 * the parity-related sysbus error bits - confirm against the
	 * chipset datasheet. */
	if (enable)
		pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
	else
		pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
}
/* Clear the error-mask and SMI command registers on Dev0:Fun1 (the
 * BIOS may have set them) so error reporting is enabled. */
static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
{
	struct pci_dev *dev;

	dev = pvt->dev_d0f1;
	/* Turn off error disable & SMI in case the BIOS turned it on */
	if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
		/* 3100 variant: NSI registers instead of hub interface */
		pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
		pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
	} else {
		pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
		pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
	}

	e752x_init_sysbus_parity_mask(pvt);

	pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
	pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
	pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
	pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
	pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
}
/*
 * Set up one memory controller instance.
 *
 * @pdev:    host bridge device (device 0 function 0)
 * @dev_idx: index into e752x_devs[] selecting the chipset variant
 *
 * Returns 0 on success, -ENODEV or -ENOMEM on failure.
 */
static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
{
	u16 pci_data;
	u8 stat8;
	struct mem_ctl_info *mci;
	struct e752x_pvt *pvt;
	u16 ddrcsr;
	int drc_chan;		/* Number of channels 0=1chan,1=2chan */
	struct e752x_error_info discard;

	debugf0("%s(): mci\n", __func__);
	debugf0("Starting Probe1\n");

	/* check to see if device 0 function 1 is enabled; if it isn't, we
	 * assume the BIOS has reserved it for a reason and is expecting
	 * exclusive access, we take care not to violate that assumption and
	 * fail the probe. */
	pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
	if (!force_function_unhide && !(stat8 & (1 << 5))) {
		printk(KERN_INFO "Contact your BIOS vendor to see if the "
			"E752x error registers can be safely un-hidden\n");
		return -ENODEV;
	}
	/* Un-hide Dev0:Fun1 so the error registers become visible */
	stat8 |= (1 << 5);
	pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);

	pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
	/* FIXME: should check >>12 or 0xf, true for all? */
	/* Dual channel = 1, Single channel = 0 */
	drc_chan = dual_channel_active(ddrcsr);

	mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
	if (mci == NULL) {
		return -ENOMEM;
	}

	debugf3("%s(): init mci\n", __func__);
	mci->mtype_cap = MEM_FLAG_RDDR;
	/* 3100 IMCH supports SECDEC only */
	mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
		(EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
	/* FIXME - what if different memory types are in different csrows? */
	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = E752X_REVISION;
	mci->dev = &pdev->dev;

	debugf3("%s(): init pvt\n", __func__);
	pvt = (struct e752x_pvt *)mci->pvt_info;
	pvt->dev_info = &e752x_devs[dev_idx];
	pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);

	/* Takes references on the companion devices; frees mci on failure */
	if (e752x_get_devs(pdev, dev_idx, pvt)) {
		edac_mc_free(mci);
		return -ENODEV;
	}

	debugf3("%s(): more mci init\n", __func__);
	mci->ctl_name = pvt->dev_info->ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = e752x_check;
	mci->ctl_page_to_phys = ctl_page_to_phys;
	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;

	/* set the map type.  1 = normal, 0 = reversed
	 * Must be set before e752x_init_csrows in case csrow mapping
	 * is reversed.
	 */
	pci_read_config_byte(pdev, E752X_DRM, &stat8);
	pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));

	e752x_init_csrows(mci, pdev, ddrcsr);
	e752x_init_mem_map_table(pdev, pvt);

	if (dev_idx == I3100)
		mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
	else
		mci->edac_cap |= EDAC_FLAG_NONE;

	debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
	/* load the top of low memory, remap base, and remap limit vars */
	pci_read_config_word(pdev, E752X_TOLM, &pci_data);
	pvt->tolm = ((u32) pci_data) << 4;
	pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
	pvt->remapbase = ((u32) pci_data) << 14;
	pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
	pvt->remaplimit = ((u32) pci_data) << 14;
	e752x_printk(KERN_INFO,
		"tolm = %x, remapbase = %x, remaplimit = %x\n",
		pvt->tolm, pvt->remapbase, pvt->remaplimit);

	/* Here we assume that we will never see multiple instances of this
	 * type of memory controller.  The ID is therefore hardcoded to 0.
	 */
	if (edac_mc_add_mc(mci)) {
		debugf3("%s(): failed edac_mc_add_mc()\n", __func__);
		goto fail;
	}

	e752x_init_error_reporting_regs(pvt);
	e752x_get_error_info(mci, &discard);	/* clear other MCH errors */

	/* allocating generic PCI control info */
	e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!e752x_pci) {
		/* Non-fatal: the MC itself is still registered */
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n", __func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	/* get this far and it's successful */
	debugf3("%s(): success\n", __func__);
	return 0;

fail:
	pci_dev_put(pvt->dev_d0f0);
	pci_dev_put(pvt->dev_d0f1);
	pci_dev_put(pvt->bridge_ck);
	edac_mc_free(mci);

	return -ENODEV;
}
/* PCI probe callback. Returns count (>= 0), or negative on error. */
static int __devinit e752x_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	debugf0("%s()\n", __func__);

	/* wake up and enable device */
	if (pci_enable_device(pdev) < 0)
		return -EIO;

	/* driver_data is the e752x_devs[] index from the ID table */
	return e752x_probe1(pdev, ent->driver_data);
}
/* PCI remove callback: release the EDAC PCI control, unregister the
 * memory controller and drop the device references taken in probe. */
static void __devexit e752x_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct e752x_pvt *pvt;

	debugf0("%s()\n", __func__);

	if (e752x_pci)
		edac_pci_release_generic_ctl(e752x_pci);

	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
		return;

	pvt = (struct e752x_pvt *)mci->pvt_info;
	pci_dev_put(pvt->dev_d0f0);
	pci_dev_put(pvt->dev_d0f1);
	pci_dev_put(pvt->bridge_ck);
	edac_mc_free(mci);
}
/* PCI IDs this driver binds to; the last field (driver_data) selects
 * the e752x_devs[] entry for the chipset variant. */
static DEFINE_PCI_DEVICE_TABLE(e752x_pci_tbl) = {
	{
	 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7520},
	{
	 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7525},
	{
	 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 E7320},
	{
	 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 I3100},
	{
	 0,
	 }			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);

static struct pci_driver e752x_driver = {
	.name = EDAC_MOD_STR,
	.probe = e752x_init_one,
	.remove = __devexit_p(e752x_remove_one),
	.id_table = e752x_pci_tbl,
};
/* Module load: set the EDAC operation state, then register the driver. */
static int __init e752x_init(void)
{
	int rc;

	debugf3("%s()\n", __func__);

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	rc = pci_register_driver(&e752x_driver);
	if (rc < 0)
		return rc;

	return 0;
}
/* Module unload: unregister the PCI driver. */
static void __exit e752x_exit(void)
{
	debugf3("%s()\n", __func__);
	pci_unregister_driver(&e752x_driver);
}
module_init(e752x_init);
module_exit(e752x_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");

/* Allow forcing the normally BIOS-hidden Dev0:Fun1 error device visible */
module_param(force_function_unhide, int, 0444);
MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
		 " 1=force unhide and hope BIOS doesn't fight driver for "
		 "Dev0:Fun1 access");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

/* -1 (default) auto-detects via the CPU brand string, see
 * e752x_init_sysbus_parity_mask() */
module_param(sysbus_parity, int, 0444);
MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
		 " 1=enable system bus parity checking, default=auto-detect");

module_param(report_non_memory_errors, int, 0644);
MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
		 "reporting, 1=enable non-memory error reporting");
| gpl-2.0 |
SlimRoms/kernel_samsung_mondrianwifi | drivers/tty/hvc/hvc_dcc.c | 3898 | 2129 | /* Copyright (c) 2010, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <asm/processor.h>
#include "hvc_console.h"
/* DCC Status Bits */
#define DCC_STATUS_RX		(1 << 30)
#define DCC_STATUS_TX		(1 << 29)

/* Read the CP14 DCC comms control/status register (see asm annotation). */
static inline u32 __dcc_getstatus(void)
{
	u32 __ret;
	asm volatile("mrc p14, 0, %0, c0, c1, 0 @ read comms ctrl reg"
		: "=r" (__ret) : : "cc");

	return __ret;
}
/* Read one byte from the CP14 DCC data register; the isb() orders the
 * read with respect to subsequent instructions. */
static inline char __dcc_getchar(void)
{
	char __c;
	asm volatile("mrc p14, 0, %0, c0, c5, 0 @ read comms data reg"
		: "=r" (__c));
	isb();

	return __c;
}
/* Write one byte to the CP14 DCC data register; the isb() orders the
 * write with respect to subsequent instructions. */
static inline void __dcc_putchar(char c)
{
	asm volatile("mcr p14, 0, %0, c0, c5, 0 @ write a char"
		: /* no output register */
		: "r" (c));
	isb();
}
/*
 * Transmit count bytes over the DCC, busy-waiting for the TX register
 * to drain before each byte. Always returns count.
 */
static int hvc_dcc_put_chars(uint32_t vt, const char *buf, int count)
{
	const char *p = buf;
	const char *end = buf + count;

	while (p < end) {
		/* Spin until the previous byte has been consumed */
		while (__dcc_getstatus() & DCC_STATUS_TX)
			cpu_relax();

		__dcc_putchar(*p++);
	}

	return count;
}
/* Read up to count bytes from the DCC without blocking: stop at the
 * first poll with no RX data pending. Returns the number of bytes
 * stored into buf (possibly 0). */
static int hvc_dcc_get_chars(uint32_t vt, char *buf, int count)
{
	int i;

	for (i = 0; i < count; ++i)
		if (__dcc_getstatus() & DCC_STATUS_RX)
			buf[i] = __dcc_getchar();
		else
			break;

	return i;
}
/* hvc console backend operations for the DCC transport */
static const struct hv_ops hvc_dcc_get_put_ops = {
	.get_chars = hvc_dcc_get_chars,
	.put_chars = hvc_dcc_put_chars,
};

/* Early registration so the DCC can act as the boot console */
static int __init hvc_dcc_console_init(void)
{
	hvc_instantiate(0, 0, &hvc_dcc_get_put_ops);
	return 0;
}
console_initcall(hvc_dcc_console_init);

/* Allocate the hvc tty device on vterm 0 (last arg 128 - presumably
 * the output buffer size; confirm against hvc_alloc()) */
static int __init hvc_dcc_init(void)
{
	hvc_alloc(0, 0, &hvc_dcc_get_put_ops, 128);
	return 0;
}
device_initcall(hvc_dcc_init);
| gpl-2.0 |
CyanideL/android_kernel_lge_hammerhead | arch/sh/boards/board-secureedge5410.c | 4666 | 1764 | /*
* Copyright (C) 2002 David McCullough <davidm@snapgear.com>
* Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
*
* Based on files with the following comments:
*
* Copyright (C) 2000 Kazumoto Kojima
*
* Modified for 7751 Solution Engine by
* Ian da Silva and Jeremy Siegel, 2001.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/machvec.h>
#include <mach/secureedge5410.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <cpu/timer.h>
/* Board I/O port shadow; NOTE(review): presumably consumed by other
 * board code via <mach/secureedge5410.h> - confirm. */
unsigned short secureedge5410_ioport;

/*
 * EraseConfig handling functions
 */

/* IRQ handler for the erase switch: just logs the event. */
static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
{
	printk("SnapGear: erase switch interrupt!\n");

	return IRQ_HANDLED;
}
/*
 * Hook the "EraseConfig" switch on external IRQ 0 (event 0x240).
 * The handler only logs the event, so no dev_id is needed.
 *
 * Always returns 0: a failed registration is logged but not fatal.
 */
static int __init eraseconfig_init(void)
{
	unsigned int irq = evt2irq(0x240);

	printk("SnapGear: EraseConfig init\n");

	/* Setup "EraseConfig" switch on external IRQ 0 */
	if (request_irq(irq, eraseconfig_interrupt, 0, "Erase Config", NULL))
		/* Fixed log message typo: "Reset witch" -> "Reset switch" */
		printk("SnapGear: failed to register IRQ%d for Reset switch\n",
		       irq);
	else
		printk("SnapGear: registered EraseConfig switch on IRQ%d\n",
		       irq);
	return 0;
}
/*
 * Initialize IRQ setting
 *
 * IRL0 = erase switch
 * IRL1 = eth0
 * IRL2 = eth1
 * IRL3 = crypto
 */

/* Machine-vector IRQ init hook (see mv_snapgear below). */
static void __init init_snapgear_IRQ(void)
{
	printk("Setup SnapGear IRQ/IPR ...\n");
	/* enable individual interrupt mode for externals */
	plat_irq_setup_pins(IRQ_MODE_IRQ);
}
/*
 * The Machine Vector: board identity and IRQ setup hook consumed by
 * the SH machvec framework.
 */
static struct sh_machine_vector mv_snapgear __initmv = {
	.mv_name = "SnapGear SecureEdge5410",
	.mv_nr_irqs = 72,
	.mv_init_irq = init_snapgear_IRQ,
};
| gpl-2.0 |
ElectryDev/octokitty | drivers/acpi/acpica/utaddress.c | 4922 | 9503 | /******************************************************************************
*
* Module Name: utaddress - op_region address range check
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utaddress")
/*******************************************************************************
*
* FUNCTION: acpi_ut_add_address_range
*
* PARAMETERS: space_id - Address space ID
* Address - op_region start address
* Length - op_region length
* region_node - op_region namespace node
*
* RETURN: Status
*
* DESCRIPTION: Add the Operation Region address range to the global list.
* The only supported Space IDs are Memory and I/O. Called when
* the op_region address/length operands are fully evaluated.
*
* MUTEX: Locks the namespace
*
* NOTE: Because this interface is only called when an op_region argument
* list is evaluated, there cannot be any duplicate region_nodes.
* Duplicate Address/Length values are allowed, however, so that multiple
* address conflicts can be detected.
*
******************************************************************************/
acpi_status
acpi_ut_add_address_range(acpi_adr_space_type space_id,
			  acpi_physical_address address,
			  u32 length, struct acpi_namespace_node *region_node)
{
	struct acpi_address_range *range_info;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ut_add_address_range);

	/* Only SystemMemory and SystemIO ranges are tracked */

	if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
	    (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Allocate/init a new info block, add it to the appropriate list */

	range_info = ACPI_ALLOCATE(sizeof(struct acpi_address_range));
	if (!range_info) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	range_info->start_address = address;
	range_info->end_address = (address + length - 1);	/* inclusive */
	range_info->region_node = region_node;

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(range_info);
		return_ACPI_STATUS(status);
	}

	/* Push onto the head of the per-space-ID list */

	range_info->next = acpi_gbl_address_range_list[space_id];
	acpi_gbl_address_range_list[space_id] = range_info;

	ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
			  "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
			  acpi_ut_get_node_name(range_info->region_node),
			  ACPI_CAST_PTR(void, address),
			  ACPI_CAST_PTR(void, range_info->end_address)));

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_remove_address_range
*
* PARAMETERS: space_id - Address space ID
* region_node - op_region namespace node
*
* RETURN: None
*
* DESCRIPTION: Remove the Operation Region from the global list. The only
* supported Space IDs are Memory and I/O. Called when an
* op_region is deleted.
*
* MUTEX: Assumes the namespace is locked
*
******************************************************************************/
void
acpi_ut_remove_address_range(acpi_adr_space_type space_id,
			     struct acpi_namespace_node *region_node)
{
	struct acpi_address_range **link;
	struct acpi_address_range *entry;

	ACPI_FUNCTION_TRACE(ut_remove_address_range);

	/* Only SystemMemory and SystemIO ranges are tracked */

	if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
	    (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
		return_VOID;
	}

	/*
	 * Walk the list via a pointer-to-pointer so that removing the head
	 * and removing an interior node are the same operation. Only the
	 * first entry owned by region_node is removed (there can be at
	 * most one - see acpi_ut_add_address_range).
	 */
	for (link = &acpi_gbl_address_range_list[space_id];
	     (entry = *link) != NULL; link = &entry->next) {
		if (entry->region_node != region_node) {
			continue;
		}

		*link = entry->next;	/* Unlink */

		ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
				  "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
				  acpi_ut_get_node_name(entry->region_node),
				  ACPI_CAST_PTR(void, entry->start_address),
				  ACPI_CAST_PTR(void, entry->end_address)));

		ACPI_FREE(entry);
		return_VOID;
	}

	return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_check_address_range
*
* PARAMETERS: space_id - Address space ID
* Address - Start address
* Length - Length of address range
* Warn - TRUE if warning on overlap desired
*
* RETURN: Count of the number of conflicts detected. Zero is always
* returned for Space IDs other than Memory or I/O.
*
* DESCRIPTION: Check if the input address range overlaps any of the
* ASL operation region address ranges. The only supported
* Space IDs are Memory and I/O.
*
* MUTEX: Assumes the namespace is locked.
*
******************************************************************************/
u32
acpi_ut_check_address_range(acpi_adr_space_type space_id,
			    acpi_physical_address address, u32 length, u8 warn)
{
	struct acpi_address_range *range_info;
	acpi_physical_address end_address;
	char *pathname;
	u32 overlap_count = 0;

	ACPI_FUNCTION_TRACE(ut_check_address_range);

	/* Only SystemMemory and SystemIO ranges are tracked */

	if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
	    (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
		return_UINT32(0);
	}

	range_info = acpi_gbl_address_range_list[space_id];
	end_address = address + length - 1;	/* inclusive */

	/* Check entire list for all possible conflicts */

	while (range_info) {
		/*
		 * Check if the requested Address/Length overlaps this address_range.
		 * Four cases to consider:
		 *
		 * 1) Input address/length is contained completely in the address range
		 * 2) Input address/length overlaps range at the range start
		 * 3) Input address/length overlaps range at the range end
		 * 4) Input address/length completely encompasses the range
		 */
		if ((address <= range_info->end_address) &&
		    (end_address >= range_info->start_address)) {

			/* Found an address range overlap */

			overlap_count++;
			if (warn) {	/* Optional warning message */
				pathname =
				    acpi_ns_get_external_pathname(range_info->
								  region_node);

				ACPI_WARNING((AE_INFO,
					      "0x%p-0x%p %s conflicts with Region %s %d",
					      ACPI_CAST_PTR(void, address),
					      ACPI_CAST_PTR(void, end_address),
					      acpi_ut_get_region_name(space_id),
					      pathname, overlap_count));
				ACPI_FREE(pathname);
			}
		}

		range_info = range_info->next;
	}

	return_UINT32(overlap_count);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_delete_address_lists
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Delete all global address range lists (called during
* subsystem shutdown).
*
******************************************************************************/
void acpi_ut_delete_address_lists(void)
{
	struct acpi_address_range *entry;
	struct acpi_address_range *pending;
	int list_index;

	/*
	 * Walk every per-space-id address range list and release each
	 * element, leaving all list heads empty. Called only during
	 * subsystem shutdown, so no locking is required here.
	 */
	for (list_index = 0; list_index < ACPI_ADDRESS_RANGE_MAX; list_index++) {
		entry = acpi_gbl_address_range_list[list_index];
		while (entry) {
			pending = entry->next;	/* save before freeing */
			ACPI_FREE(entry);
			entry = pending;
		}
		acpi_gbl_address_range_list[list_index] = NULL;
	}
}
| gpl-2.0 |
Jaiglissechef-i9100/f4ktion_kernel | drivers/char/hw_random/nomadik-rng.c | 7226 | 2508 | /*
* Nomadik RNG support
* Copyright 2009 Alessandro Rubini
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/err.h>
static struct clk *rng_clk;

/*
 * hwrng read callback: fetch up to two bytes of entropy from the data
 * register. Returns the number of bytes written into @data.
 */
static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	void __iomem *base = (void __iomem *)rng->priv;

	/*
	 * We always produce a 16-bit sample; don't overrun a caller
	 * buffer that is smaller than that.
	 */
	if (max < 2)
		return 0;
	/*
	 * The register is 32 bits and gives 16 random bits (low half).
	 * A subsequent read will delay the core for 400ns, so we just read
	 * once and accept the very unlikely very small delay, even if wait==0.
	 */
	*(u16 *)data = __raw_readl(base + 8) & 0xffff;
	return 2;
}
/* we have at most one RNG per machine, granted */
static struct hwrng nmk_rng = {
	.name = "nomadik",
	.read = nmk_rng_read,
	/* .priv is set at probe time to the ioremapped register base */
};
/*
 * Probe: enable the clock, claim the AMBA region, map the registers and
 * register with the hwrng core. Unwinds in reverse order on any failure.
 */
static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
{
	void __iomem *base;
	int ret;
	rng_clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(rng_clk)) {
		dev_err(&dev->dev, "could not get rng clock\n");
		ret = PTR_ERR(rng_clk);
		return ret;
	}
	clk_enable(rng_clk);
	ret = amba_request_regions(dev, dev->dev.init_name);
	if (ret)
		goto out_clk;
	/* Default error for the ioremap failure path below */
	ret = -ENOMEM;
	base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!base)
		goto out_release;
	/* Stash the mapping where the read callback can find it */
	nmk_rng.priv = (unsigned long)base;
	ret = hwrng_register(&nmk_rng);
	if (ret)
		goto out_unmap;
	return 0;
out_unmap:
	iounmap(base);
out_release:
	amba_release_regions(dev);
out_clk:
	clk_disable(rng_clk);
	clk_put(rng_clk);
	return ret;
}
static int nmk_rng_remove(struct amba_device *dev)
{
void __iomem *base = (void __iomem *)nmk_rng.priv;
hwrng_unregister(&nmk_rng);
iounmap(base);
amba_release_regions(dev);
clk_disable(rng_clk);
clk_put(rng_clk);
return 0;
}
/* AMBA peripheral IDs this driver binds to */
static struct amba_id nmk_rng_ids[] = {
	{
		.id	= 0x000805e1,
		.mask	= 0x000fffff, /* top bits are rev and cfg: accept all */
	},
	{0, 0},	/* sentinel */
};
MODULE_DEVICE_TABLE(amba, nmk_rng_ids);
static struct amba_driver nmk_rng_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "rng",
	},
	.probe = nmk_rng_probe,
	.remove = nmk_rng_remove,
	.id_table = nmk_rng_ids,
};
/* Standard module register/unregister boilerplate for AMBA drivers */
module_amba_driver(nmk_rng_driver);
MODULE_LICENSE("GPL");
| gpl-2.0 |
paloda/android_kernel_bq_vegetalte | fs/ocfs2/cluster/netdebug.c | 7994 | 14560 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* netdebug.c
*
* debug functionality for o2net
*
* Copyright (C) 2005, 2008 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*
*/
#ifdef CONFIG_DEBUG_FS
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/kref.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include "tcp.h"
#include "nodemanager.h"
#define MLOG_MASK_PREFIX ML_TCP
#include "masklog.h"
#include "tcp_internal.h"
#define O2NET_DEBUG_DIR "o2net"
#define SC_DEBUG_NAME "sock_containers"
#define NST_DEBUG_NAME "send_tracking"
#define STATS_DEBUG_NAME "stats"
#define NODES_DEBUG_NAME "connected_nodes"
#define SHOW_SOCK_CONTAINERS 0
#define SHOW_SOCK_STATS 1
/* debugfs entries created by o2net_debugfs_init() */
static struct dentry *o2net_dentry;
static struct dentry *sc_dentry;
static struct dentry *nst_dentry;
static struct dentry *stats_dentry;
static struct dentry *nodes_dentry;
/* Protects both tracking lists below and every dummy element in them */
static DEFINE_SPINLOCK(o2net_debug_lock);
static LIST_HEAD(sock_containers);
static LIST_HEAD(send_tracking);
/* Add a send-tracking record to the global debug list. */
void o2net_debug_add_nst(struct o2net_send_tracking *nst)
{
	spin_lock(&o2net_debug_lock);
	list_add(&nst->st_net_debug_item, &send_tracking);
	spin_unlock(&o2net_debug_lock);
}
/* Remove a send-tracking record; safe to call if it was never added. */
void o2net_debug_del_nst(struct o2net_send_tracking *nst)
{
	spin_lock(&o2net_debug_lock);
	if (!list_empty(&nst->st_net_debug_item))
		list_del_init(&nst->st_net_debug_item);
	spin_unlock(&o2net_debug_lock);
}
/*
 * Starting from @nst_start (a dummy cursor element embedded in the list),
 * return the next real tracking entry, or NULL at the end of the list.
 * Dummy cursors are distinguished by a NULL st_task.
 */
static struct o2net_send_tracking
			*next_nst(struct o2net_send_tracking *nst_start)
{
	struct o2net_send_tracking *nst, *ret = NULL;
	assert_spin_locked(&o2net_debug_lock);
	list_for_each_entry(nst, &nst_start->st_net_debug_item,
			    st_net_debug_item) {
		/* discover the head of the list */
		if (&nst->st_net_debug_item == &send_tracking)
			break;
		/* use st_task to detect real nsts in the list */
		if (nst->st_task != NULL) {
			ret = nst;
			break;
		}
	}
	return ret;
}
/* seq_file start: return the first real entry after our dummy cursor. */
static void *nst_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct o2net_send_tracking *nst, *dummy_nst = seq->private;
	spin_lock(&o2net_debug_lock);
	nst = next_nst(dummy_nst);
	spin_unlock(&o2net_debug_lock);
	return nst;
}
/*
 * seq_file next: advance the dummy cursor past the entry just shown so
 * iteration survives entries coming and going between reads.
 */
static void *nst_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct o2net_send_tracking *nst, *dummy_nst = seq->private;
	spin_lock(&o2net_debug_lock);
	nst = next_nst(dummy_nst);
	list_del_init(&dummy_nst->st_net_debug_item);
	if (nst)
		list_add(&dummy_nst->st_net_debug_item,
			 &nst->st_net_debug_item);
	spin_unlock(&o2net_debug_lock);
	return nst; /* unused, just needs to be null when done */
}
/* seq_file show: print one in-flight message send, with elapsed times. */
static int nst_seq_show(struct seq_file *seq, void *v)
{
	struct o2net_send_tracking *nst, *dummy_nst = seq->private;
	ktime_t now;
	s64 sock, send, status;
	spin_lock(&o2net_debug_lock);
	nst = next_nst(dummy_nst);
	if (!nst)
		goto out;
	now = ktime_get();
	sock = ktime_to_us(ktime_sub(now, nst->st_sock_time));
	send = ktime_to_us(ktime_sub(now, nst->st_send_time));
	status = ktime_to_us(ktime_sub(now, nst->st_status_time));
	/* get_task_comm isn't exported.  oh well. */
	seq_printf(seq, "%p:\n"
		   "  pid:          %lu\n"
		   "  tgid:         %lu\n"
		   "  process name: %s\n"
		   "  node:         %u\n"
		   "  sc:           %p\n"
		   "  message id:   %d\n"
		   "  message type: %u\n"
		   "  message key:  0x%08x\n"
		   "  sock acquiry: %lld usecs ago\n"
		   "  send start:   %lld usecs ago\n"
		   "  wait start:   %lld usecs ago\n",
		   nst, (unsigned long)task_pid_nr(nst->st_task),
		   (unsigned long)nst->st_task->tgid,
		   nst->st_task->comm, nst->st_node,
		   nst->st_sc, nst->st_id, nst->st_msg_type,
		   nst->st_msg_key,
		   (long long)sock,
		   (long long)send,
		   (long long)status);
out:
	spin_unlock(&o2net_debug_lock);
	return 0;
}
/* seq_file stop: nothing to release; lock is only held inside show/next. */
static void nst_seq_stop(struct seq_file *seq, void *v)
{
}
static const struct seq_operations nst_seq_ops = {
	.start = nst_seq_start,
	.next = nst_seq_next,
	.stop = nst_seq_stop,
	.show = nst_seq_show,
};
/*
 * Open: allocate a dummy cursor (st_task == NULL marks it as such),
 * hook it into the tracking list, and hand it to the seq_file.
 */
static int nst_fop_open(struct inode *inode, struct file *file)
{
	struct o2net_send_tracking *dummy_nst;
	struct seq_file *seq;
	int ret;
	dummy_nst = kmalloc(sizeof(struct o2net_send_tracking), GFP_KERNEL);
	if (dummy_nst == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	dummy_nst->st_task = NULL;
	ret = seq_open(file, &nst_seq_ops);
	if (ret)
		goto out;
	seq = file->private_data;
	seq->private = dummy_nst;
	o2net_debug_add_nst(dummy_nst);
	/* ownership transferred to the seq_file; don't free below */
	dummy_nst = NULL;
out:
	kfree(dummy_nst);	/* no-op on success (pointer is NULL) */
	return ret;
}
/* Release: unhook the dummy cursor; seq_release_private() frees it. */
static int nst_fop_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct o2net_send_tracking *dummy_nst = seq->private;
	o2net_debug_del_nst(dummy_nst);
	return seq_release_private(inode, file);
}
static const struct file_operations nst_seq_fops = {
	.open = nst_fop_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = nst_fop_release,
};
/* Add a socket container to the global debug list. */
void o2net_debug_add_sc(struct o2net_sock_container *sc)
{
	spin_lock(&o2net_debug_lock);
	list_add(&sc->sc_net_debug_item, &sock_containers);
	spin_unlock(&o2net_debug_lock);
}
/* Remove a socket container from the global debug list. */
void o2net_debug_del_sc(struct o2net_sock_container *sc)
{
	spin_lock(&o2net_debug_lock);
	list_del_init(&sc->sc_net_debug_item);
	spin_unlock(&o2net_debug_lock);
}
/* Per-open state: which view to render plus the dummy list cursor. */
struct o2net_sock_debug {
	int dbg_ctxt;	/* SHOW_SOCK_CONTAINERS or SHOW_SOCK_STATS */
	struct o2net_sock_container *dbg_sock;
};
/*
 * Starting from @sc_start (a dummy cursor), return the next real socket
 * container, or NULL at the end. Real entries have a non-NULL sc_page.
 */
static struct o2net_sock_container
			*next_sc(struct o2net_sock_container *sc_start)
{
	struct o2net_sock_container *sc, *ret = NULL;
	assert_spin_locked(&o2net_debug_lock);
	list_for_each_entry(sc, &sc_start->sc_net_debug_item,
			    sc_net_debug_item) {
		/* discover the head of the list miscast as a sc */
		if (&sc->sc_net_debug_item == &sock_containers)
			break;
		/* use sc_page to detect real scs in the list */
		if (sc->sc_page != NULL) {
			ret = sc;
			break;
		}
	}
	return ret;
}
/* seq_file start: first real container after our dummy cursor. */
static void *sc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct o2net_sock_debug *sd = seq->private;
	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
	spin_lock(&o2net_debug_lock);
	sc = next_sc(dummy_sc);
	spin_unlock(&o2net_debug_lock);
	return sc;
}
/* seq_file next: re-anchor the dummy cursor after the entry just shown. */
static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct o2net_sock_debug *sd = seq->private;
	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
	spin_lock(&o2net_debug_lock);
	sc = next_sc(dummy_sc);
	list_del_init(&dummy_sc->sc_net_debug_item);
	if (sc)
		list_add(&dummy_sc->sc_net_debug_item, &sc->sc_net_debug_item);
	spin_unlock(&o2net_debug_lock);
	return sc; /* unused, just needs to be null when done */
}
#ifdef CONFIG_OCFS2_FS_STATS
# define sc_send_count(_s) ((_s)->sc_send_count)
# define sc_recv_count(_s) ((_s)->sc_recv_count)
# define sc_tv_acquiry_total_ns(_s) (ktime_to_ns((_s)->sc_tv_acquiry_total))
# define sc_tv_send_total_ns(_s) (ktime_to_ns((_s)->sc_tv_send_total))
# define sc_tv_status_total_ns(_s) (ktime_to_ns((_s)->sc_tv_status_total))
# define sc_tv_process_total_ns(_s) (ktime_to_ns((_s)->sc_tv_process_total))
#else
# define sc_send_count(_s) (0U)
# define sc_recv_count(_s) (0U)
# define sc_tv_acquiry_total_ns(_s) (0LL)
# define sc_tv_send_total_ns(_s) (0LL)
# define sc_tv_status_total_ns(_s) (0LL)
# define sc_tv_process_total_ns(_s) (0LL)
#endif
/* So that debugfs.ocfs2 can determine which format is being used */
#define O2NET_STATS_STR_VERSION 1
/* Emit one CSV line of per-socket counters (format O2NET_STATS_STR_VERSION). */
static void sc_show_sock_stats(struct seq_file *seq,
			       struct o2net_sock_container *sc)
{
	if (!sc)
		return;
	seq_printf(seq, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld\n", O2NET_STATS_STR_VERSION,
		   sc->sc_node->nd_num, (unsigned long)sc_send_count(sc),
		   (long long)sc_tv_acquiry_total_ns(sc),
		   (long long)sc_tv_send_total_ns(sc),
		   (long long)sc_tv_status_total_ns(sc),
		   (unsigned long)sc_recv_count(sc),
		   (long long)sc_tv_process_total_ns(sc));
}
/* Emit a human-readable dump of one socket container. */
static void sc_show_sock_container(struct seq_file *seq,
				   struct o2net_sock_container *sc)
{
	struct inet_sock *inet = NULL;
	__be32 saddr = 0, daddr = 0;
	__be16 sport = 0, dport = 0;
	if (!sc)
		return;
	if (sc->sc_sock) {
		inet = inet_sk(sc->sc_sock->sk);
		/* the stack's structs aren't sparse endian clean */
		saddr = (__force __be32)inet->inet_saddr;
		daddr = (__force __be32)inet->inet_daddr;
		sport = (__force __be16)inet->inet_sport;
		dport = (__force __be16)inet->inet_dport;
	}
	/* XXX sigh, inet-> doesn't have sparse annotation so any
	 * use of it here generates a warning with -Wbitwise */
	seq_printf(seq, "%p:\n"
		   "  krefs:           %d\n"
		   "  sock:            %pI4:%u -> "
		   "%pI4:%u\n"
		   "  remote node:     %s\n"
		   "  page off:        %zu\n"
		   "  handshake ok:    %u\n"
		   "  timer:           %lld usecs\n"
		   "  data ready:      %lld usecs\n"
		   "  advance start:   %lld usecs\n"
		   "  advance stop:    %lld usecs\n"
		   "  func start:      %lld usecs\n"
		   "  func stop:       %lld usecs\n"
		   "  func key:        0x%08x\n"
		   "  func type:       %u\n",
		   sc,
		   atomic_read(&sc->sc_kref.refcount),
		   &saddr, inet ? ntohs(sport) : 0,
		   &daddr, inet ? ntohs(dport) : 0,
		   sc->sc_node->nd_name,
		   sc->sc_page_off,
		   sc->sc_handshake_ok,
		   (long long)ktime_to_us(sc->sc_tv_timer),
		   (long long)ktime_to_us(sc->sc_tv_data_ready),
		   (long long)ktime_to_us(sc->sc_tv_advance_start),
		   (long long)ktime_to_us(sc->sc_tv_advance_stop),
		   (long long)ktime_to_us(sc->sc_tv_func_start),
		   (long long)ktime_to_us(sc->sc_tv_func_stop),
		   sc->sc_msg_key,
		   sc->sc_msg_type);
}
/* seq_file show: render the current container in the view selected at open. */
static int sc_seq_show(struct seq_file *seq, void *v)
{
	struct o2net_sock_debug *sd = seq->private;
	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
	spin_lock(&o2net_debug_lock);
	sc = next_sc(dummy_sc);
	if (sc) {
		if (sd->dbg_ctxt == SHOW_SOCK_CONTAINERS)
			sc_show_sock_container(seq, sc);
		else
			sc_show_sock_stats(seq, sc);
	}
	spin_unlock(&o2net_debug_lock);
	return 0;
}
/* seq_file stop: nothing to do. */
static void sc_seq_stop(struct seq_file *seq, void *v)
{
}
static const struct seq_operations sc_seq_ops = {
	.start = sc_seq_start,
	.next = sc_seq_next,
	.stop = sc_seq_stop,
	.show = sc_seq_show,
};
/*
 * Shared open path for the container and stats files: allocate a dummy
 * cursor container (sc_page == NULL marks it) and attach it to @sd.
 */
static int sc_common_open(struct file *file, struct o2net_sock_debug *sd)
{
	struct o2net_sock_container *dummy_sc;
	struct seq_file *seq;
	int ret;
	dummy_sc = kmalloc(sizeof(struct o2net_sock_container), GFP_KERNEL);
	if (dummy_sc == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	dummy_sc->sc_page = NULL;
	ret = seq_open(file, &sc_seq_ops);
	if (ret)
		goto out;
	seq = file->private_data;
	seq->private = sd;
	sd->dbg_sock = dummy_sc;
	o2net_debug_add_sc(dummy_sc);
	/* ownership transferred; suppress the kfree below */
	dummy_sc = NULL;
out:
	kfree(dummy_sc);	/* no-op on success */
	return ret;
}
/* Release: unhook the dummy cursor; seq_release_private() frees @sd. */
static int sc_fop_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct o2net_sock_debug *sd = seq->private;
	struct o2net_sock_container *dummy_sc = sd->dbg_sock;
	o2net_debug_del_sc(dummy_sc);
	return seq_release_private(inode, file);
}
/* Open the CSV stats view of the socket containers. */
static int stats_fop_open(struct inode *inode, struct file *file)
{
	struct o2net_sock_debug *sd;
	sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL);
	if (sd == NULL)
		return -ENOMEM;
	sd->dbg_ctxt = SHOW_SOCK_STATS;
	sd->dbg_sock = NULL;
	return sc_common_open(file, sd);
}
static const struct file_operations stats_seq_fops = {
	.open = stats_fop_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = sc_fop_release,
};
/* Open the verbose container view of the socket containers. */
static int sc_fop_open(struct inode *inode, struct file *file)
{
	struct o2net_sock_debug *sd;
	sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL);
	if (sd == NULL)
		return -ENOMEM;
	sd->dbg_ctxt = SHOW_SOCK_CONTAINERS;
	sd->dbg_sock = NULL;
	return sc_common_open(file, sd);
}
static const struct file_operations sc_seq_fops = {
	.open = sc_fop_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = sc_fop_release,
};
/*
 * Render the set of connected node numbers into @buf as a space-separated
 * list terminated by a newline. Returns the number of characters written.
 *
 * Fix: honor the @len parameter instead of the hard-coded PAGE_SIZE the
 * original used, so the bound actually matches the buffer the caller
 * passed (they happen to agree today only because the sole caller passes
 * a PAGE_SIZE buffer).
 */
static int o2net_fill_bitmap(char *buf, int len)
{
	unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int i = -1, out = 0;

	o2net_fill_node_map(map, sizeof(map));

	while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
		out += snprintf(buf + out, len - out, "%d ", i);
	out += snprintf(buf + out, len - out, "\n");

	return out;
}
/*
 * Open: snapshot the connected-nodes bitmap into a page-sized buffer and
 * publish its length via the inode size so reads know where to stop.
 */
static int nodes_fop_open(struct inode *inode, struct file *file)
{
	char *buf;
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	i_size_write(inode, o2net_fill_bitmap(buf, PAGE_SIZE));
	file->private_data = buf;	/* freed in o2net_debug_release() */
	return 0;
}
/* Release: free the snapshot buffer allocated at open. */
static int o2net_debug_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}
/* Read from the pre-rendered snapshot; length was stored in the inode. */
static ssize_t o2net_debug_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
				       i_size_read(file->f_mapping->host));
}
static const struct file_operations nodes_fops = {
	.open		= nodes_fop_open,
	.release	= o2net_debug_release,
	.read		= o2net_debug_read,
	.llseek		= generic_file_llseek,
};
/* Tear down all debugfs entries; files first, directory last. */
void o2net_debugfs_exit(void)
{
	debugfs_remove(nodes_dentry);
	debugfs_remove(stats_dentry);
	debugfs_remove(sc_dentry);
	debugfs_remove(nst_dentry);
	debugfs_remove(o2net_dentry);
}
/*
 * Create the o2net debugfs directory and its four files. Each step only
 * runs if the previous one succeeded (debugfs_create_* returns NULL on
 * failure in this kernel); any failure unwinds everything.
 */
int o2net_debugfs_init(void)
{
	umode_t mode = S_IFREG|S_IRUSR;
	o2net_dentry = debugfs_create_dir(O2NET_DEBUG_DIR, NULL);
	if (o2net_dentry)
		nst_dentry = debugfs_create_file(NST_DEBUG_NAME, mode,
					o2net_dentry, NULL, &nst_seq_fops);
	if (nst_dentry)
		sc_dentry = debugfs_create_file(SC_DEBUG_NAME, mode,
					o2net_dentry, NULL, &sc_seq_fops);
	if (sc_dentry)
		stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, mode,
					o2net_dentry, NULL, &stats_seq_fops);
	if (stats_dentry)
		nodes_dentry = debugfs_create_file(NODES_DEBUG_NAME, mode,
					o2net_dentry, NULL, &nodes_fops);
	if (nodes_dentry)
		return 0;
	o2net_debugfs_exit();
	mlog_errno(-ENOMEM);
	return -ENOMEM;
}
#endif /* CONFIG_DEBUG_FS */
| gpl-2.0 |
armani-dev/kernel_test | sound/firewire/lib.c | 7994 | 2393 | /*
* miscellaneous helper functions
*
* Copyright (c) Clemens Ladisch <clemens@ladisch.de>
* Licensed under the terms of the GNU General Public License, version 2.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firewire.h>
#include <linux/module.h>
#include "lib.h"
#define ERROR_RETRY_DELAY_MS 5
/**
* rcode_string - convert a firewire result code to a string
* @rcode: the result
*/
const char *rcode_string(unsigned int rcode)
{
static const char *const names[] = {
[RCODE_COMPLETE] = "complete",
[RCODE_CONFLICT_ERROR] = "conflict error",
[RCODE_DATA_ERROR] = "data error",
[RCODE_TYPE_ERROR] = "type error",
[RCODE_ADDRESS_ERROR] = "address error",
[RCODE_SEND_ERROR] = "send error",
[RCODE_CANCELLED] = "cancelled",
[RCODE_BUSY] = "busy",
[RCODE_GENERATION] = "generation",
[RCODE_NO_ACK] = "no ack",
};
if (rcode < ARRAY_SIZE(names) && names[rcode])
return names[rcode];
else
return "unknown";
}
EXPORT_SYMBOL(rcode_string);
/**
* snd_fw_transaction - send a request and wait for its completion
* @unit: the driver's unit on the target device
* @tcode: the transaction code
* @offset: the address in the target's address space
* @buffer: input/output data
* @length: length of @buffer
*
* Submits an asynchronous request to the target device, and waits for the
* response. The node ID and the current generation are derived from @unit.
* On a bus reset or an error, the transaction is retried a few times.
* Returns zero on success, or a negative error code.
*/
int snd_fw_transaction(struct fw_unit *unit, int tcode,
		       u64 offset, void *buffer, size_t length)
{
	struct fw_device *device = fw_parent_device(unit);
	int generation, rcode, tries = 0;
	for (;;) {
		/* Sample the bus generation before the node_id is used */
		generation = device->generation;
		smp_rmb(); /* node_id vs. generation */
		rcode = fw_run_transaction(device->card, tcode,
					   device->node_id, generation,
					   device->max_speed, offset,
					   buffer, length);
		if (rcode == RCODE_COMPLETE)
			return 0;
		/* Give up on hard errors, or after three transient failures */
		if (rcode_is_permanent_error(rcode) || ++tries >= 3) {
			dev_err(&unit->device, "transaction failed: %s\n",
				rcode_string(rcode));
			return -EIO;
		}
		/* Transient failure (e.g. bus reset): back off and retry */
		msleep(ERROR_RETRY_DELAY_MS);
	}
}
EXPORT_SYMBOL(snd_fw_transaction);
MODULE_DESCRIPTION("FireWire audio helper functions");
MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
CyanogenMod/android_kernel_samsung_klte | arch/powerpc/lib/code-patching.c | 9274 | 12715 | /*
* Copyright 2008 Michael Ellerman, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/code-patching.h>
/*
 * Store @instr at @addr, then flush the data cache line and invalidate
 * the matching icache line so the CPU fetches the new instruction.
 */
void patch_instruction(unsigned int *addr, unsigned int instr)
{
	*addr = instr;
	asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
}
/* Patch an unconditional branch to @target (with @flags) in at @addr. */
void patch_branch(unsigned int *addr, unsigned long target, int flags)
{
	patch_instruction(addr, create_branch(addr, target, flags));
}
/*
 * Build an I-form (unconditional) branch instruction at @addr targeting
 * @target. Returns 0 if the displacement cannot be encoded.
 */
unsigned int create_branch(const unsigned int *addr,
			   unsigned long target, int flags)
{
	long displacement = target;

	if (!(flags & BRANCH_ABSOLUTE))
		displacement -= (unsigned long)addr;

	/* I-form carries a 26-bit signed, word-aligned displacement */
	if (displacement < -0x2000000 || displacement > 0x1fffffc ||
	    (displacement & 0x3))
		return 0;

	/* Opcode 18; keep only the AA/LK flag bits and the displacement */
	return 0x48000000 | (flags & 0x3) | (displacement & 0x03FFFFFC);
}
/*
 * Build a B-form (conditional) branch instruction at @addr targeting
 * @target. Returns 0 if the displacement cannot be encoded.
 */
unsigned int create_cond_branch(const unsigned int *addr,
				unsigned long target, int flags)
{
	long displacement = target;

	if (!(flags & BRANCH_ABSOLUTE))
		displacement -= (unsigned long)addr;

	/* B-form carries a 16-bit signed, word-aligned displacement */
	if (displacement < -0x8000 || displacement > 0x7FFF ||
	    (displacement & 0x3))
		return 0;

	/* Opcode 16; keep BO/BI and AA/LK flag bits plus the displacement */
	return 0x40000000 | (flags & 0x3FF0003) | (displacement & 0xFFFC);
}
/* The primary opcode is the top six bits of any PowerPC instruction. */
static unsigned int branch_opcode(unsigned int insn)
{
	return (insn >> 26) & 0x3F;
}

/* Unconditional branch (b/ba/bl/bla) uses primary opcode 18. */
static int instr_is_branch_iform(unsigned int insn)
{
	return branch_opcode(insn) == 18;
}

/* Conditional branch (bc and friends) uses primary opcode 16. */
static int instr_is_branch_bform(unsigned int insn)
{
	return branch_opcode(insn) == 16;
}
/*
 * True if @instr is a PC-relative branch. Branches with the AA
 * (absolute) bit set are, by definition, not relative.
 */
int instr_is_relative_branch(unsigned int instr)
{
	return !(instr & BRANCH_ABSOLUTE) &&
	       (instr_is_branch_iform(instr) || instr_is_branch_bform(instr));
}
/* Decode the destination of an I-form branch; sign-extend the 26-bit field. */
static unsigned long branch_iform_target(const unsigned int *instr)
{
	signed long imm;
	imm = *instr & 0x3FFFFFC;
	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x2000000)
		imm -= 0x4000000;
	/* Relative branches are relative to the instruction's own address */
	if ((*instr & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;
	return (unsigned long)imm;
}
/* Decode the destination of a B-form branch; sign-extend the 16-bit field. */
static unsigned long branch_bform_target(const unsigned int *instr)
{
	signed long imm;
	imm = *instr & 0xFFFC;
	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x8000)
		imm -= 0x10000;
	/* Relative branches are relative to the instruction's own address */
	if ((*instr & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;
	return (unsigned long)imm;
}
/* Destination of any branch at @instr, or 0 if it isn't a branch. */
unsigned long branch_target(const unsigned int *instr)
{
	if (instr_is_branch_iform(*instr))
		return branch_iform_target(instr);
	else if (instr_is_branch_bform(*instr))
		return branch_bform_target(instr);
	return 0;
}
/* True if the branch at @instr resolves to destination @addr. */
int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr)
{
	if (instr_is_branch_iform(*instr) || instr_is_branch_bform(*instr))
		return branch_target(instr) == addr;
	return 0;
}
/*
 * Re-encode the branch at @src so that, when placed at @dest, it still
 * reaches the same destination. Returns 0 if the new displacement does
 * not fit the instruction's form.
 */
unsigned int translate_branch(const unsigned int *dest, const unsigned int *src)
{
	unsigned long target;
	target = branch_target(src);
	/* Pass *src as flags: AA/LK (and BO/BI for B-form) are preserved */
	if (instr_is_branch_iform(*src))
		return create_branch(dest, target, *src);
	else if (instr_is_branch_bform(*src))
		return create_cond_branch(dest, target, *src);
	return 0;
}
#ifdef CONFIG_CODE_PATCHING_SELFTEST
/* Dummy branch target for the function-call self-test below. */
static void __init test_trampoline(void)
{
	asm ("nop;\n");
}
/* Self-test assertion: log the failing line but keep going. */
#define check(x)	\
	if (!(x)) printk("code-patching: test failed at line %d\n", __LINE__);
/* Self-test: I-form classification, decoding and encoding edge cases. */
static void __init test_branch_iform(void)
{
	unsigned int instr;
	unsigned long addr;
	addr = (unsigned long)&instr;
	/* The simplest case, branch to self, no flags */
	check(instr_is_branch_iform(0x48000000));
	/* All bits of target set, and flags */
	check(instr_is_branch_iform(0x4bffffff));
	/* High bit of opcode set, which is wrong */
	check(!instr_is_branch_iform(0xcbffffff));
	/* Middle bits of opcode set, which is wrong */
	check(!instr_is_branch_iform(0x7bffffff));
	/* Simplest case, branch to self with link */
	check(instr_is_branch_iform(0x48000001));
	/* All bits of targets set */
	check(instr_is_branch_iform(0x4bfffffd));
	/* Some bits of targets set */
	check(instr_is_branch_iform(0x4bff00fd));
	/* Must be a valid branch to start with */
	check(!instr_is_branch_iform(0x7bfffffd));
	/* Absolute branch to 0x100 */
	instr = 0x48000103;
	check(instr_is_branch_to_addr(&instr, 0x100));
	/* Absolute branch to 0x420fc */
	instr = 0x480420ff;
	check(instr_is_branch_to_addr(&instr, 0x420fc));
	/* Maximum positive relative branch, + 20MB - 4B */
	instr = 0x49fffffc;
	check(instr_is_branch_to_addr(&instr, addr + 0x1FFFFFC));
	/* Smallest negative relative branch, - 4B */
	instr = 0x4bfffffc;
	check(instr_is_branch_to_addr(&instr, addr - 4));
	/* Largest negative relative branch, - 32 MB */
	instr = 0x4a000000;
	check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
	/* Branch to self, with link */
	instr = create_branch(&instr, addr, BRANCH_SET_LINK);
	check(instr_is_branch_to_addr(&instr, addr));
	/* Branch to self - 0x100, with link */
	instr = create_branch(&instr, addr - 0x100, BRANCH_SET_LINK);
	check(instr_is_branch_to_addr(&instr, addr - 0x100));
	/* Branch to self + 0x100, no link */
	instr = create_branch(&instr, addr + 0x100, 0);
	check(instr_is_branch_to_addr(&instr, addr + 0x100));
	/* Maximum relative negative offset, - 32 MB */
	instr = create_branch(&instr, addr - 0x2000000, BRANCH_SET_LINK);
	check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
	/* Out of range relative negative offset, - 32 MB + 4*/
	instr = create_branch(&instr, addr - 0x2000004, BRANCH_SET_LINK);
	check(instr == 0);
	/* Out of range relative positive offset, + 32 MB */
	instr = create_branch(&instr, addr + 0x2000000, BRANCH_SET_LINK);
	check(instr == 0);
	/* Unaligned target */
	instr = create_branch(&instr, addr + 3, BRANCH_SET_LINK);
	check(instr == 0);
	/* Check flags are masked correctly */
	instr = create_branch(&instr, addr, 0xFFFFFFFC);
	check(instr_is_branch_to_addr(&instr, addr));
	check(instr == 0x48000000);
}
/* Self-test: patch a live "bl" into the trampoline and verify its target. */
static void __init test_create_function_call(void)
{
	unsigned int *iptr;
	unsigned long dest;
	/* Check we can create a function call */
	iptr = (unsigned int *)ppc_function_entry(test_trampoline);
	dest = ppc_function_entry(test_create_function_call);
	patch_instruction(iptr, create_branch(iptr, dest, BRANCH_SET_LINK));
	check(instr_is_branch_to_addr(iptr, dest));
}
/* Self-test: B-form classification, decoding and encoding edge cases. */
static void __init test_branch_bform(void)
{
	unsigned long addr;
	unsigned int *iptr, instr, flags;
	iptr = &instr;
	addr = (unsigned long)iptr;
	/* The simplest case, branch to self, no flags */
	check(instr_is_branch_bform(0x40000000));
	/* All bits of target set, and flags */
	check(instr_is_branch_bform(0x43ffffff));
	/* High bit of opcode set, which is wrong */
	check(!instr_is_branch_bform(0xc3ffffff));
	/* Middle bits of opcode set, which is wrong */
	check(!instr_is_branch_bform(0x7bffffff));
	/* Absolute conditional branch to 0x100 */
	instr = 0x43ff0103;
	check(instr_is_branch_to_addr(&instr, 0x100));
	/* Absolute conditional branch to 0x20fc */
	instr = 0x43ff20ff;
	check(instr_is_branch_to_addr(&instr, 0x20fc));
	/* Maximum positive relative conditional branch, + 32 KB - 4B */
	instr = 0x43ff7ffc;
	check(instr_is_branch_to_addr(&instr, addr + 0x7FFC));
	/* Smallest negative relative conditional branch, - 4B */
	instr = 0x43fffffc;
	check(instr_is_branch_to_addr(&instr, addr - 4));
	/* Largest negative relative conditional branch, - 32 KB */
	instr = 0x43ff8000;
	check(instr_is_branch_to_addr(&instr, addr - 0x8000));
	/* All condition code bits set & link */
	flags = 0x3ff000 | BRANCH_SET_LINK;
	/* Branch to self */
	instr = create_cond_branch(iptr, addr, flags);
	check(instr_is_branch_to_addr(&instr, addr));
	/* Branch to self - 0x100 */
	instr = create_cond_branch(iptr, addr - 0x100, flags);
	check(instr_is_branch_to_addr(&instr, addr - 0x100));
	/* Branch to self + 0x100 */
	instr = create_cond_branch(iptr, addr + 0x100, flags);
	check(instr_is_branch_to_addr(&instr, addr + 0x100));
	/* Maximum relative negative offset, - 32 KB */
	instr = create_cond_branch(iptr, addr - 0x8000, flags);
	check(instr_is_branch_to_addr(&instr, addr - 0x8000));
	/* Out of range relative negative offset, - 32 KB + 4*/
	instr = create_cond_branch(iptr, addr - 0x8004, flags);
	check(instr == 0);
	/* Out of range relative positive offset, + 32 KB */
	instr = create_cond_branch(iptr, addr + 0x8000, flags);
	check(instr == 0);
	/* Unaligned target */
	instr = create_cond_branch(iptr, addr + 3, flags);
	check(instr == 0);
	/* Check flags are masked correctly */
	instr = create_cond_branch(iptr, addr, 0xFFFFFFFC);
	check(instr_is_branch_to_addr(&instr, addr));
	check(instr == 0x43FF0000);
}
/*
 * Self-test: relocate branches between addresses spanning the full
 * encodable range of both forms and verify they still hit their target.
 * Needs a 32MB+ virtual area so maximum I-form displacements can be built.
 */
static void __init test_translate_branch(void)
{
	unsigned long addr;
	unsigned int *p, *q;
	void *buf;
	buf = vmalloc(PAGE_ALIGN(0x2000000 + 1));
	check(buf);
	if (!buf)
		return;
	/* Simple case, branch to self moved a little */
	p = buf;
	addr = (unsigned long)p;
	patch_branch(p, addr, 0);
	check(instr_is_branch_to_addr(p, addr));
	q = p + 1;
	patch_instruction(q, translate_branch(q, p));
	check(instr_is_branch_to_addr(q, addr));
	/* Maximum negative case, move b . to addr + 32 MB */
	p = buf;
	addr = (unsigned long)p;
	patch_branch(p, addr, 0);
	q = buf + 0x2000000;
	patch_instruction(q, translate_branch(q, p));
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));
	check(*q == 0x4a000000);
	/* Maximum positive case, move x to x - 32 MB + 4 */
	p = buf + 0x2000000;
	addr = (unsigned long)p;
	patch_branch(p, addr, 0);
	q = buf + 4;
	patch_instruction(q, translate_branch(q, p));
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));
	check(*q == 0x49fffffc);
	/* Jump to x + 16 MB moved to x + 20 MB */
	p = buf;
	addr = 0x1000000 + (unsigned long)buf;
	patch_branch(p, addr, BRANCH_SET_LINK);
	q = buf + 0x1400000;
	patch_instruction(q, translate_branch(q, p));
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));
	/* Jump to x + 16 MB moved to x - 16 MB + 4 */
	p = buf + 0x1000000;
	addr = 0x2000000 + (unsigned long)buf;
	patch_branch(p, addr, 0);
	q = buf + 4;
	patch_instruction(q, translate_branch(q, p));
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));
	/* Conditional branch tests */
	/* Simple case, branch to self moved a little */
	p = buf;
	addr = (unsigned long)p;
	patch_instruction(p, create_cond_branch(p, addr, 0));
	check(instr_is_branch_to_addr(p, addr));
	q = p + 1;
	patch_instruction(q, translate_branch(q, p));
	check(instr_is_branch_to_addr(q, addr));
	/* Maximum negative case, move b . to addr + 32 KB */
	p = buf;
	addr = (unsigned long)p;
	patch_instruction(p, create_cond_branch(p, addr, 0xFFFFFFFC));
	q = buf + 0x8000;
	patch_instruction(q, translate_branch(q, p));
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));
	check(*q == 0x43ff8000);
	/* Maximum positive case, move x to x - 32 KB + 4 */
	p = buf + 0x8000;
	addr = (unsigned long)p;
	patch_instruction(p, create_cond_branch(p, addr, 0xFFFFFFFC));
	q = buf + 4;
	patch_instruction(q, translate_branch(q, p));
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));
	check(*q == 0x43ff7ffc);
	/* Jump to x + 12 KB moved to x + 20 KB */
	p = buf;
	addr = 0x3000 + (unsigned long)buf;
	patch_instruction(p, create_cond_branch(p, addr, BRANCH_SET_LINK));
	q = buf + 0x5000;
	patch_instruction(q, translate_branch(q, p));
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));
	/* Jump to x + 8 KB moved to x - 8 KB + 4 */
	p = buf + 0x2000;
	addr = 0x4000 + (unsigned long)buf;
	patch_instruction(p, create_cond_branch(p, addr, 0));
	q = buf + 4;
	patch_instruction(q, translate_branch(q, p));
	check(instr_is_branch_to_addr(p, addr));
	check(instr_is_branch_to_addr(q, addr));
	/* Free the buffer we were using */
	vfree(buf);
}
/* Entry point for the self-tests; runs late in boot when enabled. */
static int __init test_code_patching(void)
{
	printk(KERN_DEBUG "Running code patching self-tests ...\n");
	test_branch_iform();
	test_branch_bform();
	test_create_function_call();
	test_translate_branch();
	return 0;
}
late_initcall(test_code_patching);
#endif /* CONFIG_CODE_PATCHING_SELFTEST */
| gpl-2.0 |
novaspirit/tf101-linux-2.6.36 | arch/mips/fw/arc/memory.c | 9530 | 3629 | /*
* memory.c: PROM library functions for acquiring/using memory descriptors
* given to us from the ARCS firmware.
*
* Copyright (C) 1996 by David S. Miller
* Copyright (C) 1999, 2000, 2001 by Ralf Baechle
* Copyright (C) 1999, 2000 by Silicon Graphics, Inc.
*
* PROM library functions for acquiring/using memory descriptors given to us
* from the ARCS firmware. This is only used when CONFIG_ARC_MEMORY is set
* because on some machines like SGI IP27 the ARC memory configuration data
* completly bogus and alternate easier to use mechanisms are available.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <asm/sgialib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/bootinfo.h>
#undef DEBUG
/*
* For ARC firmware memory functions the unit of measuring memory is always
* a 4k page of memory
*/
#define ARC_PAGE_SHIFT 12
/*
 * Ask the ARC firmware for the memory descriptor following @Current in
 * its list (pass PROM_NULL_MDESC to start at the head).  Returns NULL
 * once the list is exhausted, which is how the walk in prom_meminit()
 * terminates.
 */
struct linux_mdesc * __init ArcGetMemoryDescriptor(struct linux_mdesc *Current)
{
	return (struct linux_mdesc *) ARC_CALL1(get_mdesc, Current);
}
#ifdef DEBUG			/* convenient for debugging */
/* Human-readable names for the ARCS (SGI flavour) memory descriptor
 * types, indexed by the firmware's type value. */
static char *arcs_mtypes[8] = {
	"Exception Block",
	"ARCS Romvec Page",
	"Free/Contig RAM",
	"Generic Free RAM",
	"Bad Memory",
	"Standalone Program Pages",
	"ARCS Temp Storage Area",
	"ARCS Permanent Storage Area"
};

/* Same, for the standard ARC memory descriptor types. */
static char *arc_mtypes[8] = {
	"Exception Block",
	"SystemParameterBlock",
	"FreeMemory",
	"Bad Memory",
	"LoadedProgram",
	"FirmwareTemporary",
	"FirmwarePermanent",
	"FreeContiguous"
};

/* Pick the name table matching the firmware dialect in use. */
#define mtypes(a) (prom_flags & PROM_FLAG_ARCS) ? arcs_mtypes[a.arcs] \
						: arc_mtypes[a.arc]
#endif
/*
 * Translate an ARCS (SGI-flavour) firmware memory descriptor type into
 * one of the Linux BOOT_MEM_* classes.  An unknown type is a firmware
 * bug we cannot recover from, hence BUG().
 */
static inline int memtype_classify_arcs(union linux_memtypes type)
{
	switch (type.arcs) {
	case arcs_free:
	case arcs_fcontig:
		return BOOT_MEM_RAM;
	case arcs_atmp:
		return BOOT_MEM_ROM_DATA;
	case arcs_aperm:
	case arcs_prog:
	case arcs_bmem:
	case arcs_rvpage:
	case arcs_eblock:
		return BOOT_MEM_RESERVED;
	default:
		BUG();
	}

	/* Not reached; keeps the "control reaches end of non-void
	 * function" warning quiet. */
	while (1)
		;
}
/*
 * Translate a standard ARC firmware memory descriptor type into one of
 * the Linux BOOT_MEM_* classes.  Unknown types trigger BUG().
 */
static inline int memtype_classify_arc(union linux_memtypes type)
{
	switch (type.arc) {
	case arc_fcontig:
	case arc_free:
		return BOOT_MEM_RAM;
	case arc_atmp:
		return BOOT_MEM_ROM_DATA;
	case arc_aperm:
	case arc_prog:
	case arc_bmem:
	case arc_rvpage:
	case arc_eblock:
		return BOOT_MEM_RESERVED;
	default:
		BUG();
	}

	/* Not reached; keeps the "control reaches end of non-void
	 * function" warning quiet. */
	while (1)
		;
}
/*
 * Dispatch a firmware memory type to the right classifier: SGI boxes
 * speak the older ARCS dialect, everything else uses plain ARC.
 */
static int __init prom_memtype_classify(union linux_memtypes type)
{
	return (prom_flags & PROM_FLAG_ARCS) ? memtype_classify_arcs(type)
					     : memtype_classify_arc(type);
}
/*
 * Walk the firmware's memory descriptor list and register each region
 * with the kernel's boot memory map.  Firmware sizes are in 4k pages
 * (ARC_PAGE_SHIFT), converted here to byte addresses/lengths.
 */
void __init prom_meminit(void)
{
	struct linux_mdesc *p;
#ifdef DEBUG
	int i = 0;

	/* Dump the raw descriptor list before consuming it. */
	printk("ARCS MEMORY DESCRIPTOR dump:\n");
	p = ArcGetMemoryDescriptor(PROM_NULL_MDESC);
	while (p) {
		printk("[%d,%p]: base<%08lx> pages<%08lx> type<%s>\n",
		       i, p, p->base, p->pages, mtypes(p->type));
		p = ArcGetMemoryDescriptor(p);
		i++;
	}
#endif
	p = PROM_NULL_MDESC;
	while ((p = ArcGetMemoryDescriptor(p))) {
		unsigned long base, size;
		long type;

		base = p->base << ARC_PAGE_SHIFT;
		size = p->pages << ARC_PAGE_SHIFT;
		type = prom_memtype_classify(p->type);

		add_memory_region(base, size, type);
	}
}
/*
 * Return the firmware's temporary storage areas (classified as
 * BOOT_MEM_ROM_DATA by prom_memtype_classify()) to the page allocator,
 * unless the platform asked for the PROM to be preserved.
 */
void __init prom_free_prom_memory(void)
{
	unsigned long addr;
	int i;

	if (prom_flags & PROM_FLAG_DONT_FREE_TEMP)
		return;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
			continue;

		addr = boot_mem_map.map[i].addr;
		free_init_pages("prom memory",
				addr, addr + boot_mem_map.map[i].size);
	}
}
| gpl-2.0 |
u-ra/android_kernel_htc_villec2 | fs/ocfs2/dlmfs/userdlm.c | 13626 | 17995 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* userdlm.c
*
* Code which implements the kernel side of a minimal userspace
* interface to our DLM.
*
* Many of the functions here are pared down versions of dlmglue.c
* functions.
*
* Copyright (C) 2003, 2004 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/crc32.h>
#include "ocfs2_lockingver.h"
#include "stackglue.h"
#include "userdlm.h"
#define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h"
/* Map an lksb pointer back to the user_lock_res that embeds it. */
static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
{
	return container_of(lksb, struct user_lock_res, l_lksb);
}
/* Sample @flag from l_flags under the lockres spinlock; nonzero if set. */
static inline int user_check_wait_flag(struct user_lock_res *lockres,
				       int flag)
{
	int ret;

	spin_lock(&lockres->l_lock);
	ret = lockres->l_flags & flag;
	spin_unlock(&lockres->l_lock);

	return ret;
}
/* Sleep until an in-flight DLM operation clears USER_LOCK_BUSY. */
static inline void user_wait_on_busy_lock(struct user_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !user_check_wait_flag(lockres, USER_LOCK_BUSY));
}
/* Sleep until a pending downconvert clears USER_LOCK_BLOCKED. */
static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !user_check_wait_flag(lockres, USER_LOCK_BLOCKED));
}
/* I heart container_of... */
/* The lockres is embedded in a dlmfs_inode_private; walk out to it to
 * find the cluster connection the lock lives on. */
static inline struct ocfs2_cluster_connection *
cluster_connection_from_user_lockres(struct user_lock_res *lockres)
{
	struct dlmfs_inode_private *ip;

	ip = container_of(lockres,
			  struct dlmfs_inode_private,
			  ip_lockres);
	return ip->ip_conn;
}
/* Walk from an embedded lockres out to the VFS inode that contains it. */
static struct inode *
user_dlm_inode_from_user_lockres(struct user_lock_res *lockres)
{
	struct dlmfs_inode_private *ip;

	ip = container_of(lockres,
			  struct dlmfs_inode_private,
			  ip_lockres);
	return &ip->ip_vfs_inode;
}
/* A DLM call failed before an AST could fire: drop BUSY ourselves so
 * waiters don't hang (no wake_up here; callers re-check or bail). */
static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
{
	spin_lock(&lockres->l_lock);
	lockres->l_flags &= ~USER_LOCK_BUSY;
	spin_unlock(&lockres->l_lock);
}
/* Log a DLM call failure together with the lock name it was issued on. */
#define user_log_dlm_error(_func, _stat, _lockres) do {			\
	mlog(ML_ERROR, "Dlm error %d while calling %s on "		\
		"resource %.*s\n", _stat, _func,			\
		_lockres->l_namelen, _lockres->l_name);			\
} while (0)
/* WARNING: This function lives in a world where the only three lock
 * levels are EX, PR, and NL. It *will* have to be adjusted when more
 * lock types are added. */
/* Return the highest lock level another node may hold concurrently
 * with @level: EX excludes everything (NL), PR coexists with PR, and
 * NL coexists with anything (EX). */
static inline int user_highest_compat_lock_level(int level)
{
	if (level == DLM_LOCK_EX)
		return DLM_LOCK_NL;
	if (level == DLM_LOCK_PR)
		return DLM_LOCK_PR;
	return DLM_LOCK_EX;
}
/*
 * DLM lock AST: fired when a lock or convert request completes.
 * Records the newly granted level, clears BUSY and wakes waiters.
 */
static void user_ast(struct ocfs2_dlm_lksb *lksb)
{
	struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
	int status;

	mlog(ML_BASTS, "AST fired for lockres %.*s, level %d => %d\n",
	     lockres->l_namelen, lockres->l_name, lockres->l_level,
	     lockres->l_requested);

	spin_lock(&lockres->l_lock);

	status = ocfs2_dlm_lock_status(&lockres->l_lksb);
	if (status) {
		mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",
		     status, lockres->l_namelen, lockres->l_name);
		spin_unlock(&lockres->l_lock);
		return;
	}

	mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV,
			"Lockres %.*s, requested ivmode. flags 0x%x\n",
			lockres->l_namelen, lockres->l_name, lockres->l_flags);

	/* we're downconverting. */
	if (lockres->l_requested < lockres->l_level) {
		if (lockres->l_requested <=
		    user_highest_compat_lock_level(lockres->l_blocking)) {
			/* The new level satisfies whoever was blocking us,
			 * so the lock is no longer blocked. */
			lockres->l_blocking = DLM_LOCK_NL;
			lockres->l_flags &= ~USER_LOCK_BLOCKED;
		}
	}

	lockres->l_level = lockres->l_requested;
	lockres->l_requested = DLM_LOCK_IV;
	lockres->l_flags |= USER_LOCK_ATTACHED;
	lockres->l_flags &= ~USER_LOCK_BUSY;

	spin_unlock(&lockres->l_lock);

	wake_up(&lockres->l_event);
}
/* Pin the owning inode while asynchronous work on the lockres is
 * outstanding.  igrab() returning NULL would mean the inode was
 * already dying, which must not happen here. */
static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
{
	struct inode *inode;

	inode = user_dlm_inode_from_user_lockres(lockres);
	if (!igrab(inode))
		BUG();
}
static void user_dlm_unblock_lock(struct work_struct *work);

/* Queue the lockres for unblock processing (at most once - guarded by
 * USER_LOCK_QUEUED).  Caller holds l_lock.  Takes an inode reference
 * which user_dlm_unblock_lock() drops when the work runs. */
static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
{
	if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
		user_dlm_grab_inode_ref(lockres);

		INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);

		queue_work(user_dlm_worker, &lockres->l_work);
		lockres->l_flags |= USER_LOCK_QUEUED;
	}
}
/* Queue unblock work only if the lock is blocked AND no local holder
 * still conflicts with the level another node is waiting for.
 * Caller holds l_lock. */
static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
{
	int queue = 0;

	if (!(lockres->l_flags & USER_LOCK_BLOCKED))
		return;

	switch (lockres->l_blocking) {
	case DLM_LOCK_EX:
		/* EX waiter: every local holder conflicts. */
		if (!lockres->l_ex_holders && !lockres->l_ro_holders)
			queue = 1;
		break;
	case DLM_LOCK_PR:
		/* PR waiter: only local EX holders conflict. */
		if (!lockres->l_ex_holders)
			queue = 1;
		break;
	default:
		BUG();
	}

	if (queue)
		__user_dlm_queue_lockres(lockres);
}
/*
 * DLM blocking AST: another node wants the lock at @level and we are
 * in the way.  Record the strongest blocking level seen and kick the
 * unblock worker.
 */
static void user_bast(struct ocfs2_dlm_lksb *lksb, int level)
{
	struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);

	mlog(ML_BASTS, "BAST fired for lockres %.*s, blocking %d, level %d\n",
	     lockres->l_namelen, lockres->l_name, level, lockres->l_level);

	spin_lock(&lockres->l_lock);
	lockres->l_flags |= USER_LOCK_BLOCKED;
	if (level > lockres->l_blocking)
		lockres->l_blocking = level;

	__user_dlm_queue_lockres(lockres);
	spin_unlock(&lockres->l_lock);

	wake_up(&lockres->l_event);
}
/*
 * DLM unlock AST: fired when an unlock or convert-cancel completes.
 * Distinguishes a real teardown unlock from a cancel (and from a
 * cancel that raced with a grant), then clears BUSY and wakes waiters.
 */
static void user_unlock_ast(struct ocfs2_dlm_lksb *lksb, int status)
{
	struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);

	mlog(ML_BASTS, "UNLOCK AST fired for lockres %.*s, flags 0x%x\n",
	     lockres->l_namelen, lockres->l_name, lockres->l_flags);

	if (status)
		mlog(ML_ERROR, "dlm returns status %d\n", status);

	spin_lock(&lockres->l_lock);
	/* The teardown flag gets set early during the unlock process,
	 * so test the cancel flag to make sure that this ast isn't
	 * for a concurrent cancel. */
	if (lockres->l_flags & USER_LOCK_IN_TEARDOWN
	    && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
		lockres->l_level = DLM_LOCK_IV;
	} else if (status == DLM_CANCELGRANT) {
		/* We tried to cancel a convert request, but it was
		 * already granted. Don't clear the busy flag - the
		 * ast should've done this already. */
		BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
		lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
		goto out_noclear;
	} else {
		BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
		/* Cancel succeeded, we want to re-queue */
		lockres->l_requested = DLM_LOCK_IV; /* cancel an
						    * upconvert
						    * request. */
		lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
		/* we want the unblock thread to look at it again
		 * now. */
		if (lockres->l_flags & USER_LOCK_BLOCKED)
			__user_dlm_queue_lockres(lockres);
	}

	lockres->l_flags &= ~USER_LOCK_BUSY;
out_noclear:
	spin_unlock(&lockres->l_lock);

	wake_up(&lockres->l_event);
}
/*
 * This is the userdlmfs locking protocol version.
 *
 * See fs/ocfs2/dlmglue.c for more details on locking versions.
 */
static struct ocfs2_locking_protocol user_dlm_lproto = {
	.lp_max_version = {
		.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
		.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
	},
	/* Callbacks the DLM stack invokes on grant, blocking
	 * notification and unlock completion, respectively. */
	.lp_lock_ast		= user_ast,
	.lp_blocking_ast	= user_bast,
	.lp_unlock_ast		= user_unlock_ast,
};
/* Release the inode reference taken by user_dlm_grab_inode_ref(). */
static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
{
	struct inode *inode;

	inode = user_dlm_inode_from_user_lockres(lockres);
	iput(inode);
}
/*
 * Workqueue function that services a blocked lockres: cancels an
 * in-flight convert if one is pending, or - once all incompatible
 * local holders are gone - downconverts the lock to the highest level
 * still compatible with the blocking request.  Drops the inode
 * reference taken when the work was queued.
 */
static void user_dlm_unblock_lock(struct work_struct *work)
{
	int new_level, status;
	struct user_lock_res *lockres =
		container_of(work, struct user_lock_res, l_work);
	struct ocfs2_cluster_connection *conn =
		cluster_connection_from_user_lockres(lockres);

	mlog(0, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);

	spin_lock(&lockres->l_lock);

	mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED),
			"Lockres %.*s, flags 0x%x\n",
			lockres->l_namelen, lockres->l_name, lockres->l_flags);

	/* notice that we don't clear USER_LOCK_BLOCKED here. If it's
	 * set, we want user_ast to clear it. */
	lockres->l_flags &= ~USER_LOCK_QUEUED;

	/* It's valid to get here and no longer be blocked - if we get
	 * several basts in a row, we might be queued by the first
	 * one, the unblock thread might run and clear the queued
	 * flag, and finally we might get another bast which re-queues
	 * us before our ast for the downconvert is called. */
	if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
		mlog(ML_BASTS, "lockres %.*s USER_LOCK_BLOCKED\n",
		     lockres->l_namelen, lockres->l_name);
		spin_unlock(&lockres->l_lock);
		goto drop_ref;
	}

	if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
		mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_TEARDOWN\n",
		     lockres->l_namelen, lockres->l_name);
		spin_unlock(&lockres->l_lock);
		goto drop_ref;
	}

	if (lockres->l_flags & USER_LOCK_BUSY) {
		if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
			/* A cancel is already in flight; nothing to do. */
			mlog(ML_BASTS, "lockres %.*s USER_LOCK_IN_CANCEL\n",
			     lockres->l_namelen, lockres->l_name);
			spin_unlock(&lockres->l_lock);
			goto drop_ref;
		}

		/* Busy without a cancel pending: try to cancel the
		 * outstanding convert; user_unlock_ast() re-queues us. */
		lockres->l_flags |= USER_LOCK_IN_CANCEL;
		spin_unlock(&lockres->l_lock);

		status = ocfs2_dlm_unlock(conn, &lockres->l_lksb,
					  DLM_LKF_CANCEL);
		if (status)
			user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
		goto drop_ref;
	}

	/* If there are still incompat holders, we can exit safely
	 * without worrying about re-queueing this lock as that will
	 * happen on the last call to user_cluster_unlock. */
	if ((lockres->l_blocking == DLM_LOCK_EX)
	    && (lockres->l_ex_holders || lockres->l_ro_holders)) {
		spin_unlock(&lockres->l_lock);
		mlog(ML_BASTS, "lockres %.*s, EX/PR Holders %u,%u\n",
		     lockres->l_namelen, lockres->l_name,
		     lockres->l_ex_holders, lockres->l_ro_holders);
		goto drop_ref;
	}

	if ((lockres->l_blocking == DLM_LOCK_PR)
	    && lockres->l_ex_holders) {
		spin_unlock(&lockres->l_lock);
		mlog(ML_BASTS, "lockres %.*s, EX Holders %u\n",
		     lockres->l_namelen, lockres->l_name,
		     lockres->l_ex_holders);
		goto drop_ref;
	}

	/* yay, we can downconvert now. */
	new_level = user_highest_compat_lock_level(lockres->l_blocking);
	lockres->l_requested = new_level;
	lockres->l_flags |= USER_LOCK_BUSY;
	mlog(ML_BASTS, "lockres %.*s, downconvert %d => %d\n",
	     lockres->l_namelen, lockres->l_name, lockres->l_level, new_level);
	spin_unlock(&lockres->l_lock);

	/* need lock downconvert request now... */
	status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb,
				DLM_LKF_CONVERT|DLM_LKF_VALBLK,
				lockres->l_name,
				lockres->l_namelen);
	if (status) {
		user_log_dlm_error("ocfs2_dlm_lock", status, lockres);
		user_recover_from_dlm_error(lockres);
	}

drop_ref:
	user_dlm_drop_inode_ref(lockres);
}
/* Account one more local holder at @level under the caller-held
 * l_lock; only EX and PR holders exist in this world. */
static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
					int level)
{
	if (level == DLM_LOCK_EX)
		lockres->l_ex_holders++;
	else if (level == DLM_LOCK_PR)
		lockres->l_ro_holders++;
	else
		BUG();
}
/* predict what lock level we'll be dropping down to on behalf
 * of another node, and return true if the currently wanted
 * level will be compatible with it. */
static inline int
user_may_continue_on_blocked_lock(struct user_lock_res *lockres,
				  int wanted)
{
	BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED));

	return wanted <= user_highest_compat_lock_level(lockres->l_blocking);
}
/*
 * Acquire (or upconvert to) a cluster lock at @level on behalf of a
 * local holder.  @level must be DLM_LOCK_EX or DLM_LOCK_PR.  Sleeps
 * until granted; returns 0 on success, -ERESTARTSYS on signal,
 * -EINVAL on a bad level, or the DLM error from ocfs2_dlm_lock().
 * @lkm_flags is passed through to the DLM (e.g. DLM_LKF_NOQUEUE).
 */
int user_dlm_cluster_lock(struct user_lock_res *lockres,
			  int level,
			  int lkm_flags)
{
	int status, local_flags;
	struct ocfs2_cluster_connection *conn =
		cluster_connection_from_user_lockres(lockres);

	if (level != DLM_LOCK_EX &&
	    level != DLM_LOCK_PR) {
		mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
		     lockres->l_namelen, lockres->l_name);
		status = -EINVAL;
		goto bail;
	}

	mlog(ML_BASTS, "lockres %.*s, level %d, flags = 0x%x\n",
	     lockres->l_namelen, lockres->l_name, level, lkm_flags);

again:
	if (signal_pending(current)) {
		status = -ERESTARTSYS;
		goto bail;
	}

	spin_lock(&lockres->l_lock);

	/* We only compare against the currently granted level
	 * here. If the lock is blocked waiting on a downconvert,
	 * we'll get caught below. */
	if ((lockres->l_flags & USER_LOCK_BUSY) &&
	    (level > lockres->l_level)) {
		/* is someone sitting in dlm_lock? If so, wait on
		 * them. */
		spin_unlock(&lockres->l_lock);

		user_wait_on_busy_lock(lockres);
		goto again;
	}

	if ((lockres->l_flags & USER_LOCK_BLOCKED) &&
	    (!user_may_continue_on_blocked_lock(lockres, level))) {
		/* the lock is currently blocked on behalf of
		 * another node */
		spin_unlock(&lockres->l_lock);

		user_wait_on_blocked_lock(lockres);
		goto again;
	}

	if (level > lockres->l_level) {
		local_flags = lkm_flags | DLM_LKF_VALBLK;
		if (lockres->l_level != DLM_LOCK_IV)
			local_flags |= DLM_LKF_CONVERT;

		lockres->l_requested = level;
		lockres->l_flags |= USER_LOCK_BUSY;
		spin_unlock(&lockres->l_lock);

		BUG_ON(level == DLM_LOCK_IV);
		BUG_ON(level == DLM_LOCK_NL);

		/* call dlm_lock to upgrade lock now */
		status = ocfs2_dlm_lock(conn, level, &lockres->l_lksb,
					local_flags, lockres->l_name,
					lockres->l_namelen);
		if (status) {
			/* NOQUEUE trylocks are expected to fail with
			 * -EAGAIN; don't log that case. */
			if ((lkm_flags & DLM_LKF_NOQUEUE) &&
			    (status != -EAGAIN))
				user_log_dlm_error("ocfs2_dlm_lock",
						   status, lockres);
			user_recover_from_dlm_error(lockres);
			goto bail;
		}

		user_wait_on_busy_lock(lockres);
		goto again;
	}

	user_dlm_inc_holders(lockres, level);
	spin_unlock(&lockres->l_lock);

	status = 0;
bail:
	return status;
}
/* Drop one local holder at @level under the caller-held l_lock;
 * underflow means an unlock without a matching lock - BUG. */
static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
					int level)
{
	switch(level) {
	case DLM_LOCK_EX:
		BUG_ON(!lockres->l_ex_holders);
		lockres->l_ex_holders--;
		break;
	case DLM_LOCK_PR:
		BUG_ON(!lockres->l_ro_holders);
		lockres->l_ro_holders--;
		break;
	default:
		BUG();
	}
}
/*
 * Release one local holder at @level and, if that removed the last
 * conflicting holder while another node is waiting, queue the
 * downconvert worker.
 */
void user_dlm_cluster_unlock(struct user_lock_res *lockres,
			     int level)
{
	if (level != DLM_LOCK_EX &&
	    level != DLM_LOCK_PR) {
		mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
		     lockres->l_namelen, lockres->l_name);
		return;
	}

	spin_lock(&lockres->l_lock);
	user_dlm_dec_holders(lockres, level);
	__user_dlm_cond_queue_lockres(lockres);
	spin_unlock(&lockres->l_lock);
}
/*
 * Copy @len bytes of user data into the lock value block.  The caller
 * must hold the lock at EX (enforced by the BUG_ON below); @len must
 * not exceed DLM_LVB_LEN.
 */
void user_dlm_write_lvb(struct inode *inode,
			const char *val,
			unsigned int len)
{
	struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
	char *lvb;

	BUG_ON(len > DLM_LVB_LEN);

	spin_lock(&lockres->l_lock);

	BUG_ON(lockres->l_level < DLM_LOCK_EX);
	lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
	memcpy(lvb, val, len);

	spin_unlock(&lockres->l_lock);
}
/*
 * Copy up to @len bytes of the lock value block out to @val.  The
 * caller must hold the lock at PR or better.  Returns @len, or 0 when
 * the DLM reports the LVB contents as invalid.
 */
ssize_t user_dlm_read_lvb(struct inode *inode,
			  char *val,
			  unsigned int len)
{
	struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
	char *lvb;
	ssize_t ret = len;

	BUG_ON(len > DLM_LVB_LEN);

	spin_lock(&lockres->l_lock);

	BUG_ON(lockres->l_level < DLM_LOCK_PR);
	if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) {
		lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
		memcpy(val, lvb, len);
	} else
		ret = 0;

	spin_unlock(&lockres->l_lock);
	return ret;
}
/*
 * Initialize a lockres embedded in a freshly created dlmfs inode; the
 * lock name is taken from the dentry name (length-checked by the
 * caller, re-asserted here).
 */
void user_dlm_lock_res_init(struct user_lock_res *lockres,
			    struct dentry *dentry)
{
	memset(lockres, 0, sizeof(*lockres));

	spin_lock_init(&lockres->l_lock);
	init_waitqueue_head(&lockres->l_event);
	/* IV == "no level yet"; the first lock request skips CONVERT. */
	lockres->l_level = DLM_LOCK_IV;
	lockres->l_requested = DLM_LOCK_IV;
	lockres->l_blocking = DLM_LOCK_IV;

	/* should have been checked before getting here. */
	BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN);

	memcpy(lockres->l_name,
	       dentry->d_name.name,
	       dentry->d_name.len);
	lockres->l_namelen = dentry->d_name.len;
}
/*
 * Tear down a lockres: mark it USER_LOCK_IN_TEARDOWN, wait out any
 * in-flight operation, then drop the DLM-side lock.  Returns 0 on
 * success (or if teardown already started), -EBUSY while local
 * holders remain, or the error from ocfs2_dlm_unlock().
 */
int user_dlm_destroy_lock(struct user_lock_res *lockres)
{
	int status = -EBUSY;
	struct ocfs2_cluster_connection *conn =
		cluster_connection_from_user_lockres(lockres);

	mlog(ML_BASTS, "lockres %.*s\n", lockres->l_namelen, lockres->l_name);

	spin_lock(&lockres->l_lock);
	if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
		spin_unlock(&lockres->l_lock);
		return 0;
	}

	lockres->l_flags |= USER_LOCK_IN_TEARDOWN;

	while (lockres->l_flags & USER_LOCK_BUSY) {
		spin_unlock(&lockres->l_lock);

		user_wait_on_busy_lock(lockres);

		spin_lock(&lockres->l_lock);
	}

	if (lockres->l_ro_holders || lockres->l_ex_holders) {
		spin_unlock(&lockres->l_lock);
		goto bail;
	}

	status = 0;
	if (!(lockres->l_flags & USER_LOCK_ATTACHED)) {
		/* Never made it to the DLM; nothing to unlock. */
		spin_unlock(&lockres->l_lock);
		goto bail;
	}

	lockres->l_flags &= ~USER_LOCK_ATTACHED;
	lockres->l_flags |= USER_LOCK_BUSY;
	spin_unlock(&lockres->l_lock);

	status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK);
	if (status) {
		user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
		goto bail;
	}

	/* BUSY clears when user_unlock_ast() fires. */
	user_wait_on_busy_lock(lockres);

	status = 0;
bail:
	return status;
}
/* dlmfs takes no interest in cluster node recovery events; this stub
 * merely satisfies the connection callback contract. */
static void user_dlm_recovery_handler_noop(int node_num,
					   void *recovery_data)
{
}
/* Register userdlmfs's maximum locking protocol version with the
 * ocfs2 stack glue. */
void user_dlm_set_locking_protocol(void)
{
	ocfs2_stack_glue_set_max_proto_version(&user_dlm_lproto.lp_max_version);
}
/*
 * Open a cluster connection for the DLM domain named by @name.
 * Returns the connection, or ERR_PTR(-errno) on failure.
 */
struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name)
{
	int rc;
	struct ocfs2_cluster_connection *conn;

	rc = ocfs2_cluster_connect_agnostic(name->name, name->len,
					    &user_dlm_lproto,
					    user_dlm_recovery_handler_noop,
					    NULL, &conn);
	if (rc)
		mlog_errno(rc);

	return rc ? ERR_PTR(rc) : conn;
}
/* Close a connection opened by user_dlm_register(). */
void user_dlm_unregister(struct ocfs2_cluster_connection *conn)
{
	ocfs2_cluster_disconnect(conn, 0);
}
| gpl-2.0 |
eaas-framework/virtualbox | src/VBox/Devices/PC/ipxe/src/drivers/net/ath/ath9k/ath9k_ar9003_mac.c | 59 | 18342 | /*
* Copyright (c) 2010-2011 Atheros Communications Inc.
*
* Modified for iPXE by Scott K Logan <logans@cottsay.net> July 2011
* Original from Linux kernel 3.0.1
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <ipxe/io.h>
#include "hw.h"
#include "ar9003_mac.h"
/* Enable RX by clearing the command register (AR9003 has no explicit
 * RXE bit; writing 0 releases the RX DMA engine). */
static void ar9003_hw_rx_enable(struct ath_hw *hw)
{
	REG_WRITE(hw, AR_CR, 0);
}
/* Compute the hardware pointer checksum over the descriptor words the
 * AR9003 DMA engine validates, folding any carry back into the low
 * 16 bits before masking to the AR_TxPtrChkSum field. */
static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
{
	int sum = 0;

	sum += ads->info + ads->link;
	sum += ads->data0 + ads->ctl3;
	sum += ads->data1 + ads->ctl5;
	sum += ads->data2 + ads->ctl7;
	sum += ads->data3 + ads->ctl9;

	return ((sum & 0xffff) + (sum >> 16)) & AR_TxPtrChkSum;
}
/* Chain @ds to the next descriptor and refresh the pointer checksum,
 * which covers the link word just modified. */
static void ar9003_hw_set_desc_link(void *ds, u32 ds_link)
{
	struct ar9003_txc *ads = ds;

	ads->link = ds_link;
	ads->ctl10 &= ~AR_TxPtrChkSum;
	ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
}
/* Return (by reference) the address of the descriptor's link word. */
static void ar9003_hw_get_desc_link(void *ds, u32 **ds_link)
{
	struct ar9003_txc *ads = ds;

	*ds_link = &ads->link;
}
/*
 * Read and decode the interrupt status registers into the driver's
 * ATH9K_INT_* bitmask (*masked).  Also acknowledges the handled
 * causes (unless the chip supports read-and-clear, RAC).  Returns 0
 * when no interrupt is pending, nonzero otherwise.
 */
static int ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
{
	u32 isr = 0;
	u32 mask2 = 0;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	u32 sync_cause = 0;

	if (ah->ah_ier & AR_IER_ENABLE) {
		/* Only read AR_ISR while the MAC is awake; reading it
		 * in sleep state would return garbage. */
		if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
			if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
					== AR_RTC_STATUS_ON)
				isr = REG_READ(ah, AR_ISR);
		}

		sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;

		*masked = 0;

		if (!isr && !sync_cause)
			return 0;
	} else {
		*masked = 0;
		isr = REG_READ(ah, AR_ISR);
	}

	if (isr) {
		if (isr & AR_ISR_BCNMISC) {
			u32 isr2;
			isr2 = REG_READ(ah, AR_ISR_S2);

			/* Remap the secondary status bits into the
			 * generic ATH9K_INT_* positions. */
			mask2 |= ((isr2 & AR_ISR_S2_TIM) >>
				  MAP_ISR_S2_TIM);
			mask2 |= ((isr2 & AR_ISR_S2_DTIM) >>
				  MAP_ISR_S2_DTIM);
			mask2 |= ((isr2 & AR_ISR_S2_DTIMSYNC) >>
				  MAP_ISR_S2_DTIMSYNC);
			mask2 |= ((isr2 & AR_ISR_S2_CABEND) >>
				  MAP_ISR_S2_CABEND);
			mask2 |= ((isr2 & AR_ISR_S2_GTT) <<
				  MAP_ISR_S2_GTT);
			mask2 |= ((isr2 & AR_ISR_S2_CST) <<
				  MAP_ISR_S2_CST);
			mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >>
				  MAP_ISR_S2_TSFOOR);
			mask2 |= ((isr2 & AR_ISR_S2_BB_WATCHDOG) >>
				  MAP_ISR_S2_BB_WATCHDOG);

			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
				REG_WRITE(ah, AR_ISR_S2, isr2);
				isr &= ~AR_ISR_BCNMISC;
			}
		}

		/* With RAC support, this read also clears AR_ISR. */
		if ((pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED))
			isr = REG_READ(ah, AR_ISR_RAC);

		/* All-ones means the device is gone (e.g. removed). */
		if (isr == 0xffffffff) {
			*masked = 0;
			return 0;
		}

		*masked = isr & ATH9K_INT_COMMON;

		if (ah->config.rx_intr_mitigation)
			if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
				*masked |= ATH9K_INT_RXLP;

		if (ah->config.tx_intr_mitigation)
			if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM))
				*masked |= ATH9K_INT_TX;

		if (isr & (AR_ISR_LP_RXOK | AR_ISR_RXERR))
			*masked |= ATH9K_INT_RXLP;

		if (isr & AR_ISR_HP_RXOK)
			*masked |= ATH9K_INT_RXHP;

		if (isr & (AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL)) {
			*masked |= ATH9K_INT_TX;

			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
				u32 s0, s1;
				s0 = REG_READ(ah, AR_ISR_S0);
				REG_WRITE(ah, AR_ISR_S0, s0);
				s1 = REG_READ(ah, AR_ISR_S1);
				REG_WRITE(ah, AR_ISR_S1, s1);

				isr &= ~(AR_ISR_TXOK | AR_ISR_TXERR |
					 AR_ISR_TXEOL);
			}
		}

		if (isr & AR_ISR_GENTMR) {
			u32 s5;

			if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
				s5 = REG_READ(ah, AR_ISR_S5_S);
			else
				s5 = REG_READ(ah, AR_ISR_S5);

			ah->intr_gen_timer_trigger =
				MS(s5, AR_ISR_S5_GENTIMER_TRIG);

			ah->intr_gen_timer_thresh =
				MS(s5, AR_ISR_S5_GENTIMER_THRESH);

			if (ah->intr_gen_timer_trigger)
				*masked |= ATH9K_INT_GENTIMER;

			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
				REG_WRITE(ah, AR_ISR_S5, s5);
				isr &= ~AR_ISR_GENTMR;
			}

		}

		*masked |= mask2;

		if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
			REG_WRITE(ah, AR_ISR, isr);

			/* Flush the posted write to the ISR register. */
			(void) REG_READ(ah, AR_ISR);
		}
	}

	if (sync_cause) {
		if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
			/* Host interface wedged: reset it and report fatal. */
			REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
			REG_WRITE(ah, AR_RC, 0);
			*masked |= ATH9K_INT_FATAL;
		}

		if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
			DBG("ath9k: "
				"AR_INTR_SYNC_LOCAL_TIMEOUT\n");

		REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
		(void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);

	}
	return 1;
}
/*
 * Fill in the DMA fields of a TX descriptor for one buffer segment of
 * a frame: buffer address, length, queue number, and the multi-segment
 * chaining (AR_TxMore) and pointer-checksum fields.
 */
static void ar9003_hw_fill_txdesc(struct ath_hw *ah __unused, void *ds, u32 seglen,
				  int is_firstseg, int is_lastseg,
				  const void *ds0, u32 buf_addr,
				  unsigned int qcu)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;
	unsigned int descid = 0;

	/* Identify the descriptor to the hardware: vendor id, TX,
	 * control descriptor, target queue. */
	ads->info = (ATHEROS_VENDOR_ID << AR_DescId_S) |
		(1 << AR_TxRxDesc_S) |
		(1 << AR_CtrlStat_S) |
		(qcu << AR_TxQcuNum_S) | 0x17;

	ads->data0 = buf_addr;
	ads->data1 = 0;
	ads->data2 = 0;
	ads->data3 = 0;

	ads->ctl3 = (seglen << AR_BufLen_S);
	ads->ctl3 &= AR_BufLen;

	/* Fill in pointer checksum and descriptor id */
	ads->ctl10 = ar9003_calc_ptr_chksum(ads);
	ads->ctl10 |= (descid << AR_TxDescId_S);

	if (is_firstseg) {
		ads->ctl12 |= (is_lastseg ? 0 : AR_TxMore);
	} else if (is_lastseg) {
		/* Final segment: copy rate-control words from the
		 * first descriptor of the frame. */
		ads->ctl11 = 0;
		ads->ctl12 = 0;
		ads->ctl13 = AR9003TXC_CONST(ds0)->ctl13;
		ads->ctl14 = AR9003TXC_CONST(ds0)->ctl14;
	} else {
		/* XXX Intermediate descriptor in a multi-descriptor frame.*/
		ads->ctl11 = 0;
		ads->ctl12 = AR_TxMore;
		ads->ctl13 = 0;
		ads->ctl14 = 0;
	}
}
/*
 * Process the next entry of the TX status ring into @ts.  Returns
 * -EINPROGRESS when the hardware has not completed it yet, -EIO on a
 * corrupt status descriptor, 0 on success.  The consumed ring entry
 * is zeroed so it can be reused.
 */
static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds __unused,
				 struct ath_tx_status *ts)
{
	struct ar9003_txs *ads;
	u32 status;

	ads = &ah->ts_ring[ah->ts_tail];

	/* volatile read: the status word is DMA-written by hardware. */
	status = *(volatile typeof(ads->status8) *)&(ads->status8);
	if ((status & AR_TxDone) == 0)
		return -EINPROGRESS;

	ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;

	if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
	    (MS(ads->ds_info, AR_TxRxDesc) != 1)) {
		DBG("ath9k: "
			"Tx Descriptor error %x\n", ads->ds_info);
		memset(ads, 0, sizeof(*ads));
		return -EIO;
	}

	if (status & AR_TxOpExceeded)
		ts->ts_status |= ATH9K_TXERR_XTXOP;
	ts->ts_rateindex = MS(status, AR_FinalTxIdx);
	ts->ts_seqnum = MS(status, AR_SeqNum);
	ts->tid = MS(status, AR_TxTid);

	ts->qid = MS(ads->ds_info, AR_TxQcuNum);
	ts->desc_id = MS(ads->status1, AR_TxDescId);
	ts->ts_tstamp = ads->status4;
	ts->ts_status = 0;
	ts->ts_flags  = 0;

	status = *(volatile typeof(ads->status2) *)&(ads->status2);
	ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
	ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
	ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
	if (status & AR_TxBaStatus) {
		/* Block-ack received: save the BA bitmap. */
		ts->ts_flags |= ATH9K_TX_BA;
		ts->ba_low = ads->status5;
		ts->ba_high = ads->status6;
	}

	status = *(volatile typeof(ads->status3) *)&(ads->status3);
	if (status & AR_ExcessiveRetries)
		ts->ts_status |= ATH9K_TXERR_XRETRY;
	if (status & AR_Filtered)
		ts->ts_status |= ATH9K_TXERR_FILT;
	if (status & AR_FIFOUnderrun) {
		ts->ts_status |= ATH9K_TXERR_FIFO;
		/* Underruns: bump the TX trigger level to compensate. */
		ath9k_hw_updatetxtriglevel(ah, 1);
	}
	if (status & AR_TxTimerExpired)
		ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
	if (status & AR_DescCfgErr)
		ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	if (status & AR_TxDataUnderrun) {
		ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, 1);
	}
	if (status & AR_TxDelimUnderrun) {
		ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, 1);
	}
	ts->ts_shortretry = MS(status, AR_RTSFailCnt);
	ts->ts_longretry = MS(status, AR_DataFailCnt);
	ts->ts_virtcol = MS(status, AR_VirtRetryCnt);

	status = *(volatile typeof(ads->status7) *)&(ads->status7);
	ts->ts_rssi = MS(status, AR_TxRSSICombined);
	ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
	ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
	ts->ts_rssi_ext2 = MS(status, AR_TxRSSIAnt12);

	memset(ads, 0, sizeof(*ads));

	return 0;
}
/*
 * Fill in the 802.11n control fields of a TX descriptor: frame
 * length, TX power (clamped to the regulatory limit and the 6-bit
 * hardware field), key index/type, frame type and the ATH9K_TXDESC_*
 * flag bits.
 */
static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
		u32 pktlen, enum ath9k_pkt_type type, u32 txpower,
		u32 keyIx, enum ath9k_key_type keyType, u32 flags)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;

	if (txpower > ah->txpower_limit)
		txpower = ah->txpower_limit;

	/* The AR_XmitPower field is 6 bits wide. */
	if (txpower > 63)
		txpower = 63;

	ads->ctl11 = (pktlen & AR_FrameLen)
		| (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(txpower, AR_XmitPower)
		| (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
		| (flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0);

	ads->ctl12 =
		(keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
		| SM(type, AR_FrameType)
		| (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ads->ctl17 = SM(keyType, AR_EncrType) |
		     (flags & ATH9K_TXDESC_LDPC ? AR_LDPC : 0);
	ads->ctl18 = 0;
	ads->ctl19 = AR_Not_Sounding;

	ads->ctl20 = 0;
	ads->ctl21 = 0;
	ads->ctl22 = 0;
}
/* Set or clear the AR_ClrDestMask bit in the descriptor according to
 * @val. */
static void ar9003_hw_set_clrdmask(struct ath_hw *ah __unused, void *ds, int val)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;

	if (val)
		ads->ctl11 |= AR_ClrDestMask;
	else
		ads->ctl11 &= ~AR_ClrDestMask;
}
/*
 * Program the multi-rate-retry scenario into the first descriptor of
 * a frame: per-series try counts, rates, packet durations and rate
 * flags, plus RTS/CTS protection (mutually exclusive - RTS wins when
 * both flags are set).  The rate-control words (ctl13/ctl14) are
 * mirrored into the frame's last descriptor, matching what
 * ar9003_hw_fill_txdesc() does for multi-segment frames.
 */
static void ar9003_hw_set11n_ratescenario(struct ath_hw *ah __unused, void *ds,
					  void *lastds,
					  u32 durUpdateEn, u32 rtsctsRate,
					  u32 rtsctsDuration __unused,
					  struct ath9k_11n_rate_series series[],
					  u32 nseries __unused, u32 flags)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;
	struct ar9003_txc *last_ads = (struct ar9003_txc *) lastds;
	uint32_t ctl11;

	if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
		ctl11 = ads->ctl11;

		if (flags & ATH9K_TXDESC_RTSENA) {
			ctl11 &= ~AR_CTSEnable;
			ctl11 |= AR_RTSEnable;
		} else {
			ctl11 &= ~AR_RTSEnable;
			ctl11 |= AR_CTSEnable;
		}

		ads->ctl11 = ctl11;
	} else {
		ads->ctl11 = (ads->ctl11 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ctl13 = set11nTries(series, 0)
		|  set11nTries(series, 1)
		|  set11nTries(series, 2)
		|  set11nTries(series, 3)
		|  (durUpdateEn ? AR_DurUpdateEna : 0)
		|  SM(0, AR_BurstDur);

	ads->ctl14 = set11nRate(series, 0)
		|  set11nRate(series, 1)
		|  set11nRate(series, 2)
		|  set11nRate(series, 3);

	ads->ctl15 = set11nPktDurRTSCTS(series, 0)
		|  set11nPktDurRTSCTS(series, 1);

	ads->ctl16 = set11nPktDurRTSCTS(series, 2)
		|  set11nPktDurRTSCTS(series, 3);

	ads->ctl18 = set11nRateFlags(series, 0)
		|  set11nRateFlags(series, 1)
		|  set11nRateFlags(series, 2)
		|  set11nRateFlags(series, 3)
		| SM(rtsctsRate, AR_RTSCTSRate);
	ads->ctl19 = AR_Not_Sounding;

	last_ads->ctl13 = ads->ctl13;
	last_ads->ctl14 = ads->ctl14;
}
/*
 * Mark a descriptor as the first subframe of an A-MPDU and program
 * the total aggregate length.  Non-enterprise chips additionally need
 * a minimum number of pad delimiters on the first subframe when
 * RTS/CTS protection is used.
 */
static void ar9003_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
					u32 aggrLen)
{
#define FIRST_DESC_NDELIMS 60
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;

	ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);

	if (ah->ent_mode & AR_ENT_OTP_MPSD) {
		u32 ctl17, ndelim;
		/*
		 * Add delimiter when using RTS/CTS with aggregation
		 * and non enterprise AR9003 card
		 */
		ctl17 = ads->ctl17;
		ndelim = MS(ctl17, AR_PadDelim);

		if (ndelim < FIRST_DESC_NDELIMS) {
			/* Each extra delimiter is 4 bytes of air time. */
			aggrLen += (FIRST_DESC_NDELIMS - ndelim) * 4;
			ndelim = FIRST_DESC_NDELIMS;
		}

		ctl17 &= ~AR_AggrLen;
		ctl17 |= SM(aggrLen, AR_AggrLen);

		ctl17 &= ~AR_PadDelim;
		ctl17 |= SM(ndelim, AR_PadDelim);

		ads->ctl17 = ctl17;
	} else {
		ads->ctl17 &= ~AR_AggrLen;
		ads->ctl17 |= SM(aggrLen, AR_AggrLen);
	}
}
/*
 * Mark a descriptor as a middle subframe of an A-MPDU and set its pad
 * delimiter count.
 */
static void ar9003_hw_set11n_aggr_middle(struct ath_hw *ah __unused, void *ds,
					 u32 numDelims)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;
	unsigned int ctl17;

	ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);

	/*
	 * We use a stack variable to manipulate ctl17 to reduce uncached
	 * read-modify-write cycles on the descriptor memory.
	 */
	ctl17 = ads->ctl17;
	ctl17 &= ~AR_PadDelim;
	ctl17 |= SM(numDelims, AR_PadDelim);
	ads->ctl17 = ctl17;
}
/* Mark a descriptor as the final subframe of an A-MPDU: aggregate
 * flag on, more-aggregate off, no pad delimiters. */
static void ar9003_hw_set11n_aggr_last(struct ath_hw *ah __unused, void *ds)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;

	ads->ctl12 |= AR_IsAggr;
	ads->ctl12 &= ~AR_MoreAggr;
	ads->ctl17 &= ~AR_PadDelim;
}
/* Clear all aggregation marking from a descriptor (plain frame). */
static void ar9003_hw_clr11n_aggr(struct ath_hw *ah __unused, void *ds)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;

	ads->ctl12 &= (~AR_IsAggr & ~AR_MoreAggr);
}
void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah __unused, void *ds, u8 chains)
{
	struct ar9003_txc *txc = ds;

	/* Record the PAPRD training chain mask in the tx descriptor. */
	txc->ctl12 |= SM(chains, AR_PAPRDChainMask);
}
void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
{
	struct ath_hw_ops *ops = ath9k_hw_ops(hw);

	/* Install the AR9003-family MAC callbacks into the hw ops table. */
	ops->rx_enable = ar9003_hw_rx_enable;
	ops->set_desc_link = ar9003_hw_set_desc_link;
	ops->get_desc_link = ar9003_hw_get_desc_link;
	ops->get_isr = ar9003_hw_get_isr;
	ops->fill_txdesc = ar9003_hw_fill_txdesc;
	ops->proc_txdesc = ar9003_hw_proc_txdesc;
	ops->set11n_txdesc = ar9003_hw_set11n_txdesc;
	ops->set11n_ratescenario = ar9003_hw_set11n_ratescenario;
	ops->set11n_aggr_first = ar9003_hw_set11n_aggr_first;
	ops->set11n_aggr_middle = ar9003_hw_set11n_aggr_middle;
	ops->set11n_aggr_last = ar9003_hw_set11n_aggr_last;
	ops->clr11n_aggr = ar9003_hw_clr11n_aggr;
	ops->set_clrdmask = ar9003_hw_set_clrdmask;
}
void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size)
{
	/* Program the RX DMA buffer size, masked to the valid field width. */
	REG_WRITE(ah, AR_DATABUF_SIZE, buf_size & AR_DATABUF_SIZE_MASK);
}
void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
			    enum ath9k_rx_qtype qtype)
{
	/* Post the buffer to the high- or low-priority RX queue register. */
	u32 reg = (qtype == ATH9K_RX_QUEUE_HP) ? AR_HP_RXDP : AR_LP_RXDP;

	REG_WRITE(ah, reg, rxdp);
}
int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah __unused, struct ath_rx_status *rxs,
				 void *buf_addr)
{
	struct ar9003_rxs *rxsp = (struct ar9003_rxs *) buf_addr;
	unsigned int phyerr;

	/* TODO: byte swap on big endian for ar9300_10 */

	/* Hardware has not completed this descriptor yet. */
	if ((rxsp->status11 & AR_RxDone) == 0)
		return -EINPROGRESS;

	/* Sanity check the Atheros descriptor magic (0x168c). */
	if (MS(rxsp->ds_info, AR_DescId) != 0x168c)
		return -EINVAL;

	/* Must be an RX status descriptor, not a TX or control one. */
	if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
		return -EINPROGRESS;

	/* Caller only wanted a completion check, no status fill-in. */
	if (!rxs)
		return 0;

	rxs->rs_status = 0;
	rxs->rs_flags = 0;

	rxs->rs_datalen = rxsp->status2 & AR_DataLen;
	rxs->rs_tstamp = rxsp->status3;

	/* XXX: Keycache */
	/* Combined and per-chain RSSI, control and extension channels. */
	rxs->rs_rssi = MS(rxsp->status5, AR_RxRSSICombined);
	rxs->rs_rssi_ctl0 = MS(rxsp->status1, AR_RxRSSIAnt00);
	rxs->rs_rssi_ctl1 = MS(rxsp->status1, AR_RxRSSIAnt01);
	rxs->rs_rssi_ctl2 = MS(rxsp->status1, AR_RxRSSIAnt02);
	rxs->rs_rssi_ext0 = MS(rxsp->status5, AR_RxRSSIAnt10);
	rxs->rs_rssi_ext1 = MS(rxsp->status5, AR_RxRSSIAnt11);
	rxs->rs_rssi_ext2 = MS(rxsp->status5, AR_RxRSSIAnt12);

	if (rxsp->status11 & AR_RxKeyIdxValid)
		rxs->rs_keyix = MS(rxsp->status11, AR_KeyIdx);
	else
		rxs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rxs->rs_rate = MS(rxsp->status1, AR_RxRate);
	rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0;

	/* A-MPDU aggregation and PHY reception attributes. */
	rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
	rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
	rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
	rxs->rs_flags = (rxsp->status4 & AR_GI) ? ATH9K_RX_GI : 0;
	rxs->rs_flags |= (rxsp->status4 & AR_2040) ? ATH9K_RX_2040 : 0;

	/* EVM (error vector magnitude) samples. */
	rxs->evm0 = rxsp->status6;
	rxs->evm1 = rxsp->status7;
	rxs->evm2 = rxsp->status8;
	rxs->evm3 = rxsp->status9;
	rxs->evm4 = (rxsp->status10 & 0xffff);

	if (rxsp->status11 & AR_PreDelimCRCErr)
		rxs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;

	if (rxsp->status11 & AR_PostDelimCRCErr)
		rxs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;

	if (rxsp->status11 & AR_DecryptBusyErr)
		rxs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((rxsp->status11 & AR_RxFrameOK) == 0) {
		/*
		 * AR_CRCErr will be set to true if we're on the last
		 * subframe and the AR_PostDelimCRCErr is caught.
		 * In a way this also gives us a guarantee that when
		 * (!(AR_CRCErr) && (AR_PostDelimCRCErr)) we cannot
		 * possibly be reviewing the last subframe. AR_CRCErr
		 * is the CRC of the actual data.
		 */
		if (rxsp->status11 & AR_CRCErr)
			rxs->rs_status |= ATH9K_RXERR_CRC;
		else if (rxsp->status11 & AR_PHYErr) {
			phyerr = MS(rxsp->status11, AR_PHYErrCode);
			/*
			 * If we reach a point here where AR_PostDelimCRCErr is
			 * true it implies we're *not* on the last subframe. In
			 * that case we know already that the CRC of
			 * the frame was OK, and MAC would send an ACK for that
			 * subframe, even if we did get a phy error of type
			 * ATH9K_PHYERR_OFDM_RESTART. This is only applicable
			 * to frames that are prior to the last subframe.
			 * The AR_PostDelimCRCErr is the CRC for the MPDU
			 * delimiter, which contains the 4 reserved bits,
			 * the MPDU length (12 bits), and follows the MPDU
			 * delimiter for an A-MPDU subframe (0x4E = 'N' ASCII).
			 */
			if ((phyerr == ATH9K_PHYERR_OFDM_RESTART) &&
			    (rxsp->status11 & AR_PostDelimCRCErr)) {
				rxs->rs_phyerr = 0;
			} else {
				rxs->rs_status |= ATH9K_RXERR_PHY;
				rxs->rs_phyerr = phyerr;
			}
		} else if (rxsp->status11 & AR_DecryptCRCErr)
			rxs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (rxsp->status11 & AR_MichaelErr)
			rxs->rs_status |= ATH9K_RXERR_MIC;
		else if (rxsp->status11 & AR_KeyMiss)
			rxs->rs_status |= ATH9K_RXERR_DECRYPT;
	}

	return 0;
}
void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
{
	ah->ts_tail = 0;

	/* Zero the whole status ring so no stale completions are seen. */
	memset((void *) ah->ts_ring, 0,
		ah->ts_size * sizeof(struct ar9003_txs));

	DBG2("ath9k: "
		"TS Start 0x%x End 0x%x Virt %p, Size %d\n",
	       ah->ts_paddr_start, ah->ts_paddr_end,
	       ah->ts_ring, ah->ts_size);

	/* Hand the ring bounds to the hardware. */
	REG_WRITE(ah, AR_Q_STATUS_RING_START, ah->ts_paddr_start);
	REG_WRITE(ah, AR_Q_STATUS_RING_END, ah->ts_paddr_end);
}
void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
			       u32 ts_paddr_start,
			       u8 size)
{
	/* Record the ring geometry: entry count, CPU address, bus range. */
	ah->ts_size = size;
	ah->ts_ring = (struct ar9003_txs *) ts_start;
	ah->ts_paddr_start = ts_paddr_start;
	ah->ts_paddr_end = ts_paddr_start + (size * sizeof(struct ar9003_txs));

	/* Clear the ring and program the hardware with its bounds. */
	ath9k_hw_reset_txstatus_ring(ah);
}
| gpl-2.0 |
avareldalton85/rpi2-linux-rt | fs/gfs2/acl.c | 315 | 2488 | /*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*/
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/gfs2_ondisk.h>
#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "xattr.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "trans.h"
#include "util.h"
static const char *gfs2_acl_name(int type)
{
	/* Map a POSIX ACL type to its on-disk xattr name; NULL if unknown. */
	if (type == ACL_TYPE_ACCESS)
		return GFS2_POSIX_ACL_ACCESS;
	if (type == ACL_TYPE_DEFAULT)
		return GFS2_POSIX_ACL_DEFAULT;
	return NULL;
}
struct posix_acl *gfs2_get_acl(struct inode *inode, int type)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct posix_acl *result;
	const char *xattr_name;
	char *buf;
	int size;

	/* No extended attributes on this inode means no ACL is stored. */
	if (!ip->i_eattr)
		return NULL;

	xattr_name = gfs2_acl_name(type);
	if (!xattr_name)
		return ERR_PTR(-EINVAL);

	size = gfs2_xattr_acl_get(ip, xattr_name, &buf);
	if (size < 0)
		return ERR_PTR(size);
	if (!size)
		return NULL;

	/* Decode the raw xattr payload into an in-core posix_acl. */
	result = posix_acl_from_xattr(&init_user_ns, buf, size);
	kfree(buf);
	return result;
}
int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
	int error;
	int len;
	char *data;
	const char *name = gfs2_acl_name(type);

	/* Callers must pass ACL_TYPE_ACCESS or ACL_TYPE_DEFAULT. */
	BUG_ON(name == NULL);

	/* Refuse ACLs too large for the on-disk xattr format. */
	if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
		return -E2BIG;

	if (type == ACL_TYPE_ACCESS) {
		umode_t mode = inode->i_mode;

		/*
		 * If the ACL is exactly representable by plain mode bits,
		 * posix_acl_equiv_mode() returns 0: drop the ACL and just
		 * keep the (possibly updated) mode.
		 */
		error = posix_acl_equiv_mode(acl, &mode);

		if (error < 0)
			return error;

		if (error == 0)
			acl = NULL;

		if (mode != inode->i_mode) {
			inode->i_mode = mode;
			mark_inode_dirty(inode);
		}
	}

	if (acl) {
		/* First call sizes the buffer, second call fills it. */
		len = posix_acl_to_xattr(&init_user_ns, acl, NULL, 0);
		if (len == 0)
			return 0;
		data = kmalloc(len, GFP_NOFS);
		if (data == NULL)
			return -ENOMEM;
		error = posix_acl_to_xattr(&init_user_ns, acl, data, len);
		if (error < 0)
			goto out;
	} else {
		/* NULL data with zero length removes the xattr. */
		data = NULL;
		len = 0;
	}

	error = __gfs2_xattr_set(inode, name, data, len, 0, GFS2_EATYPE_SYS);
	if (error)
		goto out;

	/* Keep the VFS ACL cache in sync with what we just stored. */
	if (acl)
		set_cached_acl(inode, type, acl);
	else
		forget_cached_acl(inode, type);
out:
	kfree(data);
	return error;
}
| gpl-2.0 |
ZoliN/ChuwiHBKernel | drivers/regulator/88pm800.c | 315 | 11897 | /*
* Regulators driver for Marvell 88PM800
*
* Copyright (C) 2012 Marvell International Ltd.
* Joseph(Yossi) Hanin <yhanin@marvell.com>
* Yi Zhang <yizhang@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/88pm80x.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>
/* LDO1 with DVC[0..3] */
#define PM800_LDO1_VOUT (0x08) /* VOUT1 */
#define PM800_LDO1_VOUT_2 (0x09)
#define PM800_LDO1_VOUT_3 (0x0A)
#define PM800_LDO2_VOUT (0x0B)
#define PM800_LDO3_VOUT (0x0C)
#define PM800_LDO4_VOUT (0x0D)
#define PM800_LDO5_VOUT (0x0E)
#define PM800_LDO6_VOUT (0x0F)
#define PM800_LDO7_VOUT (0x10)
#define PM800_LDO8_VOUT (0x11)
#define PM800_LDO9_VOUT (0x12)
#define PM800_LDO10_VOUT (0x13)
#define PM800_LDO11_VOUT (0x14)
#define PM800_LDO12_VOUT (0x15)
#define PM800_LDO13_VOUT (0x16)
#define PM800_LDO14_VOUT (0x17)
#define PM800_LDO15_VOUT (0x18)
#define PM800_LDO16_VOUT (0x19)
#define PM800_LDO17_VOUT (0x1A)
#define PM800_LDO18_VOUT (0x1B)
#define PM800_LDO19_VOUT (0x1C)
/* BUCK1 with DVC[0..3] */
#define PM800_BUCK1 (0x3C)
#define PM800_BUCK1_1 (0x3D)
#define PM800_BUCK1_2 (0x3E)
#define PM800_BUCK1_3 (0x3F)
#define PM800_BUCK2 (0x40)
#define PM800_BUCK3 (0x41)
#define PM800_BUCK3 (0x41)
#define PM800_BUCK4 (0x42)
#define PM800_BUCK4_1 (0x43)
#define PM800_BUCK4_2 (0x44)
#define PM800_BUCK4_3 (0x45)
#define PM800_BUCK5 (0x46)
#define PM800_BUCK_ENA (0x50)
#define PM800_LDO_ENA1_1 (0x51)
#define PM800_LDO_ENA1_2 (0x52)
#define PM800_LDO_ENA1_3 (0x53)
#define PM800_LDO_ENA2_1 (0x56)
#define PM800_LDO_ENA2_2 (0x57)
#define PM800_LDO_ENA2_3 (0x58)
#define PM800_BUCK1_MISC1 (0x78)
#define PM800_BUCK3_MISC1 (0x7E)
#define PM800_BUCK4_MISC1 (0x81)
#define PM800_BUCK5_MISC1 (0x84)
struct pm800_regulator_info {
struct regulator_desc desc;
int max_ua;
};
struct pm800_regulators {
struct regulator_dev *regulators[PM800_ID_RG_MAX];
struct pm80x_chip *chip;
struct regmap *map;
};
/*
* vreg - the buck regs string.
* ereg - the string for the enable register.
* ebit - the bit number in the enable register.
* amax - the current
* Buck has 2 kinds of voltage steps. It is easy to find voltage by ranges,
* not the constant voltage table.
* n_volt - Number of available selectors
*/
#define PM800_BUCK(vreg, ereg, ebit, amax, volt_ranges, n_volt) \
{ \
.desc = { \
.name = #vreg, \
.ops = &pm800_volt_range_ops, \
.type = REGULATOR_VOLTAGE, \
.id = PM800_ID_##vreg, \
.owner = THIS_MODULE, \
.n_voltages = n_volt, \
.linear_ranges = volt_ranges, \
.n_linear_ranges = ARRAY_SIZE(volt_ranges), \
.vsel_reg = PM800_##vreg, \
.vsel_mask = 0x7f, \
.enable_reg = PM800_##ereg, \
.enable_mask = 1 << (ebit), \
}, \
.max_ua = (amax), \
}
/*
* vreg - the LDO regs string
* ereg - the string for the enable register.
* ebit - the bit number in the enable register.
* amax - the current
* volt_table - the LDO voltage table
* For all the LDOes, there are too many ranges. Using volt_table will be
* simpler and faster.
*/
#define PM800_LDO(vreg, ereg, ebit, amax, ldo_volt_table) \
{ \
.desc = { \
.name = #vreg, \
.ops = &pm800_volt_table_ops, \
.type = REGULATOR_VOLTAGE, \
.id = PM800_ID_##vreg, \
.owner = THIS_MODULE, \
.n_voltages = ARRAY_SIZE(ldo_volt_table), \
.vsel_reg = PM800_##vreg##_VOUT, \
.vsel_mask = 0x1f, \
.enable_reg = PM800_##ereg, \
.enable_mask = 1 << (ebit), \
.volt_table = ldo_volt_table, \
}, \
.max_ua = (amax), \
}
/* Ranges are sorted in ascending order. */
static const struct regulator_linear_range buck1_volt_range[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x54, 50000),
};
/* BUCK 2~5 have same ranges. */
static const struct regulator_linear_range buck2_5_volt_range[] = {
REGULATOR_LINEAR_RANGE(600000, 0, 0x4f, 12500),
REGULATOR_LINEAR_RANGE(1600000, 0x50, 0x72, 50000),
};
static const unsigned int ldo1_volt_table[] = {
600000, 650000, 700000, 750000, 800000, 850000, 900000, 950000,
1000000, 1050000, 1100000, 1150000, 1200000, 1300000, 1400000, 1500000,
};
static const unsigned int ldo2_volt_table[] = {
1700000, 1800000, 1900000, 2000000, 2100000, 2500000, 2700000, 2800000,
};
/* LDO 3~17 have same voltage table. */
static const unsigned int ldo3_17_volt_table[] = {
1200000, 1250000, 1700000, 1800000, 1850000, 1900000, 2500000, 2600000,
2700000, 2750000, 2800000, 2850000, 2900000, 3000000, 3100000, 3300000,
};
/* LDO 18~19 have same voltage table. */
static const unsigned int ldo18_19_volt_table[] = {
1700000, 1800000, 1900000, 2500000, 2800000, 2900000, 3100000, 3300000,
};
static int pm800_get_current_limit(struct regulator_dev *rdev)
{
struct pm800_regulator_info *info = rdev_get_drvdata(rdev);
return info->max_ua;
}
static struct regulator_ops pm800_volt_range_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.get_current_limit = pm800_get_current_limit,
};
static struct regulator_ops pm800_volt_table_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_iterate,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.get_current_limit = pm800_get_current_limit,
};
/* The array is indexed by id(PM800_ID_XXX) */
static struct pm800_regulator_info pm800_regulator_info[] = {
PM800_BUCK(BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55),
PM800_BUCK(BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73),
PM800_BUCK(BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73),
PM800_BUCK(BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73),
PM800_BUCK(BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73),
PM800_LDO(LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table),
PM800_LDO(LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table),
PM800_LDO(LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table),
PM800_LDO(LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table),
PM800_LDO(LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table),
PM800_LDO(LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table),
PM800_LDO(LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table),
PM800_LDO(LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table),
PM800_LDO(LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table),
PM800_LDO(LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table),
PM800_LDO(LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table),
PM800_LDO(LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table),
PM800_LDO(LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table),
PM800_LDO(LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table),
PM800_LDO(LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table),
PM800_LDO(LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table),
PM800_LDO(LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table),
PM800_LDO(LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table),
PM800_LDO(LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table),
};
#define PM800_REGULATOR_OF_MATCH(_name, _id) \
[PM800_ID_##_id] = { \
.name = #_name, \
.driver_data = &pm800_regulator_info[PM800_ID_##_id], \
}
static struct of_regulator_match pm800_regulator_matches[] = {
PM800_REGULATOR_OF_MATCH(buck1, BUCK1),
PM800_REGULATOR_OF_MATCH(buck2, BUCK2),
PM800_REGULATOR_OF_MATCH(buck3, BUCK3),
PM800_REGULATOR_OF_MATCH(buck4, BUCK4),
PM800_REGULATOR_OF_MATCH(buck5, BUCK5),
PM800_REGULATOR_OF_MATCH(ldo1, LDO1),
PM800_REGULATOR_OF_MATCH(ldo2, LDO2),
PM800_REGULATOR_OF_MATCH(ldo3, LDO3),
PM800_REGULATOR_OF_MATCH(ldo4, LDO4),
PM800_REGULATOR_OF_MATCH(ldo5, LDO5),
PM800_REGULATOR_OF_MATCH(ldo6, LDO6),
PM800_REGULATOR_OF_MATCH(ldo7, LDO7),
PM800_REGULATOR_OF_MATCH(ldo8, LDO8),
PM800_REGULATOR_OF_MATCH(ldo9, LDO9),
PM800_REGULATOR_OF_MATCH(ldo10, LDO10),
PM800_REGULATOR_OF_MATCH(ldo11, LDO11),
PM800_REGULATOR_OF_MATCH(ldo12, LDO12),
PM800_REGULATOR_OF_MATCH(ldo13, LDO13),
PM800_REGULATOR_OF_MATCH(ldo14, LDO14),
PM800_REGULATOR_OF_MATCH(ldo15, LDO15),
PM800_REGULATOR_OF_MATCH(ldo16, LDO16),
PM800_REGULATOR_OF_MATCH(ldo17, LDO17),
PM800_REGULATOR_OF_MATCH(ldo18, LDO18),
PM800_REGULATOR_OF_MATCH(ldo19, LDO19),
};
static int pm800_regulator_dt_init(struct platform_device *pdev)
{
	int matched;

	/*
	 * Fill pm800_regulator_matches[].init_data/of_node from the DT.
	 * A non-negative return is the number of matches; only errors
	 * are propagated to the caller.
	 */
	matched = of_regulator_match(&pdev->dev, pdev->dev.of_node,
				     pm800_regulator_matches,
				     ARRAY_SIZE(pm800_regulator_matches));

	return matched < 0 ? matched : 0;
}
static int pm800_regulator_probe(struct platform_device *pdev)
{
	struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
	struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent);
	struct pm800_regulators *pm800_data;
	struct pm800_regulator_info *info;
	struct regulator_config config = { };
	struct regulator_init_data *init_data;
	int i, ret;

	/*
	 * Regulator constraints come either from the device tree or from
	 * platform data; exactly one of the two sources must be usable.
	 */
	if (!pdata || pdata->num_regulators == 0) {
		if (IS_ENABLED(CONFIG_OF)) {
			ret = pm800_regulator_dt_init(pdev);
			if (ret)
				return ret;
		} else {
			return -ENODEV;
		}
	} else if (pdata->num_regulators) {
		unsigned int count = 0;

		/* Check whether num_regulators matches the populated slots. */
		for (i = 0; i < ARRAY_SIZE(pdata->regulators); i++) {
			if (pdata->regulators[i])
				count++;
		}

		if (count != pdata->num_regulators)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data),
					GFP_KERNEL);
	if (!pm800_data) {
		dev_err(&pdev->dev, "Failed to allocate pm800_regulators\n");
		return -ENOMEM;
	}

	pm800_data->map = chip->subchip->regmap_power;
	pm800_data->chip = chip;

	platform_set_drvdata(pdev, pm800_data);

	for (i = 0; i < PM800_ID_RG_MAX; i++) {
		/* Select init data from the DT match table or platform data;
		 * regulators without init data are simply not registered. */
		if (!pdata || pdata->num_regulators == 0)
			init_data = pm800_regulator_matches[i].init_data;
		else
			init_data = pdata->regulators[i];
		if (!init_data)
			continue;

		info = pm800_regulator_matches[i].driver_data;
		config.dev = &pdev->dev;
		config.init_data = init_data;
		config.driver_data = info;
		config.regmap = pm800_data->map;
		config.of_node = pm800_regulator_matches[i].of_node;

		pm800_data->regulators[i] =
				regulator_register(&info->desc, &config);
		if (IS_ERR(pm800_data->regulators[i])) {
			ret = PTR_ERR(pm800_data->regulators[i]);
			dev_err(&pdev->dev, "Failed to register %s\n",
				info->desc.name);

			/* Unwind the regulators registered so far. */
			while (--i >= 0)
				regulator_unregister(pm800_data->regulators[i]);

			return ret;
		}
	}

	return 0;
}
static int pm800_regulator_remove(struct platform_device *pdev)
{
	struct pm800_regulators *data = platform_get_drvdata(pdev);
	int id;

	/* Drop every regulator that probe() registered. */
	for (id = 0; id < PM800_ID_RG_MAX; id++)
		regulator_unregister(data->regulators[id]);

	return 0;
}
static struct platform_driver pm800_regulator_driver = {
.driver = {
.name = "88pm80x-regulator",
.owner = THIS_MODULE,
},
.probe = pm800_regulator_probe,
.remove = pm800_regulator_remove,
};
module_platform_driver(pm800_regulator_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joseph(Yossi) Hanin <yhanin@marvell.com>");
MODULE_DESCRIPTION("Regulator Driver for Marvell 88PM800 PMIC");
MODULE_ALIAS("platform:88pm800-regulator");
| gpl-2.0 |
xIchigox/ArkCORE-NG | dep/acelite/ace/Intrusive_List.cpp | 571 | 2184 | // $Id: Intrusive_List.cpp 92069 2010-09-28 11:38:59Z johnnyw $
#ifndef ACE_INTRUSIVE_LIST_CPP
#define ACE_INTRUSIVE_LIST_CPP
#include "ace/Intrusive_List.h"
#if !defined (ACE_LACKS_PRAGMA_ONCE)
# pragma once
#endif /* ACE_LACKS_PRAGMA_ONCE */
#if !defined (__ACE_INLINE__)
#include "ace/Intrusive_List.inl"
#endif /* __ACE_INLINE__ */
ACE_BEGIN_VERSIONED_NAMESPACE_DECL
template <class T>
ACE_Intrusive_List<T>::ACE_Intrusive_List (void)
  : head_ (0)
  , tail_ (0)
{
  // Construct an empty list: both end pointers are null.
}
template<class T>
ACE_Intrusive_List<T>::~ACE_Intrusive_List (void)
{
  // No cleanup performed: nodes are not deleted here, so any remaining
  // nodes must be managed by their owner.
}
template<class T> void
ACE_Intrusive_List<T>::push_back (T *node)
{
  // Append <node> at the tail of the list.
  if (this->tail_ != 0)
    {
      // Non-empty list: link after the current tail.
      this->tail_->next (node);
      node->prev (this->tail_);
      node->next (0);
      this->tail_ = node;
    }
  else
    {
      // Empty list: the node becomes both head and tail.
      this->head_ = node;
      this->tail_ = node;
      node->prev (0);
      node->next (0);
    }
}
template<class T> void
ACE_Intrusive_List<T>::push_front (T *node)
{
  // Prepend <node> at the head of the list.
  if (this->head_ != 0)
    {
      // Non-empty list: link before the current head.
      this->head_->prev (node);
      node->next (this->head_);
      node->prev (0);
      this->head_ = node;
    }
  else
    {
      // Empty list: the node becomes both head and tail.
      this->head_ = node;
      this->tail_ = node;
      node->prev (0);
      node->next (0);
    }
}
template<class T> T *
ACE_Intrusive_List<T>::pop_front (void)
{
  // Detach and return the head node, or 0 if the list is empty.
  T *const front = this->head_;
  if (front)
    this->unsafe_remove (front);
  return front;
}
template<class T> T *
ACE_Intrusive_List<T>::pop_back (void)
{
  // Detach and return the tail node, or 0 if the list is empty.
  T *const back = this->tail_;
  if (back)
    this->unsafe_remove (back);
  return back;
}
template<class T> void
ACE_Intrusive_List<T>::remove (T *node)
{
  // Unlink <node> only if it is actually a member of this list;
  // otherwise leave both the list and the node untouched.
  T *cur = this->head_;
  while (cur != 0 && cur != node)
    cur = cur->next ();

  if (cur != 0)
    this->unsafe_remove (node);
}
template<class T> void
ACE_Intrusive_List<T>::unsafe_remove (T *node)
{
  // Unlink <node> without checking membership; the caller guarantees
  // that <node> is on this list.

  // Bypass the node in the forward direction, updating head_ if the
  // node was first.
  if (node->prev () != 0)
    node->prev ()->next (node->next ());
  else
    this->head_ = node->next ();

  // Bypass the node in the backward direction, updating tail_ if the
  // node was last.
  if (node->next () != 0)
    node->next ()->prev (node->prev ());
  else
    this->tail_ = node->prev ();

  // Clear the node's links so it is fully detached.
  node->next (0);
  node->prev (0);
}
ACE_END_VERSIONED_NAMESPACE_DECL
#endif /* ACE_INTRUSIVE_LIST_CPP */
| gpl-2.0 |
gbiyer/Sony-Aosp-Kernel | drivers/tty/serial/atmel_serial.c | 1595 | 48112 | /*
* Driver for Atmel AT91 / AT32 Serial ports
* Copyright (C) 2003 Rick Bronson
*
* Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* DMA support added by Chip Coldwell.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/atmel_pdc.h>
#include <linux/atmel_serial.h>
#include <linux/uaccess.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/atmel.h>
#include <asm/io.h>
#include <asm/ioctls.h>
#ifdef CONFIG_ARM
#include <mach/cpu.h>
#include <asm/gpio.h>
#endif
#define PDC_BUFFER_SIZE 512
/* Revisit: We should calculate this based on the actual port settings */
#define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */
#if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/serial_core.h>
static void atmel_start_rx(struct uart_port *port);
static void atmel_stop_rx(struct uart_port *port);
#ifdef CONFIG_SERIAL_ATMEL_TTYAT
/* Use device name ttyAT, major 204 and minor 154-169. This is necessary if we
* should coexist with the 8250 driver, such as if we have an external 16C550
* UART. */
#define SERIAL_ATMEL_MAJOR 204
#define MINOR_START 154
#define ATMEL_DEVICENAME "ttyAT"
#else
/* Use device name ttyS, major 4, minor 64-68. This is the usual serial port
* name, but it is legally reserved for the 8250 driver. */
#define SERIAL_ATMEL_MAJOR TTY_MAJOR
#define MINOR_START 64
#define ATMEL_DEVICENAME "ttyS"
#endif
#define ATMEL_ISR_PASS_LIMIT 256
/* UART registers. CR is write-only, hence no GET macro */
#define UART_PUT_CR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_CR)
#define UART_GET_MR(port) __raw_readl((port)->membase + ATMEL_US_MR)
#define UART_PUT_MR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_MR)
#define UART_PUT_IER(port,v) __raw_writel(v, (port)->membase + ATMEL_US_IER)
#define UART_PUT_IDR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_IDR)
#define UART_GET_IMR(port) __raw_readl((port)->membase + ATMEL_US_IMR)
#define UART_GET_CSR(port) __raw_readl((port)->membase + ATMEL_US_CSR)
#define UART_GET_CHAR(port) __raw_readl((port)->membase + ATMEL_US_RHR)
#define UART_PUT_CHAR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_THR)
#define UART_GET_BRGR(port) __raw_readl((port)->membase + ATMEL_US_BRGR)
#define UART_PUT_BRGR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_BRGR)
#define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR)
#define UART_PUT_TTGR(port, v) __raw_writel(v, (port)->membase + ATMEL_US_TTGR)
/* PDC registers */
#define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
#define UART_GET_PTSR(port) __raw_readl((port)->membase + ATMEL_PDC_PTSR)
#define UART_PUT_RPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RPR)
#define UART_GET_RPR(port) __raw_readl((port)->membase + ATMEL_PDC_RPR)
#define UART_PUT_RCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RCR)
#define UART_PUT_RNPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RNPR)
#define UART_PUT_RNCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_RNCR)
#define UART_PUT_TPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TPR)
#define UART_PUT_TCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TCR)
#define UART_GET_TCR(port) __raw_readl((port)->membase + ATMEL_PDC_TCR)
static int (*atmel_open_hook)(struct uart_port *);
static void (*atmel_close_hook)(struct uart_port *);
struct atmel_dma_buffer {
unsigned char *buf;
dma_addr_t dma_addr;
unsigned int dma_size;
unsigned int ofs;
};
struct atmel_uart_char {
u16 status;
u16 ch;
};
#define ATMEL_SERIAL_RINGSIZE 1024
/*
* We wrap our port structure around the generic uart_port.
*/
struct atmel_uart_port {
struct uart_port uart; /* uart */
struct clk *clk; /* uart clock */
int may_wakeup; /* cached value of device_may_wakeup for times we need to disable it */
u32 backup_imr; /* IMR saved during suspend */
int break_active; /* break being received */
short use_dma_rx; /* enable PDC receiver */
short pdc_rx_idx; /* current PDC RX buffer */
struct atmel_dma_buffer pdc_rx[2]; /* PDC receier */
short use_dma_tx; /* enable PDC transmitter */
struct atmel_dma_buffer pdc_tx; /* PDC transmitter */
struct tasklet_struct tasklet;
unsigned int irq_status;
unsigned int irq_status_prev;
struct circ_buf rx_ring;
struct serial_rs485 rs485; /* rs485 settings */
unsigned int tx_done_mask;
};
static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART);
#ifdef SUPPORT_SYSRQ
static struct console atmel_console;
#endif
#if defined(CONFIG_OF)
static const struct of_device_id atmel_serial_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-usart" },
{ .compatible = "atmel,at91sam9260-usart" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_serial_dt_ids);
#endif
static inline struct atmel_uart_port *
to_atmel_uart_port(struct uart_port *uart)
{
	/* The generic uart_port is the first member of atmel_uart_port,
	 * so recover the wrapper from the embedded struct. */
	return container_of(uart, struct atmel_uart_port, uart);
}
#ifdef CONFIG_SERIAL_ATMEL_PDC
static bool atmel_use_dma_rx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
return atmel_port->use_dma_rx;
}
static bool atmel_use_dma_tx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
return atmel_port->use_dma_tx;
}
#else
static bool atmel_use_dma_rx(struct uart_port *port)
{
	/* PDC support compiled out: never use DMA for reception. */
	return false;
}
static bool atmel_use_dma_tx(struct uart_port *port)
{
	/* PDC support compiled out: never use DMA for transmission. */
	return false;
}
#endif
/* Enable or disable the rs485 support */
void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int mode;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);

	/* Disable interrupts while reconfiguring the mode register. */
	UART_PUT_IDR(port, atmel_port->tx_done_mask);

	mode = UART_GET_MR(port);

	/* Resetting serial mode to RS232 (0x0) */
	mode &= ~ATMEL_US_USMODE;

	/* Remember the requested configuration for later mctrl updates. */
	atmel_port->rs485 = *rs485conf;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		dev_dbg(port->dev, "Setting UART to RS485\n");
		/* In RS485, TX completion means the shifter is empty,
		 * not merely that THR accepted another byte. */
		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
		if ((rs485conf->delay_rts_after_send) > 0)
			UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
		mode |= ATMEL_US_USMODE_RS485;
	} else {
		dev_dbg(port->dev, "Setting UART to RS232\n");
		if (atmel_use_dma_tx(port))
			atmel_port->tx_done_mask = ATMEL_US_ENDTX |
				ATMEL_US_TXBUFE;
		else
			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
	}
	UART_PUT_MR(port, mode);

	/* Re-enable interrupts with the (possibly new) done mask. */
	UART_PUT_IER(port, atmel_port->tx_done_mask);

	spin_unlock_irqrestore(&port->lock, flags);
}
/*
* Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty.
*/
static u_int atmel_tx_empty(struct uart_port *port)
{
	/* Report TIOCSER_TEMT only once the hardware has fully drained. */
	if (UART_GET_CSR(port) & ATMEL_US_TXEMPTY)
		return TIOCSER_TEMT;

	return 0;
}
/*
* Set state of the modem control output lines
*/
static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
{
	unsigned int control = 0;
	unsigned int mode;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

#ifdef CONFIG_ARCH_AT91RM9200
	if (cpu_is_at91rm9200()) {
		/*
		 * AT91RM9200 Errata #39: RTS0 is not internally connected
		 * to PA21. We need to drive the pin manually.
		 */
		if (port->mapbase == AT91RM9200_BASE_US0) {
			if (mctrl & TIOCM_RTS)
				at91_set_gpio_value(AT91_PIN_PA21, 0);
			else
				at91_set_gpio_value(AT91_PIN_PA21, 1);
		}
	}
#endif

	/* Build the control-register command for RTS/DTR. */
	if (mctrl & TIOCM_RTS)
		control |= ATMEL_US_RTSEN;
	else
		control |= ATMEL_US_RTSDIS;

	if (mctrl & TIOCM_DTR)
		control |= ATMEL_US_DTREN;
	else
		control |= ATMEL_US_DTRDIS;

	UART_PUT_CR(port, control);

	/* Local loopback mode? */
	mode = UART_GET_MR(port) & ~ATMEL_US_CHMODE;
	if (mctrl & TIOCM_LOOP)
		mode |= ATMEL_US_CHMODE_LOC_LOOP;
	else
		mode |= ATMEL_US_CHMODE_NORMAL;

	/* Resetting serial mode to RS232 (0x0) */
	mode &= ~ATMEL_US_USMODE;

	/* Restore RS485 mode if it was configured for this port. */
	if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
		dev_dbg(port->dev, "Setting UART to RS485\n");
		if ((atmel_port->rs485.delay_rts_after_send) > 0)
			UART_PUT_TTGR(port,
					atmel_port->rs485.delay_rts_after_send);
		mode |= ATMEL_US_USMODE_RS485;
	} else {
		dev_dbg(port->dev, "Setting UART to RS232\n");
	}
	UART_PUT_MR(port, mode);
}
/*
* Get state of the modem control input lines
*/
static u_int atmel_get_mctrl(struct uart_port *port)
{
	unsigned int csr = UART_GET_CSR(port);
	unsigned int mctrl = 0;

	/*
	 * The modem status inputs are active low in the CSR, so a clear
	 * bit means the corresponding line is asserted.
	 */
	mctrl |= (csr & ATMEL_US_DCD) ? 0 : TIOCM_CD;
	mctrl |= (csr & ATMEL_US_CTS) ? 0 : TIOCM_CTS;
	mctrl |= (csr & ATMEL_US_DSR) ? 0 : TIOCM_DSR;
	mctrl |= (csr & ATMEL_US_RI) ? 0 : TIOCM_RI;

	return mctrl;
}
/*
* Stop transmitting.
*/
static void atmel_stop_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_dma_tx(port)) {
		/* disable PDC transmit */
		UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
	}

	/* Disable TX-done interrupts. */
	UART_PUT_IDR(port, atmel_port->tx_done_mask);

	/* Half-duplex RS485: reception was paused during TX, resume it. */
	if ((atmel_port->rs485.flags & SER_RS485_ENABLED) &&
	    !(atmel_port->rs485.flags & SER_RS485_RX_DURING_TX))
		atmel_start_rx(port);
}
/*
* Start transmitting.
*/
static void atmel_start_tx(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);

	if (atmel_use_dma_tx(port)) {
		if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN)
			/* The transmitter is already running. Yes, we
			   really need this.*/
			return;

		/* Half-duplex RS485: pause reception while transmitting. */
		if ((atmel_port->rs485.flags & SER_RS485_ENABLED) &&
		    !(atmel_port->rs485.flags & SER_RS485_RX_DURING_TX))
			atmel_stop_rx(port);

		/* re-enable PDC transmit */
		UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
	}

	/* Enable TX-done interrupts. */
	UART_PUT_IER(port, atmel_port->tx_done_mask);
}
/*
 * start receiving - port is in process of being opened.
 */
static void atmel_start_rx(struct uart_port *port)
{
	UART_PUT_CR(port, ATMEL_US_RSTSTA);  /* reset status and receiver */
	UART_PUT_CR(port, ATMEL_US_RXEN);
	if (atmel_use_dma_rx(port)) {
		/* enable PDC controller; ENDRX/TIMEOUT drive the DMA
		 * receive path, read_status_mask adds error interrupts */
		UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
			port->read_status_mask);
		UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
	} else {
		/* PIO mode: interrupt per received character */
		UART_PUT_IER(port, ATMEL_US_RXRDY);
	}
}
/*
 * Stop receiving - port is in process of being closed.
 */
static void atmel_stop_rx(struct uart_port *port)
{
	UART_PUT_CR(port, ATMEL_US_RXDIS);
	if (atmel_use_dma_rx(port)) {
		/* disable PDC receive and mask the matching interrupts
		 * that atmel_start_rx() enabled */
		UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
		UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
			port->read_status_mask);
	} else {
		UART_PUT_IDR(port, ATMEL_US_RXRDY);
	}
}
/*
 * Enable modem status interrupts
 */
static void atmel_enable_ms(struct uart_port *port)
{
	/* Unmask all four modem-status-change interrupt sources. */
	UART_PUT_IER(port, ATMEL_US_CTSIC | ATMEL_US_DCDIC
			| ATMEL_US_DSRIC | ATMEL_US_RIIC);
}
/*
 * Control the transmission of a break signal
 */
static void atmel_break_ctl(struct uart_port *port, int break_state)
{
	if (break_state == 0)
		UART_PUT_CR(port, ATMEL_US_STPBRK);	/* stop break */
	else
		UART_PUT_CR(port, ATMEL_US_STTBRK);	/* start break */
}
/*
 * Stores the incoming character in the ring buffer
 *
 * Called from interrupt context; the tasklet (atmel_rx_from_ring)
 * is the consumer.  Producer only moves head, consumer only moves
 * tail, so no lock is needed - just the write barrier below.
 */
static void
atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
		     unsigned int ch)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *ring = &atmel_port->rx_ring;
	struct atmel_uart_char *c;
	if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
		/* Buffer overflow, ignore char */
		return;
	c = &((struct atmel_uart_char *)ring->buf)[ring->head];
	c->status = status;
	c->ch = ch;
	/* Make sure the character is stored before we update head. */
	smp_wmb();
	/* ATMEL_SERIAL_RINGSIZE is a power of two, so mask == modulo */
	ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
}
/*
 * Deal with parity, framing and overrun errors.
 */
static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
{
	/* clear error */
	UART_PUT_CR(port, ATMEL_US_RSTSTA);
	if (status & ATMEL_US_RXBRK) {
		/* A break also raises bogus parity/framing flags;
		 * drop that side-effect and count only the break. */
		port->icount.brk++;
		status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
	}
	if (status & ATMEL_US_PARE)
		port->icount.parity++;
	if (status & ATMEL_US_FRAME)
		port->icount.frame++;
	if (status & ATMEL_US_OVRE)
		port->icount.overrun++;
}
/*
 * Characters received (called from interrupt handler)
 *
 * Drains RXRDY characters into the software ring buffer and tracks
 * break state, then kicks the tasklet which pushes the ring to the
 * tty layer.
 */
static void atmel_rx_chars(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status, ch;
	status = UART_GET_CSR(port);
	while (status & ATMEL_US_RXRDY) {
		ch = UART_GET_CHAR(port);
		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
				       | ATMEL_US_OVRE | ATMEL_US_RXBRK)
			     || atmel_port->break_active)) {
			/* clear error */
			UART_PUT_CR(port, ATMEL_US_RSTSTA);
			if (status & ATMEL_US_RXBRK
			    && !atmel_port->break_active) {
				/* start of break: remember it and re-arm
				 * RXBRK so we also see the end of break */
				atmel_port->break_active = 1;
				UART_PUT_IER(port, ATMEL_US_RXBRK);
			} else {
				/*
				 * This is either the end-of-break
				 * condition or we've received at
				 * least one character without RXBRK
				 * being set. In both cases, the next
				 * RXBRK will indicate start-of-break.
				 */
				UART_PUT_IDR(port, ATMEL_US_RXBRK);
				status &= ~ATMEL_US_RXBRK;
				atmel_port->break_active = 0;
			}
		}
		atmel_buffer_rx_char(port, status, ch);
		status = UART_GET_CSR(port);
	}
	/* tty delivery happens outside interrupt context */
	tasklet_schedule(&atmel_port->tasklet);
}
/*
 * Transmit characters (called from tasklet with TXRDY interrupt
 * disabled)
 *
 * PIO transmit path: writes bytes from the circular xmit buffer to
 * THR as long as the hardware reports ready, then re-enables the
 * TX interrupt if data remains.
 */
static void atmel_tx_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	/* high-priority xon/xoff character goes out first */
	if (port->x_char && UART_GET_CSR(port) & atmel_port->tx_done_mask) {
		UART_PUT_CHAR(port, port->x_char);
		port->icount.tx++;
		port->x_char = 0;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
		return;
	while (UART_GET_CSR(port) & atmel_port->tx_done_mask) {
		UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	}
	/* wake up writers once the buffer has drained enough */
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (!uart_circ_empty(xmit))
		/* Enable interrupts */
		UART_PUT_IER(port, atmel_port->tx_done_mask);
}
/*
 * receive interrupt handler.
 */
static void
atmel_handle_receive(struct uart_port *port, unsigned int pending)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	if (atmel_use_dma_rx(port)) {
		/*
		 * PDC receive. Just schedule the tasklet and let it
		 * figure out the details.
		 *
		 * TODO: We're not handling error flags correctly at
		 * the moment.
		 */
		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
			/* mask until the tasklet has consumed the data;
			 * atmel_rx_from_dma() re-enables them */
			UART_PUT_IDR(port, (ATMEL_US_ENDRX
					    | ATMEL_US_TIMEOUT));
			tasklet_schedule(&atmel_port->tasklet);
		}
		if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
			       ATMEL_US_FRAME | ATMEL_US_PARE))
			atmel_pdc_rxerr(port, pending);
	}
	/* Interrupt receive */
	if (pending & ATMEL_US_RXRDY)
		atmel_rx_chars(port);
	else if (pending & ATMEL_US_RXBRK) {
		/*
		 * End of break detected. If it came along with a
		 * character, atmel_rx_chars will handle it.
		 */
		UART_PUT_CR(port, ATMEL_US_RSTSTA);
		UART_PUT_IDR(port, ATMEL_US_RXBRK);
		atmel_port->break_active = 0;
	}
}
/*
 * transmit interrupt handler. (Transmit is IRQF_NODELAY safe)
 */
static void
atmel_handle_transmit(struct uart_port *port, unsigned int pending)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	if (!(pending & atmel_port->tx_done_mask))
		return;
	/* Either PDC or interrupt transmission: mask the TX-done
	 * sources and let the tasklet do the real work. */
	UART_PUT_IDR(port, atmel_port->tx_done_mask);
	tasklet_schedule(&atmel_port->tasklet);
}
/*
 * status flags interrupt handler.
 */
static void
atmel_handle_status(struct uart_port *port, unsigned int pending,
		    unsigned int status)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int ms_mask = ATMEL_US_RIIC | ATMEL_US_DSRIC
			| ATMEL_US_DCDIC | ATMEL_US_CTSIC;
	if (!(pending & ms_mask))
		return;
	/* Stash the CSR snapshot for the tasklet, which compares it
	 * against irq_status_prev to detect line changes. */
	atmel_port->irq_status = status;
	tasklet_schedule(&atmel_port->tasklet);
}
/*
 * Interrupt handler
 *
 * Loops while any enabled interrupt source is pending, dispatching
 * to the RX, status and TX sub-handlers, bounded by
 * ATMEL_ISR_PASS_LIMIT to avoid livelock on a noisy line.
 */
static irqreturn_t atmel_interrupt(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned int status, pending, pass_counter = 0;
	do {
		status = UART_GET_CSR(port);
		/* only consider sources we have actually unmasked */
		pending = status & UART_GET_IMR(port);
		if (!pending)
			break;
		atmel_handle_receive(port, pending);
		atmel_handle_status(port, pending, status);
		atmel_handle_transmit(port, pending);
	} while (pass_counter++ < ATMEL_ISR_PASS_LIMIT);
	/* pass_counter == 0 means nothing was pending: not our IRQ
	 * (the line is shared) */
	return pass_counter ? IRQ_HANDLED : IRQ_NONE;
}
/*
 * Called from tasklet with ENDTX and TXBUFE interrupts disabled.
 *
 * Completes the previous PDC transfer (advancing the circular
 * buffer tail by the number of bytes the PDC consumed) and, if more
 * data is queued, programs the next transfer.
 */
static void atmel_tx_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *xmit = &port->state->xmit;
	struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
	int count;
	/* nothing left to transmit? (TCR != 0 means PDC still busy) */
	if (UART_GET_TCR(port))
		return;
	/* retire the bytes the completed transfer sent */
	xmit->tail += pdc->ofs;
	xmit->tail &= UART_XMIT_SIZE - 1;
	port->icount.tx += pdc->ofs;
	pdc->ofs = 0;
	/* more to transmit - setup next transfer */
	/* disable PDC transmit */
	UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
		dma_sync_single_for_device(port->dev,
					   pdc->dma_addr,
					   pdc->dma_size,
					   DMA_TO_DEVICE);
		/* one contiguous run only; the wrap-around remainder is
		 * picked up on the next completion */
		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
		pdc->ofs = count;
		UART_PUT_TPR(port, pdc->dma_addr + xmit->tail);
		UART_PUT_TCR(port, count);
		/* re-enable PDC transmit */
		UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
		/* Enable interrupts */
		UART_PUT_IER(port, atmel_port->tx_done_mask);
	} else {
		if ((atmel_port->rs485.flags & SER_RS485_ENABLED) &&
		    !(atmel_port->rs485.flags & SER_RS485_RX_DURING_TX)) {
			/* DMA done, stop TX, start RX for RS485 */
			atmel_start_rx(port);
		}
	}
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}
/*
 * Drain the software RX ring (filled by atmel_buffer_rx_char() in
 * interrupt context) into the tty layer.  Runs in the tasklet with
 * port->lock held.
 */
static void atmel_rx_from_ring(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct circ_buf *ring = &atmel_port->rx_ring;
	unsigned int flg;
	unsigned int status;
	while (ring->head != ring->tail) {
		struct atmel_uart_char c;
		/* Make sure c is loaded after head. */
		smp_rmb();
		c = ((struct atmel_uart_char *)ring->buf)[ring->tail];
		ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1);
		port->icount.rx++;
		status = c.status;
		flg = TTY_NORMAL;
		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME
				       | ATMEL_US_OVRE | ATMEL_US_RXBRK))) {
			if (status & ATMEL_US_RXBRK) {
				/* ignore side-effect */
				status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME);
				port->icount.brk++;
				if (uart_handle_break(port))
					continue;
			}
			if (status & ATMEL_US_PARE)
				port->icount.parity++;
			if (status & ATMEL_US_FRAME)
				port->icount.frame++;
			if (status & ATMEL_US_OVRE)
				port->icount.overrun++;
			/* pick the tty flag for the highest-priority
			 * error the caller cares about */
			status &= port->read_status_mask;
			if (status & ATMEL_US_RXBRK)
				flg = TTY_BREAK;
			else if (status & ATMEL_US_PARE)
				flg = TTY_PARITY;
			else if (status & ATMEL_US_FRAME)
				flg = TTY_FRAME;
		}
		if (uart_handle_sysrq_char(port, c.ch))
			continue;
		uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg);
	}
	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(&port->state->port);
	spin_lock(&port->lock);
}
/*
 * Consume received data from the double-buffered PDC DMA scheme and
 * push it to the tty layer.  Runs in the tasklet with port->lock
 * held; ENDRX/TIMEOUT interrupts were masked by atmel_handle_receive()
 * and are re-enabled at the end.
 */
static void atmel_rx_from_dma(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct tty_port *tport = &port->state->port;
	struct atmel_dma_buffer *pdc;
	int rx_idx = atmel_port->pdc_rx_idx;
	unsigned int head;
	unsigned int tail;
	unsigned int count;
	do {
		/* Reset the UART timeout early so that we don't miss one */
		UART_PUT_CR(port, ATMEL_US_STTTO);
		pdc = &atmel_port->pdc_rx[rx_idx];
		/* RPR points past the last byte the PDC wrote */
		head = UART_GET_RPR(port) - pdc->dma_addr;
		tail = pdc->ofs;
		/* If the PDC has switched buffers, RPR won't contain
		 * any address within the current buffer. Since head
		 * is unsigned, we just need a one-way comparison to
		 * find out.
		 *
		 * In this case, we just need to consume the entire
		 * buffer and resubmit it for DMA. This will clear the
		 * ENDRX bit as well, so that we can safely re-enable
		 * all interrupts below.
		 */
		head = min(head, pdc->dma_size);
		if (likely(head != tail)) {
			dma_sync_single_for_cpu(port->dev, pdc->dma_addr,
					pdc->dma_size, DMA_FROM_DEVICE);
			/*
			 * head will only wrap around when we recycle
			 * the DMA buffer, and when that happens, we
			 * explicitly set tail to 0. So head will
			 * always be greater than tail.
			 */
			count = head - tail;
			tty_insert_flip_string(tport, pdc->buf + pdc->ofs,
						count);
			dma_sync_single_for_device(port->dev, pdc->dma_addr,
					pdc->dma_size, DMA_FROM_DEVICE);
			port->icount.rx += count;
			pdc->ofs = head;
		}
		/*
		 * If the current buffer is full, we need to check if
		 * the next one contains any additional data.
		 */
		if (head >= pdc->dma_size) {
			/* recycle this buffer as the "next" PDC buffer
			 * and flip to the other one */
			pdc->ofs = 0;
			UART_PUT_RNPR(port, pdc->dma_addr);
			UART_PUT_RNCR(port, pdc->dma_size);
			rx_idx = !rx_idx;
			atmel_port->pdc_rx_idx = rx_idx;
		}
	} while (head >= pdc->dma_size);
	/*
	 * Drop the lock here since it might end up calling
	 * uart_start(), which takes the lock.
	 */
	spin_unlock(&port->lock);
	tty_flip_buffer_push(tport);
	spin_lock(&port->lock);
	UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}
/*
 * tasklet handling tty stuff outside the interrupt handler.
 *
 * Does deferred TX refill, modem-status delta accounting and RX
 * delivery, all under port->lock.
 */
static void atmel_tasklet_func(unsigned long data)
{
	struct uart_port *port = (struct uart_port *)data;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status;
	unsigned int status_change;
	/* The interrupt handler does not take the lock */
	spin_lock(&port->lock);
	if (atmel_use_dma_tx(port))
		atmel_tx_dma(port);
	else
		atmel_tx_chars(port);
	/* compare the CSR snapshot saved by atmel_handle_status()
	 * against the previous one to find changed lines */
	status = atmel_port->irq_status;
	status_change = status ^ atmel_port->irq_status_prev;
	if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
			     | ATMEL_US_DCD | ATMEL_US_CTS)) {
		/* TODO: All reads to CSR will clear these interrupts! */
		if (status_change & ATMEL_US_RI)
			port->icount.rng++;
		if (status_change & ATMEL_US_DSR)
			port->icount.dsr++;
		/* lines are active low, hence the negation */
		if (status_change & ATMEL_US_DCD)
			uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
		if (status_change & ATMEL_US_CTS)
			uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
		wake_up_interruptible(&port->state->port.delta_msr_wait);
		atmel_port->irq_status_prev = status;
	}
	if (atmel_use_dma_rx(port))
		atmel_rx_from_dma(port);
	else
		atmel_rx_from_ring(port);
	spin_unlock(&port->lock);
}
/*
 * Perform initialization and enable port for reception
 *
 * Order matters here: interrupts are masked before request_irq(),
 * DMA buffers are set up before the receiver is enabled, and the
 * RX path is only armed at the very end.
 */
static int atmel_startup(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	struct tty_struct *tty = port->state->port.tty;
	int retval;
	/*
	 * Ensure that no interrupts are enabled otherwise when
	 * request_irq() is called we could get stuck trying to
	 * handle an unexpected interrupt
	 */
	UART_PUT_IDR(port, -1);
	/*
	 * Allocate the IRQ
	 */
	retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED,
			tty ? tty->name : "atmel_serial", port);
	if (retval) {
		printk("atmel_serial: atmel_startup - Can't get irq\n");
		return retval;
	}
	/*
	 * Initialize DMA (if necessary)
	 */
	if (atmel_use_dma_rx(port)) {
		int i;
		/* two buffers so the PDC can fill one while we drain
		 * the other */
		for (i = 0; i < 2; i++) {
			struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
			pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
			if (pdc->buf == NULL) {
				/* unwind buffer 0 if buffer 1 failed */
				if (i != 0) {
					dma_unmap_single(port->dev,
						atmel_port->pdc_rx[0].dma_addr,
						PDC_BUFFER_SIZE,
						DMA_FROM_DEVICE);
					kfree(atmel_port->pdc_rx[0].buf);
				}
				free_irq(port->irq, port);
				return -ENOMEM;
			}
			pdc->dma_addr = dma_map_single(port->dev,
						       pdc->buf,
						       PDC_BUFFER_SIZE,
						       DMA_FROM_DEVICE);
			pdc->dma_size = PDC_BUFFER_SIZE;
			pdc->ofs = 0;
		}
		atmel_port->pdc_rx_idx = 0;
		/* current and next PDC receive pointers/counters */
		UART_PUT_RPR(port, atmel_port->pdc_rx[0].dma_addr);
		UART_PUT_RCR(port, PDC_BUFFER_SIZE);
		UART_PUT_RNPR(port, atmel_port->pdc_rx[1].dma_addr);
		UART_PUT_RNCR(port, PDC_BUFFER_SIZE);
	}
	if (atmel_use_dma_tx(port)) {
		struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
		struct circ_buf *xmit = &port->state->xmit;
		/* TX DMAs straight out of the circular xmit buffer */
		pdc->buf = xmit->buf;
		pdc->dma_addr = dma_map_single(port->dev,
					       pdc->buf,
					       UART_XMIT_SIZE,
					       DMA_TO_DEVICE);
		pdc->dma_size = UART_XMIT_SIZE;
		pdc->ofs = 0;
	}
	/*
	 * If there is a specific "open" function (to register
	 * control line interrupts)
	 */
	if (atmel_open_hook) {
		retval = atmel_open_hook(port);
		if (retval) {
			free_irq(port->irq, port);
			return retval;
		}
	}
	/* Save current CSR for comparison in atmel_tasklet_func() */
	atmel_port->irq_status_prev = UART_GET_CSR(port);
	atmel_port->irq_status = atmel_port->irq_status_prev;
	/*
	 * Finally, enable the serial port
	 */
	UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
	/* enable xmit & rcvr */
	UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
	if (atmel_use_dma_rx(port)) {
		/* set UART timeout */
		UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
		UART_PUT_CR(port, ATMEL_US_STTTO);
		UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
		/* enable PDC controller */
		UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
	} else {
		/* enable receive only */
		UART_PUT_IER(port, ATMEL_US_RXRDY);
	}
	return 0;
}
/*
 * Disable the port
 *
 * Mirror image of atmel_startup(): kill the tasklet first so it
 * cannot touch buffers we are about to free, then quiesce the
 * hardware, tear down DMA and release the IRQ.
 */
static void atmel_shutdown(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	/*
	 * Clear out any scheduled tasklets before
	 * we destroy the buffers
	 */
	tasklet_kill(&atmel_port->tasklet);
	/*
	 * Ensure everything is stopped and
	 * disable all interrupts, port and break condition.
	 */
	atmel_stop_rx(port);
	atmel_stop_tx(port);
	UART_PUT_CR(port, ATMEL_US_RSTSTA);
	UART_PUT_IDR(port, -1);
	/*
	 * Shut-down the DMA.
	 */
	if (atmel_use_dma_rx(port)) {
		int i;
		for (i = 0; i < 2; i++) {
			struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
			dma_unmap_single(port->dev,
					 pdc->dma_addr,
					 pdc->dma_size,
					 DMA_FROM_DEVICE);
			kfree(pdc->buf);
		}
	}
	if (atmel_use_dma_tx(port)) {
		struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
		/* pdc->buf is the circular xmit buffer, owned by the
		 * serial core - unmap only, do not free */
		dma_unmap_single(port->dev,
				 pdc->dma_addr,
				 pdc->dma_size,
				 DMA_TO_DEVICE);
	}
	/*
	 * Free the interrupt
	 */
	free_irq(port->irq, port);
	/*
	 * If there is a specific "close" function (to unregister
	 * control line interrupts)
	 */
	if (atmel_close_hook)
		atmel_close_hook(port);
}
/*
 * Flush any TX data submitted for DMA. Called when the TX circular
 * buffer is reset.
 */
static void atmel_flush_buffer(struct uart_port *port)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	if (!atmel_use_dma_tx(port))
		return;
	/* Cancel the in-flight PDC transfer and forget its offset. */
	UART_PUT_TCR(port, 0);
	atmel_port->pdc_tx.ofs = 0;
}
/*
 * Power / Clock management.
 *
 * state 0 = running, state 3 = powered off (serial-core PM states).
 */
static void atmel_serial_pm(struct uart_port *port, unsigned int state,
			    unsigned int oldstate)
{
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	switch (state) {
	case 0:
		/*
		 * Enable the peripheral clock for this serial port.
		 * This is called on uart_open() or a resume event.
		 */
		clk_enable(atmel_port->clk);
		/* re-enable interrupts if we disabled some on suspend */
		UART_PUT_IER(port, atmel_port->backup_imr);
		break;
	case 3:
		/* Back up the interrupt mask and disable all interrupts */
		atmel_port->backup_imr = UART_GET_IMR(port);
		UART_PUT_IDR(port, -1);
		/*
		 * Disable the peripheral clock for this serial port.
		 * This is called on uart_close() or a suspend event.
		 */
		clk_disable(atmel_port->clk);
		break;
	default:
		printk(KERN_ERR "atmel_serial: unknown pm %d\n", state);
	}
}
/*
 * Change the port parameters
 *
 * Translates the termios settings into the USART mode register,
 * baud-rate generator and status masks, then restarts the port.
 * The register update sequence (mask IRQs, disable TX/RX, write MR
 * and BRGR, reset, re-enable) must stay in this order.
 */
static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
			      struct ktermios *old)
{
	unsigned long flags;
	unsigned int mode, imr, quot, baud;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	/* Get current mode register */
	mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL
					| ATMEL_US_NBSTOP | ATMEL_US_PAR
					| ATMEL_US_USMODE);
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
	quot = uart_get_divisor(port, baud);
	if (quot > 65535) {	/* BRGR is 16-bit, so switch to slower clock */
		quot /= 8;
		mode |= ATMEL_US_USCLKS_MCK_DIV8;
	}
	/* byte size */
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		mode |= ATMEL_US_CHRL_5;
		break;
	case CS6:
		mode |= ATMEL_US_CHRL_6;
		break;
	case CS7:
		mode |= ATMEL_US_CHRL_7;
		break;
	default:
		mode |= ATMEL_US_CHRL_8;
		break;
	}
	/* stop bits */
	if (termios->c_cflag & CSTOPB)
		mode |= ATMEL_US_NBSTOP_2;
	/* parity */
	if (termios->c_cflag & PARENB) {
		/* Mark or Space parity */
		if (termios->c_cflag & CMSPAR) {
			if (termios->c_cflag & PARODD)
				mode |= ATMEL_US_PAR_MARK;
			else
				mode |= ATMEL_US_PAR_SPACE;
		} else if (termios->c_cflag & PARODD)
			mode |= ATMEL_US_PAR_ODD;
		else
			mode |= ATMEL_US_PAR_EVEN;
	} else
		mode |= ATMEL_US_PAR_NONE;
	/* hardware handshake (RTS/CTS) */
	if (termios->c_cflag & CRTSCTS)
		mode |= ATMEL_US_USMODE_HWHS;
	else
		mode |= ATMEL_US_USMODE_NORMAL;
	spin_lock_irqsave(&port->lock, flags);
	/* which status bits callers want reported as errors */
	port->read_status_mask = ATMEL_US_OVRE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
	if (termios->c_iflag & (BRKINT | PARMRK))
		port->read_status_mask |= ATMEL_US_RXBRK;
	if (atmel_use_dma_rx(port))
		/* need to enable error interrupts */
		UART_PUT_IER(port, port->read_status_mask);
	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= ATMEL_US_RXBRK;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= ATMEL_US_OVRE;
	}
	/* TODO: Ignore all characters if CREAD is set.*/
	/* update the per-port timeout */
	uart_update_timeout(port, termios->c_cflag, baud);
	/*
	 * save/disable interrupts. The tty layer will ensure that the
	 * transmitter is empty if requested by the caller, so there's
	 * no need to wait for it here.
	 */
	imr = UART_GET_IMR(port);
	UART_PUT_IDR(port, -1);
	/* disable receiver and transmitter */
	UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
	/* Resetting serial mode to RS232 (0x0) */
	mode &= ~ATMEL_US_USMODE;
	if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
		dev_dbg(port->dev, "Setting UART to RS485\n");
		if ((atmel_port->rs485.delay_rts_after_send) > 0)
			UART_PUT_TTGR(port,
					atmel_port->rs485.delay_rts_after_send);
		mode |= ATMEL_US_USMODE_RS485;
	} else {
		dev_dbg(port->dev, "Setting UART to RS232\n");
	}
	/* set the parity, stop bits and data size */
	UART_PUT_MR(port, mode);
	/* set the baud rate */
	UART_PUT_BRGR(port, quot);
	UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
	UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
	/* restore interrupts */
	UART_PUT_IER(port, imr);
	/* CTS flow-control and modem-status interrupts */
	if (UART_ENABLE_MS(port, termios->c_cflag))
		port->ops->enable_ms(port);
	spin_unlock_irqrestore(&port->lock, flags);
}
/* Track line-discipline changes: the PPS discipline needs DCD
 * hard-pps handling plus modem-status interrupts. */
static void atmel_set_ldisc(struct uart_port *port, int new)
{
	if (new != N_PPS) {
		port->flags &= ~UPF_HARDPPS_CD;
		return;
	}
	port->flags |= UPF_HARDPPS_CD;
	atmel_enable_ms(port);
}
/*
 * Return string describing the specified port
 */
static const char *atmel_type(struct uart_port *port)
{
	if (port->type == PORT_ATMEL)
		return "ATMEL_SERIAL";
	return NULL;
}
/*
 * Release the memory region(s) being used by 'port'.
 */
static void atmel_release_port(struct uart_port *port)
{
	struct platform_device *pdev = to_platform_device(port->dev);
	/* use the standard helper instead of open-coding end-start+1 */
	int size = resource_size(&pdev->resource[0]);
	release_mem_region(port->mapbase, size);
	if (port->flags & UPF_IOREMAP) {
		iounmap(port->membase);
		port->membase = NULL;
	}
}
/*
 * Request the memory region(s) being used by 'port'.
 *
 * Returns 0 on success, -EBUSY if the region is taken, -ENOMEM if
 * the (optional) ioremap fails.
 */
static int atmel_request_port(struct uart_port *port)
{
	struct platform_device *pdev = to_platform_device(port->dev);
	/* use the standard helper instead of open-coding end-start+1 */
	int size = resource_size(&pdev->resource[0]);
	if (!request_mem_region(port->mapbase, size, "atmel_serial"))
		return -EBUSY;
	if (port->flags & UPF_IOREMAP) {
		port->membase = ioremap(port->mapbase, size);
		if (port->membase == NULL) {
			release_mem_region(port->mapbase, size);
			return -ENOMEM;
		}
	}
	return 0;
}
/*
 * Configure/autoconfigure the port.
 */
static void atmel_config_port(struct uart_port *port, int flags)
{
	if (!(flags & UART_CONFIG_TYPE))
		return;
	port->type = PORT_ATMEL;
	atmel_request_port(port);
}
/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 *
 * Returns 0 if the requested settings are compatible with this
 * port, -EINVAL otherwise.
 */
static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	if (ser->type != PORT_UNKNOWN && ser->type != PORT_ATMEL)
		return -EINVAL;
	if (port->irq != ser->irq)
		return -EINVAL;
	if (ser->io_type != SERIAL_IO_MEM)
		return -EINVAL;
	if (port->uartclk / 16 != ser->baud_base)
		return -EINVAL;
	if ((void *)port->mapbase != ser->iomem_base)
		return -EINVAL;
	if (port->iobase != ser->port)
		return -EINVAL;
	if (ser->hub6 != 0)
		return -EINVAL;
	return 0;
}
#ifdef CONFIG_CONSOLE_POLL
/* kgdb/console polling: busy-wait for a received character */
static int atmel_poll_get_char(struct uart_port *port)
{
	while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY))
		cpu_relax();
	return UART_GET_CHAR(port);
}
/* kgdb/console polling: busy-wait until THR is ready, then send */
static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
{
	while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
		cpu_relax();
	UART_PUT_CHAR(port, ch);
}
#endif
/*
 * Port-specific ioctls: get/set the RS485 configuration
 * (TIOCGRS485/TIOCSRS485).  Returns 0 on success, -EFAULT on a bad
 * user pointer, -ENOIOCTLCMD for anything else.
 */
static int
atmel_ioctl(struct uart_port *port, unsigned int cmd, unsigned long arg)
{
	struct serial_rs485 rs485conf;
	switch (cmd) {
	case TIOCSRS485:
		if (copy_from_user(&rs485conf, (struct serial_rs485 *) arg,
					sizeof(rs485conf)))
			return -EFAULT;
		atmel_config_rs485(port, &rs485conf);
		break;
	case TIOCGRS485:
		if (copy_to_user((struct serial_rs485 *) arg,
					&(to_atmel_uart_port(port)->rs485),
					sizeof(rs485conf)))
			return -EFAULT;
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}
/* uart_ops vtable wiring this driver into the serial core */
static struct uart_ops atmel_pops = {
	.tx_empty	= atmel_tx_empty,
	.set_mctrl	= atmel_set_mctrl,
	.get_mctrl	= atmel_get_mctrl,
	.stop_tx	= atmel_stop_tx,
	.start_tx	= atmel_start_tx,
	.stop_rx	= atmel_stop_rx,
	.enable_ms	= atmel_enable_ms,
	.break_ctl	= atmel_break_ctl,
	.startup	= atmel_startup,
	.shutdown	= atmel_shutdown,
	.flush_buffer	= atmel_flush_buffer,
	.set_termios	= atmel_set_termios,
	.set_ldisc	= atmel_set_ldisc,
	.type		= atmel_type,
	.release_port	= atmel_release_port,
	.request_port	= atmel_request_port,
	.config_port	= atmel_config_port,
	.verify_port	= atmel_verify_port,
	.pm		= atmel_serial_pm,
	.ioctl		= atmel_ioctl,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char	= atmel_poll_get_char,
	.poll_put_char	= atmel_poll_put_char,
#endif
};
/*
 * Fill in per-port configuration from device-tree properties:
 * PDC/DMA usage and the optional RS485 settings.
 */
static void atmel_of_init_port(struct atmel_uart_port *atmel_port,
					 struct device_node *np)
{
	u32 rs485_delay[2];
	/* DMA/PDC usage specification */
	atmel_port->use_dma_rx =
		of_get_property(np, "atmel,use-dma-rx", NULL) ? 1 : 0;
	atmel_port->use_dma_tx =
		of_get_property(np, "atmel,use-dma-tx", NULL) ? 1 : 0;
	/* rs485 properties: only valid if both delays are given */
	if (of_property_read_u32_array(np, "rs485-rts-delay",
					rs485_delay, 2) == 0) {
		struct serial_rs485 *rs485conf = &atmel_port->rs485;
		rs485conf->delay_rts_before_send = rs485_delay[0];
		rs485conf->delay_rts_after_send = rs485_delay[1];
		rs485conf->flags = 0;
		if (of_get_property(np, "rs485-rx-during-tx", NULL))
			rs485conf->flags |= SER_RS485_RX_DURING_TX;
		if (of_get_property(np, "linux,rs485-enabled-at-boot-time",
									NULL))
			rs485conf->flags |= SER_RS485_ENABLED;
	}
}
/*
 * Configure the port from the platform device resource info.
 *
 * Works from device-tree properties when available, otherwise from
 * platform data.  Also sets up the tasklet, maps (or defers mapping
 * of) the registers, resolves the clock rate and picks the TX-done
 * interrupt mask for the chosen transmit mode.
 */
static void atmel_init_port(struct atmel_uart_port *atmel_port,
				      struct platform_device *pdev)
{
	struct uart_port *port = &atmel_port->uart;
	struct atmel_uart_data *pdata = pdev->dev.platform_data;
	if (pdev->dev.of_node) {
		atmel_of_init_port(atmel_port, pdev->dev.of_node);
	} else {
		atmel_port->use_dma_rx	= pdata->use_dma_rx;
		atmel_port->use_dma_tx	= pdata->use_dma_tx;
		atmel_port->rs485	= pdata->rs485;
	}
	port->iotype		= UPIO_MEM;
	port->flags		= UPF_BOOT_AUTOCONF;
	port->ops		= &atmel_pops;
	port->fifosize		= 1;
	port->dev		= &pdev->dev;
	port->mapbase	= pdev->resource[0].start;
	port->irq	= pdev->resource[1].start;
	tasklet_init(&atmel_port->tasklet, atmel_tasklet_func,
			(unsigned long)port);
	memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
	if (pdata && pdata->regs) {
		/* Already mapped by setup code */
		port->membase = pdata->regs;
	} else {
		port->flags	|= UPF_IOREMAP;
		port->membase	= NULL;
	}
	/* for console, the clock could already be configured */
	if (!atmel_port->clk) {
		atmel_port->clk = clk_get(&pdev->dev, "usart");
		clk_enable(atmel_port->clk);
		port->uartclk = clk_get_rate(atmel_port->clk);
		clk_disable(atmel_port->clk);
		/* only enable clock when USART is in use */
	}
	/* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
	if (atmel_port->rs485.flags & SER_RS485_ENABLED)
		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
	else if (atmel_use_dma_tx(port)) {
		port->fifosize = PDC_BUFFER_SIZE;
		atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
	} else {
		atmel_port->tx_done_mask = ATMEL_US_TXRDY;
	}
}
struct platform_device *atmel_default_console_device; /* the serial console device */
#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
/* Console output helper: busy-wait for THR ready, then emit ch */
static void atmel_console_putchar(struct uart_port *port, int ch)
{
	while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
		cpu_relax();
	UART_PUT_CHAR(port, ch);
}
/*
 * Interrupts are disabled on entering
 */
static void atmel_console_write(struct console *co, const char *s, u_int count)
{
	struct uart_port *port = &atmel_ports[co->index].uart;
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	unsigned int status, imr;
	unsigned int pdc_tx;
	/*
	 * First, save IMR and then disable interrupts
	 */
	imr = UART_GET_IMR(port);
	UART_PUT_IDR(port, ATMEL_US_RXRDY | atmel_port->tx_done_mask);
	/* Store PDC transmit status and disable it */
	pdc_tx = UART_GET_PTSR(port) & ATMEL_PDC_TXTEN;
	UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
	uart_console_write(port, s, count, atmel_console_putchar);
	/*
	 * Finally, wait for the transmit holding register to be
	 * ready again (TXRDY, i.e. the last byte has been accepted)
	 * and restore IMR
	 */
	do {
		status = UART_GET_CSR(port);
	} while (!(status & ATMEL_US_TXRDY));
	/* Restore PDC transmit status */
	if (pdc_tx)
		UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
	/* set interrupts back the way they were */
	UART_PUT_IER(port, imr);
}
/*
 * If the port was already initialised (eg, by a boot loader),
 * try to determine the current setup.
 *
 * Outputs are only written when they can be derived from the
 * hardware state; callers must pre-load sensible defaults.
 */
static void __init atmel_console_get_options(struct uart_port *port, int *baud,
					     int *parity, int *bits)
{
	unsigned int mr, quot;
	/*
	 * If the baud rate generator isn't running, the port wasn't
	 * initialized by the boot loader.
	 */
	quot = UART_GET_BRGR(port) & ATMEL_US_CD;
	if (!quot)
		return;
	mr = UART_GET_MR(port) & ATMEL_US_CHRL;
	if (mr == ATMEL_US_CHRL_8)
		*bits = 8;
	else
		*bits = 7;
	mr = UART_GET_MR(port) & ATMEL_US_PAR;
	if (mr == ATMEL_US_PAR_EVEN)
		*parity = 'e';
	else if (mr == ATMEL_US_PAR_ODD)
		*parity = 'o';
	/*
	 * The serial core only rounds down when matching this to a
	 * supported baud rate. Make sure we don't end up slightly
	 * lower than one of those, as it would make us fall through
	 * to a much lower baud rate than we really want.
	 */
	*baud = port->uartclk / (16 * (quot - 1));
}
/*
 * Console setup: reset and enable the port, then apply either the
 * user-supplied options or whatever the boot loader configured.
 * Returns -ENODEV until the port registers have been mapped.
 */
static int __init atmel_console_setup(struct console *co, char *options)
{
	struct uart_port *port = &atmel_ports[co->index].uart;
	int baud = 115200;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	if (port->membase == NULL) {
		/* Port not initialized yet - delay setup */
		return -ENODEV;
	}
	/* clock stays on for the console; probe undoes this balance */
	clk_enable(atmel_ports[co->index].clk);
	UART_PUT_IDR(port, -1);
	UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
	UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		atmel_console_get_options(port, &baud, &parity, &bits);
	return uart_set_options(port, co, baud, parity, bits, flow);
}
static struct uart_driver atmel_uart;
/* Console descriptor; .data ties it to the atmel_uart driver below */
static struct console atmel_console = {
	.name		= ATMEL_DEVICENAME,
	.write		= atmel_console_write,
	.device		= uart_console_device,
	.setup		= atmel_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &atmel_uart,
};
#define ATMEL_CONSOLE_DEVICE (&atmel_console)
/*
 * Early console initialization (before VM subsystem initialized).
 *
 * Only runs if board code nominated a default console device;
 * initializes that port enough for register_console() to work.
 */
static int __init atmel_console_init(void)
{
	if (atmel_default_console_device) {
		struct atmel_uart_data *pdata =
			atmel_default_console_device->dev.platform_data;
		int id = pdata->num;
		struct atmel_uart_port *port = &atmel_ports[id];
		port->backup_imr = 0;
		port->uart.line = id;
		add_preferred_console(ATMEL_DEVICENAME, id, NULL);
		atmel_init_port(port, atmel_default_console_device);
		register_console(&atmel_console);
	}
	return 0;
}
console_initcall(atmel_console_init);
/*
 * Late console initialization.
 *
 * Second chance: if the early console_initcall could not enable the
 * console (e.g. registers not mapped yet), retry here.
 */
static int __init atmel_late_console_init(void)
{
	if (atmel_default_console_device
	    && !(atmel_console.flags & CON_ENABLED))
		register_console(&atmel_console);
	return 0;
}
core_initcall(atmel_late_console_init);
static inline bool atmel_is_console_port(struct uart_port *port)
{
return port->cons && port->cons->index == port->line;
}
#else
#define ATMEL_CONSOLE_DEVICE NULL
/* No console support compiled in: never the console port */
static inline bool atmel_is_console_port(struct uart_port *port)
{
	return false;
}
#endif
/* Driver-wide state registered with the serial core */
static struct uart_driver atmel_uart = {
	.owner		= THIS_MODULE,
	.driver_name	= "atmel_serial",
	.dev_name	= ATMEL_DEVICENAME,
	.major		= SERIAL_ATMEL_MAJOR,
	.minor		= MINOR_START,
	.nr		= ATMEL_MAX_UART,
	.cons		= ATMEL_CONSOLE_DEVICE,
};
#ifdef CONFIG_PM
static bool atmel_serial_clk_will_stop(void)
{
#ifdef CONFIG_ARCH_AT91
return at91_suspend_entering_slow_clock();
#else
return false;
#endif
}
/*
 * Suspend callback: drain the console port, record wakeup capability
 * and hand the port to the serial core for suspension.
 */
static int atmel_serial_suspend(struct platform_device *pdev,
				pm_message_t state)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	if (atmel_is_console_port(port) && console_suspend_enabled) {
		/* Drain the TX shifter */
		while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY))
			cpu_relax();
	}
	/* we can not wake up if we're running on slow clock */
	atmel_port->may_wakeup = device_may_wakeup(&pdev->dev);
	if (atmel_serial_clk_will_stop())
		device_set_wakeup_enable(&pdev->dev, 0);
	uart_suspend_port(&atmel_uart, port);
	return 0;
}
/* Resume callback: restart the port and restore the wakeup setting
 * saved in atmel_serial_suspend(). */
static int atmel_serial_resume(struct platform_device *pdev)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	uart_resume_port(&atmel_uart, port);
	device_set_wakeup_enable(&pdev->dev, atmel_port->may_wakeup);
	return 0;
}
#else
#define atmel_serial_suspend NULL
#define atmel_serial_resume NULL
#endif
/*
 * Probe a USART instance: pick a port id (DT alias, platform data,
 * or auto-enumeration), initialize the port and register it with
 * the serial core.
 *
 * Fix vs. previous version: every failure after the port id bit was
 * claimed in atmel_ports_in_use now clears that bit again (via
 * err_clear_bit), and a pinctrl failure now also releases the clock
 * taken by atmel_init_port().  Previously those paths leaked the
 * port id forever, making the line unusable for re-probe.
 */
static int atmel_serial_probe(struct platform_device *pdev)
{
	struct atmel_uart_port *port;
	struct device_node *np = pdev->dev.of_node;
	struct atmel_uart_data *pdata = pdev->dev.platform_data;
	void *data;
	int ret = -ENODEV;
	struct pinctrl *pinctrl;
	BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
	if (np)
		ret = of_alias_get_id(np, "serial");
	else
		if (pdata)
			ret = pdata->num;
	if (ret < 0)
		/* port id not found in platform data nor device-tree aliases:
		 * auto-enumerate it */
		ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART);
	if (ret >= ATMEL_MAX_UART) {
		ret = -ENODEV;
		goto err;
	}
	if (test_and_set_bit(ret, atmel_ports_in_use)) {
		/* port already in use */
		ret = -EBUSY;
		goto err;
	}
	port = &atmel_ports[ret];
	port->backup_imr = 0;
	port->uart.line = ret;
	atmel_init_port(port, pdev);
	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
	if (IS_ERR(pinctrl)) {
		ret = PTR_ERR(pinctrl);
		/* release clk and port-in-use bit taken above */
		goto err_alloc_ring;
	}
	if (!atmel_use_dma_rx(&port->uart)) {
		ret = -ENOMEM;
		data = kmalloc(sizeof(struct atmel_uart_char)
				* ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
		if (!data)
			goto err_alloc_ring;
		port->rx_ring.buf = data;
	}
	ret = uart_add_one_port(&atmel_uart, &port->uart);
	if (ret)
		goto err_add_port;
#ifdef CONFIG_SERIAL_ATMEL_CONSOLE
	if (atmel_is_console_port(&port->uart)
			&& ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
		/*
		 * The serial core enabled the clock for us, so undo
		 * the clk_enable() in atmel_console_setup()
		 */
		clk_disable(port->clk);
	}
#endif
	device_init_wakeup(&pdev->dev, 1);
	platform_set_drvdata(pdev, port);
	if (port->rs485.flags & SER_RS485_ENABLED) {
		UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
		UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
	}
	return 0;
err_add_port:
	kfree(port->rx_ring.buf);
	port->rx_ring.buf = NULL;
err_alloc_ring:
	if (!atmel_is_console_port(&port->uart)) {
		clk_put(port->clk);
		port->clk = NULL;
	}
	/* give the port id back so a later probe can claim it */
	clear_bit(port->uart.line, atmel_ports_in_use);
err:
	return ret;
}
/*
 * Unbind a platform device: detach the port from the serial core, kill
 * the deferred-work tasklet, free the RX ring buffer, release the slot
 * in atmel_ports_in_use and drop the clock reference.
 *
 * Returns the result of uart_remove_one_port().
 */
static int atmel_serial_remove(struct platform_device *pdev)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
	int ret = 0;

	device_init_wakeup(&pdev->dev, 0);
	platform_set_drvdata(pdev, NULL);

	ret = uart_remove_one_port(&atmel_uart, port);

	/* no tasklet may run past this point */
	tasklet_kill(&atmel_port->tasklet);
	kfree(atmel_port->rx_ring.buf);

	/* "port" is allocated statically, so we shouldn't free it */

	clear_bit(port->line, atmel_ports_in_use);

	clk_put(atmel_port->clk);

	return ret;
}
static struct platform_driver atmel_serial_driver = {
.probe = atmel_serial_probe,
.remove = atmel_serial_remove,
.suspend = atmel_serial_suspend,
.resume = atmel_serial_resume,
.driver = {
.name = "atmel_usart",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(atmel_serial_dt_ids),
},
};
static int __init atmel_serial_init(void)
{
int ret;
ret = uart_register_driver(&atmel_uart);
if (ret)
return ret;
ret = platform_driver_register(&atmel_serial_driver);
if (ret)
uart_unregister_driver(&atmel_uart);
return ret;
}
static void __exit atmel_serial_exit(void)
{
platform_driver_unregister(&atmel_serial_driver);
uart_unregister_driver(&atmel_uart);
}
module_init(atmel_serial_init);
module_exit(atmel_serial_exit);
MODULE_AUTHOR("Rick Bronson");
MODULE_DESCRIPTION("Atmel AT91 / AT32 serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel_usart");
| gpl-2.0 |
andr7e/android_kernel_elephone_p6000 | kernel/arch/arm/mach-orion5x/ts409-setup.c | 2107 | 8339 | /*
* QNAP TS-409 Board Setup
*
* Maintainer: Sylver Bruneau <sylver.bruneau@gmail.com>
*
* Copyright (C) 2008 Sylver Bruneau <sylver.bruneau@gmail.com>
* Copyright (C) 2008 Martin Michlmayr <tbm@cyrius.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/mtd/physmap.h>
#include <linux/mv643xx_eth.h>
#include <linux/leds.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/serial_reg.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/pci.h>
#include <mach/orion5x.h>
#include "common.h"
#include "mpp.h"
#include "tsx09-common.h"
/*****************************************************************************
* QNAP TS-409 Info
****************************************************************************/
/*
* QNAP TS-409 hardware :
* - Marvell 88F5281-D0
* - Marvell 88SX7042 SATA controller (PCIe)
* - Marvell 88E1118 Gigabit Ethernet PHY
* - RTC S35390A (@0x30) on I2C bus
* - 8MB NOR flash
* - 256MB of DDR-2 RAM
*/
/*
* 8MB NOR flash Device bus boot chip select
*/
#define QNAP_TS409_NOR_BOOT_BASE 0xff800000
#define QNAP_TS409_NOR_BOOT_SIZE SZ_8M
/****************************************************************************
* 8MiB NOR flash. The struct mtd_partition is not in the same order as the
* partitions on the device because we want to keep compatibility with
* existing QNAP firmware.
*
* Layout as used by QNAP:
* [2] 0x00000000-0x00200000 : "Kernel"
* [3] 0x00200000-0x00600000 : "RootFS1"
* [4] 0x00600000-0x00700000 : "RootFS2"
* [6] 0x00700000-0x00760000 : "NAS Config" (read-only)
* [5] 0x00760000-0x00780000 : "U-Boot Config"
* [1] 0x00780000-0x00800000 : "U-Boot" (read-only)
***************************************************************************/
static struct mtd_partition qnap_ts409_partitions[] = {
{
.name = "U-Boot",
.size = 0x00080000,
.offset = 0x00780000,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "Kernel",
.size = 0x00200000,
.offset = 0,
}, {
.name = "RootFS1",
.size = 0x00400000,
.offset = 0x00200000,
}, {
.name = "RootFS2",
.size = 0x00100000,
.offset = 0x00600000,
}, {
.name = "U-Boot Config",
.size = 0x00020000,
.offset = 0x00760000,
}, {
.name = "NAS Config",
.size = 0x00060000,
.offset = 0x00700000,
.mask_flags = MTD_WRITEABLE,
},
};
static struct physmap_flash_data qnap_ts409_nor_flash_data = {
.width = 1,
.parts = qnap_ts409_partitions,
.nr_parts = ARRAY_SIZE(qnap_ts409_partitions)
};
static struct resource qnap_ts409_nor_flash_resource = {
.flags = IORESOURCE_MEM,
.start = QNAP_TS409_NOR_BOOT_BASE,
.end = QNAP_TS409_NOR_BOOT_BASE + QNAP_TS409_NOR_BOOT_SIZE - 1,
};
static struct platform_device qnap_ts409_nor_flash = {
.name = "physmap-flash",
.id = 0,
.dev = { .platform_data = &qnap_ts409_nor_flash_data, },
.num_resources = 1,
.resource = &qnap_ts409_nor_flash_resource,
};
/*****************************************************************************
* PCI
****************************************************************************/
/*
 * IRQ mapping for the (unpopulated) PCI bus.
 *
 * Devices with hard-wired IRQs are resolved by the generic Orion5x
 * helper; it returns -1 for everything else, which is exactly what we
 * want since plain PCI is not used on the TS-409.
 */
static int __init qnap_ts409_pci_map_irq(const struct pci_dev *dev, u8 slot,
	u8 pin)
{
	return orion5x_pci_map_irq(dev, slot, pin);
}
static struct hw_pci qnap_ts409_pci __initdata = {
.nr_controllers = 2,
.setup = orion5x_pci_sys_setup,
.scan = orion5x_pci_sys_scan_bus,
.map_irq = qnap_ts409_pci_map_irq,
};
/*
 * Register the PCI controllers at subsys_initcall time, but only when
 * actually running on a TS-409 (this file is built into multi-board
 * kernels).
 */
static int __init qnap_ts409_pci_init(void)
{
	if (machine_is_ts409())
		pci_common_init(&qnap_ts409_pci);

	return 0;
}
subsys_initcall(qnap_ts409_pci_init);
/*****************************************************************************
* RTC S35390A on I2C bus
****************************************************************************/
#define TS409_RTC_GPIO 10
static struct i2c_board_info __initdata qnap_ts409_i2c_rtc = {
I2C_BOARD_INFO("s35390a", 0x30),
};
/*****************************************************************************
* LEDs attached to GPIO
****************************************************************************/
static struct gpio_led ts409_led_pins[] = {
{
.name = "ts409:red:sata1",
.gpio = 4,
.active_low = 1,
}, {
.name = "ts409:red:sata2",
.gpio = 5,
.active_low = 1,
}, {
.name = "ts409:red:sata3",
.gpio = 6,
.active_low = 1,
}, {
.name = "ts409:red:sata4",
.gpio = 7,
.active_low = 1,
},
};
static struct gpio_led_platform_data ts409_led_data = {
.leds = ts409_led_pins,
.num_leds = ARRAY_SIZE(ts409_led_pins),
};
static struct platform_device ts409_leds = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &ts409_led_data,
},
};
/****************************************************************************
* GPIO Attached Keys
* Power button is attached to the PIC microcontroller
****************************************************************************/
#define QNAP_TS409_GPIO_KEY_RESET 14
#define QNAP_TS409_GPIO_KEY_MEDIA 15
static struct gpio_keys_button qnap_ts409_buttons[] = {
{
.code = KEY_RESTART,
.gpio = QNAP_TS409_GPIO_KEY_RESET,
.desc = "Reset Button",
.active_low = 1,
}, {
.code = KEY_COPY,
.gpio = QNAP_TS409_GPIO_KEY_MEDIA,
.desc = "USB Copy Button",
.active_low = 1,
},
};
static struct gpio_keys_platform_data qnap_ts409_button_data = {
.buttons = qnap_ts409_buttons,
.nbuttons = ARRAY_SIZE(qnap_ts409_buttons),
};
static struct platform_device qnap_ts409_button_device = {
.name = "gpio-keys",
.id = -1,
.num_resources = 0,
.dev = {
.platform_data = &qnap_ts409_button_data,
},
};
/*****************************************************************************
* General Setup
****************************************************************************/
static unsigned int ts409_mpp_modes[] __initdata = {
MPP0_UNUSED,
MPP1_UNUSED,
MPP2_UNUSED,
MPP3_UNUSED,
MPP4_GPIO, /* HDD 1 status */
MPP5_GPIO, /* HDD 2 status */
MPP6_GPIO, /* HDD 3 status */
MPP7_GPIO, /* HDD 4 status */
MPP8_UNUSED,
MPP9_UNUSED,
MPP10_GPIO, /* RTC int */
MPP11_UNUSED,
MPP12_UNUSED,
MPP13_UNUSED,
MPP14_GPIO, /* SW_RST */
MPP15_GPIO, /* USB copy button */
MPP16_UART, /* UART1 RXD */
MPP17_UART, /* UART1 TXD */
MPP18_UNUSED,
MPP19_UNUSED,
0,
};
/*
 * Board init: configure the SoC, map the boot NOR flash, bring up the
 * on-board peripherals and register the GPIO buttons, LEDs and the
 * S35390A RTC.
 */
static void __init qnap_ts409_init(void)
{
	/*
	 * Setup basic Orion functions. Need to be called early.
	 */
	orion5x_init();

	orion5x_mpp_conf(ts409_mpp_modes);

	/*
	 * Configure peripherals.
	 */
	mvebu_mbus_add_window("devbus-boot", QNAP_TS409_NOR_BOOT_BASE,
			      QNAP_TS409_NOR_BOOT_SIZE);
	platform_device_register(&qnap_ts409_nor_flash);

	orion5x_ehci0_init();
	/* partition [5] is "NAS Config"; the factory MAC address is
	 * stored there by QNAP's firmware */
	qnap_tsx09_find_mac_addr(QNAP_TS409_NOR_BOOT_BASE +
				 qnap_ts409_partitions[5].offset,
				 qnap_ts409_partitions[5].size);
	orion5x_eth_init(&qnap_tsx09_eth_data);
	orion5x_i2c_init();
	orion5x_uart0_init();
	orion5x_uart1_init();

	platform_device_register(&qnap_ts409_button_device);

	/* Get RTC IRQ and register the chip */
	if (gpio_request(TS409_RTC_GPIO, "rtc") == 0) {
		if (gpio_direction_input(TS409_RTC_GPIO) == 0)
			qnap_ts409_i2c_rtc.irq = gpio_to_irq(TS409_RTC_GPIO);
		else
			gpio_free(TS409_RTC_GPIO);
	}
	/* fall back to polled RTC operation if the IRQ GPIO failed */
	if (qnap_ts409_i2c_rtc.irq == 0)
		pr_warning("qnap_ts409_init: failed to get RTC IRQ\n");
	i2c_register_board_info(0, &qnap_ts409_i2c_rtc, 1);
	platform_device_register(&ts409_leds);

	/* register tsx09 specific power-off method */
	pm_power_off = qnap_tsx09_power_off;
}
MACHINE_START(TS409, "QNAP TS-409")
/* Maintainer: Sylver Bruneau <sylver.bruneau@gmail.com> */
.atag_offset = 0x100,
.init_machine = qnap_ts409_init,
.map_io = orion5x_map_io,
.init_early = orion5x_init_early,
.init_irq = orion5x_init_irq,
.init_time = orion5x_timer_init,
.fixup = tag_fixup_mem32,
.restart = orion5x_restart,
MACHINE_END
| gpl-2.0 |
jduhamel/linux | drivers/block/paride/pf.c | 2619 | 24889 | /*
pf.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
Under the terms of the GNU General Public License.
This is the high-level driver for parallel port ATAPI disk
drives based on chips supported by the paride module.
By default, the driver will autoprobe for a single parallel
port ATAPI disk drive, but if their individual parameters are
specified, the driver can handle up to 4 drives.
The behaviour of the pf driver can be altered by setting
some parameters from the insmod command line. The following
parameters are adjustable:
drive0 These four arguments can be arrays of
drive1 1-7 integers as follows:
drive2
drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<lun>,<dly>
Where,
<prt> is the base of the parallel port address for
the corresponding drive. (required)
<pro> is the protocol number for the adapter that
supports this drive. These numbers are
logged by 'paride' when the protocol modules
are initialised. (0 if not given)
<uni> for those adapters that support chained
devices, this is the unit selector for the
chain of devices on the given port. It should
be zero for devices that don't support chaining.
(0 if not given)
<mod> this can be -1 to choose the best mode, or one
of the mode numbers supported by the adapter.
(-1 if not given)
<slv> ATAPI CDroms can be jumpered to master or slave.
Set this to 0 to choose the master drive, 1 to
choose the slave, -1 (the default) to choose the
first drive found.
<lun> Some ATAPI devices support multiple LUNs.
One example is the ATAPI PD/CD drive from
Matshita/Panasonic. This device has a
CD drive on LUN 0 and a PD drive on LUN 1.
By default, the driver will search for the
first LUN with a supported device. Set
this parameter to force it to use a specific
LUN. (default -1)
<dly> some parallel ports require the driver to
go more slowly. -1 sets a default value that
should work with the chosen protocol. Otherwise,
set this to a small integer, the larger it is
the slower the port i/o. In some cases, setting
this to zero will speed up the device. (default -1)
major You may use this parameter to overide the
default major number (47) that this driver
will use. Be sure to change the device
name as well.
name This parameter is a character string that
contains the name the kernel will use for this
device (in /proc output, for instance).
(default "pf").
cluster The driver will attempt to aggregate requests
for adjacent blocks into larger multi-block
clusters. The maximum cluster size (in 512
byte sectors) is set with this parameter.
(default 64)
verbose This parameter controls the amount of logging
that the driver will do. Set it to 0 for
normal operation, 1 to see autoprobe progress
messages, or 2 to see additional debugging
output. (default 0)
nice This parameter controls the driver's use of
idle CPU time, at the expense of some speed.
If this driver is built into the kernel, you can use the
following command line parameters, with the same values
as the corresponding module parameters listed above:
pf.drive0
pf.drive1
pf.drive2
pf.drive3
pf.cluster
pf.nice
In addition, you can use the parameter pf.disable to disable
the driver entirely.
*/
/* Changes:
1.01 GRG 1998.05.03 Changes for SMP. Eliminate sti().
Fix for drives that don't clear STAT_ERR
until after next CDB delivered.
Small change in pf_completion to round
up transfer size.
1.02 GRG 1998.06.16 Eliminated an Ugh
1.03 GRG 1998.08.16 Use HZ in loop timings, extra debugging
1.04 GRG 1998.09.24 Added jumbo support
*/
#define PF_VERSION "1.04"
#define PF_MAJOR 47
#define PF_NAME "pf"
#define PF_UNITS 4
#include <linux/types.h>
/* Here are things one can override from the insmod command.
Most are autoprobed by paride unless set here. Verbose is off
by default.
*/
static bool verbose = 0;
static int major = PF_MAJOR;
static char *name = PF_NAME;
static int cluster = 64;
static int nice = 0;
static int disable = 0;
static int drive0[7] = { 0, 0, 0, -1, -1, -1, -1 };
static int drive1[7] = { 0, 0, 0, -1, -1, -1, -1 };
static int drive2[7] = { 0, 0, 0, -1, -1, -1, -1 };
static int drive3[7] = { 0, 0, 0, -1, -1, -1, -1 };
static int (*drives[4])[7] = {&drive0, &drive1, &drive2, &drive3};
static int pf_drive_count;
enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
/* end of parameters */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
static DEFINE_MUTEX(pf_mutex);
static DEFINE_SPINLOCK(pf_spin_lock);
module_param(verbose, bool, 0644);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(cluster, int, 0);
module_param(nice, int, 0);
module_param_array(drive0, int, NULL, 0);
module_param_array(drive1, int, NULL, 0);
module_param_array(drive2, int, NULL, 0);
module_param_array(drive3, int, NULL, 0);
#include "paride.h"
#include "pseudo.h"
/* constants for faking geometry numbers */
#define PF_FD_MAX 8192 /* use FD geometry under this size */
#define PF_FD_HDS 2
#define PF_FD_SPT 18
#define PF_HD_HDS 64
#define PF_HD_SPT 32
#define PF_MAX_RETRIES 5
#define PF_TMO 800 /* interrupt timeout in jiffies */
#define PF_SPIN_DEL 50 /* spin delay in micro-seconds */
#define PF_SPIN (1000000*PF_TMO)/(HZ*PF_SPIN_DEL)
#define STAT_ERR 0x00001
#define STAT_INDEX 0x00002
#define STAT_ECC 0x00004
#define STAT_DRQ 0x00008
#define STAT_SEEK 0x00010
#define STAT_WRERR 0x00020
#define STAT_READY 0x00040
#define STAT_BUSY 0x00080
#define ATAPI_REQ_SENSE 0x03
#define ATAPI_LOCK 0x1e
#define ATAPI_DOOR 0x1b
#define ATAPI_MODE_SENSE 0x5a
#define ATAPI_CAPACITY 0x25
#define ATAPI_IDENTIFY 0x12
#define ATAPI_READ_10 0x28
#define ATAPI_WRITE_10 0x2a
static int pf_open(struct block_device *bdev, fmode_t mode);
static void do_pf_request(struct request_queue * q);
static int pf_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static void pf_release(struct gendisk *disk, fmode_t mode);
static int pf_detect(void);
static void do_pf_read(void);
static void do_pf_read_start(void);
static void do_pf_write(void);
static void do_pf_write_start(void);
static void do_pf_read_drq(void);
static void do_pf_write_done(void);
#define PF_NM 0
#define PF_RO 1
#define PF_RW 2
#define PF_NAMELEN 8
struct pf_unit {
struct pi_adapter pia; /* interface to paride layer */
struct pi_adapter *pi;
int removable; /* removable media device ? */
int media_status; /* media present ? WP ? */
int drive; /* drive */
int lun;
int access; /* count of active opens ... */
int present; /* device present ? */
char name[PF_NAMELEN]; /* pf0, pf1, ... */
struct gendisk *disk;
};
static struct pf_unit units[PF_UNITS];
static int pf_identify(struct pf_unit *pf);
static void pf_lock(struct pf_unit *pf, int func);
static void pf_eject(struct pf_unit *pf);
static unsigned int pf_check_events(struct gendisk *disk,
unsigned int clearing);
static char pf_scratch[512]; /* scratch block buffer */
/* the variables below are used mainly in the I/O request engine, which
processes only one request at a time.
*/
static int pf_retries = 0; /* i/o error retry count */
static int pf_busy = 0; /* request being processed ? */
static struct request *pf_req; /* current request */
static int pf_block; /* address of next requested block */
static int pf_count; /* number of blocks still to do */
static int pf_run; /* sectors in current cluster */
static int pf_cmd; /* current command READ/WRITE */
static struct pf_unit *pf_current;/* unit of current request */
static int pf_mask; /* stopper for pseudo-int */
static char *pf_buf; /* buffer for request in progress */
/* kernel glue structures */
static const struct block_device_operations pf_fops = {
.owner = THIS_MODULE,
.open = pf_open,
.release = pf_release,
.ioctl = pf_ioctl,
.getgeo = pf_getgeo,
.check_events = pf_check_events,
};
/*
 * Allocate a gendisk for every unit slot and seed each unit from its
 * drive0..drive3 module parameters.
 *
 * pf_drive_count counts the units that were explicitly configured
 * (non-zero port address).  pf_detect() autoprobes a single drive only
 * when this count is zero, per the driver's documented default.
 *
 * Fix: the count was inverted (counting units *without* a configured
 * port), so with the all-defaults configuration pf_drive_count came out
 * as PF_UNITS and the documented autoprobe path in pf_detect() could
 * never run.  Count configured units instead, matching the sibling
 * pcd/pd drivers.
 */
static void __init pf_init_units(void)
{
	struct pf_unit *pf;
	int unit;

	pf_drive_count = 0;
	for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) {
		struct gendisk *disk = alloc_disk(1);
		if (!disk)
			continue;
		pf->disk = disk;
		pf->pi = &pf->pia;
		pf->media_status = PF_NM;	/* assume no media until probed */
		pf->drive = (*drives[unit])[D_SLV];
		pf->lun = (*drives[unit])[D_LUN];
		snprintf(pf->name, PF_NAMELEN, "%s%d", name, unit);
		disk->major = major;
		disk->first_minor = unit;
		strcpy(disk->disk_name, pf->name);
		disk->fops = &pf_fops;
		if ((*drives[unit])[D_PRT])
			pf_drive_count++;
	}
}
/*
 * Block-device open: re-identify the unit (refreshing media status and
 * capacity), then refuse the open if no media is present or a write
 * open is attempted on read-only media.  On success the open count is
 * bumped and removable media is locked in the drive.
 *
 * Returns 0, -ENODEV (no media) or -EROFS (write open on RO media).
 */
static int pf_open(struct block_device *bdev, fmode_t mode)
{
	struct pf_unit *pf = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&pf_mutex);
	pf_identify(pf);

	ret = -ENODEV;
	if (pf->media_status == PF_NM)
		goto out;

	ret = -EROFS;
	if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE))
		goto out;

	ret = 0;
	pf->access++;
	if (pf->removable)
		pf_lock(pf, 1);
out:
	mutex_unlock(&pf_mutex);
	return ret;
}
/*
 * Fake a CHS geometry from the capacity: small media (under PF_FD_MAX
 * sectors) get floppy-like heads/sectors, anything larger gets
 * hard-disk-like values.  sector_div() leaves the cylinder count.
 */
static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct pf_unit *pf = bdev->bd_disk->private_data;
	sector_t capacity = get_capacity(pf->disk);

	if (capacity < PF_FD_MAX) {
		geo->cylinders = sector_div(capacity, PF_FD_HDS * PF_FD_SPT);
		geo->heads = PF_FD_HDS;
		geo->sectors = PF_FD_SPT;
	} else {
		geo->cylinders = sector_div(capacity, PF_HD_HDS * PF_HD_SPT);
		geo->heads = PF_HD_HDS;
		geo->sectors = PF_HD_SPT;
	}
	return 0;
}
/*
 * Block-device ioctl.  CDROMEJECT is the only command supported, and it
 * is honoured only while this is the single open reference on the unit.
 */
static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pf_unit *unit = bdev->bd_disk->private_data;

	if (cmd != CDROMEJECT)
		return -EINVAL;

	/* refuse to eject media that is open more than once */
	if (unit->access != 1)
		return -EBUSY;

	mutex_lock(&pf_mutex);
	pf_eject(unit);
	mutex_unlock(&pf_mutex);

	return 0;
}
/*
 * Block-device release: drop one open reference; when the last
 * reference goes away, unlock removable media in the drive.  A release
 * without a matching open is a bug and only triggers a WARN.
 */
static void pf_release(struct gendisk *disk, fmode_t mode)
{
	struct pf_unit *pf = disk->private_data;

	mutex_lock(&pf_mutex);
	if (pf->access <= 0) {
		mutex_unlock(&pf_mutex);
		WARN_ON(1);
		return;
	}

	pf->access--;

	if (!pf->access && pf->removable)
		pf_lock(pf, 0);

	mutex_unlock(&pf_mutex);
}
/*
 * Always report a possible media change so the block layer revalidates
 * the disk on every check.
 */
static unsigned int pf_check_events(struct gendisk *disk, unsigned int clearing)
{
	return DISK_EVENT_MEDIA_CHANGE;
}
static inline int status_reg(struct pf_unit *pf)
{
return pi_read_regr(pf->pi, 1, 6);
}
static inline int read_reg(struct pf_unit *pf, int reg)
{
return pi_read_regr(pf->pi, 0, reg);
}
static inline void write_reg(struct pf_unit *pf, int reg, int val)
{
pi_write_regr(pf->pi, 0, reg, val);
}
/*
 * Poll the alternate status register until every bit in 'go' is clear
 * and, when 'stop' is non-zero, at least one bit in 'stop' is set.
 * Polls at most PF_SPIN times with PF_SPIN_DEL microseconds between
 * reads.
 *
 * Returns 0 on success.  On a timeout, or when STAT_ERR is set *and*
 * the caller listed it in 'stop', returns (error << 8) | status with
 * 0x100 ORed into the error byte to mark a timeout; the condition is
 * logged only when 'fun' is non-NULL.
 */
static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg)
{
	int j, r, e, s, p;

	j = 0;
	while ((((r = status_reg(pf)) & go) || (stop && (!(r & stop))))
	       && (j++ < PF_SPIN))
		udelay(PF_SPIN_DEL);

	if ((r & (STAT_ERR & stop)) || (j > PF_SPIN)) {
		s = read_reg(pf, 7);	/* status */
		e = read_reg(pf, 1);	/* error */
		p = read_reg(pf, 2);	/* phase */
		if (j > PF_SPIN)
			e |= 0x100;	/* flag the timeout */
		if (fun)
			printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
			       " loop=%d phase=%d\n",
			       pf->name, fun, msg, r, s, e, j, p);
		return (e << 8) + s;
	}
	return 0;
}
/*
 * Deliver a 12-byte ATAPI packet command 'cmd' to the drive, announcing
 * an expected transfer length of 'dlen' bytes.
 *
 * The parallel-port adapter stays claimed (pi_connect) on success so
 * the caller can run the data/completion phase; on any failure the
 * adapter is released here and -1 is returned.  'fun' is only used to
 * label error messages.
 */
static int pf_command(struct pf_unit *pf, char *cmd, int dlen, char *fun)
{
	pi_connect(pf->pi);

	/* select master (0xa0) or slave (0xb0) */
	write_reg(pf, 6, 0xa0+0x10*pf->drive);

	if (pf_wait(pf, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) {
		pi_disconnect(pf->pi);
		return -1;
	}

	/* byte count the drive should expect, little-endian split */
	write_reg(pf, 4, dlen % 256);
	write_reg(pf, 5, dlen / 256);
	write_reg(pf, 7, 0xa0);	/* ATAPI packet command */

	if (pf_wait(pf, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) {
		pi_disconnect(pf->pi);
		return -1;
	}

	/* phase register must indicate "awaiting command packet" */
	if (read_reg(pf, 2) != 1) {
		printk("%s: %s: command phase error\n", pf->name, fun);
		pi_disconnect(pf->pi);
		return -1;
	}

	pi_write_block(pf->pi, cmd, 12);

	return 0;
}
/*
 * Run the data-in and completion phase of an ATAPI command started with
 * pf_command().  If the drive signals a data-in phase with DRQ set, the
 * byte count it reports is read back, rounded up to a multiple of 4,
 * and that many bytes are transferred into 'buf'.  Always releases the
 * parallel-port adapter.
 *
 * Returns 0 on success, otherwise the first non-zero pf_wait() result.
 */
static int pf_completion(struct pf_unit *pf, char *buf, char *fun)
{
	int r, s, n;

	r = pf_wait(pf, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
		    fun, "completion");

	if ((read_reg(pf, 2) & 2) && (read_reg(pf, 7) & STAT_DRQ)) {
		/* drive-reported byte count, rounded up to 4-byte units */
		n = (((read_reg(pf, 4) + 256 * read_reg(pf, 5)) +
		      3) & 0xfffc);
		pi_read_block(pf->pi, buf, n);
	}

	s = pf_wait(pf, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done");

	pi_disconnect(pf->pi);

	return (r ? r : s);
}
static void pf_req_sense(struct pf_unit *pf, int quiet)
{
char rs_cmd[12] =
{ ATAPI_REQ_SENSE, pf->lun << 5, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
char buf[16];
int r;
r = pf_command(pf, rs_cmd, 16, "Request sense");
mdelay(1);
if (!r)
pf_completion(pf, buf, "Request sense");
if ((!r) && (!quiet))
printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n",
pf->name, buf[2] & 0xf, buf[12], buf[13]);
}
static int pf_atapi(struct pf_unit *pf, char *cmd, int dlen, char *buf, char *fun)
{
int r;
r = pf_command(pf, cmd, dlen, fun);
mdelay(1);
if (!r)
r = pf_completion(pf, buf, fun);
if (r)
pf_req_sense(pf, !fun);
return r;
}
static void pf_lock(struct pf_unit *pf, int func)
{
char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 };
pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "lock" : "unlock");
}
static void pf_eject(struct pf_unit *pf)
{
char ej_cmd[12] = { ATAPI_DOOR, pf->lun << 5, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 };
pf_lock(pf, 0);
pf_atapi(pf, ej_cmd, 0, pf_scratch, "eject");
}
#define PF_RESET_TMO 30 /* in tenths of a second */
static void pf_sleep(int cs)
{
schedule_timeout_interruptible(cs);
}
/* the ATAPI standard actually specifies the contents of all 7 registers
after a reset, but the specification is ambiguous concerning the last
two bytes, and different drives interpret the standard differently.
*/
static int pf_reset(struct pf_unit *pf)
{
int i, k, flg;
int expect[5] = { 1, 1, 1, 0x14, 0xeb };
pi_connect(pf->pi);
write_reg(pf, 6, 0xa0+0x10*pf->drive);
write_reg(pf, 7, 8);
pf_sleep(20 * HZ / 1000);
k = 0;
while ((k++ < PF_RESET_TMO) && (status_reg(pf) & STAT_BUSY))
pf_sleep(HZ / 10);
flg = 1;
for (i = 0; i < 5; i++)
flg &= (read_reg(pf, i + 1) == expect[i]);
if (verbose) {
printk("%s: Reset (%d) signature = ", pf->name, k);
for (i = 0; i < 5; i++)
printk("%3x", read_reg(pf, i + 1));
if (!flg)
printk(" (incorrect)");
printk("\n");
}
pi_disconnect(pf->pi);
return flg - 1;
}
static void pf_mode_sense(struct pf_unit *pf)
{
char ms_cmd[12] =
{ ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 };
char buf[8];
pf_atapi(pf, ms_cmd, 8, buf, "mode sense");
pf->media_status = PF_RW;
if (buf[3] & 0x80)
pf->media_status = PF_RO;
}
/*
 * Copy a fixed-width field from an ATAPI identify buffer into a C
 * string, collapsing each run of spaces to a single space and trimming
 * a single trailing space.
 *
 * buf:  raw identify data
 * targ: destination (must hold at least len + 1 bytes)
 * offs: offset of the field inside buf
 * len:  width of the field in bytes
 */
static void xs(char *buf, char *targ, int offs, int len)
{
	int src, dst = 0;
	char last = 0;

	for (src = 0; src < len; src++) {
		char c = buf[src + offs];

		/* skip a space that directly follows another space */
		if (c == 0x20 && last == 0x20)
			continue;
		last = c;
		targ[dst++] = c;
	}
	if (last == 0x20)
		dst--;
	targ[dst] = 0;
}
/*
 * Decode a big-endian 32-bit value at offset 'offs' of an ATAPI
 * response buffer.  Bytes are taken as unsigned regardless of the
 * platform signedness of char.
 *
 * Accumulate in an unsigned type: the previous signed accumulator
 * invoked undefined behaviour (signed overflow) whenever the top bit of
 * the encoded field was set.  The returned bit pattern is unchanged.
 */
static int xl(char *buf, int offs)
{
	unsigned int v;
	int k;

	v = 0;
	for (k = 0; k < 4; k++)
		v = v * 256 + (buf[k + offs] & 0xff);
	return v;
}
/*
 * Issue ATAPI READ CAPACITY and set the gendisk capacity.  The response
 * holds the last addressable block (hence the +1) followed by the block
 * size; only 512-byte blocks are supported, anything else zeroes the
 * capacity.  On command failure the unit is marked as having no media.
 */
static void pf_get_capacity(struct pf_unit *pf)
{
	char rc_cmd[12] = { ATAPI_CAPACITY, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	char buf[8];
	int bs;

	if (pf_atapi(pf, rc_cmd, 8, buf, "get capacity")) {
		pf->media_status = PF_NM;
		return;
	}
	set_capacity(pf->disk, xl(buf, 0) + 1);
	bs = xl(buf, 4);
	if (bs != 512) {
		set_capacity(pf->disk, 0);
		if (verbose)
			printk("%s: Drive %d, LUN %d,"
			       " unsupported block size %d\n",
			       pf->name, pf->drive, pf->lun, bs);
	}
}
/*
 * Interrogate the drive with ATAPI IDENTIFY: validate the device type
 * (only types 0 and 7 are accepted), extract the manufacturer and model
 * strings, note removability, refresh the write-protect status and
 * capacity, and log a summary line.
 *
 * Returns 0 on success, -1 if the command failed or the device type is
 * unsupported.
 */
static int pf_identify(struct pf_unit *pf)
{
	int dt, s;
	char *ms[2] = { "master", "slave" };
	char mf[10], id[18];
	char id_cmd[12] =
	    { ATAPI_IDENTIFY, pf->lun << 5, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
	char buf[36];

	s = pf_atapi(pf, id_cmd, 36, buf, "identify");
	if (s)
		return -1;

	/* peripheral device type field */
	dt = buf[0] & 0x1f;
	if ((dt != 0) && (dt != 7)) {
		if (verbose)
			printk("%s: Drive %d, LUN %d, unsupported type %d\n",
			       pf->name, pf->drive, pf->lun, dt);
		return -1;
	}

	/* vendor and product identification strings */
	xs(buf, mf, 8, 8);
	xs(buf, id, 16, 16);

	pf->removable = (buf[1] & 0x80);

	/* issued three times back to back -- presumably to let the drive
	 * settle after reset; original intent undocumented, confirm before
	 * simplifying */
	pf_mode_sense(pf);
	pf_mode_sense(pf);
	pf_mode_sense(pf);

	pf_get_capacity(pf);

	printk("%s: %s %s, %s LUN %d, type %d",
	       pf->name, mf, id, ms[pf->drive], pf->lun, dt);
	if (pf->removable)
		printk(", removable");
	if (pf->media_status == PF_NM)
		printk(", no media\n");
	else {
		if (pf->media_status == PF_RO)
			printk(", RO");
		printk(", %llu blocks\n",
		       (unsigned long long)get_capacity(pf->disk));
	}
	return 0;
}
/* returns 0, with id set if drive is detected
-1, if drive detection failed
*/
static int pf_probe(struct pf_unit *pf)
{
	/* drive == -1: try both master and slave positions */
	if (pf->drive == -1) {
		for (pf->drive = 0; pf->drive <= 1; pf->drive++)
			if (!pf_reset(pf)) {
				/* lun fixed by the user, or scan 0..7 */
				if (pf->lun != -1)
					return pf_identify(pf);
				else
					for (pf->lun = 0; pf->lun < 8; pf->lun++)
						if (!pf_identify(pf))
							return 0;
			}
	} else {
		/* drive position fixed by the user */
		if (pf_reset(pf))
			return -1;
		if (pf->lun != -1)
			return pf_identify(pf);
		for (pf->lun = 0; pf->lun < 8; pf->lun++)
			if (!pf_identify(pf))
				return 0;
	}
	return -1;
}
static int pf_detect(void)
{
struct pf_unit *pf = units;
int k, unit;
printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
name, name, PF_VERSION, major, cluster, nice);
k = 0;
if (pf_drive_count == 0) {
if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF,
verbose, pf->name)) {
if (!pf_probe(pf) && pf->disk) {
pf->present = 1;
k++;
} else
pi_release(pf->pi);
}
} else
for (unit = 0; unit < PF_UNITS; unit++, pf++) {
int *conf = *drives[unit];
if (!conf[D_PRT])
continue;
if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD],
conf[D_UNI], conf[D_PRO], conf[D_DLY],
pf_scratch, PI_PF, verbose, pf->name)) {
if (pf->disk && !pf_probe(pf)) {
pf->present = 1;
k++;
} else
pi_release(pf->pi);
}
}
if (k)
return 0;
printk("%s: No ATAPI disk detected\n", name);
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
put_disk(pf->disk);
return -1;
}
/* The i/o request engine */
static int pf_start(struct pf_unit *pf, int cmd, int b, int c)
{
int i;
char io_cmd[12] = { cmd, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
for (i = 0; i < 4; i++) {
io_cmd[5 - i] = b & 0xff;
b = b >> 8;
}
io_cmd[8] = c & 0xff;
io_cmd[7] = (c >> 8) & 0xff;
i = pf_command(pf, io_cmd, c * 512, "start i/o");
mdelay(1);
return i;
}
static int pf_ready(void)
{
return (((status_reg(pf_current) & (STAT_BUSY | pf_mask)) == pf_mask));
}
static struct request_queue *pf_queue;
static void pf_end_request(int err)
{
if (pf_req && !__blk_end_request_cur(pf_req, err))
pf_req = NULL;
}
/*
 * Request-queue strategy function.  The engine handles one request at a
 * time: if one is already in flight (pf_busy) this is a no-op, and the
 * completion path re-enters via next_request().  A fetched request is
 * decomposed into the module-level state (unit, start block, total run,
 * current-segment count, buffer) and the transfer is started through
 * the pseudo-interrupt machinery; requests past the device capacity or
 * with an unknown direction are failed with -EIO.
 *
 * Called with pf_spin_lock held by the block layer.
 */
static void do_pf_request(struct request_queue * q)
{
	if (pf_busy)
		return;
repeat:
	if (!pf_req) {
		pf_req = blk_fetch_request(q);
		if (!pf_req)
			return;
	}

	pf_current = pf_req->rq_disk->private_data;
	pf_block = blk_rq_pos(pf_req);
	pf_run = blk_rq_sectors(pf_req);
	pf_count = blk_rq_cur_sectors(pf_req);

	if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
		pf_end_request(-EIO);
		goto repeat;
	}

	pf_cmd = rq_data_dir(pf_req);
	pf_buf = pf_req->buffer;
	pf_retries = 0;

	pf_busy = 1;
	if (pf_cmd == READ)
		pi_do_claimed(pf_current->pi, do_pf_read);
	else if (pf_cmd == WRITE)
		pi_do_claimed(pf_current->pi, do_pf_write);
	else {
		pf_busy = 0;
		pf_end_request(-EIO);
		goto repeat;
	}
}
/*
 * Advance the transfer state by one 512-byte sector.  When the current
 * request segment is exhausted, complete it (under pf_spin_lock) and
 * move to the next segment of the same request.
 *
 * Returns 1 when the whole cluster is done or no request remains,
 * 0 when more sectors are still to be transferred.
 */
static int pf_next_buf(void)
{
	unsigned long saved_flags;

	pf_count--;
	pf_run--;
	pf_buf += 512;
	pf_block++;
	if (!pf_run)
		return 1;
	if (!pf_count) {
		spin_lock_irqsave(&pf_spin_lock, saved_flags);
		pf_end_request(0);
		spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
		if (!pf_req)
			return 1;
		/* restart from the next segment of the request */
		pf_count = blk_rq_cur_sectors(pf_req);
		pf_buf = pf_req->buffer;
	}
	return 0;
}
/*
 * Finish the current request with 'err', mark the engine idle and kick
 * the queue again so any pending request is started, all under
 * pf_spin_lock.
 */
static inline void next_request(int err)
{
	unsigned long saved_flags;

	spin_lock_irqsave(&pf_spin_lock, saved_flags);
	pf_end_request(err);
	pf_busy = 0;
	do_pf_request(pf_queue);
	spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
}
/* detach from the calling context - in case the spinlock is held */
static void do_pf_read(void)
{
ps_set_intr(do_pf_read_start, NULL, 0, nice);
}
static void do_pf_read_start(void)
{
pf_busy = 1;
if (pf_start(pf_current, ATAPI_READ_10, pf_block, pf_run)) {
pi_disconnect(pf_current->pi);
if (pf_retries < PF_MAX_RETRIES) {
pf_retries++;
pi_do_claimed(pf_current->pi, do_pf_read_start);
return;
}
next_request(-EIO);
return;
}
pf_mask = STAT_DRQ;
ps_set_intr(do_pf_read_drq, pf_ready, PF_TMO, nice);
}
static void do_pf_read_drq(void)
{
while (1) {
if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR,
"read block", "completion") & STAT_ERR) {
pi_disconnect(pf_current->pi);
if (pf_retries < PF_MAX_RETRIES) {
pf_req_sense(pf_current, 0);
pf_retries++;
pi_do_claimed(pf_current->pi, do_pf_read_start);
return;
}
next_request(-EIO);
return;
}
pi_read_block(pf_current->pi, pf_buf, 512);
if (pf_next_buf())
break;
}
pi_disconnect(pf_current->pi);
next_request(0);
}
static void do_pf_write(void)
{
ps_set_intr(do_pf_write_start, NULL, 0, nice);
}
static void do_pf_write_start(void)
{
pf_busy = 1;
if (pf_start(pf_current, ATAPI_WRITE_10, pf_block, pf_run)) {
pi_disconnect(pf_current->pi);
if (pf_retries < PF_MAX_RETRIES) {
pf_retries++;
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
next_request(-EIO);
return;
}
while (1) {
if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR,
"write block", "data wait") & STAT_ERR) {
pi_disconnect(pf_current->pi);
if (pf_retries < PF_MAX_RETRIES) {
pf_retries++;
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
next_request(-EIO);
return;
}
pi_write_block(pf_current->pi, pf_buf, 512);
if (pf_next_buf())
break;
}
pf_mask = 0;
ps_set_intr(do_pf_write_done, pf_ready, PF_TMO, nice);
}
/* Write completion: check final status, retry the whole command on a
 * drive error, otherwise finish the request successfully.
 */
static void do_pf_write_done(void)
{
	if (pf_wait(pf_current, STAT_BUSY, 0, "write block", "done") & STAT_ERR) {
		pi_disconnect(pf_current->pi);
		if (pf_retries < PF_MAX_RETRIES) {
			pf_retries++;
			pi_do_claimed(pf_current->pi, do_pf_write_start);
			return;
		}
		next_request(-EIO);
		return;
	}
	pi_disconnect(pf_current->pi);
	next_request(0);
}
/* Module init: probe the units, register the block device, create the
 * shared request queue and add a gendisk for each detected unit.
 *
 * The error unwind (put_disk() for every unit, optionally preceded by
 * unregister_blkdev()) was duplicated in both failure paths; it is now
 * expressed once with the usual goto-based cleanup.  Return codes are
 * unchanged: -EINVAL (disabled), -ENODEV (nothing found), -EBUSY
 * (major taken), -ENOMEM (queue allocation failed), 0 on success.
 */
static int __init pf_init(void)
{				/* preliminary initialisation */
	struct pf_unit *pf;
	int unit;
	int err;

	if (disable)
		return -EINVAL;

	pf_init_units();

	if (pf_detect())
		return -ENODEV;
	pf_busy = 0;

	err = -EBUSY;
	if (register_blkdev(major, name))
		goto out_put_disks;

	err = -ENOMEM;
	pf_queue = blk_init_queue(do_pf_request, &pf_spin_lock);
	if (!pf_queue)
		goto out_unregister;

	blk_queue_max_segments(pf_queue, cluster);

	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
		struct gendisk *disk = pf->disk;

		if (!pf->present)
			continue;
		disk->private_data = pf;
		disk->queue = pf_queue;
		add_disk(disk);
	}
	return 0;

out_unregister:
	unregister_blkdev(major, name);
out_put_disks:
	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
		put_disk(pf->disk);
	return err;
}
/* Module unload: unregister the block device, remove and release every
 * present unit, then tear down the shared request queue.
 *
 * NOTE(review): gendisks of units that were probed but not 'present'
 * are skipped here and never put_disk()'d -- possible leak, confirm
 * against pf_init_units()/pf_init().
 */
static void __exit pf_exit(void)
{
	struct pf_unit *pf;
	int unit;

	unregister_blkdev(major, name);
	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
		if (!pf->present)
			continue;
		del_gendisk(pf->disk);
		put_disk(pf->disk);
		pi_release(pf->pi);
	}
	blk_cleanup_queue(pf_queue);
}
MODULE_LICENSE("GPL");
module_init(pf_init)
module_exit(pf_exit)
| gpl-2.0 |
lyfest/android_kernel_samsung_sltechn | drivers/net/hamradio/scc.c | 4923 | 55500 | #define RCS_ID "$Id: scc.c,v 1.75 1998/11/04 15:15:01 jreuter Exp jreuter $"
#define VERSION "3.0"
/*
* Please use z8530drv-utils-3.0 with this version.
* ------------------
*
* You can find a subset of the documentation in
* Documentation/networking/z8530drv.txt.
*/
/*
********************************************************************
* SCC.C - Linux driver for Z8530 based HDLC cards for AX.25 *
********************************************************************
********************************************************************
Copyright (c) 1993, 2000 Joerg Reuter DL1BKE
portions (c) 1993 Guido ten Dolle PE1NNZ
********************************************************************
The driver and the programs in the archive are UNDER CONSTRUCTION.
The code is likely to fail, and so could your kernel ---
or even a whole network.
This driver is intended for Amateur Radio use. If you are running it
for commercial purposes, please drop me a note. I am nosy...
...BUT:
! You m u s t recognize the appropriate legislations of your country !
! before you connect a radio to the SCC board and start to transmit or !
! receive. The GPL allows you to use the d r i v e r, NOT the RADIO! !
For non-Amateur-Radio use please note that you might need a special
allowance/licence from the designer of the SCC Board and/or the
MODEM.
This program is free software; you can redistribute it and/or modify
it under the terms of the (modified) GNU General Public License
delivered with the Linux kernel source.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should find a copy of the GNU General Public License in
/usr/src/linux/COPYING;
********************************************************************
Incomplete history of z8530drv:
-------------------------------
1994-09-13 started to write the driver, rescued most of my own
code (and Hans Alblas' memory buffer pool concept) from
an earlier project "sccdrv" which was initiated by
Guido ten Dolle. Not much of the old driver survived,
though. The first version I put my hands on was sccdrv1.3
from August 1993. The memory buffer pool concept
appeared in an unauthorized sccdrv version (1.5) from
August 1994.
1995-01-31 changed copyright notice to GPL without limitations.
.
. <SNIP>
.
1996-10-05 New semester, new driver...
* KISS TNC emulator removed (TTY driver)
* Source moved to drivers/net/
* Includes Z8530 defines from drivers/net/z8530.h
* Uses sk_buffer memory management
* Reduced overhead of /proc/net/z8530drv output
* Streamlined quite a lot things
* Invents brand new bugs... ;-)
The move to version number 3.0 reflects theses changes.
You can use 'kissbridge' if you need a KISS TNC emulator.
1996-12-13 Fixed for Linux networking changes. (G4KLX)
1997-01-08 Fixed the remaining problems.
1997-04-02 Hopefully fixed the problems with the new *_timer()
routines, added calibration code.
1997-10-12 Made SCC_DELAY a CONFIG option, added CONFIG_SCC_TRXECHO
1998-01-29 Small fix to avoid lock-up on initialization
1998-09-29 Fixed the "grouping" bugs, tx_inhibit works again,
using dev->tx_queue_len now instead of MAXQUEUE now.
1998-10-21 Postponed the spinlock changes, would need a lot of
testing I currently don't have the time to. Softdcd doesn't
work.
1998-11-04 Softdcd does not work correctly in DPLL mode, in fact it
never did. The DPLL locks on noise, the SYNC unit sees
flags that aren't... Restarting the DPLL does not help
either, it resynchronizes too slow and the first received
frame gets lost.
2000-02-13 Fixed for new network driver interface changes, still
does TX timeouts itself since it uses its own queue
scheme.
Thanks to all who contributed to this driver with ideas and bug
reports!
NB -- if you find errors, change something, please let me know
first before you distribute it... And please don't touch
the version number. Just replace my callsign in
"v3.0.dl1bke" with your own. Just to avoid confusion...
If you want to add your modification to the linux distribution
please (!) contact me first.
New versions of the driver will be announced on the linux-hams
mailing list on vger.kernel.org. To subscribe send an e-mail
to majordomo@vger.kernel.org with the following line in
the body of the mail:
subscribe linux-hams
The content of the "Subject" field will be ignored.
vy 73,
Joerg Reuter ampr-net: dl1bke@db0pra.ampr.org
AX-25 : DL1BKE @ DB0ABH.#BAY.DEU.EU
Internet: jreuter@yaina.de
www : http://yaina.de/jreuter
*/
/* ----------------------------------------------------------------------- */
#undef SCC_LDELAY /* slow it even a bit more down */
#undef SCC_DONT_CHECK /* don't look if the SCCs you specified are available */
#define SCC_MAXCHIPS 4 /* number of max. supported chips */
#define SCC_BUFSIZE 384 /* must not exceed 4096 */
#undef SCC_DEBUG
#define SCC_DEFAULT_CLOCK 4915200
/* default pclock if nothing is specified */
/* ----------------------------------------------------------------------- */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/init.h>
#include <linux/scc.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <net/net_namespace.h>
#include <net/ax25.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include "z8530.h"
static const char banner[] __initdata = KERN_INFO \
"AX.25: Z8530 SCC driver version "VERSION".dl1bke\n";
static void t_dwait(unsigned long);
static void t_txdelay(unsigned long);
static void t_tail(unsigned long);
static void t_busy(unsigned long);
static void t_maxkeyup(unsigned long);
static void t_idle(unsigned long);
static void scc_tx_done(struct scc_channel *);
static void scc_start_tx_timer(struct scc_channel *, void (*)(unsigned long), unsigned long);
static void scc_start_maxkeyup(struct scc_channel *);
static void scc_start_defer(struct scc_channel *);
static void z8530_init(void);
static void init_channel(struct scc_channel *scc);
static void scc_key_trx (struct scc_channel *scc, char tx);
static void scc_init_timer(struct scc_channel *scc);
static int scc_net_alloc(const char *name, struct scc_channel *scc);
static void scc_net_setup(struct net_device *dev);
static int scc_net_open(struct net_device *dev);
static int scc_net_close(struct net_device *dev);
static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb);
static netdev_tx_t scc_net_tx(struct sk_buff *skb,
struct net_device *dev);
static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int scc_net_set_mac_address(struct net_device *dev, void *addr);
static struct net_device_stats * scc_net_get_stats(struct net_device *dev);
static unsigned char SCC_DriverName[] = "scc";
static struct irqflags { unsigned char used : 1; } Ivec[NR_IRQS];
static struct scc_channel SCC_Info[2 * SCC_MAXCHIPS]; /* information per channel */
static struct scc_ctrl {
io_port chan_A;
io_port chan_B;
int irq;
} SCC_ctrl[SCC_MAXCHIPS+1];
static unsigned char Driver_Initialized;
static int Nchips;
static io_port Vector_Latch;
/* ******************************************************************** */
/* * Port Access Functions * */
/* ******************************************************************** */
/* These provide interrupt save 2-step access to the Z8530 registers */
static DEFINE_SPINLOCK(iolock); /* Guards paired accesses */
/* Read SCC register 'reg' through the two-step pointer/data access.
 * The whole sequence is guarded by 'iolock' so no concurrent access
 * can slip in between the pointer write and the data read.
 */
static inline unsigned char InReg(io_port port, unsigned char reg)
{
	unsigned long flags;
	unsigned char r;

	spin_lock_irqsave(&iolock, flags);
#ifdef SCC_LDELAY
	/* slow hardware: pause after each bus cycle */
	Outb(port, reg);
	udelay(SCC_LDELAY);
	r=Inb(port);
	udelay(SCC_LDELAY);
#else
	Outb(port, reg);
	r=Inb(port);
#endif
	spin_unlock_irqrestore(&iolock, flags);
	return r;
}
/* Write 'val' to SCC register 'reg' through the two-step pointer/data
 * access, serialised by 'iolock'.
 */
static inline void OutReg(io_port port, unsigned char reg, unsigned char val)
{
	unsigned long flags;

	spin_lock_irqsave(&iolock, flags);
#ifdef SCC_LDELAY
	Outb(port, reg); udelay(SCC_LDELAY);
	Outb(port, val); udelay(SCC_LDELAY);
#else
	Outb(port, reg);
	Outb(port, val);
#endif
	spin_unlock_irqrestore(&iolock, flags);
}
/* Write 'val' to write register 'reg' and keep a shadow copy in
 * scc->wreg[] (the Z8530 write registers cannot be read back).
 */
static inline void wr(struct scc_channel *scc, unsigned char reg,
		      unsigned char val)
{
	OutReg(scc->ctrl, reg, (scc->wreg[reg] = val));
}
/* Set bits 'val' in write register 'reg' (shadow copy updated too). */
static inline void or(struct scc_channel *scc, unsigned char reg, unsigned char val)
{
	OutReg(scc->ctrl, reg, (scc->wreg[reg] |= val));
}
/* Clear bits 'val' in write register 'reg' (shadow copy updated too). */
static inline void cl(struct scc_channel *scc, unsigned char reg, unsigned char val)
{
	OutReg(scc->ctrl, reg, (scc->wreg[reg] &= ~val));
}
/* ******************************************************************** */
/* * Some useful macros * */
/* ******************************************************************** */
/* Drop the frame currently being transmitted and everything still
 * queued on this channel.  Takes the channel lock itself.
 */
static inline void scc_discard_buffers(struct scc_channel *scc)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&scc->lock, flags);

	skb = scc->tx_buff;
	scc->tx_buff = NULL;
	if (skb != NULL)
		dev_kfree_skb(skb);

	while ((skb = skb_dequeue(&scc->tx_queue)) != NULL)
		dev_kfree_skb(skb);

	spin_unlock_irqrestore(&scc->lock, flags);
}
/* ******************************************************************** */
/* * Interrupt Service Routines * */
/* ******************************************************************** */
/* ----> subroutines for the interrupt handlers <---- */
/* In "optima" duplex mode, report a hardware event (DCD change etc.)
 * upstream as a two-byte pseudo-KISS frame; a no-op in all other
 * duplex modes.  Allocation failures are only counted.
 */
static inline void scc_notify(struct scc_channel *scc, int event)
{
	struct sk_buff *skb;
	char *bp;

	if (scc->kiss.fulldup != KISS_DUPLEX_OPTIMA)
		return;

	skb = dev_alloc_skb(2);
	if (skb == NULL) {
		scc->stat.nospace++;
		return;
	}

	bp = skb_put(skb, 2);
	bp[0] = PARAM_HWEVENT;
	bp[1] = event;
	scc_net_rx(scc, skb);
}
static inline void flush_rx_FIFO(struct scc_channel *scc)
{
int k;
for (k=0; k<3; k++)
Inb(scc->data);
if(scc->rx_buff != NULL) /* did we receive something? */
{
scc->stat.rxerrs++; /* then count it as an error */
dev_kfree_skb_irq(scc->rx_buff);
scc->rx_buff = NULL;
}
}
/* (Re)arm the receiver: restart the DPLL search (unless the channel is
 * externally clocked) and enable the receiver in hunt mode.
 */
static void start_hunt(struct scc_channel *scc)
{
	if (scc->modem.clocksrc != CLK_EXTERNAL)
		/* DPLL: enter search mode */
		OutReg(scc->ctrl, R14, SEARCH | scc->wreg[R14]);

	or(scc, R3, ENT_HM | RxENABLE);	/* enable the receiver, hunt mode */
}
/* ----> four different interrupt handlers for Tx, Rx, changing of */
/* DCD/CTS and Rx/Tx errors */
/* Transmitter interrupt handler */
/* Transmitter interrupt: push the next octet of the current frame into
 * the SCC data register; when the frame is exhausted let the CRC out,
 * and when the queue runs empty finish keyup via scc_tx_done().  Runs
 * under scc->lock (see scc_isr_dispatch()).
 */
static inline void scc_txint(struct scc_channel *scc)
{
	struct sk_buff *skb;

	scc->stat.txints++;
	skb = scc->tx_buff;

	/* send first octet */

	if (skb == NULL)
	{
		skb = skb_dequeue(&scc->tx_queue);
		scc->tx_buff = skb;
		netif_wake_queue(scc->dev);

		if (skb == NULL)
		{
			scc_tx_done(scc);
			Outb(scc->ctrl, RES_Tx_P);
			return;
		}

		if (skb->len == 0)		/* Paranoia... */
		{
			dev_kfree_skb_irq(skb);
			scc->tx_buff = NULL;
			scc_tx_done(scc);
			Outb(scc->ctrl, RES_Tx_P);
			return;
		}

		scc->stat.tx_state = TXS_ACTIVE;

		OutReg(scc->ctrl, R0, RES_Tx_CRC);
		/* reset CRC generator */
		or(scc,R10,ABUNDER);		/* re-install underrun protection */
		Outb(scc->data,*skb->data);	/* send byte */
		skb_pull(skb, 1);

		if (!scc->enhanced)		/* reset EOM latch */
			Outb(scc->ctrl,RES_EOM_L);
		return;
	}

	/* End Of Frame... */

	if (skb->len == 0)
	{
		Outb(scc->ctrl, RES_Tx_P);	/* reset pending int */
		cl(scc, R10, ABUNDER);		/* send CRC */
		dev_kfree_skb_irq(skb);
		scc->tx_buff = NULL;
		scc->stat.tx_state = TXS_NEWFRAME; /* next frame... */
		return;
	}

	/* send octet */

	Outb(scc->data,*skb->data);
	skb_pull(skb, 1);
}
/* External/Status interrupt handler */
/* External/Status interrupt: handles received ABORTs, software DCD
 * (SYNC/HUNT bit), hardware DCD transitions and transmit underruns.
 * Runs under scc->lock.
 */
static inline void scc_exint(struct scc_channel *scc)
{
	unsigned char status,changes,chg_and_stat;

	scc->stat.exints++;

	status = InReg(scc->ctrl,R0);
	changes = status ^ scc->status;		/* bits that flipped */
	chg_and_stat = changes & status;	/* bits that flipped to 1 */

	/* ABORT: generated whenever DCD drops while receiving */

	if (chg_and_stat & BRK_ABRT)		/* Received an ABORT */
		flush_rx_FIFO(scc);

	/* HUNT: software DCD; on = waiting for SYNC, off = receiving frame */

	if ((changes & SYNC_HUNT) && scc->kiss.softdcd)
	{
		if (status & SYNC_HUNT)
		{
			scc->dcd = 0;
			flush_rx_FIFO(scc);
			if ((scc->modem.clocksrc != CLK_EXTERNAL))
				OutReg(scc->ctrl,R14,SEARCH|scc->wreg[R14]); /* DPLL: enter search mode */
		} else {
			scc->dcd = 1;
		}

		scc_notify(scc, scc->dcd? HWEV_DCD_OFF:HWEV_DCD_ON);
	}

	/* DCD: on = start to receive packet, off = ABORT condition */
	/* (a successfully received packet generates a special condition int) */

	if((changes & DCD) && !scc->kiss.softdcd) /* DCD input changed state */
	{
		if(status & DCD)		/* DCD is now ON */
		{
			start_hunt(scc);
			scc->dcd = 1;
		} else {			/* DCD is now OFF */
			cl(scc,R3,ENT_HM|RxENABLE); /* disable the receiver */
			flush_rx_FIFO(scc);
			scc->dcd = 0;
		}

		scc_notify(scc, scc->dcd? HWEV_DCD_ON:HWEV_DCD_OFF);
	}

#ifdef notdef
	/* CTS: use external TxDelay (what's that good for?!)
	 * Anyway: If we _could_ use it (BayCom USCC uses CTS for
	 * own purposes) we _should_ use the "autoenable" feature
	 * of the Z8530 and not this interrupt...
	 */

	if (chg_and_stat & CTS)			/* CTS is now ON */
	{
		if (scc->kiss.txdelay == 0)	/* zero TXDELAY = wait for CTS */
			scc_start_tx_timer(scc, t_txdelay, 0);
	}
#endif

	if (scc->stat.tx_state == TXS_ACTIVE && (status & TxEOM))
	{
		scc->stat.tx_under++;		/* oops, an underrun! count 'em */
		Outb(scc->ctrl, RES_EXT_INT);	/* reset ext/status interrupts */

		if (scc->tx_buff != NULL)
		{
			dev_kfree_skb_irq(scc->tx_buff);
			scc->tx_buff = NULL;
		}

		or(scc,R10,ABUNDER);
		scc_start_tx_timer(scc, t_txdelay, 0);	/* restart transmission */
	}

	scc->status = status;
	Outb(scc->ctrl,RES_EXT_INT);
}
/* Receiver interrupt handler */
/* Receiver interrupt: store one received octet into the rx skb,
 * allocating a fresh buffer (with a leading KISS type byte) when a
 * new frame starts.  Oversized frames and our own halfduplex echo
 * are discarded.  Runs under scc->lock.
 */
static inline void scc_rxint(struct scc_channel *scc)
{
	struct sk_buff *skb;

	scc->stat.rxints++;

	if((scc->wreg[5] & RTS) && scc->kiss.fulldup == KISS_DUPLEX_HALF)
	{
		Inb(scc->data);		/* discard char */
		or(scc,R3,ENT_HM);	/* enter hunt mode for next flag */
		return;
	}

	skb = scc->rx_buff;

	if (skb == NULL)
	{
		skb = dev_alloc_skb(scc->stat.bufsize);
		if (skb == NULL)
		{
			scc->dev_stat.rx_dropped++;
			scc->stat.nospace++;
			Inb(scc->data);
			or(scc, R3, ENT_HM);
			return;
		}

		scc->rx_buff = skb;
		*(skb_put(skb, 1)) = 0;	/* KISS data */
	}

	if (skb->len >= scc->stat.bufsize)
	{
#ifdef notdef
		printk(KERN_DEBUG "z8530drv: oops, scc_rxint() received huge frame...\n");
#endif
		dev_kfree_skb_irq(skb);
		scc->rx_buff = NULL;
		Inb(scc->data);
		or(scc, R3, ENT_HM);
		return;
	}

	*(skb_put(skb, 1)) = Inb(scc->data);
}
/* Receive Special Condition interrupt handler */
/* Receive Special Condition interrupt: end-of-frame and overrun
 * handling.  A good frame (CRC ok, ends on an 8-bit boundary,
 * non-empty) is passed upstream minus the first CRC byte; everything
 * else is dropped.  Runs under scc->lock.
 */
static inline void scc_spint(struct scc_channel *scc)
{
	unsigned char status;
	struct sk_buff *skb;

	scc->stat.spints++;

	status = InReg(scc->ctrl,R1);		/* read receiver status */

	Inb(scc->data);				/* throw away Rx byte */
	skb = scc->rx_buff;

	if(status & Rx_OVR)			/* receiver overrun */
	{
		scc->stat.rx_over++;		/* count them */
		or(scc,R3,ENT_HM);		/* enter hunt mode for next flag */

		if (skb != NULL)
			dev_kfree_skb_irq(skb);
		scc->rx_buff = skb = NULL;
	}

	if(status & END_FR && skb != NULL)	/* end of frame */
	{
		/* CRC okay, frame ends on 8 bit boundary and received something ? */

		if (!(status & CRC_ERR) && (status & 0xe) == RES8 && skb->len > 0)
		{
			/* ignore last received byte (first of the CRC bytes) */
			skb_trim(skb, skb->len-1);
			scc_net_rx(scc, skb);
			scc->rx_buff = NULL;
			scc->stat.rxframes++;
		} else {			/* a bad frame */
			dev_kfree_skb_irq(skb);
			scc->rx_buff = NULL;
			scc->stat.rxerrs++;
		}
	}

	Outb(scc->ctrl,ERR_RES);
}
/* ----> interrupt service routine for the Z8530 <---- */
/* Dispatch one interrupt source (decoded from the read vector) to the
 * matching per-channel handler, under the channel lock.
 */
static void scc_isr_dispatch(struct scc_channel *scc, int vector)
{
	spin_lock(&scc->lock);
	switch (vector & VECTOR_MASK)
	{
		case TXINT: scc_txint(scc); break;
		case EXINT: scc_exint(scc); break;
		case RXINT: scc_rxint(scc); break;
		case SPINT: scc_spint(scc); break;
	}
	spin_unlock(&scc->lock);
}
/* If the card has a latch for the interrupt vector (like the PA0HZP card)
use it to get the number of the chip that generated the int.
If not: poll all defined chips.
*/
#define SCC_IRQTIMEOUT 30000
/* Top level interrupt handler.  With a vector latch (PA0HZP style
 * cards) the latch directly yields the interrupting channel's vector;
 * otherwise every attached chip on this IRQ line is polled through
 * its interrupt-pending register.  Both loops are bounded by
 * SCC_IRQTIMEOUT to survive stuck hardware.
 */
static irqreturn_t scc_isr(int irq, void *dev_id)
{
	int chip_irq = (long) dev_id;
	unsigned char vector;
	struct scc_channel *scc;
	struct scc_ctrl *ctrl;
	int k;

	if (Vector_Latch)
	{
		for(k=0; k < SCC_IRQTIMEOUT; k++)
		{
			Outb(Vector_Latch, 0);      /* Generate INTACK */

			/* Read the vector */
			if((vector=Inb(Vector_Latch)) >= 16 * Nchips) break;
			if (vector & 0x01) break;	/* no int pending */

			scc=&SCC_Info[vector >> 3 ^ 0x01]; /* vector -> channel */
			if (!scc->dev) break;

			scc_isr_dispatch(scc, vector);

			OutReg(scc->ctrl,R0,RES_H_IUS);	      /* Reset Highest IUS */
		}

		if (k == SCC_IRQTIMEOUT)
			printk(KERN_WARNING "z8530drv: endless loop in scc_isr()?\n");
		return IRQ_HANDLED;
	}

	/* Find the SCC generating the interrupt by polling all attached SCCs
	 * reading RR3A (the interrupt pending register)
	 */
	ctrl = SCC_ctrl;
	while (ctrl->chan_A)
	{
		if (ctrl->irq != chip_irq)
		{
			ctrl++;
			continue;
		}

		scc = NULL;
		for (k = 0; InReg(ctrl->chan_A,R3) && k < SCC_IRQTIMEOUT; k++)
		{
			vector=InReg(ctrl->chan_B,R2);	/* Read the vector */
			if (vector & 0x01) break;

			scc = &SCC_Info[vector >> 3 ^ 0x01];
			if (!scc->dev) break;

			scc_isr_dispatch(scc, vector);
		}

		if (k == SCC_IRQTIMEOUT)
		{
			printk(KERN_WARNING "z8530drv: endless loop in scc_isr()?!\n");
			break;
		}

		/* This looks weird and it is. At least the BayCom USCC doesn't
		 * use the Interrupt Daisy Chain, thus we'll have to start
		 * all over again to be sure not to miss an interrupt from
		 * (any of) the other chip(s)...
		 * Honestly, the situation *is* braindamaged...
		 */
		if (scc != NULL)
		{
			OutReg(scc->ctrl,R0,RES_H_IUS);
			ctrl = SCC_ctrl;
		} else
			ctrl++;
	}
	return IRQ_HANDLED;
}
/* ******************************************************************** */
/* * Init Channel */
/* ******************************************************************** */
/* ----> set SCC channel speed <---- */
/* Program the baudrate generator time constant 'tc'; the BRG is
 * disabled while R12/R13 are rewritten.
 */
static inline void set_brg(struct scc_channel *scc, unsigned int tc)
{
	cl(scc,R14,BRENABL);		/* disable baudrate generator */
	wr(scc,R12,tc & 255);		/* brg rate LOW */
	wr(scc,R13,tc >> 8);		/* brg rate HIGH */
	or(scc,R14,BRENABL);		/* enable baudrate generator */
}
/* Recompute and program the BRG time constant for receive (speed * 64
 * oversampling with the DPLL), under the channel lock.
 */
static inline void set_speed(struct scc_channel *scc)
{
	unsigned long flags;

	spin_lock_irqsave(&scc->lock, flags);

	if (scc->modem.speed > 0)	/* paranoia... */
		set_brg(scc, (unsigned) (scc->clock / (scc->modem.speed * 64)) - 2);

	spin_unlock_irqrestore(&scc->lock, flags);
}
/* ----> initialize a SCC channel <---- */
/* Clock the BRG from PCLK, feed the DPLL from the BRG and put the
 * DPLL into NRZI mode.
 */
static inline void init_brg(struct scc_channel *scc)
{
	wr(scc, R14, BRSRC);				/* BRG source = PCLK */
	OutReg(scc->ctrl, R14, SSBR|scc->wreg[R14]);	/* DPLL source = BRG */
	OutReg(scc->ctrl, R14, SNRZI|scc->wreg[R14]);	/* DPLL NRZI mode */
}
/*
* Initialization according to the Z8530 manual (SGS-Thomson's version):
*
* 1. Modes and constants
*
* WR9 11000000 chip reset
* WR4 XXXXXXXX Tx/Rx control, async or sync mode
* WR1 0XX00X00 select W/REQ (optional)
* WR2 XXXXXXXX program interrupt vector
* WR3 XXXXXXX0 select Rx control
* WR5 XXXX0XXX select Tx control
* WR6 XXXXXXXX sync character
* WR7 XXXXXXXX sync character
* WR9 000X0XXX select interrupt control
* WR10 XXXXXXXX miscellaneous control (optional)
* WR11 XXXXXXXX clock control
* WR12 XXXXXXXX time constant lower byte (optional)
* WR13 XXXXXXXX time constant upper byte (optional)
* WR14 XXXXXXX0 miscellaneous control
* WR14 XXXSSSSS commands (optional)
*
* 2. Enables
*
* WR14 000SSSS1 baud rate enable
* WR3 SSSSSSS1 Rx enable
* WR5 SSSS1SSS Tx enable
* WR0 10000000 reset Tx CRG (optional)
* WR1 XSS00S00 DMA enable (optional)
*
* 3. Interrupt status
*
* WR15 XXXXXXXX enable external/status
* WR0 00010000 reset external status
* WR0 00010000 reset external status twice
* WR1 SSSXXSXX enable Rx, Tx and Ext/status
* WR9 000SXSSS enable master interrupt enable
*
* 1 = set to one, 0 = reset to zero
* X = user defined, S = same as previous init
*
*
* Note that the implementation differs in some points from above scheme.
*
*/
/* Bring one SCC channel into SDLC (AX.25) operation: program the
 * modes, clock routing and baudrate, optionally enable the ESCC
 * extras, then enable the channel's interrupt sources.  Timers from a
 * previous session are cancelled first; the IRQ is masked during the
 * whole register sequence.
 */
static void init_channel(struct scc_channel *scc)
{
	del_timer(&scc->tx_t);
	del_timer(&scc->tx_wdog);

	disable_irq(scc->irq);

	wr(scc,R4,X1CLK|SDLC);		/* *1 clock, SDLC mode */
	wr(scc,R1,0);			/* no W/REQ operation */
	wr(scc,R3,Rx8|RxCRC_ENAB);	/* RX 8 bits/char, CRC, disabled */
	wr(scc,R5,Tx8|DTR|TxCRC_ENAB);	/* TX 8 bits/char, disabled, DTR */
	wr(scc,R6,0);			/* SDLC address zero (not used) */
	wr(scc,R7,FLAG);		/* SDLC flag value */
	wr(scc,R9,VIS);			/* vector includes status */
	wr(scc,R10,(scc->modem.nrz? NRZ : NRZI)|CRCPS|ABUNDER); /* abort on underrun, preset CRC generator, NRZ(I) */
	wr(scc,R14, 0);

/* set clock sources:

   CLK_DPLL: normal halfduplex operation

		RxClk: use DPLL
		TxClk: use DPLL
		TRxC mode DPLL output

   CLK_EXTERNAL: external clocking (G3RUH or DF9IC modem)

		BayCom:			others:

		TxClk = pin RTxC	TxClk = pin TRxC
		RxClk = pin TRxC	RxClk = pin RTxC

   CLK_DIVIDER:
		RxClk = use DPLL
		TxClk = pin RTxC

		BayCom:			others:
		pin TRxC = DPLL		pin TRxC = BRG
		(RxClk * 1)		(RxClk * 32)
*/

	switch(scc->modem.clocksrc)
	{
		case CLK_DPLL:
			wr(scc, R11, RCDPLL|TCDPLL|TRxCOI|TRxCDP);
			init_brg(scc);
			break;

		case CLK_DIVIDER:
			wr(scc, R11, ((scc->brand & BAYCOM)? TRxCDP : TRxCBR) | RCDPLL|TCRTxCP|TRxCOI);
			init_brg(scc);
			break;

		case CLK_EXTERNAL:
			wr(scc, R11, (scc->brand & BAYCOM)? RCTRxCP|TCRTxCP : RCRTxCP|TCTRxCP);
			OutReg(scc->ctrl, R14, DISDPLL);
			break;
	}

	set_speed(scc);			/* set baudrate */

	if(scc->enhanced)
	{
		or(scc,R15,SHDLCE|FIFOE);	/* enable FIFO, SDLC/HDLC Enhancements (From now R7 is R7') */
		wr(scc,R7,AUTOEOM);
	}

	if(scc->kiss.softdcd || (InReg(scc->ctrl,R0) & DCD))
						/* DCD is now ON */
	{
		start_hunt(scc);
	}

	/* enable ABORT, DCD & SYNC/HUNT interrupts */

	wr(scc,R15, BRKIE|TxUIE|(scc->kiss.softdcd? SYNCIE:DCDIE));

	Outb(scc->ctrl,RES_EXT_INT);	/* reset ext/status interrupts */
	Outb(scc->ctrl,RES_EXT_INT);	/* must be done twice */

	or(scc,R1,INT_ALL_Rx|TxINT_ENAB|EXT_INT_ENAB); /* enable interrupts */

	scc->status = InReg(scc->ctrl,R0);	/* read initial status */

	or(scc,R9,MIE);			/* master interrupt enable */

	scc_init_timer(scc);

	enable_irq(scc->irq);
}
/* ******************************************************************** */
/* * SCC timer functions * */
/* ******************************************************************** */
/* ----> scc_key_trx sets the time constant for the baudrate
generator and keys the transmitter <---- */
/* Key or unkey the transmitter (tx = TX_ON/TX_OFF).  Reprograms the
 * BRG time constant (tx clock = speed*2, rx sampling = speed*64 with
 * the DPLL), switches the clock routing for simplex DPLL operation
 * and honours kiss.tx_inhibit.  PRIMUS cards get their PTT bit via
 * the option register at ctrl+4.  IRQ is masked while registers are
 * rewritten.
 */
static void scc_key_trx(struct scc_channel *scc, char tx)
{
	unsigned int time_const;

	if (scc->brand & PRIMUS)
		Outb(scc->ctrl + 4, scc->option | (tx? 0x80 : 0));

	if (scc->modem.speed < 300)	/* sanity-clamp the baudrate */
		scc->modem.speed = 1200;

	time_const = (unsigned) (scc->clock / (scc->modem.speed * (tx? 2:64))) - 2;

	disable_irq(scc->irq);

	if (tx)
	{
		or(scc, R1, TxINT_ENAB);	/* t_maxkeyup may have reset these */
		or(scc, R15, TxUIE);
	}

	if (scc->modem.clocksrc == CLK_DPLL)
	{				/* force simplex operation */
		if (tx)
		{
#ifdef CONFIG_SCC_TRXECHO
			cl(scc, R3, RxENABLE|ENT_HM);	/* switch off receiver */
			cl(scc, R15, DCDIE|SYNCIE);	/* No DCD changes, please */
#endif
			set_brg(scc, time_const);	/* reprogram baudrate generator */

			/* DPLL -> Rx clk, BRG -> Tx CLK, TRxC mode output, TRxC = BRG */
			wr(scc, R11, RCDPLL|TCBR|TRxCOI|TRxCBR);

			/* By popular demand: tx_inhibit */
			if (scc->kiss.tx_inhibit)
			{
				or(scc,R5, TxENAB);
				scc->wreg[R5] |= RTS;
			} else {
				or(scc,R5,RTS|TxENAB);	/* set the RTS line and enable TX */
			}
		} else {
			cl(scc,R5,RTS|TxENAB);

			set_brg(scc, time_const);	/* reprogram baudrate generator */

			/* DPLL -> Rx clk, DPLL -> Tx CLK, TRxC mode output, TRxC = DPLL */
			wr(scc, R11, RCDPLL|TCDPLL|TRxCOI|TRxCDP);

#ifndef CONFIG_SCC_TRXECHO
			if (scc->kiss.softdcd)
#endif
			{
				or(scc,R15, scc->kiss.softdcd? SYNCIE:DCDIE);
				start_hunt(scc);
			}
		}
	} else {
		if (tx)
		{
#ifdef CONFIG_SCC_TRXECHO
			if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
			{
				cl(scc, R3, RxENABLE);
				cl(scc, R15, DCDIE|SYNCIE);
			}
#endif

			if (scc->kiss.tx_inhibit)
			{
				or(scc,R5, TxENAB);
				scc->wreg[R5] |= RTS;
			} else {
				or(scc,R5,RTS|TxENAB);	/* enable tx */
			}
		} else {
			cl(scc,R5,RTS|TxENAB);		/* disable tx */

			if ((scc->kiss.fulldup == KISS_DUPLEX_HALF) &&
#ifndef CONFIG_SCC_TRXECHO
			    scc->kiss.softdcd)
#else
			    1)
#endif
			{
				or(scc, R15, scc->kiss.softdcd? SYNCIE:DCDIE);
				start_hunt(scc);
			}
		}
	}

	enable_irq(scc->irq);
}
/* ----> SCC timer interrupt handler and friends. <---- */
/* (Re)arm the transmit state timer.  'when' is in units of 10 ms;
 * when == 0 calls the handler synchronously, when == TIMER_OFF leaves
 * the timer disarmed.  Caller must hold scc->lock.
 */
static void __scc_start_tx_timer(struct scc_channel *scc, void (*handler)(unsigned long), unsigned long when)
{
	del_timer(&scc->tx_t);

	if (when == 0)
	{
		handler((unsigned long) scc);
	} else
	if (when != TIMER_OFF)
	{
		scc->tx_t.data = (unsigned long) scc;
		scc->tx_t.function = handler;
		scc->tx_t.expires = jiffies + (when*HZ)/100;
		add_timer(&scc->tx_t);
	}
}
/* Locked wrapper around __scc_start_tx_timer(). */
static void scc_start_tx_timer(struct scc_channel *scc, void (*handler)(unsigned long), unsigned long when)
{
	unsigned long flags;

	spin_lock_irqsave(&scc->lock, flags);
	__scc_start_tx_timer(scc, handler, when);
	spin_unlock_irqrestore(&scc->lock, flags);
}
/* Arm the tx watchdog with t_busy(): if the channel stays deferred
 * (busy) longer than kiss.maxdefer seconds the queued frames are
 * dropped.  A value of 0 or TIMER_OFF disables the watchdog.
 */
static void scc_start_defer(struct scc_channel *scc)
{
	unsigned long flags;

	spin_lock_irqsave(&scc->lock, flags);
	del_timer(&scc->tx_wdog);

	if (scc->kiss.maxdefer != 0 && scc->kiss.maxdefer != TIMER_OFF)
	{
		scc->tx_wdog.data = (unsigned long) scc;
		scc->tx_wdog.function = t_busy;
		scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxdefer;
		add_timer(&scc->tx_wdog);
	}
	spin_unlock_irqrestore(&scc->lock, flags);
}
/* Arm the tx watchdog with t_maxkeyup(): limits continuous keyup to
 * kiss.maxkeyup seconds.  A value of 0 or TIMER_OFF disables it.
 */
static void scc_start_maxkeyup(struct scc_channel *scc)
{
	unsigned long flags;

	spin_lock_irqsave(&scc->lock, flags);
	del_timer(&scc->tx_wdog);

	if (scc->kiss.maxkeyup != 0 && scc->kiss.maxkeyup != TIMER_OFF)
	{
		scc->tx_wdog.data = (unsigned long) scc;
		scc->tx_wdog.function = t_maxkeyup;
		scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxkeyup;
		add_timer(&scc->tx_wdog);
	}
	spin_unlock_irqrestore(&scc->lock, flags);
}
/*
* This is called from scc_txint() when there are no more frames to send.
* Not exactly a timer function, but it is a close friend of the family...
*/
/* The tx queue ran empty: decide how to unkey depending on the duplex
 * mode (stay keyed until t_idle in mode 2, notify upstream in optima
 * mode, otherwise start the txtail timer) and reopen the device queue.
 */
static void scc_tx_done(struct scc_channel *scc)
{
	/*
	 * trx remains keyed in fulldup mode 2 until t_idle expires.
	 */
	switch (scc->kiss.fulldup)
	{
		case KISS_DUPLEX_LINK:
			scc->stat.tx_state = TXS_IDLE2;
			if (scc->kiss.idletime != TIMER_OFF)
				scc_start_tx_timer(scc, t_idle,
						   scc->kiss.idletime*100);
			break;
		case KISS_DUPLEX_OPTIMA:
			scc_notify(scc, HWEV_ALL_SENT);
			break;
		default:
			scc->stat.tx_state = TXS_BUSY;
			scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
	}

	netif_wake_queue(scc->dev);
}
static unsigned char Rand = 17;
/* Return nonzero if another channel in the same group blocks us:
 * for a TXGROUP member, any sibling that currently keys its
 * transmitter (RTS set); for an RXGROUP member, any sibling whose
 * DCD is active.  Channels without a group are ignored.
 */
static inline int is_grouped(struct scc_channel *scc)
{
	struct scc_channel *other;
	unsigned char mygrp = scc->kiss.group;
	int k;

	for (k = 0; k < (Nchips * 2); k++)
	{
		other = &SCC_Info[k];

		if (other == scc)
			continue;
		if (!other->dev || !other->kiss.group)
			continue;
		if ((mygrp & 0x3f) != (other->kiss.group & 0x3f))
			continue;

		if ((mygrp & TXGROUP) && (other->wreg[R5] & RTS))
			return 1;
		if ((mygrp & RXGROUP) && other->dcd)
			return 1;
	}
	return 0;
}
/* DWAIT and SLOTTIME expired
*
* fulldup == 0: DCD is active or Rand > P-persistence: start t_busy timer
* else key trx and start txdelay
* fulldup == 1: key trx and start txdelay
* fulldup == 2: mintime expired, reset status or key trx and start txdelay
*/
/* DWAIT/SLOTTIME timer: p-persistence CSMA for halfduplex channels.
 * Defers (and re-arms itself) while the channel is busy, otherwise
 * keys the transmitter and starts the txdelay timer.
 */
static void t_dwait(unsigned long channel)
{
	struct scc_channel *scc = (struct scc_channel *) channel;

	if (scc->stat.tx_state == TXS_WAIT)	/* maxkeyup or idle timeout */
	{
		if (skb_queue_empty(&scc->tx_queue)) {	/* nothing to send */
			scc->stat.tx_state = TXS_IDLE;
			netif_wake_queue(scc->dev);	/* t_maxkeyup locked it. */
			return;
		}

		scc->stat.tx_state = TXS_BUSY;
	}

	if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
	{
		Rand = Rand * 17 + 31;		/* cheap pseudo random sequence */

		if (scc->dcd || (scc->kiss.persist) < Rand || (scc->kiss.group && is_grouped(scc)) )
		{
			/* channel busy or dice roll failed: defer one slot */
			scc_start_defer(scc);
			scc_start_tx_timer(scc, t_dwait, scc->kiss.slottime);
			return ;
		}
	}

	if ( !(scc->wreg[R5] & RTS) )
	{
		scc_key_trx(scc, TX_ON);
		scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
	} else {
		scc_start_tx_timer(scc, t_txdelay, 0);
	}
}
/* TXDELAY expired
*
* kick transmission by a fake scc_txint(scc), start 'maxkeyup' watchdog.
*/
/* TXDELAY expired: start the maxkeyup watchdog and kick transmission
 * with a faked transmitter interrupt if nothing is in flight yet.
 */
static void t_txdelay(unsigned long channel)
{
	struct scc_channel *scc = (struct scc_channel *) channel;

	scc_start_maxkeyup(scc);

	if (scc->tx_buff == NULL)
	{
		disable_irq(scc->irq);
		scc_txint(scc);
		enable_irq(scc->irq);
	}
}
/* TAILTIME expired
*
* switch off transmitter. If we were stopped by Maxkeyup restart
* transmission after 'mintime' seconds
*/
/* TAILTIME expired: unkey the transmitter.  After a maxkeyup timeout,
 * transmission restarts only after 'mintime' seconds.
 */
static void t_tail(unsigned long channel)
{
	struct scc_channel *scc = (struct scc_channel *) channel;
	unsigned long flags;

	spin_lock_irqsave(&scc->lock, flags);
	del_timer(&scc->tx_wdog);
	scc_key_trx(scc, TX_OFF);
	spin_unlock_irqrestore(&scc->lock, flags);

	if (scc->stat.tx_state == TXS_TIMEOUT)	/* we had a timeout? */
	{
		scc->stat.tx_state = TXS_WAIT;
		scc_start_tx_timer(scc, t_dwait, scc->kiss.mintime*100);
		return;
	}

	scc->stat.tx_state = TXS_IDLE;
	netif_wake_queue(scc->dev);
}
/* BUSY timeout
*
* throw away send buffers if DCD remains active too long.
*/
/* BUSY (maxdefer) watchdog: the channel stayed busy too long, so
 * throw away everything queued for transmit and return to idle.
 */
static void t_busy(unsigned long channel)
{
	struct scc_channel *scc = (struct scc_channel *) channel;

	del_timer(&scc->tx_t);
	netif_stop_queue(scc->dev);	/* don't pile on the wabbit! */

	scc_discard_buffers(scc);
	scc->stat.txerrs++;
	scc->stat.tx_state = TXS_IDLE;

	netif_wake_queue(scc->dev);
}
/* MAXKEYUP timeout
*
* this is our watchdog.
*/
/* MAXKEYUP watchdog: the transmitter was keyed too long.  Force an
 * abort of the running frame (without counting it as an underrun),
 * drop the queue and unkey via the txtail timer.
 */
static void t_maxkeyup(unsigned long channel)
{
	struct scc_channel *scc = (struct scc_channel *) channel;
	unsigned long flags;

	spin_lock_irqsave(&scc->lock, flags);

	/*
	 * let things settle down before we start to
	 * accept new data.
	 */

	netif_stop_queue(scc->dev);
	scc_discard_buffers(scc);

	del_timer(&scc->tx_t);

	cl(scc, R1, TxINT_ENAB);	/* force an ABORT, but don't */
	cl(scc, R15, TxUIE);		/* count it. */
	OutReg(scc->ctrl, R0, RES_Tx_P);

	spin_unlock_irqrestore(&scc->lock, flags);

	scc->stat.txerrs++;
	scc->stat.tx_state = TXS_TIMEOUT;

	scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
}
/* IDLE timeout
*
* in fulldup mode 2 it keys down the transmitter after 'idle' seconds
* of inactivity. We will not restart transmission before 'mintime'
* expires.
*/
/* IDLE timeout (fulldup mode 2): unkey after 'idle' seconds of
 * inactivity; transmission will not restart before 'mintime' expires.
 */
static void t_idle(unsigned long channel)
{
	struct scc_channel *scc = (struct scc_channel *) channel;

	del_timer(&scc->tx_wdog);

	scc_key_trx(scc, TX_OFF);
	if(scc->kiss.mintime)
		scc_start_tx_timer(scc, t_dwait, scc->kiss.mintime*100);
	scc->stat.tx_state = TXS_WAIT;
}
/* Reset the transmit state machine to idle, under the channel lock. */
static void scc_init_timer(struct scc_channel *scc)
{
	unsigned long flags;

	spin_lock_irqsave(&scc->lock, flags);
	scc->stat.tx_state = TXS_IDLE;
	spin_unlock_irqrestore(&scc->lock, flags);
}
/* ******************************************************************** */
/* * Set/get L1 parameters * */
/* ******************************************************************** */
/*
* this will set the "hardware" parameters through KISS commands or ioctl()
*/
#define CAST(x) (unsigned long)(x)
/* Set one KISS / Level-1 parameter.  Reached either from an in-band KISS
 * command byte (see scc_net_tx()) or from the SIOCSCCSKISS ioctl.
 * Returns 0 on success, -EINVAL for an unknown command. */
static unsigned int scc_set_param(struct scc_channel *scc, unsigned int cmd, unsigned int arg)
{
	switch (cmd)
	{
	case PARAM_TXDELAY:	scc->kiss.txdelay=arg;		break;
	case PARAM_PERSIST:	scc->kiss.persist=arg;		break;
	case PARAM_SLOTTIME:	scc->kiss.slottime=arg;		break;
	case PARAM_TXTAIL:	scc->kiss.tailtime=arg;		break;
	case PARAM_FULLDUP:	scc->kiss.fulldup=arg;		break;
	case PARAM_DTR:		break; /* does someone need this? */
	case PARAM_GROUP:	scc->kiss.group=arg;		break;
	case PARAM_IDLE:	scc->kiss.idletime=arg;		break;
	case PARAM_MIN:		scc->kiss.mintime=arg;		break;
	case PARAM_MAXKEY:	scc->kiss.maxkeyup=arg;		break;
	case PARAM_WAIT:	scc->kiss.waittime=arg;		break;
	case PARAM_MAXDEFER:	scc->kiss.maxdefer=arg;		break;
	case PARAM_TX:		scc->kiss.tx_inhibit=arg;	break;

	case PARAM_SOFTDCD:
		scc->kiss.softdcd=arg;
		if (arg)
		{
			/* software DCD: detect carrier via the sync hunt
			 * machinery instead of the DCD pin interrupt */
			or(scc, R15, SYNCIE);
			cl(scc, R15, DCDIE);
			start_hunt(scc);
		} else {
			/* hardware DCD via the DCD pin interrupt */
			or(scc, R15, DCDIE);
			cl(scc, R15, SYNCIE);
		}
		break;

	case PARAM_SPEED:
		/* KISS packs baud/100 into a single byte; larger values are
		 * taken as the literal bit rate */
		if (arg < 256)
			scc->modem.speed=arg*100;
		else
			scc->modem.speed=arg;

		if (scc->stat.tx_state == 0)	/* only switch baudrate on rx... ;-) */
			set_speed(scc);
		break;

	case PARAM_RTS:
		if ( !(scc->wreg[R5] & RTS) )
		{
			/* transmitter not keyed: key up unless told to stay off */
			if (arg != TX_OFF) {
				scc_key_trx(scc, TX_ON);
				scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
			}
		} else {
			/* transmitter keyed: TX_OFF starts the tail timer */
			if (arg == TX_OFF)
			{
				scc->stat.tx_state = TXS_BUSY;
				scc_start_tx_timer(scc, t_tail, scc->kiss.tailtime);
			}
		}
		break;

	case PARAM_HWEVENT:
		/* re-announce the current carrier state to listeners */
		scc_notify(scc, scc->dcd? HWEV_DCD_ON:HWEV_DCD_OFF);
		break;

	default:		return -EINVAL;
	}

	return 0;
}
static unsigned long scc_get_param(struct scc_channel *scc, unsigned int cmd)
{
switch (cmd)
{
case PARAM_TXDELAY: return CAST(scc->kiss.txdelay);
case PARAM_PERSIST: return CAST(scc->kiss.persist);
case PARAM_SLOTTIME: return CAST(scc->kiss.slottime);
case PARAM_TXTAIL: return CAST(scc->kiss.tailtime);
case PARAM_FULLDUP: return CAST(scc->kiss.fulldup);
case PARAM_SOFTDCD: return CAST(scc->kiss.softdcd);
case PARAM_DTR: return CAST((scc->wreg[R5] & DTR)? 1:0);
case PARAM_RTS: return CAST((scc->wreg[R5] & RTS)? 1:0);
case PARAM_SPEED: return CAST(scc->modem.speed);
case PARAM_GROUP: return CAST(scc->kiss.group);
case PARAM_IDLE: return CAST(scc->kiss.idletime);
case PARAM_MIN: return CAST(scc->kiss.mintime);
case PARAM_MAXKEY: return CAST(scc->kiss.maxkeyup);
case PARAM_WAIT: return CAST(scc->kiss.waittime);
case PARAM_MAXDEFER: return CAST(scc->kiss.maxdefer);
case PARAM_TX: return CAST(scc->kiss.tx_inhibit);
default: return NO_SUCH_PARAM;
}
}
#undef CAST
/* ******************************************************************* */
/* * Send calibration pattern * */
/* ******************************************************************* */
/* Timer callback ending a calibration run started by scc_start_calibrate():
 * key the transmitter down, restore sync registers and reopen the queue. */
static void scc_stop_calibrate(unsigned long channel)
{
	struct scc_channel *scc = (struct scc_channel *) channel;
	unsigned long flags;

	spin_lock_irqsave(&scc->lock, flags);
	del_timer(&scc->tx_wdog);		/* calibration watchdog no longer needed */
	scc_key_trx(scc, TX_OFF);
	wr(scc, R6, 0);				/* restore sync char registers ... */
	wr(scc, R7, FLAG);			/* ... back to the HDLC flag */
	Outb(scc->ctrl,RES_EXT_INT);	/* reset ext/status interrupts */
	Outb(scc->ctrl,RES_EXT_INT);	/* written twice on purpose (latched status) */

	netif_wake_queue(scc->dev);
	spin_unlock_irqrestore(&scc->lock, flags);
}
/* Key the transmitter for @duration seconds sending @pattern (loaded into
 * the sync registers) for tuning purposes; scc_stop_calibrate() runs off
 * tx_wdog when the time is up.  Pending TX frames are discarded. */
static void
scc_start_calibrate(struct scc_channel *scc, int duration, unsigned char pattern)
{
	unsigned long flags;

	spin_lock_irqsave(&scc->lock, flags);
	netif_stop_queue(scc->dev);
	scc_discard_buffers(scc);

	/* re-purpose the TX watchdog as the calibration duration timer */
	del_timer(&scc->tx_wdog);
	scc->tx_wdog.data = (unsigned long) scc;
	scc->tx_wdog.function = scc_stop_calibrate;
	scc->tx_wdog.expires = jiffies + HZ*duration;
	add_timer(&scc->tx_wdog);

	/* This doesn't seem to work. Why not? */
	wr(scc, R6, 0);
	wr(scc, R7, pattern);

	/*
	 * Don't know if this works.
	 * Damn, where is my Z8530 programming manual...?
	 */

	Outb(scc->ctrl,RES_EXT_INT);	/* reset ext/status interrupts */
	Outb(scc->ctrl,RES_EXT_INT);

	scc_key_trx(scc, TX_ON);
	spin_unlock_irqrestore(&scc->lock, flags);
}
/* ******************************************************************* */
/* * Init channel structures, special HW, etc... * */
/* ******************************************************************* */
/*
* Reset the Z8530s and setup special hardware
*/
/* One-time hardware bring-up for every configured chip: announce the
 * claimed IRQs, enable board-specific interrupt logic, then hard-reset
 * and pre-initialise each Z8530.  Sets Driver_Initialized when done. */
static void z8530_init(void)
{
	struct scc_channel *scc;
	int chip, k;
	unsigned long flags;
	char *flag;

	printk(KERN_INFO "Init Z8530 driver: %u channels, IRQ", Nchips*2);

	/* list every IRQ we managed to claim earlier */
	flag=" ";
	for (k = 0; k < nr_irqs; k++)
		if (Ivec[k].used)
		{
			printk("%s%d", flag, k);
			flag=",";
		}
	printk("\n");

	/* reset and pre-init all chips in the system */
	for (chip = 0; chip < Nchips; chip++)
	{
		scc=&SCC_Info[2*chip];
		if (!scc->ctrl) continue;	/* chip slot never configured */

		/* Special SCC cards */
		if(scc->brand & EAGLE)			/* this is an EAGLE card */
			Outb(scc->special,0x08);	/* enable interrupt on the board */

		if(scc->brand & (PC100 | PRIMUS))	/* this is a PC100/PRIMUS card */
			Outb(scc->special,scc->option);	/* set the MODEM mode (0x22) */

		/* Reset and pre-init Z8530 */
		spin_lock_irqsave(&scc->lock, flags);

		Outb(scc->ctrl, 0);
		OutReg(scc->ctrl,R9,FHWRES);		/* force hardware reset */
		udelay(100);				/* give it 'a bit' more time than required */
		wr(scc, R2, chip*16);			/* interrupt vector */
		wr(scc, R9, VIS);			/* vector includes status */
		spin_unlock_irqrestore(&scc->lock, flags);
	}

	Driver_Initialized = 1;
}
/*
* Allocate device structure, err, instance, and register driver
*/
/* Allocate and register one network device named @name for channel @scc.
 * Uses register_netdevice(), so the caller must hold RTNL (scc_init_driver()
 * takes it explicitly; the ioctl path already runs under it).
 * Returns 0 or a negative errno; on failure scc->dev is reset to NULL. */
static int scc_net_alloc(const char *name, struct scc_channel *scc)
{
	int err;
	struct net_device *dev;

	dev = alloc_netdev(0, name, scc_net_setup);	/* no private area: ml_priv points at scc */
	if (!dev)
		return -ENOMEM;

	dev->ml_priv = scc;
	scc->dev = dev;
	spin_lock_init(&scc->lock);
	init_timer(&scc->tx_t);
	init_timer(&scc->tx_wdog);

	err = register_netdevice(dev);
	if (err) {
		printk(KERN_ERR "%s: can't register network device (%d)\n",
		       name, err);
		free_netdev(dev);
		scc->dev = NULL;	/* leave the channel unclaimed again */
		return err;
	}

	return 0;
}
/* ******************************************************************** */
/* * Network driver methods * */
/* ******************************************************************** */
/* net_device callbacks shared by all SCC channel devices */
static const struct net_device_ops scc_netdev_ops = {
	.ndo_open            = scc_net_open,
	.ndo_stop	     = scc_net_close,
	.ndo_start_xmit	     = scc_net_tx,
	.ndo_set_mac_address = scc_net_set_mac_address,
	.ndo_get_stats       = scc_net_get_stats,
	.ndo_do_ioctl        = scc_net_ioctl,
};
/* ----> Initialize device <----- */

/* alloc_netdev() setup callback: fill in AX.25 link-layer defaults
 * and hook up the driver's net_device_ops. */
static void scc_net_setup(struct net_device *dev)
{
	/* link-layer identity */
	dev->type = ARPHRD_AX25;
	dev->addr_len = AX25_ADDR_LEN;
	dev->mtu = AX25_DEF_PACLEN;
	dev->hard_header_len = AX25_MAX_HEADER_LEN + AX25_BPQ_HEADER_LEN;
	memcpy(dev->dev_addr,  &ax25_defaddr, AX25_ADDR_LEN);
	memcpy(dev->broadcast, &ax25_bcast,   AX25_ADDR_LEN);

	/* driver hooks and queueing behaviour */
	dev->netdev_ops = &scc_netdev_ops;
	dev->header_ops = &ax25_header_ops;
	dev->tx_queue_len = 16;	/* should be enough... */
	dev->flags = 0;
}
/* ----> open network device <---- */

/* ndo_open: refuse until the channel was initialised via SIOCSCCCHANINI,
 * then reset the TX queue, program the chip and start the netif queue. */
static int scc_net_open(struct net_device *dev)
{
	struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;

 	if (!scc->init)
		return -EINVAL;

	scc->tx_buff = NULL;
	skb_queue_head_init(&scc->tx_queue);

	init_channel(scc);	/* program the Z8530 channel registers */

	netif_start_queue(dev);
	return 0;
}
/* ----> close network device <---- */

/* ndo_stop: silence the chip (all interrupt sources off) under the channel
 * lock, then synchronously kill both timers and drop queued frames. */
static int scc_net_close(struct net_device *dev)
{
	struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
	unsigned long flags;

	netif_stop_queue(dev);

	spin_lock_irqsave(&scc->lock, flags);
	Outb(scc->ctrl,0);	/* Make sure pointer is written */
	wr(scc,R1,0);		/* disable interrupts */
	wr(scc,R3,0);		/* receiver off */
	spin_unlock_irqrestore(&scc->lock, flags);

	/* _sync: timers may be running on another CPU */
	del_timer_sync(&scc->tx_t);
	del_timer_sync(&scc->tx_wdog);

	scc_discard_buffers(scc);

	return 0;
}
/* ----> receive frame, called from scc_rxint() <---- */

/* Hand a completed receive frame up the stack; empty frames are dropped.
 * Runs in interrupt context (hence dev_kfree_skb_irq). */
static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb)
{
	unsigned int len = skb->len;

	if (len == 0) {
		dev_kfree_skb_irq(skb);
		return;
	}

	scc->dev_stat.rx_packets++;
	scc->dev_stat.rx_bytes += len;

	skb->protocol = ax25_type_trans(skb, scc->dev);

	netif_rx(skb);
}
/* ----> transmit frame <---- */

/* ndo_start_xmit.  The first byte of every frame is a KISS command:
 * a nonzero command carries one parameter byte and is consumed here via
 * scc_set_param(); command 0 is data, which is queued (dropping the oldest
 * frame when over tx_queue_len) and, if the transmitter is idle, kicks
 * off the dwait/persistence TX state machine. */
static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
	unsigned long flags;
	char kisscmd;

	/* need at least command byte + one data byte, and it must fit the
	 * channel buffer */
	if (skb->len > scc->stat.bufsize || skb->len < 2) {
		scc->dev_stat.tx_dropped++;	/* bogus frame */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	scc->dev_stat.tx_packets++;
	scc->dev_stat.tx_bytes += skb->len;
	scc->stat.txframes++;

	kisscmd = *skb->data & 0x1f;	/* low 5 bits select the KISS command */
	skb_pull(skb, 1);

	if (kisscmd) {
		/* parameter frame: apply and discard */
		scc_set_param(scc, kisscmd, *skb->data);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&scc->lock, flags);

	if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) {
		struct sk_buff *skb_del;
		/* queue overlong: sacrifice the oldest queued frame */
		skb_del = skb_dequeue(&scc->tx_queue);
		dev_kfree_skb(skb_del);
	}
	skb_queue_tail(&scc->tx_queue, skb);
	dev->trans_start = jiffies;

	/*
	 * Start transmission if the trx state is idle or
	 * t_idle hasn't expired yet. Use dwait/persistence/slottime
	 * algorithm for normal halfduplex operation.
	 */

	if(scc->stat.tx_state == TXS_IDLE || scc->stat.tx_state == TXS_IDLE2) {
		scc->stat.tx_state = TXS_BUSY;
		if (scc->kiss.fulldup == KISS_DUPLEX_HALF)
			/* half duplex: wait, then run p-persistence */
			__scc_start_tx_timer(scc, t_dwait, scc->kiss.waittime);
		else
			/* full duplex: key up right away */
			__scc_start_tx_timer(scc, t_dwait, 0);
	}
	spin_unlock_irqrestore(&scc->lock, flags);

	return NETDEV_TX_OK;
}
/* ----> ioctl functions <---- */
/*
* SIOCSCCCFG - configure driver arg: (struct scc_hw_config *) arg
* SIOCSCCINI - initialize driver arg: ---
* SIOCSCCCHANINI - initialize channel arg: (struct scc_modem *) arg
* SIOCSCCSMEM - set memory arg: (struct scc_mem_config *) arg
* SIOCSCCGKISS - get level 1 parameter arg: (struct scc_kiss_cmd *) arg
* SIOCSCCSKISS - set level 1 parameter arg: (struct scc_kiss_cmd *) arg
* SIOCSCCGSTAT - get driver status arg: (struct scc_stat *) arg
* SIOCSCCCAL - send calib. pattern arg: (struct scc_calibrate *) arg
*/
/* Driver/channel configuration entry point (see the command table above).
 * Three phases, gated on driver/channel state:
 *   1. before z8530_init(): only SIOCSCCCFG (add a chip) / SIOCSCCINI
 *      (hardware bring-up), both requiring CAP_SYS_RAWIO;
 *   2. channel not yet initialised: only SIOCSCCCHANINI (CAP_NET_ADMIN);
 *   3. fully up: memory/KISS/statistics/calibration commands.
 * Returns 0 or a negative errno. */
static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct scc_kiss_cmd kiss_cmd;
	struct scc_mem_config memcfg;
	struct scc_hw_config hwcfg;
	struct scc_calibrate cal;
	struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
	int chan;
	/* NOTE(review): unsigned char buffer formatted with sprintf() and
	 * passed where a const char * is expected; "%s%i" can also reach
	 * IFNAMSIZ for large indices -- consider snprintf(). */
	unsigned char device_name[IFNAMSIZ];
	void __user *arg = ifr->ifr_data;

	if (!Driver_Initialized)
	{
		if (cmd == SIOCSCCCFG)	/* add a new chip: probe and register its two channels */
		{
			int found = 1;

			if (!capable(CAP_SYS_RAWIO)) return -EPERM;
			if (!arg) return -EFAULT;

			if (Nchips >= SCC_MAXCHIPS)
				return -EINVAL;

			if (copy_from_user(&hwcfg, arg, sizeof(hwcfg)))
				return -EFAULT;

			if (hwcfg.irq == 2) hwcfg.irq = 9;	/* ISA: IRQ2 cascades to IRQ9 */

			if (hwcfg.irq < 0 || hwcfg.irq >= nr_irqs)
				return -EINVAL;

			/* claim the IRQ once; shared by both channels of the chip */
			if (!Ivec[hwcfg.irq].used && hwcfg.irq)
			{
				if (request_irq(hwcfg.irq, scc_isr,
						IRQF_DISABLED, "AX.25 SCC",
						(void *)(long) hwcfg.irq))
					printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq);
				else
					Ivec[hwcfg.irq].used = 1;
			}

			/* optional interrupt vector latch port (board-specific) */
			if (hwcfg.vector_latch && !Vector_Latch) {
				if (!request_region(hwcfg.vector_latch, 1, "scc vector latch"))
					printk(KERN_WARNING "z8530drv: warning, cannot reserve vector latch port 0x%lx\n, disabled.", hwcfg.vector_latch);
				else
					Vector_Latch = hwcfg.vector_latch;
			}

			if (hwcfg.clock == 0)
				hwcfg.clock = SCC_DEFAULT_CLOCK;

#ifndef SCC_DONT_CHECK
			/* probe: write 0x55 to R13 and read it back */
			if(request_region(hwcfg.ctrl_a, 1, "scc-probe"))
			{
				disable_irq(hwcfg.irq);
				Outb(hwcfg.ctrl_a, 0);
				OutReg(hwcfg.ctrl_a, R9, FHWRES);
				udelay(100);
				OutReg(hwcfg.ctrl_a,R13,0x55);		/* is this chip really there? */
				udelay(5);

				if (InReg(hwcfg.ctrl_a,R13) != 0x55)
					found = 0;
				enable_irq(hwcfg.irq);
				release_region(hwcfg.ctrl_a, 1);
			}
			else
				found = 0;
#endif

			if (found)	/* record both channels of the new chip */
			{
				SCC_Info[2*Nchips  ].ctrl = hwcfg.ctrl_a;
				SCC_Info[2*Nchips  ].data = hwcfg.data_a;
				SCC_Info[2*Nchips  ].irq  = hwcfg.irq;
				SCC_Info[2*Nchips+1].ctrl = hwcfg.ctrl_b;
				SCC_Info[2*Nchips+1].data = hwcfg.data_b;
				SCC_Info[2*Nchips+1].irq  = hwcfg.irq;

				SCC_ctrl[Nchips].chan_A = hwcfg.ctrl_a;
				SCC_ctrl[Nchips].chan_B = hwcfg.ctrl_b;
				SCC_ctrl[Nchips].irq    = hwcfg.irq;
			}

			for (chan = 0; chan < 2; chan++)
			{
				sprintf(device_name, "%s%i", SCC_DriverName, 2*Nchips+chan);

				SCC_Info[2*Nchips+chan].special = hwcfg.special;
				SCC_Info[2*Nchips+chan].clock = hwcfg.clock;
				SCC_Info[2*Nchips+chan].brand = hwcfg.brand;
				SCC_Info[2*Nchips+chan].option = hwcfg.option;
				SCC_Info[2*Nchips+chan].enhanced = hwcfg.escc;

#ifdef SCC_DONT_CHECK
				printk(KERN_INFO "%s: data port = 0x%3.3x	control port = 0x%3.3x\n",
					device_name,
					SCC_Info[2*Nchips+chan].data,
					SCC_Info[2*Nchips+chan].ctrl);

#else
				printk(KERN_INFO "%s: data port = 0x%3.3lx	control port = 0x%3.3lx -- %s\n",
					device_name,
					chan? hwcfg.data_b : hwcfg.data_a,
					chan? hwcfg.ctrl_b : hwcfg.ctrl_a,
					found? "found" : "missing");
#endif

				if (found)
				{
					request_region(SCC_Info[2*Nchips+chan].ctrl, 1, "scc ctrl");
					request_region(SCC_Info[2*Nchips+chan].data, 1, "scc data");
					/* device 0 was already allocated at module init */
					if (Nchips+chan != 0 &&
					    scc_net_alloc(device_name,
							  &SCC_Info[2*Nchips+chan]))
						return -EINVAL;
				}
			}

			if (found) Nchips++;

			return 0;
		}

		if (cmd == SIOCSCCINI)	/* finish configuration: reset/init all chips */
		{
			if (!capable(CAP_SYS_RAWIO))
				return -EPERM;

			if (Nchips == 0)
				return -EINVAL;

			z8530_init();
			return 0;
		}

		return -EINVAL;	/* confuse the user */
	}

	if (!scc->init)
	{
		if (cmd == SIOCSCCCHANINI)	/* per-channel modem setup + KISS defaults */
		{
			if (!capable(CAP_NET_ADMIN)) return -EPERM;
			if (!arg) return -EINVAL;

			scc->stat.bufsize   = SCC_BUFSIZE;

			if (copy_from_user(&scc->modem, arg, sizeof(struct scc_modem)))
				return -EINVAL;

			/* default KISS Params */

			if (scc->modem.speed < 4800)
			{
				scc->kiss.txdelay = 36;		/* 360 ms */
				scc->kiss.persist = 42;		/* 25% persistence */	/* was 25 */
				scc->kiss.slottime = 16;	/* 160 ms */
				scc->kiss.tailtime = 4;		/* minimal reasonable value */
				scc->kiss.fulldup = 0;		/* CSMA */
				scc->kiss.waittime = 50;	/* 500 ms */
				scc->kiss.maxkeyup = 10;	/* 10 s */
				scc->kiss.mintime = 3;		/* 3 s */
				scc->kiss.idletime = 30;	/* 30 s */
				scc->kiss.maxdefer = 120;	/* 2 min */
				scc->kiss.softdcd = 0;		/* hardware dcd */
			} else {
				scc->kiss.txdelay = 10;		/* 100 ms */
				scc->kiss.persist = 64;		/* 25% persistence */	/* was 25 */
				scc->kiss.slottime = 8;		/* 160 ms */
				scc->kiss.tailtime = 1;		/* minimal reasonable value */
				scc->kiss.fulldup = 0;		/* CSMA */
				scc->kiss.waittime = 50;	/* 500 ms */
				scc->kiss.maxkeyup = 7;		/* 7 s */
				scc->kiss.mintime = 3;		/* 3 s */
				scc->kiss.idletime = 30;	/* 30 s */
				scc->kiss.maxdefer = 120;	/* 2 min */
				scc->kiss.softdcd = 0;		/* hardware dcd */
			}

			scc->tx_buff = NULL;
			skb_queue_head_init(&scc->tx_queue);
			scc->init = 1;

			return 0;
		}

		return -EINVAL;
	}

	switch(cmd)
	{
	case SIOCSCCRESERVED:
		return -ENOIOCTLCMD;

	case SIOCSCCSMEM:
		if (!capable(CAP_SYS_RAWIO)) return -EPERM;
		if (!arg || copy_from_user(&memcfg, arg, sizeof(memcfg)))
			return -EINVAL;
		scc->stat.bufsize   = memcfg.bufsize;
		return 0;

	case SIOCSCCGSTAT:
		if (!arg || copy_to_user(arg, &scc->stat, sizeof(scc->stat)))
			return -EINVAL;
		return 0;

	case SIOCSCCGKISS:
		if (!arg || copy_from_user(&kiss_cmd, arg, sizeof(kiss_cmd)))
			return -EINVAL;
		kiss_cmd.param = scc_get_param(scc, kiss_cmd.command);
		if (copy_to_user(arg, &kiss_cmd, sizeof(kiss_cmd)))
			return -EINVAL;
		return 0;

	case SIOCSCCSKISS:
		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		if (!arg || copy_from_user(&kiss_cmd, arg, sizeof(kiss_cmd)))
			return -EINVAL;
		return scc_set_param(scc, kiss_cmd.command, kiss_cmd.param);

	case SIOCSCCCAL:
		if (!capable(CAP_SYS_RAWIO)) return -EPERM;
		if (!arg || copy_from_user(&cal, arg, sizeof(cal)) || cal.time == 0)
			return -EINVAL;

		scc_start_calibrate(scc, cal.time, cal.pattern);
		return 0;

	default:
		return -ENOIOCTLCMD;
	}

	return -EINVAL;
}
/* ----> set interface callsign <---- */

/* ndo_set_mac_address: the AX.25 callsign is stored as the device's
 * hardware address; copy it verbatim. */
static int scc_net_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = (struct sockaddr *) addr;
	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
	return 0;
}
/* ----> get statistics <---- */
static struct net_device_stats *scc_net_get_stats(struct net_device *dev)
{
struct scc_channel *scc = (struct scc_channel *) dev->ml_priv;
scc->dev_stat.rx_errors = scc->stat.rxerrs + scc->stat.rx_over;
scc->dev_stat.tx_errors = scc->stat.txerrs + scc->stat.tx_under;
scc->dev_stat.rx_fifo_errors = scc->stat.rx_over;
scc->dev_stat.tx_fifo_errors = scc->stat.tx_under;
return &scc->dev_stat;
}
/* ******************************************************************** */
/* * dump statistics to /proc/net/z8530drv * */
/* ******************************************************************** */
#ifdef CONFIG_PROC_FS
/* Map a 0-based seq_file position onto the @pos'th *initialised* channel,
 * skipping uninitialised SCC_Info slots; NULL when past the end. */
static inline struct scc_channel *scc_net_seq_idx(loff_t pos)
{
	int k = 0;

	while (k < Nchips*2) {
		if (SCC_Info[k].init && pos-- == 0)
			return &SCC_Info[k];
		++k;
	}
	return NULL;
}
/* seq_file .start: position 0 is the header token, entry i is position i+1. */
static void *scc_net_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0)
		return SEQ_START_TOKEN;
	return scc_net_seq_idx(*pos - 1);
}
/* seq_file .next: resume the scan after the current element.  The array
 * index of the current channel is recovered by pointer arithmetic
 * (scc - SCC_Info); after the header token we start from slot 0. */
static void *scc_net_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	unsigned k;
	struct scc_channel *scc = v;
	++*pos;

	for (k = (v == SEQ_START_TOKEN) ? 0 : (scc - SCC_Info)+1;
	     k < Nchips*2; ++k) {
		if (SCC_Info[k].init)	/* only report configured channels */
			return &SCC_Info[k];
	}
	return NULL;
}
/* seq_file .stop: nothing to release -- no locks taken in .start */
static void scc_net_seq_stop(struct seq_file *seq, void *v)
{
}
/* seq_file .show: emit one record per channel (or the version header /
 * a diagnostic line when the driver is not up).  The comment block below
 * documents the column layout of the four data lines. */
static int scc_net_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "z8530drv-"VERSION"\n");
	} else if (!Driver_Initialized) {
		seq_puts(seq, "not initialized\n");
	} else if (!Nchips) {
		seq_puts(seq, "chips missing\n");
	} else {
		const struct scc_channel *scc = v;
		const struct scc_stat *stat = &scc->stat;
		const struct scc_kiss *kiss = &scc->kiss;


		/* dev data ctrl irq clock brand enh vector special option
		 *	baud nrz clocksrc softdcd bufsize
		 *	rxints txints exints spints
		 *	rcvd rxerrs over / xmit txerrs under / nospace bufsize
		 *	txd pers slot tail ful wait min maxk idl defr txof grp
		 *	W ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
		 *	R ## ## XX ## ## ## ## ## XX ## ## ## ## ## ## ##
		 */

		seq_printf(seq, "%s\t%3.3lx %3.3lx %d %lu %2.2x %d %3.3lx %3.3lx %d\n",
				scc->dev->name,
				scc->data, scc->ctrl, scc->irq, scc->clock, scc->brand,
				scc->enhanced, Vector_Latch, scc->special,
				scc->option);
		seq_printf(seq, "\t%lu %d %d %d %d\n",
				scc->modem.speed, scc->modem.nrz,
				scc->modem.clocksrc, kiss->softdcd,
				stat->bufsize);
		seq_printf(seq, "\t%lu %lu %lu %lu\n",
				stat->rxints, stat->txints, stat->exints, stat->spints);
		seq_printf(seq, "\t%lu %lu %d / %lu %lu %d / %d %d\n",
				stat->rxframes, stat->rxerrs, stat->rx_over,
				stat->txframes, stat->txerrs, stat->tx_under,
				stat->nospace,  stat->tx_state);

/* Shorthand for dumping the whole KISS parameter set on one line */
#define K(x) kiss->x
		seq_printf(seq, "\t%d %d %d %d %d %d %d %d %d %d %d %d\n",
				K(txdelay), K(persist), K(slottime), K(tailtime),
				K(fulldup), K(waittime), K(mintime), K(maxkeyup),
				K(idletime), K(maxdefer), K(tx_inhibit), K(group));
#undef K
#ifdef SCC_DEBUG
		{
			int reg;

			/* raw register dump: W = shadowed write regs, R = live
			 * read regs (R2/R8 skipped -- reading them has side
			 * effects / no meaning here, shown as XX) */
			seq_printf(seq, "\tW ");
			for (reg = 0; reg < 16; reg++)
				seq_printf(seq, "%2.2x ", scc->wreg[reg]);
			seq_printf(seq, "\n");

			seq_printf(seq, "\tR %2.2x %2.2x XX ", InReg(scc->ctrl,R0), InReg(scc->ctrl,R1));
			for (reg = 3; reg < 8; reg++)
				seq_printf(seq, "%2.2x ", InReg(scc->ctrl, reg));
			seq_printf(seq, "XX ");
			for (reg = 9; reg < 16; reg++)
				seq_printf(seq, "%2.2x ", InReg(scc->ctrl, reg));
			seq_printf(seq, "\n");
		}
#endif
		seq_putc(seq, '\n');
	}
        return 0;
}
/* iterator over all initialised channels for /proc/net/z8530drv */
static const struct seq_operations scc_net_seq_ops = {
	.start  = scc_net_seq_start,
	.next   = scc_net_seq_next,
	.stop   = scc_net_seq_stop,
	.show   = scc_net_seq_show,
};
/* open handler for /proc/net/z8530drv: plain seq_open, no private state */
static int scc_net_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &scc_net_seq_ops);
}
static const struct file_operations scc_net_seq_fops = {
.owner = THIS_MODULE,
.open = scc_net_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
#endif /* CONFIG_PROC_FS */
/* ******************************************************************** */
/* * Init SCC driver * */
/* ******************************************************************** */
/* Module init: register the bootstrap device "<SCC_DriverName>0" (further
 * devices appear via SIOCSCCCFG) and create the /proc/net/z8530drv entry.
 * RTNL is taken explicitly because scc_net_alloc() uses register_netdevice(). */
static int __init scc_init_driver (void)
{
	char devname[IFNAMSIZ];

	printk(banner);

	sprintf(devname,"%s0", SCC_DriverName);

	rtnl_lock();
	if (scc_net_alloc(devname, SCC_Info)) {
		rtnl_unlock();
		printk(KERN_ERR "z8530drv: cannot initialize module\n");
		return -EIO;
	}
	rtnl_unlock();

	proc_net_fops_create(&init_net, "z8530drv", 0, &scc_net_seq_fops);

	return 0;
}
/* Module exit: tear down in the reverse order of creation.
 * The special Nchips == 0 branch frees the bootstrap device registered by
 * scc_init_driver() when no hardware was ever configured; otherwise every
 * chip is hardware-reset, IRQs and I/O regions released, and all channel
 * devices unregistered. */
static void __exit scc_cleanup_driver(void)
{
	io_port ctrl;
	int k;
	struct scc_channel *scc;
	struct net_device *dev;

	if (Nchips == 0 && (dev = SCC_Info[0].dev))
	{
		unregister_netdev(dev);
		free_netdev(dev);
	}

	/* Guard against chip prattle */
	local_irq_disable();

	for (k = 0; k < Nchips; k++)
		if ( (ctrl = SCC_ctrl[k].chan_A) )
		{
			Outb(ctrl, 0);
			OutReg(ctrl,R9,FHWRES);	/* force hardware reset */
			udelay(50);
		}

	/* To unload the port must be closed so no real IRQ pending */
	for (k = 0; k < nr_irqs ; k++)
		if (Ivec[k].used) free_irq(k, NULL);

	local_irq_enable();

	/* Now clean up */

	for (k = 0; k < Nchips*2; k++)
	{
		scc = &SCC_Info[k];
		if (scc->ctrl)
		{
			release_region(scc->ctrl, 1);
			release_region(scc->data, 1);
		}
		if (scc->dev)
		{
			unregister_netdev(scc->dev);
			free_netdev(scc->dev);
		}
	}

	if (Vector_Latch)
		release_region(Vector_Latch, 1);

	proc_net_remove(&init_net, "z8530drv");
}
MODULE_AUTHOR("Joerg Reuter <jreuter@yaina.de>");
MODULE_DESCRIPTION("AX.25 Device Driver for Z8530 based HDLC cards");
MODULE_SUPPORTED_DEVICE("Z8530 based SCC cards for Amateur Radio");
MODULE_LICENSE("GPL");
module_init(scc_init_driver);
module_exit(scc_cleanup_driver);
| gpl-2.0 |
MoKee/android_kernel_cyanogen_msm8916-amss | net/dccp/ccids/lib/loss_interval.c | 4923 | 5653 | /*
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
* Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
* Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz>
* Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <net/sock.h>
#include "tfrc.h"
/* slab cache for struct tfrc_loss_interval ring entries */
static struct kmem_cache  *tfrc_lh_slab  __read_mostly;
/* Loss Interval weights from [RFC 3448, 5.4], scaled by 10 */
static const int tfrc_lh_weights[NINTERVAL] = { 10, 10, 10, 10, 8, 6, 4, 2 };
/* implements LIFO semantics on the array */
static inline u8 LIH_INDEX(const u8 ctr)
{
return LIH_SIZE - 1 - (ctr % LIH_SIZE);
}
/* the `counter' index always points at the next entry to be populated */

/* Return the most recently opened loss interval, or NULL while empty. */
static inline struct tfrc_loss_interval *tfrc_lh_peek(struct tfrc_loss_hist *lh)
{
	if (lh->counter == 0)
		return NULL;
	return lh->ring[LIH_INDEX(lh->counter - 1)];
}
/* given i with 0 <= i <= k, return I_i as per the rfc3448bis notation */

/* I_0 is the most recent (still open) interval; larger i reach further
 * back in time.  @i must be < lh->counter (checked via BUG_ON). */
static inline u32 tfrc_lh_get_interval(struct tfrc_loss_hist *lh, const u8 i)
{
	BUG_ON(i >= lh->counter);
	return lh->ring[LIH_INDEX(lh->counter - i - 1)]->li_length;
}
/*
 * 	On-demand allocation and de-allocation of entries
 */

/* Return the ring slot for the next interval, allocating it lazily from
 * the slab cache.  May return NULL on allocation failure (GFP_ATOMIC). */
static struct tfrc_loss_interval *tfrc_lh_demand_next(struct tfrc_loss_hist *lh)
{
	struct tfrc_loss_interval **slot = &lh->ring[LIH_INDEX(lh->counter)];

	if (*slot == NULL)
		*slot = kmem_cache_alloc(tfrc_lh_slab, GFP_ATOMIC);
	return *slot;
}
/* Free every allocated ring entry of @lh.  Note that lh->counter itself is
 * (re)used as the loop variable and ends up equal to LIH_SIZE afterwards;
 * the history must be re-initialised before reuse. */
void tfrc_lh_cleanup(struct tfrc_loss_hist *lh)
{
	if (!tfrc_lh_is_initialised(lh))
		return;

	for (lh->counter = 0; lh->counter < LIH_SIZE; lh->counter++)
		if (lh->ring[LIH_INDEX(lh->counter)] != NULL) {
			kmem_cache_free(tfrc_lh_slab,
					lh->ring[LIH_INDEX(lh->counter)]);
			lh->ring[LIH_INDEX(lh->counter)] = NULL;
		}
}
/* Recompute the weighted average loss interval I_mean (RFC 3448, 5.4).
 * i_tot0 weights intervals I_0..I_{k-1} (including the open one),
 * i_tot1 weights I_1..I_k (excluding it); the larger sum is used.
 * Requires at least two intervals in the history, otherwise a no-op. */
static void tfrc_lh_calc_i_mean(struct tfrc_loss_hist *lh)
{
	u32 i_i, i_tot0 = 0, i_tot1 = 0, w_tot = 0;
	int i, k = tfrc_lh_length(lh) - 1; /* k is as in rfc3448bis, 5.4 */

	if (k <= 0)
		return;

	for (i = 0; i <= k; i++) {
		i_i = tfrc_lh_get_interval(lh, i);

		if (i < k) {
			i_tot0 += i_i * tfrc_lh_weights[i];
			w_tot  += tfrc_lh_weights[i];
		}
		if (i > 0)
			i_tot1 += i_i * tfrc_lh_weights[i-1];
	}

	lh->i_mean = max(i_tot0, i_tot1) / w_tot;
}
/**
 * tfrc_lh_update_i_mean  -  Update the `open' loss interval I_0
 * @lh:  Loss Interval history to update
 * @skb: Received packet extending the open interval
 *
 * For recomputing p: returns `true' if p > p_prev  <=>  1/p < 1/p_prev
 */
u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb)
{
	struct tfrc_loss_interval *cur = tfrc_lh_peek(lh);
	u32 old_i_mean = lh->i_mean;
	s64 len;

	if (cur == NULL)			/* not initialised */
		return 0;

	/* length of the open interval if it ended at this packet's seqno */
	len = dccp_delta_seqno(cur->li_seqno, DCCP_SKB_CB(skb)->dccpd_seq) + 1;

	if (len - (s64)cur->li_length <= 0)	/* duplicate or reordered */
		return 0;

	if (SUB16(dccp_hdr(skb)->dccph_ccval, cur->li_ccval) > 4)
		/*
		 * Implements RFC 4342, 10.2:
		 * If a packet S (skb) exists whose seqno comes `after' the one
		 * starting the current loss interval (cur) and if the modulo-16
		 * distance from C(cur) to C(S) is greater than 4, consider all
		 * subsequent packets as belonging to a new loss interval. This
		 * test is necessary since CCVal may wrap between intervals.
		 */
		cur->li_is_closed = 1;

	if (tfrc_lh_length(lh) == 1)		/* due to RFC 3448, 6.3.1 */
		return 0;

	cur->li_length = len;
	tfrc_lh_calc_i_mean(lh);

	return lh->i_mean < old_i_mean;
}
/* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */

/* True when the loss lies after the start of the current interval AND
 * either that interval was explicitly closed or the CCVal distance
 * exceeds 4 (window-counter rule). */
static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur,
				     struct tfrc_rx_hist_entry *new_loss)
{
	return	dccp_delta_seqno(cur->li_seqno, new_loss->tfrchrx_seqno) > 0 &&
		(cur->li_is_closed || SUB16(new_loss->tfrchrx_ccval, cur->li_ccval) > 4);
}
/**
 * tfrc_lh_interval_add  -  Insert new record into the Loss Interval database
 * @lh:		   Loss Interval database
 * @rh:		   Receive history containing a fresh loss event
 * @calc_first_li: Caller-dependent routine to compute length of first interval
 * @sk:		   Used by @calc_first_li in caller-specific way (subtyping)
 *
 * Updates I_mean and returns 1 if a new interval has in fact been added to @lh.
 */
int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh,
			 u32 (*calc_first_li)(struct sock *), struct sock *sk)
{
	struct tfrc_loss_interval *cur = tfrc_lh_peek(lh), *new;

	/* nothing to do unless the loss opens a genuinely new interval */
	if (cur != NULL && !tfrc_lh_is_new_loss(cur, tfrc_rx_hist_loss_prev(rh)))
		return 0;

	new = tfrc_lh_demand_next(lh);
	if (unlikely(new == NULL)) {
		DCCP_CRIT("Cannot allocate/add loss record.");
		return 0;
	}

	/* the new interval starts at the first packet of the loss event */
	new->li_seqno	  = tfrc_rx_hist_loss_prev(rh)->tfrchrx_seqno;
	new->li_ccval	  = tfrc_rx_hist_loss_prev(rh)->tfrchrx_ccval;
	new->li_is_closed = 0;

	if (++lh->counter == 1)
		/* very first interval: length comes from the caller's
		 * initial-loss-rate estimate */
		lh->i_mean = new->li_length = (*calc_first_li)(sk);
	else {
		/* close the previous interval and open the new one */
		cur->li_length = dccp_delta_seqno(cur->li_seqno, new->li_seqno);
		new->li_length = dccp_delta_seqno(new->li_seqno,
				  tfrc_rx_hist_last_rcv(rh)->tfrchrx_seqno) + 1;
		/* keep counter bounded while preserving LIH_INDEX() mapping */
		if (lh->counter > (2*LIH_SIZE))
			lh->counter -= LIH_SIZE;

		tfrc_lh_calc_i_mean(lh);
	}
	return 1;
}
/* Create the loss-interval slab cache; -ENOBUFS on failure. */
int __init tfrc_li_init(void)
{
	tfrc_lh_slab = kmem_cache_create("tfrc_li_hist",
					 sizeof(struct tfrc_loss_interval), 0,
					 SLAB_HWCACHE_ALIGN, NULL);
	if (tfrc_lh_slab == NULL)
		return -ENOBUFS;
	return 0;
}
/* Destroy the loss-interval slab cache (idempotent). */
void tfrc_li_exit(void)
{
	struct kmem_cache *slab = tfrc_lh_slab;

	if (slab != NULL) {
		tfrc_lh_slab = NULL;
		kmem_cache_destroy(slab);
	}
}
| gpl-2.0 |
intervigilium/android_kernel_samsung_klte | drivers/watchdog/softdog.c | 7227 | 5343 | /*
* SoftDog: A Software Watchdog Device
*
* (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
* warranty for any of this software. This material is provided
* "AS-IS" and at no charge.
*
* (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* Software only watchdog driver. Unlike its big brother the WDT501P
* driver this won't always recover a failed machine.
*
* 03/96: Angelo Haritsis <ah@doc.ic.ac.uk> :
* Modularised.
* Added soft_margin; use upon insmod to change the timer delay.
* NB: uses same minor as wdt (WATCHDOG_MINOR); we could use separate
* minors.
*
* 19980911 Alan Cox
* Made SMP safe for 2.3.x
*
* 20011127 Joel Becker (jlbec@evilplan.org>
* Added soft_noboot; Allows testing the softdog trigger without
* requiring a recompile.
* Added WDIOC_GETTIMEOUT and WDIOC_SETTIMOUT.
*
* 20020530 Joel Becker <joel.becker@oracle.com>
* Added Matt Domsch's nowayout module option.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#define TIMER_MARGIN 60 /* Default is 60 seconds */
static unsigned int soft_margin = TIMER_MARGIN; /* in seconds */
module_param(soft_margin, uint, 0);
MODULE_PARM_DESC(soft_margin,
"Watchdog soft_margin in seconds. (0 < soft_margin < 65536, default="
__MODULE_STRING(TIMER_MARGIN) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
static int soft_noboot = 0;
module_param(soft_noboot, int, 0);
MODULE_PARM_DESC(soft_noboot,
"Softdog action, set to 1 to ignore reboots, 0 to reboot (default=0)");
static int soft_panic;
module_param(soft_panic, int, 0);
MODULE_PARM_DESC(soft_panic,
"Softdog action, set to 1 to panic, 0 to reboot (default=0)");
/*
 *	Our timer
 */

static void watchdog_fire(unsigned long);	/* forward decl: timer callback below */

/* One-shot timer re-armed by softdog_ping(); if it ever fires, the
 * watchdog was not kicked within the timeout. */
static struct timer_list watchdog_ticktock =
		TIMER_INITIALIZER(watchdog_fire, 0, 0);

/*
 *	If the timer expires..
 */
/* Timer expiry handler.  Action depends on module parameters, checked in
 * priority order: soft_noboot (log only) > soft_panic (panic) > reboot. */
static void watchdog_fire(unsigned long data)
{
	if (soft_noboot) {
		pr_crit("Triggered - Reboot ignored\n");
		return;
	}

	if (soft_panic) {
		pr_crit("Initiating panic\n");
		panic("Software Watchdog Timer expired");
	}

	pr_crit("Initiating system reboot\n");
	emergency_restart();
	/* only reached if emergency_restart() failed to restart the box */
	pr_crit("Reboot didn't ?????\n");
}
/*
 *	Softdog operations
 */

/* Kick the watchdog: (re)arm the timer for one full timeout from now.
 * Also serves as the .start operation. */
static int softdog_ping(struct watchdog_device *w)
{
	mod_timer(&watchdog_ticktock, jiffies+(w->timeout*HZ));
	return 0;
}
/* Disarm the watchdog timer.  Safe to call with w == NULL (the reboot
 * notifier does so) since w is unused. */
static int softdog_stop(struct watchdog_device *w)
{
	del_timer(&watchdog_ticktock);

	return 0;
}
/* Record the new timeout (seconds); range checking against min/max_timeout
 * is done by the watchdog core.  Takes effect on the next ping. */
static int softdog_set_timeout(struct watchdog_device *w, unsigned int t)
{
	w->timeout = t;
	return 0;
}
/*
 *	Notifier for system down
 */

/* Reboot-notifier callback: disarm the timer on shutdown/halt so it
 * cannot fire during the transition. */
static int softdog_notify_sys(struct notifier_block *this, unsigned long code,
	void *unused)
{
	switch (code) {
	case SYS_DOWN:
	case SYS_HALT:
		/* Turn the WDT off */
		softdog_stop(NULL);
		break;
	}
	return NOTIFY_DONE;
}
/*
 *	Kernel Interfaces
 */

/* hook into shutdown/halt to disarm the timer */
static struct notifier_block softdog_notifier = {
	.notifier_call	= softdog_notify_sys,
};

/* capabilities reported via WDIOC_GETSUPPORT */
static struct watchdog_info softdog_info = {
	.identity = "Software Watchdog",
	.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
};

static struct watchdog_ops softdog_ops = {
	.owner = THIS_MODULE,
	.start = softdog_ping,		/* starting == arming the timer */
	.stop = softdog_stop,
	.ping = softdog_ping,
	.set_timeout = softdog_set_timeout,
};

/* .timeout is filled in from soft_margin by watchdog_init() */
static struct watchdog_device softdog_dev = {
	.info = &softdog_info,
	.ops = &softdog_ops,
	.min_timeout = 1,
	.max_timeout = 0xFFFF
};
/* Module init: validate soft_margin, register the reboot notifier and the
 * watchdog device (unwinding the notifier if device registration fails). */
static int __init watchdog_init(void)
{
	int ret;

	/* Reject an out-of-range soft_margin and fail the load.  The old
	 * comment and message claimed we would fall back to TIMER_MARGIN,
	 * but the code has always returned -EINVAL here -- make the
	 * message tell the truth. */
	if (soft_margin < 1 || soft_margin > 65535) {
		pr_err("soft_margin must be 0 < soft_margin < 65536, not %u\n",
		       soft_margin);
		return -EINVAL;
	}
	softdog_dev.timeout = soft_margin;

	watchdog_set_nowayout(&softdog_dev, nowayout);

	ret = register_reboot_notifier(&softdog_notifier);
	if (ret) {
		pr_err("cannot register reboot notifier (err=%d)\n", ret);
		return ret;
	}

	ret = watchdog_register_device(&softdog_dev);
	if (ret) {
		/* unwind the notifier registered above */
		unregister_reboot_notifier(&softdog_notifier);
		return ret;
	}

	pr_info("Software Watchdog Timer: 0.08 initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d (nowayout=%d)\n",
		soft_noboot, soft_margin, soft_panic, nowayout);

	return 0;
}
/* Module exit: unregister the device first (stops new users), then drop
 * the reboot notifier -- reverse of watchdog_init() order. */
static void __exit watchdog_exit(void)
{
	watchdog_unregister_device(&softdog_dev);
	unregister_reboot_notifier(&softdog_notifier);
}
module_init(watchdog_init);
module_exit(watchdog_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("Software Watchdog Device Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
| gpl-2.0 |
EloYGomeZ/kernel_huawei_msm8610 | sound/soc/blackfin/bf5xx-ad193x.c | 7227 | 4011 | /*
* File: sound/soc/blackfin/bf5xx-ad193x.c
* Author: Barry Song <Barry.Song@analog.com>
*
* Created: Thur June 4 2009
* Description: Board driver for ad193x sound chip
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/pcm_params.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/portmux.h>
#include "../codecs/ad193x.h"
#include "bf5xx-tdm-pcm.h"
#include "bf5xx-tdm.h"
static struct snd_soc_card bf5xx_ad193x;
/*
 * Configure codec sysclk, TDM slot layout and CPU channel map for a new
 * stream.  Only 48 kHz maps to a non-zero system clock; any other rate
 * passes clk == 0 through to the codec, exactly as before.
 */
static int bf5xx_ad193x_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec = rtd->codec_dai;
	struct snd_soc_dai *cpu = rtd->cpu_dai;
	unsigned int slot_map[] = {0, 1, 2, 3, 4, 5, 6, 7};
	unsigned int sysclk = (params_rate(params) == 48000) ? 24576000 : 0;
	int err;

	/* Codec consumes an external 24.576 MHz clock at 48 kHz. */
	err = snd_soc_dai_set_sysclk(codec, 0, sysclk, SND_SOC_CLOCK_IN);
	if (err < 0)
		return err;

	/* Eight 32-bit TDM slots, all eight enabled for TX and RX. */
	err = snd_soc_dai_set_tdm_slot(codec, 0xFF, 0xFF, 8, 32);
	if (err < 0)
		return err;

	/* Identity channel map on the CPU DAI in both directions. */
	err = snd_soc_dai_set_channel_map(cpu, ARRAY_SIZE(slot_map),
			slot_map, ARRAY_SIZE(slot_map), slot_map);
	if (err < 0)
		return err;

	return 0;
}
/* DSP-A format, inverted BCLK and frame sync, codec is clock/frame master. */
#define BF5XX_AD193X_DAIFMT (SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_IF | \
	SND_SOC_DAIFMT_CBM_CFM)
static struct snd_soc_ops bf5xx_ad193x_ops = {
	.hw_params = bf5xx_ad193x_hw_params,
};
/*
 * One link per SPORT (bfin-tdm.0 / bfin-tdm.1); the card below selects a
 * single entry via CONFIG_SND_BF5XX_SPORT_NUM.
 */
static struct snd_soc_dai_link bf5xx_ad193x_dai[] = {
	{
		.name = "ad193x",
		.stream_name = "AD193X",
		.cpu_dai_name = "bfin-tdm.0",
		.codec_dai_name ="ad193x-hifi",
		.platform_name = "bfin-tdm-pcm-audio",
		.codec_name = "spi0.5",
		.ops = &bf5xx_ad193x_ops,
		.dai_fmt = BF5XX_AD193X_DAIFMT,
	},
	{
		.name = "ad193x",
		.stream_name = "AD193X",
		.cpu_dai_name = "bfin-tdm.1",
		.codec_dai_name ="ad193x-hifi",
		.platform_name = "bfin-tdm-pcm-audio",
		.codec_name = "spi0.5",
		.ops = &bf5xx_ad193x_ops,
		.dai_fmt = BF5XX_AD193X_DAIFMT,
	},
};
static struct snd_soc_card bf5xx_ad193x = {
	.name = "bfin-ad193x",
	.owner = THIS_MODULE,
	/* Index into the link table chosen at build time. */
	.dai_link = &bf5xx_ad193x_dai[CONFIG_SND_BF5XX_SPORT_NUM],
	.num_links = 1,
};
/* Platform device created by bf5xx_ad193x_init(), freed on module exit. */
static struct platform_device *bfxx_ad193x_snd_device;
/*
 * Module init: allocate the "soc-audio" platform device, attach the card
 * as driver data and register it.  On registration failure the device
 * reference is dropped and the error is propagated.
 */
static int __init bf5xx_ad193x_init(void)
{
	struct platform_device *pdev;
	int err;

	pdev = platform_device_alloc("soc-audio", -1);
	if (!pdev)
		return -ENOMEM;

	bfxx_ad193x_snd_device = pdev;
	platform_set_drvdata(pdev, &bf5xx_ad193x);

	err = platform_device_add(pdev);
	if (err)
		platform_device_put(pdev);

	return err;
}
/* Module exit: unregister (and thereby free) the sound platform device. */
static void __exit bf5xx_ad193x_exit(void)
{
	platform_device_unregister(bfxx_ad193x_snd_device);
}
module_init(bf5xx_ad193x_init);
module_exit(bf5xx_ad193x_exit);
/* Module information */
MODULE_AUTHOR("Barry Song");
MODULE_DESCRIPTION("ALSA SoC AD193X board driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jledet/linux | drivers/mtd/chips/chipreg.c | 9787 | 2346 | /*
* Registration for chip drivers
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
/* Protects chip_drvs_list against concurrent registration and lookup. */
static DEFINE_SPINLOCK(chip_drvs_lock);
static LIST_HEAD(chip_drvs_list);
/* Add a chip driver to the global list consulted by do_map_probe(). */
void register_mtd_chip_driver(struct mtd_chip_driver *drv)
{
	spin_lock(&chip_drvs_lock);
	list_add(&drv->list, &chip_drvs_list);
	spin_unlock(&chip_drvs_lock);
}
/* Remove a previously registered chip driver from the list. */
void unregister_mtd_chip_driver(struct mtd_chip_driver *drv)
{
	spin_lock(&chip_drvs_lock);
	list_del(&drv->list);
	spin_unlock(&chip_drvs_lock);
}
static struct mtd_chip_driver *get_mtd_chip_driver (const char *name)
{
struct list_head *pos;
struct mtd_chip_driver *ret = NULL, *this;
spin_lock(&chip_drvs_lock);
list_for_each(pos, &chip_drvs_list) {
this = list_entry(pos, typeof(*this), list);
if (!strcmp(this->name, name)) {
ret = this;
break;
}
}
if (ret && !try_module_get(ret->module))
ret = NULL;
spin_unlock(&chip_drvs_lock);
return ret;
}
/*
 * Probe a memory map with the named chip driver, loading the driver
 * module on demand.  Returns the mtd_info from the driver's probe
 * routine, or NULL if no driver could be found or the probe failed.
 */
struct mtd_info *do_map_probe(const char *name, struct map_info *map)
{
	struct mtd_chip_driver *drv;
	struct mtd_info *info;

	drv = get_mtd_chip_driver(name);

	/* Not registered yet: try to load its module, then look again. */
	if (drv == NULL && request_module("%s", name) == 0)
		drv = get_mtd_chip_driver(name);

	if (drv == NULL)
		return NULL;

	info = drv->probe(map);

	/*
	 * Release the reference taken in get_mtd_chip_driver().  A
	 * successful probe has already pinned the actual driver code,
	 * so a probe-only helper module is no longer needed here.
	 */
	module_put(drv->module);

	return info;
}
/*
 * Destroy an MTD device which was created for a map device.
 * The MTD device must already be unregistered before calling this.
 */
void map_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	/* Let the chip driver release any per-chip state it allocated. */
	if (map->fldrv->destroy)
		map->fldrv->destroy(mtd);
	/* Drop the module reference held since the probe succeeded. */
	module_put(map->fldrv->module);
	kfree(mtd);
}
EXPORT_SYMBOL(register_mtd_chip_driver);
EXPORT_SYMBOL(unregister_mtd_chip_driver);
EXPORT_SYMBOL(do_map_probe);
EXPORT_SYMBOL(map_destroy);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core routines for registering and invoking MTD chip drivers");
| gpl-2.0 |
SlimLG2/msm8974_caf_G2 | drivers/broadcast/tdmb/fc8080/src/tdmb_tunerbbdrv_fc8080.c | 60 | 42094 | /*****************************************************************************
Copyright(c) 2009 LG Electronics Inc. All Rights Reserved
File name : Tunerbb_drv_fc8080.c
Description : fc8080 made by FCI Driver Code
History :
----------------------------------------------------------------------
Dec. 25, 2009 : FCI release for LG MC
*******************************************************************************/
#include "../inc/fci_tun.h"
#include "../inc/fc8080_regs.h"
#include "../inc/fc8080_demux.h"
#include "../inc/fc8080_isr.h"
#include "../inc/bbm.h"
#include "../inc/fci_types.h"
#include "../inc/tdmb_tunerbbdrv_fc8080def.h"
#include "../inc/fci_oal.h"
#include "../inc/broadcast_fc8080.h"
#include <linux/string.h>
/*============================================================
** 1. DEFINITIONS
*============================================================*/
/* Example of Return value */
#define FC8080_RESULT_ERROR (int8) 0
#define FC8080_RESULT_SUCCESS (int8) 1
#undef FEATURE_FIC_BER
#undef FEATURE_RSSI_DEBUG
// LGE ADD
#define FREQ_SEARCH_IN_TABLE /* Freq conversion in Table Searching */
#define CH_LOW_NUM 71 /* 7A index 71 for UI */
#define CH_UPPER_NUM 133 /* 13C index 131 for UI*/
#ifdef FREQ_SEARCH_IN_TABLE
#define MAX_KOREABAND_FULL_CHANNEL 21
#define INDEX_KOR_CH_NUM_DEC 0
#define INDEX_KOR_FREQ_NUM 1
#else
#define C7A_CEN_FREQ 175280 /* 7A Center Frequency */
#define ENS_GAP_FREQ 6000 /* Frequency Gap-Interval between Other Ensemble */
#define CH_GAP_FREQ 1728 /* Channel Center frequency interval between channel number */
#define TDMB_ENS_NUM 7 /* Korea TDMB Ensemble Number 7 ~ 13 */
#endif
// LGE ADD
#define MAX_MSC_BER 20000
#define MAX_VA_BER 20000
#define DMB_SVC_ID 0
#define DAB_SVC_ID 2
#define DAT_SVC_ID 1
#define FEATURE_GET_FIC_POLLING
/* change memcpy mscBuffer -> msc_data -> buffer to mscBuffer->buffer */
#define NOT_MSCDATA_MULTIPLE_MEMCPY
/* LGE_ADD_S, [hyun118.shin@lge.com], TDMB Antenna Leveling */
#define START_SYNC_CNT 3
#define MAX_ANT_BUFF_CNT 2
/* LGE_ADD_E, [hyun118.shin@lge.com], TDMB Antenna Leveling */
uint32 tp_total_cnt=0;
/* -----------------------------------------------------------------------
< HOST Interface FEATURE Usage>
1. STREAM_TS_UPLOAD : HOST Interface between MSM and FC8080 is TSIF(I2C)
2. STREAM_SLAVE_PARALLEL_UPLOAD : HOST Interface between MSM and FC8080 is EBI2
3. STREAM_SPI_UPLOAD : HOST Interface between MSM and FC8080 is SPI
------------------------------------------------------------------------- */
/*============================================================
** 2. External Variables
*============================================================*/
/*============================================================
** 3. External Functions
*============================================================*/
#ifdef STREAM_SLAVE_PARALLEL_UPLOAD
boolean send_fic_int_sig_isr2task(void);
#endif
extern int tunerbb_drv_fc8080_fic_cb(uint32 userdata, uint8 *data, int length);
extern int tunerbb_drv_fc8080_msc_cb(uint32 userdata, uint8 subChId, uint8 *data, int length);
/*============================================================
** 4. Local constant variables
*============================================================*/
/*============================================================
** 5. Local Typedef
*============================================================*/
/* Descriptor for a FIC/MSC buffer handed from the ISR path to the host. */
typedef struct _BUFFR_TAG
{
	uint8 valid;      /* non-zero when address/length hold fresh data */
	uint32 address;   /* pointer to the payload, stored as an integer */
	uint32 length;    /* payload length in bytes */
	uint8 subch_id;
}DATA_BUFFER;
/* Per-subchannel header layout used by the FCI demux output. */
typedef struct _HEADER_TYPE
{
	uint16 length;    /* payload bytes following this header */
	uint8 subch_id;
	uint8 svc_id;
}FCI_HEADER_TYPE;
/*============================================================
** 6. Global Variables
*============================================================*/
fc8080_service_type serviceType[FC8080_SERVICE_MAX];
fci_u8 tot_subch_cnt=0;
#if !defined(STREAM_TS_UPLOAD)
DATA_BUFFER msc_buffer;
DATA_BUFFER fic_buffer;
fci_u8 g_chinfo[64];
#ifdef NOT_MSCDATA_MULTIPLE_MEMCPY
fci_u8* msc_data = NULL;
#else
fci_u8 msc_data[188*8*8];
#endif
fci_u8 msc_multi_data[188*8*8];
#endif
/*============================================================
** 7. Static Variables
*============================================================*/
//LGE ADD
#ifdef FREQ_SEARCH_IN_TABLE
static int32 gKOREnsembleFullFreqTbl[MAX_KOREABAND_FULL_CHANNEL][2] =
{
{71,175280},{72,177008},{73,178736},{81,181280},{82,183008},{83,184736},
{91,187280},{92,189008},{93,190736},{101,193280},{102,195008},{103,196736},
{111,199280},{112,201008},{113,202736},{121,205280},{122,207008},{123,208736}
,{131,211280},{132,213008},{133,214736}
};
#endif
static uint16 is_tdmb_probe = 0;
//static uint16 data_sequence_count = 0;
/* LGE_ADD_S, [hyun118.shin@lge.com], TDMB Antenna Leveling */
static uint32 antBuffIdx = 0;
static uint16 antBuff[MAX_ANT_BUFF_CNT] = {0, };
static uint8 calAntLevel = 0;
static uint8 syncLockCnt = 0;
/* LGE_ADD_E, [hyun118.shin@lge.com], TDMB Antenna Leveling */
/*============================================================
** 8. Local Function Prototype
*============================================================*/
static int32 tunerbb_drv_convert_chnum_to_freq(uint32 ch_num);
static uint32 tunerbb_drv_fc8080_get_viterbi_ber(void);
static int8 tunerbb_drv_fc8080_get_sync_status(void);
//static uint32 tunerbb_drv_fc8080_get_rs_ber(void);
static int8 tunerbb_drv_fc8080_check_overrun(uint8 op_mode);
/* LGE_ADD_S, [hyun118.shin@lge.com], TDMB Antenna Leveling */
static void tunerbb_drv_fc8080_init_antlevel_val(void);
/* LGE_ADD_E, [hyun118.shin@lge.com], TDMB Antenna Leveling */
void tunerbb_drv_fc8080_isr_control(fci_u8 onoff);
#ifdef FEATURE_RSSI_DEBUG
void tunerbb_drv_fc8080_get_dm(fci_u32 *mscber, fci_u32 *tp_err, fci_u16 *tpcnt, fci_u32 *vaber, fci_s8 *rssi);
#else
void tunerbb_drv_fc8080_get_dm(fci_u32 *mscber, fci_u32 *tp_err, fci_u16 *tpcnt, fci_u32 *vaber);
#endif
/* Thin wrapper: power the tuner via the platform layer. */
int8 tunerbb_drv_fc8080_power_on(void)
{
	return tdmb_fc8080_power_on();
}
/* Power down; also invalidates the probe flag so BER reads report failure. */
int8 tunerbb_drv_fc8080_power_off(void)
{
	is_tdmb_probe = 0;
	return tdmb_fc8080_power_off();
}
/* Forward antenna selection to the platform layer. */
int8 tunerbb_drv_fc8080_select_antenna(unsigned int sel)
{
	return tdmb_fc8080_select_antenna(sel);
}
/* Channel reset == full stop of all selected services. */
int8 tunerbb_drv_fc8080_reset_ch(void)
{
	tunerbb_drv_fc8080_stop();
	return FC8080_RESULT_SUCCESS;
}
/* No re-sync action needed on this chip; always succeeds. */
int8 tunerbb_drv_fc8080_re_syncdetector(uint8 op_mode)
{
	return FC8080_RESULT_SUCCESS;
}
/* Single-service tune: reset antenna-level state, then delegate to the
 * multi-service path with a count of one. */
int8 tunerbb_drv_fc8080_set_channel(int32 freq_num, uint8 subch_id, uint8 op_mode)
{
	int8 ret_val;
	tunerbb_drv_fc8080_init_antlevel_val();
	ret_val = tunerbb_drv_fc8080_multi_set_channel(freq_num, 1, &subch_id, &op_mode);
	return ret_val;
}
/* Forward user-stop mode to the platform layer. */
void tunerbb_drv_fc8080_set_userstop(int mode)
{
	tdmb_fc8080_set_userstop(mode);
}
/* Query whether TDMB is currently powered/active. */
int tunerbb_drv_fc8080_is_on(void)
{
	return tdmb_fc8080_tdmb_is_on();
}
// LGE ADD
/*
 * Translate a Korea-band TDMB channel number (7A..13C, encoded 71..133)
 * into its centre frequency in kHz.  Returns 0 for anything outside the
 * valid range or not present in the lookup table.
 */
static int32 tunerbb_drv_convert_chnum_to_freq(uint32 ch_num)
{
#ifdef FREQ_SEARCH_IN_TABLE
	int32 idx;
#else
	uint32 ensemble_idx = (ch_num/10-TDMB_ENS_NUM);
	uint32 subch_idx =(ch_num%10 -1);
#endif

	/* Reject anything outside 7A (71) .. 13C (133). */
	if (ch_num < CH_LOW_NUM || ch_num > CH_UPPER_NUM)
		return 0;

#ifdef FREQ_SEARCH_IN_TABLE
	/* Linear scan of the 21-entry channel/frequency table. */
	for (idx = 0; idx < MAX_KOREABAND_FULL_CHANNEL; idx++) {
		if (gKOREnsembleFullFreqTbl[idx][INDEX_KOR_CH_NUM_DEC] == (int32)ch_num)
			return gKOREnsembleFullFreqTbl[idx][INDEX_KOR_FREQ_NUM];
	}
	return 0;	/* in range but not a listed channel number */
#else
	/* Arithmetic fallback: base 7A frequency plus ensemble/channel offsets. */
	return ((C7A_CEN_FREQ + ENS_GAP_FREQ*ensemble_idx) + (CH_GAP_FREQ *subch_idx));
#endif
}
// LGE ADD
#if !defined(STREAM_TS_UPLOAD)
/*=======================================================
Function : tunerbb_drv_fc8080_fic_cb
Description : set fic data param after ISR process
Parameter :
uint32 userdata : Not Used
uint8 *data : fic buffer address
int length : fic data length
Return Value :
SUCCESS : 1
FAIL : 0 or negative interger (If there is error code)
when model who edit history
-------------------------------------------------------
2010/08/17 MOBIT somesoo Code review
======================================================== */
/* FIC ISR callback: stash the buffer location/size for the polling reader. */
int tunerbb_drv_fc8080_fic_cb(uint32 userdata, uint8 *data, int length)
{
	fic_buffer.address = (uint32)(data);
	fic_buffer.length = length;
	fic_buffer.valid = 1;
	/* FC8000-era code notified a task via send_fic_int_sig_isr2task()
	 * (see mbs_dshmain.c); polling is used today, but the hook is kept
	 * behind the feature flag for a future ISR-driven mode. */
#ifndef FEATURE_GET_FIC_POLLING
	send_fic_int_sig_isr2task();
#endif // FEATURE_GET_FIC_POLLING
	return FC8080_RESULT_SUCCESS;
}
/*--------------------------------------------------------------------------
int tunerbb_drv_fc8080_msc_cb(uint32 userdata, uint8 subChId, uint8 *data, int length)
(1) set msc data param after ISR process.
(2) Return Value
SUCCESS : 1
FAIL : 0 or negative interger (If there is error code)
(3) Argument
uint32 userdata : Not Used
uint8 subChId : Subchannel ID
uint8 *data : msc buffer address
int length : msc data length
---------------------------------------------------------------------------- */
/*
 * MSC ISR callback: prepend a TDMB_BB_HEADER_TYPE to the payload in
 * msc_data and mark msc_buffer ready for tunerbb_drv_fc8080_read_data().
 * Data type is DAB audio when the first selected service is DAB,
 * otherwise a TS stream.
 */
int tunerbb_drv_fc8080_msc_cb(uint32 userdata, uint8 subChId, uint8 *data, int length)
{
	TDMB_BB_HEADER_TYPE dmb_header;
	uint16 head_size = 0;
	dmb_header.data_type = (serviceType[0] == FC8080_DAB?TDMB_BB_DATA_DAB:TDMB_BB_DATA_TS);
	dmb_header.size = length;
	dmb_header.subch_id = subChId;
	dmb_header.reserved = 0;//data_sequence_count++;//0xDEAD;
	dmb_header.ack_bit = 0;
	head_size = sizeof(TDMB_BB_HEADER_TYPE);
	/* TEST FOR AV Check 110407 */
	//printk("tunerbb_drv_fc8080_msc_cb data0[0x%x] data1[0x%x] data2[0x%x] data3[0x%x] \n", *(data), *(data+1), *(data+2), *(data+3));
	/* msc_data points at the caller-supplied buffer (see read_data),
	 * so these copies land directly in the host buffer. */
	memcpy(&msc_data[0/*msc_buffer.length*/], &dmb_header, sizeof(TDMB_BB_HEADER_TYPE));
	memcpy(&msc_data[head_size], data, length);
	msc_buffer.length = head_size + length;
	msc_buffer.valid=1;
	return FC8080_RESULT_SUCCESS;
}
#endif
/*=======================================================
Function : tunerbb_drv_fc8080_init
Description : Initializing the FC8080 Chip after power on
Parameter : VOID
Return Value :
SUCCESS : 1
FAIL : 0 or negative interger (If there is error code)
when model who edit history
-------------------------------------------------------
2010/05/17 MOBIT prajuna EBI2 configuration
2010/05/31 MOBIT prajuna Removed test code
2010/06/09 MOBIT prajuna TDMB porting(KB3 Rev. A patch)
2010/07/15 MOBIT prajuna TDMB tuning for QSC
2010/07/16 MOBIT somesoo TDMB tuning for QSC with FCI ÃÖ±Ô¿ø °úÀå
2010/07/17 MOBIT somesoo TDMB porting(VG)
2010/08/19 MOBIT prajuna Code review
2010/09/10 MOBIT prajuna TDMB porting(Aloe)
======================================================== */
/*
 * One-time chip bring-up after power on: select the host interface,
 * register ISR callbacks (non-TSIF builds), probe the chip, init the
 * baseband and select the Band-III tuner.  Sets is_tdmb_probe so later
 * status reads know whether the chip answered.
 */
int8 tunerbb_drv_fc8080_init(void)
{
	uint8 res;
	/*test*/
	/*
	uint16 i;
	uint32 wdata = 0;
	uint32 ldata = 0;
	uint32 data = 0;
	uint32 temp = 0;
	*/
	/* Common Code */
#if defined(STREAM_SLAVE_PARALLEL_UPLOAD)
	/* EBI2 Specific Code */
	bbm_com_hostif_select(NULL, BBM_PPI);
#elif defined(STREAM_TS_UPLOAD)
	/* TSIF Specific Code */
	bbm_com_hostif_select(NULL, BBM_I2C);
#elif defined(STREAM_SPI_UPLOAD)
	/* SPI Specific. Code */
	bbm_com_hostif_select(NULL, BBM_SPI);
#else
#error code not present
#endif
#if !defined(STREAM_TS_UPLOAD)
	/* Non-TSIF builds receive FIC/MSC data through these callbacks. */
	bbm_com_fic_callback_register((fci_u32)NULL, tunerbb_drv_fc8080_fic_cb);
	bbm_com_msc_callback_register((fci_u32)NULL, tunerbb_drv_fc8080_msc_cb);
#endif
	res = bbm_com_probe(NULL);
#ifdef FEATURE_POWER_ON_RETRY
	if(res)
		res = tdmb_fc8080_power_on_retry();
#endif
	res |= bbm_com_init(NULL);
	if(res)
	{
		/* Chip did not answer: remember it so get_ber() reports failure. */
		is_tdmb_probe = 0;
		printk("fc8080 chip id read error , so is_tdmb_probe = %d\n", is_tdmb_probe);
		return FC8080_RESULT_ERROR;
	}
	else
	{
#if !defined(STREAM_TS_UPLOAD)
		/* Reset subchannel map (0xff == unused) and data buffers. */
		memset((void*)&g_chinfo, 0xff, sizeof(g_chinfo));
		memset((void*)&msc_buffer, 0x00, sizeof(DATA_BUFFER));
		memset((void*)&fic_buffer, 0x00, sizeof(DATA_BUFFER));
#endif
	}
	is_tdmb_probe = 1;
	res = bbm_com_tuner_select(0, FC8080_TUNER, BAND3_TYPE);
#if 0 /* host(MSM) <-> fc8080 interface loopback test code */
	/* test */
	for(i=0;i<5000;i++)
	{
		// dog_kick();
		bbm_com_write(NULL, 0xa4, i & 0xff);
		bbm_com_read(NULL, 0xa4, &data);
		if((i & 0xff) != data)
			printk("FC8080 byte test (0x%x,0x%x)\n", i & 0xff, data);
	}
	for(i=0;i<5000;i++)
	{
		bbm_com_word_write(NULL, 0xa4, i & 0xffff);
		bbm_com_word_read(NULL, 0xa4, &wdata);
		if((i & 0xffff) != wdata)
			printk("FC8080 word test (0x%x,0x%x)\n", i & 0xffff, wdata);
	}
	for(i=0;i<5000;i++)
	{
		bbm_com_long_write(NULL, 0xa4, i & 0xffffffff);
		bbm_com_long_read(NULL, 0xa4, &ldata);
		if((i & 0xffffffff) != ldata)
			printk("FC8080 long test (0x%x,0x%x)\n", i & 0xffffffff, ldata);
	}
	data = 0;
	for(i=0;i<5000;i++)
	{
		temp = i&0xff;
		bbm_com_tuner_write(NULL, 0x58, 0x01, &temp, 0x01);
		bbm_com_tuner_read(NULL, 0x58, 0x01, &data, 0x01);
		if((i & 0xff) != data)
			printk("FC8080 tuner test (0x%x,0x%x)\n", i & 0xff, data);
	}
	/* test */
#endif
	if(res)
	{
		printk("[FC8080] BBM_TUNER_SELECT Error = (%d)\n", res);
		return FC8080_RESULT_ERROR;
	}
	else
		return FC8080_RESULT_SUCCESS;
}
/*--------------------------------------------------------------------------
int8 tunrbb_drv_fc8080_stop(void)
(1) Stopping the FC8080 Chip Operation
(2) Return Value
Sucess : 1
Fail : 0 or negative interger (If there is error code)
(3) Argument
VOID
---------------------------------------------------------------------------- */
/*
 * Stop all three service slots (audio/video/data), give the hardware
 * 60 ms to settle, and clear the host-side buffer state.  Any deselect
 * failure is accumulated and reported as a single error.
 */
int8 tunerbb_drv_fc8080_stop(void)
{
	uint8 res;
	res=bbm_com_audio_deselect(0, 0, DAB_SVC_ID);
	res|=bbm_com_video_deselect(0, 0, DMB_SVC_ID, 0);
	res|=bbm_com_data_deselect(0, 0, DAT_SVC_ID);
	ms_must_wait(60);
#if !defined(STREAM_TS_UPLOAD)
	/* Reset subchannel map (0xff == unused) and pending buffers. */
	memset((void*)&g_chinfo, 0xff, sizeof(g_chinfo));
	memset((void*)&msc_buffer, 0x00, sizeof(DATA_BUFFER));
	memset((void*)&fic_buffer, 0x00, sizeof(DATA_BUFFER));
#endif
	if(res)
		return FC8080_RESULT_ERROR;
	else
		return FC8080_RESULT_SUCCESS;
}
/*--------------------------------------------------------------------------
int8 tunerbb_drv_fc8080_control_fic(void)
(1) fic interrupt control on/off
(2) Return Value
Sucess : 1
Fail : 0 or negative interger (If there is error code)
(3) Argument
enable : on/off
---------------------------------------------------------------------------- */
/*
 * Enable or disable the FIC buffer interrupt by toggling bit 8 of
 * BBM_BUF_INT.  Only enable == 1 turns the bit on; any other value
 * (including 0) clears it, matching the original comparison.
 */
int8 tunerbb_drv_fc8080_control_fic(uint8 enable)
{
	unsigned short val;

	bbm_com_word_read(NULL, BBM_BUF_INT, &val);
	val = (enable == 1) ? (val | 0x100) : (val & (unsigned short)~0x100);
	bbm_com_word_write(NULL, BBM_BUF_INT, val);

	return FC8080_RESULT_SUCCESS;
}
#if 0
static fci_u16 tunerbb_drv_fc8080_rserror_count(fci_u16 *nframe)//for dummy channel.
{
fci_u16 rt_nframe, rt_rserror;
bbm_com_write(NULL, 0xe01, 0x0f);
bbm_com_word_read(NULL, 0xe30, &rt_nframe);
bbm_com_word_read(NULL, 0xe32, &rt_rserror);
*nframe = rt_nframe; //½Ç½Ã°£À¸·Î count µÇ´Â frame ¼ö
return rt_rserror;
}
#endif
/*--------------------------------------------------------------------------
int8 tunerbb_drv_fc8080_get_bbinfo(tdmb_status_rsp_type* dmb_bb_info)
(1) Getting the RF/BB Information
(2) Return Value
Sucess : 1
Fail : 0 or negative interger (If there is error code)
(3) Argument
tdmb_status_rsp_type* dmb_bb_info (IN/OUT)
typedef struct tdmb_status_rsp_status
{
uint32 dab_ok;
uint32 msc_ber;
uint32 sync_lock;
uint32 afc_ok;
uint32 cir;
uint32 fic_ber;
uint32 tp_lock;
uint32 sch_ber;
uint32 tp_err_cnt;
uint32 va_ber;
byte srv_state_flag;
};
These paramters are dependent on Information supplied by Device.
---------------------------------------------------------------------------- */
/*
 * Fill the signal-info structure from demod status: MSC/VA BER, sync
 * flags, TP error rate, and a smoothed 0..4 antenna-level derived from
 * a two-sample moving average of MSC BER.
 */
int8 tunerbb_drv_fc8080_get_ber(struct broadcast_tdmb_sig_info *dmb_bb_info)
{
	uint8 sync_status;
	uint32 tp_err_cnt=0;
#ifdef FEATURE_RSSI_DEBUG
	int8 rssi;
#endif
	uint16 nframe = 0;
	/* Antenna leveling: BER thresholds -> level, highest level first. */
	uint8 loop;
	uint16 antTable[4][2] =
	{
		{4, 6000},
		{3, 8000},
		{2, 9000},
		{1, 12000},
	};
	uint16 avgBER = 0;
	uint8 refAntLevel = 0;
	if(is_tdmb_probe == 0)
	{
		/* Chip never probed: report worst-case figures. */
		dmb_bb_info->msc_ber = 20000;
		dmb_bb_info->tp_err_cnt = 255;
		printk("is_tdmb_probe 0. so msc_ber is 20000, tp_err_cnt = 255. \n");
		return FC8080_RESULT_SUCCESS;
	}
	tunerbb_drv_fc8080_check_overrun(serviceType[0]);
#ifdef FEATURE_RSSI_DEBUG
	tunerbb_drv_fc8080_get_dm(&dmb_bb_info->msc_ber, &tp_err_cnt, &nframe, &dmb_bb_info->va_ber, &rssi);
#else
	tunerbb_drv_fc8080_get_dm(&dmb_bb_info->msc_ber, &tp_err_cnt, &nframe, &dmb_bb_info->va_ber);
#endif
	//dmb_bb_info->msc_ber = tunerbb_drv_fc8080_get_viterbi_ber();
	sync_status = tunerbb_drv_fc8080_get_sync_status();
#ifdef FEATURE_RSSI_DEBUG
	printk("[FC8080] sync_status = 0x%x, msc_ber = %d, tp_err_cnt = %d, nframe = %d, va_ber = %d rssi = %d\n", sync_status, dmb_bb_info->msc_ber, tp_err_cnt, nframe, dmb_bb_info->va_ber, rssi);
#endif
	/* Decode sync register: bit4 = sync lock, bit3 = CIR, bits2:1 = AFC. */
	dmb_bb_info->sync_lock = ((sync_status & 0x10) ? 1 : 0);
	dmb_bb_info->cir = ((sync_status & 0x08) ? 1 : 0);
	dmb_bb_info->afc_ok = (((sync_status & 0x06)==0x06) ? 1 : 0);
	if(dmb_bb_info->cir && dmb_bb_info->sync_lock)
	{
		dmb_bb_info->sch_ber = 1;
		// dab_ok : channel impulse response
		dmb_bb_info->dab_ok = 1;
	}
	else
	{
		dmb_bb_info->sch_ber = 0;
		// dab_ok : channel impulse response
		dmb_bb_info->dab_ok = 0;
	}
	if(serviceType[0] == FC8080_DMB || serviceType[0] == FC8080_VISUAL)
	{
		//tp_err_cnt = tunerbb_drv_fc8080_rserror_count(&nframe); /* real-time frame count check */
		if((dmb_bb_info->sync_lock == 0) || (tp_total_cnt == 0))
		{
			dmb_bb_info->tp_err_cnt = 0;
			dmb_bb_info->tp_lock = 0;
		}
		else if(tp_err_cnt == 0)
		{
			dmb_bb_info->tp_err_cnt = 0;
			dmb_bb_info->tp_lock = 1;
		}
		else //if(bb_info.tp_err_cnt > 0)
		{
			/* Errors per mille, normalised by total TS packet count. */
			dmb_bb_info->tp_err_cnt = (uint32)((tp_err_cnt *1000)/(3*tp_total_cnt));
			dmb_bb_info->tp_lock = 0;
		}
		// initialize information
		tp_total_cnt = 0;
		//dmb_bb_info->va_ber = tunerbb_drv_fc8080_get_rs_ber();
	}
	else
	{
		/* Audio/data services carry no TS, so no TP statistics. */
		dmb_bb_info->tp_err_cnt = 0;
		dmb_bb_info->tp_lock = 0;
		dmb_bb_info->va_ber = 0;
	}
	dmb_bb_info->fic_ber = 0;
	/* Antenna leveling: average the last MAX_ANT_BUFF_CNT BER samples. */
	antBuff[antBuffIdx++ %MAX_ANT_BUFF_CNT] = dmb_bb_info->msc_ber;
	for(loop = 0, avgBER = 0; loop < MAX_ANT_BUFF_CNT; loop++)
		avgBER += antBuff[loop];
	if(antBuffIdx < MAX_ANT_BUFF_CNT)
		avgBER = dmb_bb_info->msc_ber;
	else
		avgBER /= MAX_ANT_BUFF_CNT;
	/* Map averaged BER to a level via the threshold table. */
	for(loop = 0; loop < 4; loop++)
	{
		if(avgBER >= antTable[3][1])
		{
			refAntLevel = 0;
			break;
		}
		if(avgBER < antTable[loop][1])
		{
			refAntLevel = antTable[loop][0];
			break;
		}
	}
	if(!(dmb_bb_info->sync_lock))
	{
		syncLockCnt = 0;
		refAntLevel = 0;
	}
	else
	{
		if(syncLockCnt != START_SYNC_CNT) // draw after 1.5secs since sync lock
		{
			syncLockCnt++;
			refAntLevel = 0;
		}
	}
	/* Instantaneous BER at the worst threshold overrides a level of 1. */
	if((refAntLevel == 1) && (dmb_bb_info->msc_ber >= antTable[3][1]))
		refAntLevel = 0;
	/* Decay the displayed level by at most one step per call. */
	if(calAntLevel > refAntLevel)
		calAntLevel--;
	else
		calAntLevel = refAntLevel;
	dmb_bb_info->antenna_level = calAntLevel;
#if 0
	/* Channel locked (sync_status == 0x3f) but no frames arriving
	 * (nframe == 0) - MBN V-Radio case */
	if((sync_status==0x3f)&&(nframe==0))
	{
		/* force antenna level to 0 */
		dmb_bb_info->antenna_level = 0;
	}
#endif
	return FC8080_RESULT_SUCCESS;
}
/*--------------------------------------------------------------------------
int8 tunerbb_drv_fc8080_get_msc_ber(void)
(1) Getting the msc ber
(2) Return Value
Sucess : 1
Fail : 0 or negative interger (If there is error code)
(3) Argument
uint32* pmsc_ber (IN/OUT)
---------------------------------------------------------------------------- */
/* Report the current Viterbi MSC BER through the caller's pointer. */
int8 tunerbb_drv_fc8080_get_msc_ber(uint32* pmsc_ber )
{
	*pmsc_ber = tunerbb_drv_fc8080_get_viterbi_ber();
	return FC8080_RESULT_SUCCESS;
}
/*-------------------------------------------------------------------------------------
int8 tunerbb_drv_fc8080_mulit_set_channel(int32 freq_num, uint8 subch_cnt, uint8 subch_id[ ], uint8 op_mode[ ])
(1) Setting the frequency , subch_id and op_mode.
This function is used in Single Service and Mulitiple Service
(2) Return Value
Sucess : 1
Fail : 0 or negative interger (If there is error code)
(3) Argument
int32 freq_num (IN)
- TDMB Frequency index(e.g 7A(71), 13C(133) etc). Convert frequency if needed
uint8 subch_cnt (IN)
- The number of multiple service. This value is 1 in case of Single Service
uint8 subch_id[ ] (IN)
- Service Componet Sub-Channel ID
uint8 op_mode[ ] (IN)
- Service Operation Mode
DAB = 1;
DMB = 2;
VISUAL = 3;
DATA = 4;
TPEG = 5;
ENSQUERY = 6
<notice> The size of subch_cnt[ ] and op_mode[ ] is the maximum number being supported by FC8080
--------------------------------------------------------------------------------------- */
// Modified by somesoo 20100730 for removing green block effect
/*
 * Tune to a channel and select up to subch_cnt services on it.
 * Sequence: disable buffers + ISR, record service types, turn FIC off,
 * set the tuner frequency, then (re)select each service and finally
 * re-enable the buffer mask and ISR.  ENSQUERY mode instead waits for
 * scan lock and enables only the FIC path.
 */
// Modified by somesoo 20100730 for removing green block effect
int8 tunerbb_drv_fc8080_multi_set_channel(int32 freq_num, uint8 subch_cnt, uint8 subch_id[ ], uint8 op_mode[ ])
{
	int8 res = BBM_OK;
	int32 freq = 0;
	uint8 dmb_cnt=0;
	int i;
	fc8080_service_type svcType = FC8080_SERVICE_MAX;
	unsigned short mask = 0;
	/* Quiesce data path while retuning. */
	bbm_com_word_write(NULL, BBM_BUF_ENABLE, 0x0000);
	tunerbb_drv_fc8080_isr_control(0);
	for(i=0;i<subch_cnt;i++)
	{
		serviceType[i] = op_mode[i];
		if(FC8080_ENSQUERY == op_mode[i])
			svcType = FC8080_ENSQUERY;
	}
	tunerbb_drv_fc8080_control_fic(0);
	/* Change freq_num(channel num) to frequency */
	freq = tunerbb_drv_convert_chnum_to_freq(freq_num);
	if(freq == 0)
	{
		return FC8080_RESULT_ERROR;
	}
	res = bbm_com_tuner_set_freq(0, freq);
	if(res)
	{
		return FC8080_RESULT_ERROR;
	}
	if(svcType == FC8080_ENSQUERY)
	{
#ifdef FEATURE_FIC_BER
		bbm_com_write(0, 0xe12, 0x1f);
#endif
		/* Ensemble query requires a locked scan before reading FIC. */
		if(bbm_com_scan_status(0))
		{
			return FC8080_RESULT_ERROR;
		}
	}
#ifdef FEATURE_FIC_BER
	else
		bbm_com_write(0, 0xe12, 0x3f);
#endif
	/* Select each requested service and accumulate its buffer-enable bit. */
	for(i=0;i<subch_cnt;i++)
	{
		switch(serviceType[i])
		{
		case FC8080_DAB:
			mask |= (1<<DAB_SVC_ID);
			res = bbm_com_audio_select(0, subch_id[i],DAB_SVC_ID);
#ifdef STREAM_TS_UPLOAD
			fc8080_demux_select_channel(subch_id[i], DAB_SVC_ID);
#else
			g_chinfo[subch_id[i]]=DAB_SVC_ID;
#endif
			break;
		case FC8080_DMB:
		case FC8080_VISUAL:
		case FC8080_BLT_TEST:
			mask |= (1 << (DMB_SVC_ID+dmb_cnt));
			/* At most two video services are supported by the hardware. */
			if(dmb_cnt<2)
			{
				res = bbm_com_video_select(0, subch_id[i], DMB_SVC_ID+dmb_cnt, dmb_cnt);
#ifdef STREAM_TS_UPLOAD
				fc8080_demux_select_video(subch_id[i], DMB_SVC_ID+dmb_cnt);
#else
				g_chinfo[subch_id[i]]=dmb_cnt;
#endif
				//dmb_cnt++;
			}
			else
				res=BBM_NOK;
			break;
		case FC8080_DATA:
			mask |= (1<<DAT_SVC_ID);
			res = bbm_com_data_select(0, subch_id[i], DAT_SVC_ID);
#ifdef STREAM_TS_UPLOAD
			fc8080_demux_select_channel(subch_id[i], DAT_SVC_ID);
#else
			g_chinfo[subch_id[i]]=DAT_SVC_ID;
#endif
			break;
		case FC8080_ENSQUERY:
			tunerbb_drv_fc8080_control_fic(1);
			mask |= 0x100;
			res = BBM_OK;
			break;
		default:
			res = BBM_NOK;
			break;
		}
	}
	bbm_com_word_write(NULL, BBM_BUF_ENABLE, mask);
	tot_subch_cnt = subch_cnt;
	// Added by somesoo 20100730 for removing green block effect
	if(svcType != FC8080_ENSQUERY)
		tunerbb_drv_fc8080_isr_control(1);
	if(res)
		return FC8080_RESULT_ERROR;
	else
		return FC8080_RESULT_SUCCESS;
}
/*-------------------------------------------------------------------------------------
int8 tunerbb_drv_fc8080_get_fic(uint8* buffer, uint32* buffer_size)
(1) Getting the FIC data after calling tunerbb_drv_fc8080_multi_set_channel(freq, 1, ignore, ENSQUERY)
In case of ENSQUERY, set_channel function must return Channel LOCKING or Not.
Get_FIC is called in case of LOCKING Success
(2) Return Value
Sucess : 1
Fail : 0 or negative interger (If there is error code)
(3) Argument
uint8* buffer (IN/OUT)
- buffer for FIC data
uint32* buffer_size (IN /OUT)
- FIC Data Size
<notice> This function is used in All HOST Interface
--------------------------------------------------------------------------------------- */
#ifdef FEATURE_GET_FIC_POLLING
/*
 * Polling-mode FIC fetch: read and acknowledge the buffer status
 * register, then copy half the FIC buffer to the caller when the FIC
 * ready bit (0x0100) is set.  Returns error when no FIC data is pending.
 */
int8 tunerbb_drv_fc8080_get_fic(uint8* buffer, uint32* buffer_size /*, uint8 crc_on_off */)
{
	HANDLE hDevice = NULL;
	fci_u16 status = 0;

	if (buffer == NULL)
		return FC8080_RESULT_ERROR;

	bbm_com_word_read(hDevice, BBM_BUF_STATUS, &status);
	if (status == 0)
		return FC8080_RESULT_ERROR;

	/* Acknowledge every pending buffer interrupt we just observed. */
	bbm_com_word_write(hDevice, BBM_BUF_STATUS, status);

	if (!(status & 0x0100))
		return FC8080_RESULT_ERROR;

	bbm_com_data(hDevice, BBM_RD_FIC, buffer, FIC_BUF_LENGTH/2);
	*buffer_size = FIC_BUF_LENGTH/2;
	return FC8080_RESULT_SUCCESS;
}
#else
#if !defined(STREAM_TS_UPLOAD)
/* ISR-mode FIC fetch: hand over the buffer filled by the FIC callback.
 * Returns success with *buffer_size == 0 when nothing is pending. */
int8 tunerbb_drv_fc8080_get_fic(uint8* buffer, uint32* buffer_size /*, uint8 crc_on_off */)
{
	if(buffer==NULL)
		return FC8080_RESULT_ERROR;
	if(fic_buffer.valid && fic_buffer.length)
	{
		*buffer_size = fic_buffer.length;
		memcpy(buffer, (uint8*)fic_buffer.address, fic_buffer.length);
	}
	else
	{
		*buffer_size = 0;
	}
	/* Consume the buffer so the next call waits for fresh data. */
	fic_buffer.valid = 0;
	return FC8080_RESULT_SUCCESS;
}
#endif
#endif
#if !defined(STREAM_TS_UPLOAD)
/*-------------------------------------------------------------------------------------
int8 tunerbb_drv_fc8080_read_data(uint8* buffer, uint32* buffer_size)
(1) Reading MSC or MSC + FIC etc Data.
This function is used in EBI2 HOST Interface
(2) Return Value
Sucess : 1
Fail : 0 or negative interger (If there is error code)
(3) Argument
uint8* buffer (IN/OUT)
- buffer for Data
uint32* buffer_size (IN /OUT)
- Data Size
<notice> This function is used in only EBI2 HOST Interface
--------------------------------------------------------------------------------------- */
/*
 * EBI2-mode MSC read: run the chip ISR synchronously and return the data
 * it produced.  With NOT_MSCDATA_MULTIPLE_MEMCPY the msc callback writes
 * straight into the caller's buffer via the msc_data alias, avoiding a
 * second copy.
 */
int8 tunerbb_drv_fc8080_read_data(uint8* buffer, uint32* buffer_size)
{
	int8 retval = FC8080_RESULT_ERROR;
	if(buffer == NULL || buffer_size == NULL)
	{
		return retval;
	}
	/* initialize length and valid value before isr routine */
	msc_buffer.valid = 0;
	msc_buffer.length=0;
#ifdef NOT_MSCDATA_MULTIPLE_MEMCPY
	/* Point the callback's destination at the caller's buffer. */
	msc_data = buffer;
#endif
	fc8080_isr(NULL);
	if(msc_buffer.valid && msc_buffer.length)
	{
		*buffer_size = msc_buffer.length;
#ifndef NOT_MSCDATA_MULTIPLE_MEMCPY
		memcpy(buffer, &msc_data[0], msc_buffer.length);
#endif
		retval = FC8080_RESULT_SUCCESS;
	}
	return retval;
}
#endif
/*-------------------------------------------------------------------------------------
int8 tunerbb_drv_fc8080_process_multi_data(uint8 subch_cnt, uint8* input_buf, uint32 input_size, uint32* read_size)
(1) Process Multi or Single Service Data. The Driver must process multi or single data and stroe them in other buffer
for supplying data requested by tunerbb_drv_fc8080_get_multi_data( ) function
(2) Return Value
Sucess : 1
Fail : 0 or negative interger (If there is error code)
(3) Argument
uint8 subch_cnt (IN)
- Service Sub-Channel Count
uint8* input_buf (IN)
- The buffer pointer containing Multi or Single Data(FIC/DMB/DAB or Mixed data) read from TSIF or EBI2 buffer
uint32 input_size (IN)
- input_buf has input_size data
uint32* read_size (IN /OUT)
- data size + subch_id header size supply to Application
<notice>
(1) read_size is the mulit or single data + header size.
(2) LGE supply the headr type
(3) For example
- DMB Single Service case : read_size = DMB MSC Data size + dmb_header size
- FIC + DMB + PACKET multi case :
read_size FIC data size + dmb_header + DMB data size + dmb_header + Packet data size + dmb_header
--------------------------------------------------------------------------------------- */
#ifdef STREAM_TS_UPLOAD
/*
 * Demux one multi-service buffer and report how many bytes the caller must
 * fetch afterwards: the payload bytes of every stream that produced data,
 * plus one TDMB_BB_HEADER_TYPE per such stream.
 *
 * Returns FC8080_RESULT_SUCCESS, or FC8080_RESULT_ERROR on NULL arguments.
 */
int8 tunerbb_drv_fc8080_process_multi_data(uint8 subch_cnt, uint8* input_buf, uint32 input_size, uint32* read_size)
{
    uint32 fic_bytes = 0, chan_bytes = 0;
    uint32 total_bytes = 0;
    uint8 idx, hdr_count = 0;

    if (input_buf == NULL || read_size == NULL)
        return FC8080_RESULT_ERROR;

    fc8080_demux(input_buf, input_size);

    /* FIC stream: single instance */
    if (!fc8080_get_ts_datalen(TS_DAT_FIC, 0, &fic_bytes))
    {
        total_bytes += fic_bytes;
        hdr_count++;
    }

    /* video (TS) streams: two instances */
    for (idx = 0; idx < 2; idx++)
    {
        if (!fc8080_get_ts_datalen(TS_DAT_VIDEO_I, idx, &chan_bytes))
        {
            total_bytes += chan_bytes;
            hdr_count++;
        }
    }

    /* non-video streams: eight instances */
    for (idx = 0; idx < 8; idx++)
    {
        if (!fc8080_get_ts_datalen(TS_DAT_NVIDEO, idx, &chan_bytes))
        {
            total_bytes += chan_bytes;
            hdr_count++;
        }
    }

    *read_size = total_bytes + sizeof(TDMB_BB_HEADER_TYPE) * hdr_count;
    return FC8080_RESULT_SUCCESS;
}
#else
/* Copy each sub-channel's FCI header + payload from input_buf into the
 * staging buffer msc_multi_data, and report the size the application must
 * later read: total payload bytes plus one TDMB_BB_HEADER_TYPE per
 * sub-channel (headers are converted at get_multi_data() time).
 *
 * Fix: validate the pointer arguments the same way the STREAM_TS_UPLOAD
 * variant does, instead of dereferencing read_size/input_buf unchecked. */
int8 tunerbb_drv_fc8080_process_multi_data(uint8 subch_cnt, uint8* input_buf, uint32 input_size, uint32* read_size)
{
uint32 i=0;
FCI_HEADER_TYPE header;
uint8 ch_cnt=0;
if(input_buf == NULL || read_size == NULL)
{
return FC8080_RESULT_ERROR;
}
(*read_size) = 0;
// Modified by FCI 20100309 for DAB error fatal(TD10172)
// Modified by somesoo 20100831 for DAB error fatal(TD10172)
/* tot_subch_cnt (file scope) holds the configured sub-channel count;
 * the subch_cnt parameter is kept for interface compatibility */
for(ch_cnt = 0; ch_cnt<tot_subch_cnt; ch_cnt++)
{
memcpy(&header, &input_buf[i], sizeof(FCI_HEADER_TYPE));
memcpy(&msc_multi_data[i], &input_buf[i], sizeof(FCI_HEADER_TYPE));
i+=sizeof(FCI_HEADER_TYPE);
memcpy(&msc_multi_data[i], &input_buf[i], header.length);
i+=header.length;
(*read_size) += header.length;
(*read_size) += sizeof(TDMB_BB_HEADER_TYPE);
}
return FC8080_RESULT_SUCCESS;
}
#endif
#ifdef STREAM_TS_UPLOAD
/*-------------------------------------------------------------------------------------
int8 tunerbb_drv_fc8080_get_multi_data(uint8 subch_cnt, uint8* buf_ptr, uint32 buf_size)
(1) Getting the processed data.
(2) Return Value
Sucess : 1
Fail : 0 or negative interger (If there is error code)
(3) Argument
uint8 subch_cnt
- Sub-Channel count
uint8* buf_ptr (IN/OUT)
- buffer for Data
uint32 buffer_size (IN)
- Data Size
<notice>
(1) format : dmb_header |data|dmb_header|data
--------------------------------------------------------------------------------------- */
/* Serialize all demuxed streams into buf_ptr as a sequence of
 * TDMB_BB_HEADER_TYPE | payload records (FIC first, then 2 video and
 * 8 non-video streams). buf_size must equal the size previously reported
 * by tunerbb_drv_fc8080_process_multi_data(); a mismatch returns error. */
int8 tunerbb_drv_fc8080_get_multi_data(uint8 subch_cnt, uint8* buf_ptr, uint32 buf_size)
{
uint32 nDataSize;
int i;
TDMB_BB_HEADER_TYPE dmb_header;
uint32 read_size = 0;
if(buf_ptr == NULL || buf_size == 0)
{
return FC8080_RESULT_ERROR;
}
if(!fc8080_get_ts_datalen(TS_DAT_FIC, 0, &nDataSize))
{
/* 0xFC85 marks a valid record header for the upper layer */
dmb_header.reserved = 0xFC85;
dmb_header.data_type = TDMB_BB_DATA_FIC;
dmb_header.size = nDataSize;
dmb_header.subch_id = 0;
dmb_header.ack_bit = 0;
memcpy(buf_ptr, &dmb_header, sizeof(TDMB_BB_HEADER_TYPE));
buf_ptr += sizeof(TDMB_BB_HEADER_TYPE);
read_size += sizeof(TDMB_BB_HEADER_TYPE);
/* NOTE(review): fc8080_get_multi_data() writes dmb_header.subch_id AFTER
 * the header was already copied into buf_ptr, so the emitted header always
 * carries subch_id == 0 — confirm this is intended. */
fc8080_get_multi_data(TS_DAT_FIC, 0, buf_ptr, &dmb_header.subch_id);
buf_ptr += nDataSize;
read_size += nDataSize;
}
for(i=0;i<2;i++)
{
if(!fc8080_get_ts_datalen(TS_DAT_VIDEO_I, i, &nDataSize))
{
dmb_header.reserved = 0xFC85;
dmb_header.data_type = TDMB_BB_DATA_TS;
dmb_header.size = nDataSize;
dmb_header.subch_id = 0;
dmb_header.ack_bit = 0;
memcpy(buf_ptr, &dmb_header, sizeof(TDMB_BB_HEADER_TYPE));
buf_ptr += sizeof(TDMB_BB_HEADER_TYPE);
read_size += sizeof(TDMB_BB_HEADER_TYPE);
fc8080_get_multi_data(TS_DAT_VIDEO_I, i, buf_ptr, &dmb_header.subch_id);
buf_ptr += nDataSize;
read_size += nDataSize;
}
}
for(i=0;i<8;i++)
{
if(!fc8080_get_ts_datalen(TS_DAT_NVIDEO, i, &nDataSize))
{
dmb_header.reserved = 0xFC85;
/* NOTE(review): data_type is only assigned for DAB_SVC_ID/DAT_SVC_ID; for
 * other i it keeps the value left over from the previous record — verify. */
if(i==DAB_SVC_ID)
dmb_header.data_type = TDMB_BB_DATA_DAB;
else if(i==DAT_SVC_ID)
dmb_header.data_type = TDMB_BB_DATA_PACK;
dmb_header.size = nDataSize;
dmb_header.subch_id = 0;
dmb_header.ack_bit = 0;
memcpy(buf_ptr, &dmb_header, sizeof(TDMB_BB_HEADER_TYPE));
buf_ptr += sizeof(TDMB_BB_HEADER_TYPE);
read_size += sizeof(TDMB_BB_HEADER_TYPE);
fc8080_get_multi_data(TS_DAT_NVIDEO, i, buf_ptr, &dmb_header.subch_id);
buf_ptr += nDataSize;
read_size += nDataSize;
}
}
/* the caller's size expectation must match what we actually emitted */
if(read_size != buf_size)
{
return FC8080_RESULT_ERROR;
}
else
{
return FC8080_RESULT_SUCCESS;
}
}
#else
/* Convert the records staged in msc_multi_data (FCI_HEADER_TYPE | payload,
 * written by process_multi_data()) into LGE-format records
 * (TDMB_BB_HEADER_TYPE | payload) in buf_ptr. buf_size must match the size
 * reported by process_multi_data(). */
int8 tunerbb_drv_fc8080_get_multi_data(uint8 subch_cnt, uint8* buf_ptr, uint32 buf_size)
{
uint32 nDataSize;
int i=0;
TDMB_BB_HEADER_TYPE dmb_header;
uint32 read_size = 0;
FCI_HEADER_TYPE header;
uint8 ch_cnt=0;
if(buf_ptr == NULL || buf_size == 0)
{
return FC8080_RESULT_ERROR;
}
// Modified by FCI 20100309 for DAB error fatal(TD10172)
// Modified by somesoo 20100831 for DAB error fatal(TD10172)
for(ch_cnt = 0; ch_cnt<tot_subch_cnt; ch_cnt++)
{
/* read the staged FCI header, then translate it to a TDMB_BB header */
memcpy(&header, &msc_multi_data[i], sizeof(FCI_HEADER_TYPE));
dmb_header.reserved = 0xFC85;
/* NOTE(review): data_type is left unassigned when svc_id matches none of
 * the three known ids — confirm all svc_id values are covered. */
if(header.svc_id==DMB_SVC_ID)
dmb_header.data_type = TDMB_BB_DATA_TS;
else if(header.svc_id==DAB_SVC_ID)
dmb_header.data_type = TDMB_BB_DATA_DAB;
else if(header.svc_id==DAT_SVC_ID)
dmb_header.data_type = TDMB_BB_DATA_PACK;
dmb_header.size = header.length;
dmb_header.subch_id = header.subch_id;
dmb_header.ack_bit = 0;
i+=sizeof(FCI_HEADER_TYPE);
memcpy(buf_ptr, &dmb_header, sizeof(TDMB_BB_HEADER_TYPE));
buf_ptr += sizeof(TDMB_BB_HEADER_TYPE);
read_size += sizeof(TDMB_BB_HEADER_TYPE);
memcpy(buf_ptr, &msc_multi_data[i], header.length);
nDataSize=header.length;
buf_ptr += nDataSize;
read_size += nDataSize;
/* NOTE(review): the cursor advance adds (TDMB_BB_HEADER_TYPE -
 * FCI_HEADER_TYPE) on top of the payload length; this only lands on the
 * next staged record if the staging stride accounts for the size
 * difference of the two header types — verify against process_multi_data(). */
i+=(header.length+sizeof(TDMB_BB_HEADER_TYPE)-sizeof(FCI_HEADER_TYPE));
}
if(read_size != buf_size)
{
return FC8080_RESULT_ERROR;
}
else
{
return FC8080_RESULT_SUCCESS;
}
}
#endif
/*-------------------------------------------------------------------------------------
int8 tunerbb_drv_fc8080_start_tii(void)
(1) Starting TII
(2) Return Value
Sucess : 1
Fail : 0 or negative interger (If there is error code)
(3) Argument
VOID
--------------------------------------------------------------------------------------- */
int8 tunerbb_drv_fc8080_start_tii(void)
{
/* TII (Transmitter Identification Information) is not supported by this
 * driver; the stub always reports failure. */
return FC8080_RESULT_ERROR;
}
/*-------------------------------------------------------------------------------------
int8 tunerbb_drv_fc8080_stop_tii(void)
(1) Stopping TII
(2) Return Value
Sucess : 1
Fail : 0 or negative interger (If there is error code)
(3) Argument
VOID
--------------------------------------------------------------------------------------- */
int8 tunerbb_drv_fc8080_stop_tii(void)
{
/* TII is not supported by this driver; the stub always reports failure. */
return FC8080_RESULT_ERROR;
}
/*-------------------------------------------------------------------------------------
int8 tunerbb_drv_fc8080_check_tii(uint8* main_tii_ptr, uint8* sub_tii_ptr)
(1) Stopping TII
(2) Return Value
Sucess : 1
Fail : 0 or negative interger (If there is error code)
(3) Argument
uint8* main_tii_ptr
- Main TII value
uint8* sub_tii_ptr
- SUB TII value
--------------------------------------------------------------------------------------- */
int8 tunerbb_drv_fc8080_check_tii(uint8* main_tii_ptr, uint8* sub_tii_ptr)
{
if(( NULL == main_tii_ptr) ||( NULL == sub_tii_ptr))
{
return FC8080_RESULT_ERROR;
}
/* TII readout is unsupported: both outputs are set to 0xFF (invalid) and
 * the function deliberately returns error even with valid pointers. */
*main_tii_ptr = 0xFF;
*sub_tii_ptr = 0xFF;
return FC8080_RESULT_ERROR;
}
#ifdef FEATURE_RSSI_DEBUG
void tunerbb_drv_fc8080_get_dm(fci_u32 *mscber, fci_u32 *tp_err, fci_u16 *tpcnt, fci_u32 *vaber, fci_s8 *rssi)
#else
void tunerbb_drv_fc8080_get_dm(fci_u32 *mscber, fci_u32 *tp_err, fci_u16 *tpcnt, fci_u32 *vaber)
#endif
{
#ifdef FEATURE_RSSI_DEBUG
fci_s8 rssi_value;
#endif
/* Mirror of the monitoring register block read in bulk from BBM_DM;
 * field order must match the hardware layout exactly. */
struct dm_st {
fci_u32 start;
fci_u16 vit_ber_rxd_rsps;
fci_u16 vit_ber_err_rsps;
fci_u32 vit_ber_err_bits;
fci_u32 dmp_ber_rxd_bits;
fci_u32 dmp_ber_err_bits;
fci_u16 ber_rxd_rsps;
fci_u16 ber_err_rsps;
fci_u32 ber_err_bits;
};
struct dm_st dm;
bbm_com_bulk_read(NULL, BBM_DM, (fci_u8*) &dm, sizeof(dm));
#ifdef FEATURE_RSSI_DEBUG
/* the top byte of ber_err_bits carries the (signed) RSSI value */
rssi_value = (fci_s8) ((dm.ber_err_bits & 0xff000000) >> 24);
#endif
dm.ber_err_bits &= 0x00ffffff;
dm.dmp_ber_err_bits &= 0x00ffffff;
/* MSC BER scaled to 1e-5 units; no received bits means worst case */
if(dm.dmp_ber_rxd_bits == 0)
*mscber = MAX_MSC_BER;
else if(dm.dmp_ber_err_bits == 0)
*mscber = 0;
else
{
/* 42949 ~= UINT32_MAX/100000: split the scaling to avoid 32-bit overflow */
if(dm.dmp_ber_err_bits > 42949)
*mscber = ((dm.dmp_ber_err_bits * 1000)/dm.dmp_ber_rxd_bits)*100;
else
*mscber = (dm.dmp_ber_err_bits*100000)/dm.dmp_ber_rxd_bits;
}
*mscber = (*mscber >= MAX_MSC_BER) ? MAX_MSC_BER : *mscber;
/* ber must bigger than 0 because FactoryTest issue */
if(*mscber == 0)
{
*mscber = 1;
}
/* Viterbi/RS BER over 204-byte RS packets (204 * 8 bits each) */
if(dm.ber_rxd_rsps == 0)
*vaber = MAX_VA_BER;
else if((dm.ber_err_bits == 0) && (dm.ber_err_rsps == 0))
{
*vaber = 0;
}
else
{
if(dm.ber_err_bits > 42949)
*vaber = ((dm.ber_err_bits * 1000)/(dm.ber_rxd_rsps * 204 * 8))*100;
else
*vaber = (dm.ber_err_bits*100000)/(dm.ber_rxd_rsps * 204 * 8);
}
*vaber = (*vaber >= MAX_VA_BER) ? MAX_VA_BER : *vaber;
/* TP (transport packet) error count and total packet count */
*tp_err = dm.ber_err_rsps;
*tpcnt = dm.ber_rxd_rsps;
#ifdef FEATURE_RSSI_DEBUG
*rssi = rssi_value;
#endif
}
/*
 * Latch and read the Viterbi-decoder BER counters, returning the error
 * ratio scaled to 1e-5 units (err_bits * 100000 / rx_bits), clamped to
 * [1, MAX_MSC_BER].
 */
static uint32 tunerbb_drv_fc8080_get_viterbi_ber(void) //msc_ber
{
    uint32 rx_bits = 0, err_bits = 0;
    uint32 result;

    bbm_com_write(NULL, 0xe01, 0x04);   /* latch the counter registers */
    bbm_com_long_read(NULL, 0xe40, &rx_bits);
    bbm_com_long_read(NULL, 0xe44, &err_bits);

    if (rx_bits == 0)
    {
        result = MAX_MSC_BER;           /* nothing received: worst case */
    }
    else if (err_bits == 0)
    {
        result = 0;
    }
    else if (err_bits > 42949)
    {
        /* err_bits * 100000 would overflow 32 bits; split the scaling */
        result = ((err_bits * 1000) / rx_bits) * 100;
    }
    else
    {
        result = (err_bits * 100000) / rx_bits;
    }

    if (result >= MAX_MSC_BER)
        result = MAX_MSC_BER;

    /* ber must be bigger than 0 because of a FactoryTest issue */
    if (result == 0)
        result = 1;

    return result;
}
/* Read the demodulator sync status register (BBM_SYNC_STAT).
 *
 * Fix: sync_status was returned uninitialized if bbm_com_read() failed to
 * fill it; initialize to 0 so a failed read reports "no sync" instead of
 * stack garbage. */
static int8 tunerbb_drv_fc8080_get_sync_status(void)
{
	uint8 sync_status = 0;

	bbm_com_read(0, BBM_SYNC_STAT, &sync_status);

	return sync_status;
}
#if 0
static uint32 tunerbb_drv_fc8080_get_rs_ber(void) //va_ber
{
uint16 nframe, rserror;
uint32 esum;
uint32 ber;
bbm_com_write(NULL, 0xe01, 0x01);
bbm_com_word_read(NULL, 0xe30, &nframe);
bbm_com_word_read(NULL, 0xe32, &rserror);
bbm_com_long_read(NULL, 0xe34, &esum);
if(nframe == 0)
ber = MAX_VA_BER;
else if((esum == 0) && (rserror == 0))
{
ber = 0;
}
else
{
if(esum > 42949)
ber = ((esum * 1000)/(nframe * 204 * 8))*100;
else
ber = (esum*100000)/(nframe * 204 * 8);
}
ber = (ber >= MAX_VA_BER) ? MAX_VA_BER : ber;
return ber;
}
#endif
/*
static fci_u8 ficBuffer[1024];
extern int (*pFicCallback)(fci_u32 userdata, fci_u8 *data, int length);
extern fci_u32 gFicUserData;
void tunerbb_drv_fc8080_process_polling_data()
{
HANDLE hDevice = NULL;
fci_u16 mfIntStatus = 0;
fci_u16 size;
int i;
bbm_com_write(hDevice, BBM_COM_INT_ENABLE, 0x00);
bbm_com_write(hDevice, BBM_COM_STATUS_ENABLE, 0x00);
bbm_com_word_read(hDevice, BBM_BUF_INT, 0x01ff);
bbm_com_word_read(hDevice, BBM_BUF_ENABLE, 0x01ff);
for(i = 0 ; i < 200 ; i++)
{
bbm_com_word_read(hDevice, BBM_BUF_STATUS, &mfIntStatus);
if(mfIntStatus)
break;
}
if(mfIntStatus == 0)
{
bbm_com_word_read(hDevice, BBM_BUF_INT, 0x00ff);
bbm_com_word_read(hDevice, BBM_BUF_ENABLE, 0x00ff);
bbm_com_write(hDevice, BBM_COM_INT_ENABLE, ENABLE_INT_MASK);
bbm_com_write(hDevice, BBM_COM_STATUS_ENABLE, ENABLE_INT_MASK);
return;
}
bbm_com_word_write(hDevice, BBM_BUF_STATUS, mfIntStatus);
bbm_com_word_write(hDevice, BBM_BUF_STATUS, 0x0000);
if(mfIntStatus & 0x0100)
{
bbm_com_word_read(hDevice, BBM_BUF_FIC_THR, &size);
size += 1;
if(size-1)
{
bbm_com_data(hDevice, BBM_COM_FIC_DATA, &ficBuffer[0], size);
if(pFicCallback)
(*pFicCallback)(gFicUserData, &ficBuffer[0], size);
}
}
bbm_com_word_read(hDevice, BBM_BUF_INT, 0x00ff);
bbm_com_word_read(hDevice, BBM_BUF_ENABLE, 0x00ff);
bbm_com_write(hDevice, BBM_COM_INT_ENABLE, ENABLE_INT_MASK);
bbm_com_write(hDevice, BBM_COM_STATUS_ENABLE, ENABLE_INT_MASK);
}
*/
/* Check the buffer-overrun status register for the given operating mode
 * and, if the mode's overrun bit is set, acknowledge it by writing the
 * status back (which resets the buffer).
 *
 * Fixes: removed a redundant bare scope block, initialized the status word
 * before the register read, and corrected typos in the comment and log
 * message ("invaild", "OvernRun"). */
static int8 tunerbb_drv_fc8080_check_overrun(uint8 op_mode)
{
	uint16 mfoverStatus = 0;
	uint8 mask;

	if (op_mode == FC8080_DAB)
	{
		mask = 0x04;
	}
	else if (op_mode == FC8080_DMB || op_mode == FC8080_VISUAL)
	{
		mask = 0x01;
	}
	else
	{
		//printk("fc8080 invalid op_mode %d\n", op_mode);
		return FC8080_RESULT_ERROR; /* invalid op_mode */
	}

	bbm_com_word_read(NULL, BBM_BUF_OVERRUN, &mfoverStatus);
	if (mfoverStatus & mask)
	{
		/* write-back acknowledges the overrun and resets the buffer */
		bbm_com_word_write(NULL, BBM_BUF_OVERRUN, mfoverStatus);
		printk("======== FC8080 OverRun and Buffer Reset Done mask (0x%X) over (0x%X) =======\n", mask, mfoverStatus);
	}

	return FC8080_RESULT_SUCCESS;
}
/* Enable (onoff != 0) or disable the MF interrupt via BBM_MD_INT_EN. */
void tunerbb_drv_fc8080_isr_control(fci_u8 onoff)
{
	if (onoff == 0)
		bbm_com_write(0, BBM_MD_INT_EN, 0);
	else
		bbm_com_write(0, BBM_MD_INT_EN, BBM_MF_INT);
}
/* LGE_CHANGE_S, [hyun118.shin@lge.com], TDMB Antennal Leveling */
/* Reset the antenna-level history buffer and its derived state. */
static void tunerbb_drv_fc8080_init_antlevel_val(void)
{
	uint8 slot;

	for (slot = 0; slot < MAX_ANT_BUFF_CNT; slot++)
		antBuff[slot] = 0;

	antBuffIdx = 0;
	calAntLevel = 0;
}
/* LGE_CHANGE_E, [hyun118.shin@lge.com], TDMB Antennal Leveling */ | gpl-2.0 |
ChaOSChriS/android_kernel_asus_flo | arch/powerpc/mm/numa.c | 60 | 37312 | /*
* pSeries NUMA support
*
* Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
static int numa_enabled = 1;
static char *cmdline __initdata;
static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;
#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const unsigned int *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
/*
* Allocate node_to_cpumask_map based on number of available nodes
* Requires node_possible_map to be valid.
*
* Note: cpumask_of_node() is not valid until after this is done.
*/
static void __init setup_node_to_cpumask_map(void)
{
unsigned int node, num = 0;
/* setup nr_node_ids if not done yet */
if (nr_node_ids == MAX_NUMNODES) {
/* nr_node_ids = highest possible node id + 1 */
for_each_node_mask(node, node_possible_map)
num = node;
nr_node_ids = num + 1;
}
/* allocate the map */
for (node = 0; node < nr_node_ids; node++)
alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
/* cpumask_of_node() will now work */
dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}
/* Consume the next boundary from the "fake=" command line (file-scope
 * cmdline) and, if @end_pfn crosses it, assign a fresh fake node id via
 * *@nid. Returns 1 when a new fake node was created, 0 otherwise. */
static int fake_numa_create_new_node(unsigned long end_pfn,
unsigned int *nid)
{
unsigned long long mem;
char *p = cmdline;
static unsigned int fake_nid;
static unsigned long long curr_boundary;
/*
* Modify node id, iff we started creating NUMA nodes
* We want to continue from where we left of the last time
*/
if (fake_nid)
*nid = fake_nid;
/*
* In case there are no more arguments to parse, the
* node_id should be the same as the last fake node id
* (we've handled this above).
*/
if (!p)
return 0;
mem = memparse(p, &p);
if (!mem)
return 0;
/* boundaries must be monotonically increasing */
if (mem < curr_boundary)
return 0;
curr_boundary = mem;
if ((end_pfn << PAGE_SHIFT) > mem) {
/*
* Skip commas and spaces
*/
while (*p == ',' || *p == ' ' || *p == '\t')
p++;
/* remember the parse position for the next call */
cmdline = p;
fake_nid++;
*nid = fake_nid;
dbg("created new fake_node with id %d\n", fake_nid);
return 1;
}
return 0;
}
/*
* get_node_active_region - Return active region containing pfn
* Active range returned is empty if none found.
* @pfn: The page to return the region for
* @node_ar: Returned set to the active region containing @pfn
*/
static void __init get_node_active_region(unsigned long pfn,
		struct node_active_region *node_ar)
{
	unsigned long range_start, range_end;
	int idx, range_nid;

	/* scan the early memory ranges until one covers @pfn */
	for_each_mem_pfn_range(idx, MAX_NUMNODES, &range_start, &range_end,
			       &range_nid) {
		if (pfn < range_start || pfn >= range_end)
			continue;

		node_ar->nid = range_nid;
		node_ar->start_pfn = range_start;
		node_ar->end_pfn = range_end;
		break;
	}
}
/* Record @cpu's home node and add it to that node's cpumask. */
static void map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!cpumask_test_cpu(cpu, node_to_cpumask_map[node]))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
/* Remove @cpu from its node's cpumask; warn if it was not in there. */
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (!cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
		return;
	}

	cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
/* "ibm,associativity": the node's NUMA domain path; NULL if absent */
return of_get_property(dev, "ibm,associativity", NULL);
}
/*
* Returns the property linux,drconf-usable-memory if
* it exists (the property exists only in kexec/kdump kernels,
* added by kexec-tools)
*/
static const u32 *of_get_usable_memory(struct device_node *memory)
{
const u32 *prop;
u32 len;
prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
if (!prop || len < sizeof(unsigned int))
return 0;
return prop;
}
/*
 * Compute the NUMA distance between nodes @a and @b: LOCAL_DISTANCE when
 * form-1 affinity is absent, otherwise doubled once per associativity
 * level on which the two nodes disagree.
 */
int __node_distance(int a, int b)
{
	int level;
	int dist = LOCAL_DISTANCE;

	if (!form1_affinity)
		return dist;

	for (level = 0; level < distance_ref_points_depth; level++) {
		if (distance_lookup_table[a][level] ==
		    distance_lookup_table[b][level])
			break;

		/* Double the distance for each NUMA level */
		dist *= 2;
	}

	return dist;
}
/* Cache @nid's associativity value at each reference point, for use by
 * __node_distance(). No-op without form-1 affinity. */
static void initialize_distance_lookup_table(int nid,
		const unsigned int *associativity)
{
	int level;

	if (!form1_affinity)
		return;

	for (level = 0; level < distance_ref_points_depth; level++)
		distance_lookup_table[nid][level] =
			associativity[distance_ref_points[level]];
}
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
* info is found.
*/
static int associativity_to_nid(const unsigned int *associativity)
{
int nid = -1;
/* min_common_depth == -1 means NUMA topology discovery failed */
if (min_common_depth == -1)
goto out;
/* associativity[0] is the number of entries that follow */
if (associativity[0] >= min_common_depth)
nid = associativity[min_common_depth];
/* POWER4 LPAR uses 0xffff as invalid node */
if (nid == 0xffff || nid >= MAX_NUMNODES)
nid = -1;
/* with enough entries, also populate the distance lookup table */
if (nid > 0 && associativity[0] >= distance_ref_points_depth)
initialize_distance_lookup_table(nid, associativity);
out:
return nid;
}
/* Returns the nid associated with the given device tree node,
* or -1 if not found.
*/
static int of_node_to_nid_single(struct device_node *device)
{
	const unsigned int *assoc = of_get_associativity(device);

	if (!assoc)
		return -1;

	return associativity_to_nid(assoc);
}
/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
struct device_node *tmp;
int nid = -1;
/* take our own reference; released on exit or when walking upward */
of_node_get(device);
while (device) {
nid = of_node_to_nid_single(device);
if (nid != -1)
break;
/* drop the child's reference only after getting the parent's */
tmp = device;
device = of_get_parent(tmp);
of_node_put(tmp);
}
of_node_put(device);
return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);
/* Determine the associativity depth at which nodes are distinguished,
 * from ibm,associativity-reference-points under /rtas (or /ibm,opal).
 * Returns the depth, or -1 when NUMA information is unusable. */
static int __init find_min_common_depth(void)
{
int depth;
struct device_node *chosen;
struct device_node *root;
const char *vec5;
if (firmware_has_feature(FW_FEATURE_OPAL))
root = of_find_node_by_path("/ibm,opal");
else
root = of_find_node_by_path("/rtas");
if (!root)
root = of_find_node_by_path("/");
/*
* This property is a set of 32-bit integers, each representing
* an index into the ibm,associativity nodes.
*
* With form 0 affinity the first integer is for an SMP configuration
* (should be all 0's) and the second is for a normal NUMA
* configuration. We have only one level of NUMA.
*
* With form 1 affinity the first integer is the most significant
* NUMA boundary and the following are progressively less significant
* boundaries. There can be more than one level of NUMA.
*/
distance_ref_points = of_get_property(root,
"ibm,associativity-reference-points",
&distance_ref_points_depth);
if (!distance_ref_points) {
dbg("NUMA: ibm,associativity-reference-points not found.\n");
goto err;
}
/* convert byte length to an entry count */
distance_ref_points_depth /= sizeof(int);
#define VEC5_AFFINITY_BYTE 5
#define VEC5_AFFINITY 0x80
if (firmware_has_feature(FW_FEATURE_OPAL))
form1_affinity = 1;
else {
/* on pSeries, form-1 affinity is advertised via architecture vector 5 */
chosen = of_find_node_by_path("/chosen");
if (chosen) {
vec5 = of_get_property(chosen,
"ibm,architecture-vec-5", NULL);
if (vec5 && (vec5[VEC5_AFFINITY_BYTE] &
VEC5_AFFINITY)) {
dbg("Using form 1 affinity\n");
form1_affinity = 1;
}
}
}
if (form1_affinity) {
depth = distance_ref_points[0];
} else {
if (distance_ref_points_depth < 2) {
printk(KERN_WARNING "NUMA: "
"short ibm,associativity-reference-points\n");
goto err;
}
depth = distance_ref_points[1];
}
/*
* Warn and cap if the hardware supports more than
* MAX_DISTANCE_REF_POINTS domains.
*/
if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
printk(KERN_WARNING "NUMA: distance array capped at "
"%d entries\n", MAX_DISTANCE_REF_POINTS);
distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
}
of_node_put(root);
return depth;
err:
of_node_put(root);
return -1;
}
/* Look up #address-cells / #size-cells from the first memory node. */
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory;

	memory = of_find_node_by_type(NULL, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}
/*
 * Concatenate @n 32-bit cells from *@buf (most-significant cell first)
 * into one unsigned long, advancing the caller's cursor past them.
 */
static unsigned long read_n_cells(int n, const unsigned int **buf)
{
	unsigned long value = 0;
	const unsigned int *cell = *buf;

	while (n-- > 0)
		value = (value << 32) | *cell++;

	*buf = cell;
	return value;
}
struct of_drconf_cell {
u64 base_addr;
u32 drc_index;
u32 reserved;
u32 aa_index;
u32 flags;
};
#define DRCONF_MEM_ASSIGNED 0x00000008
#define DRCONF_MEM_AI_INVALID 0x00000040
#define DRCONF_MEM_RESERVED 0x00000080
/*
* Read the next memblock list entry from the ibm,dynamic-memory property
* and return the information in the provided of_drconf_cell structure.
*/
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cur;

	/* base address spans n_mem_addr_cells cells and advances *cellp */
	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cur = *cellp;
	drmem->drc_index = *cur++;
	drmem->reserved = *cur++;
	drmem->aa_index = *cur++;
	drmem->flags = *cur++;

	*cellp = cur;
}
/*
* Retrieve and validate the ibm,dynamic-memory property of the device tree.
*
* The layout of the ibm,dynamic-memory property is a number N of memblock
* list entries followed by N memblock list entries. Each memblock list entry
* contains information as laid out in the of_drconf_cell struct above.
*/
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
const u32 *prop;
u32 len, entries;
prop = of_get_property(memory, "ibm,dynamic-memory", &len);
if (!prop || len < sizeof(unsigned int))
return 0;
/* the first cell is the entry count; *dm is left pointing at entry 0 */
entries = *prop++;
/* Now that we know the number of entries, revalidate the size
* of the property read in to ensure we have everything
*/
if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
return 0;
*dm = prop;
return entries;
}
/*
* Retrieve and validate the ibm,lmb-size property for drconf memory
* from the device tree.
*/
static u64 of_get_lmb_size(struct device_node *memory)
{
const u32 *prop;
u32 len;
prop = of_get_property(memory, "ibm,lmb-size", &len);
if (!prop || len < sizeof(unsigned int))
return 0;
return read_n_cells(n_mem_size_cells, &prop);
}
struct assoc_arrays {
u32 n_arrays;
u32 array_sz;
const u32 *arrays;
};
/*
* Retrieve and validate the list of associativity arrays for drconf
* memory from the ibm,associativity-lookup-arrays property of the
* device tree..
*
* The layout of the ibm,associativity-lookup-arrays property is a number N
* indicating the number of associativity arrays, followed by a number M
* indicating the size of each associativity array, followed by a list
* of N associativity arrays.
*/
static int of_get_assoc_arrays(struct device_node *memory,
struct assoc_arrays *aa)
{
const u32 *prop;
u32 len;
prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
/* need at least the two count cells (N arrays, M entries each) */
if (!prop || len < 2 * sizeof(unsigned int))
return -1;
aa->n_arrays = *prop++;
aa->array_sz = *prop++;
/* Now that we know the number of arrays and size of each array,
* revalidate the size of the property read in.
*/
if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
return -1;
aa->arrays = prop;
return 0;
}
/*
* This is like of_node_to_nid_single() for memory represented in the
* ibm,dynamic-reconfiguration-memory node.
*/
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
struct assoc_arrays *aa)
{
int default_nid = 0;
int nid = default_nid;
int index;
/* only trust aa_index when it is valid and within the lookup arrays */
if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
!(drmem->flags & DRCONF_MEM_AI_INVALID) &&
drmem->aa_index < aa->n_arrays) {
index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
nid = aa->arrays[index];
/* 0xffff is the firmware's "invalid node" marker */
if (nid == 0xffff || nid >= MAX_NUMNODES)
nid = default_nid;
}
return nid;
}
/*
* Figure out to which domain a cpu belongs and stick it there.
* Return the id of the domain used.
*/
static int numa_setup_cpu(unsigned long lcpu)
{
int nid = 0;
struct device_node *cpu = of_get_cpu_node(lcpu, NULL);
if (!cpu) {
WARN_ON(1);
goto out;
}
nid = of_node_to_nid_single(cpu);
/* fall back to the first online node when lookup fails or the node
 * has no memory */
if (nid < 0 || !node_online(nid))
nid = first_online_node;
out:
map_cpu_to_node(lcpu, nid);
of_node_put(cpu);
return nid;
}
/*
 * CPU hotplug notifier: attach a CPU to its NUMA node on CPU_UP_PREPARE
 * and detach it on the CPU_DEAD / CPU_UP_CANCELED paths.
 *
 * Fix: "ret = NOTIFY_OK;" was placed after "break;" in the hot-unplug
 * cases and therefore unreachable, so unplug events always returned
 * NOTIFY_DONE. The assignment now precedes the break, matching the
 * hot-add path.
 */
static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}
/*
* Check and possibly modify a memory region to enforce the memory limit.
*
* Returns the size the region should have to enforce the memory limit.
* This will either be the original value of size, a truncated value,
* or zero. If the returned value of size is 0 the region should be
* discarded as it lies wholly above the memory limit.
*/
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit. Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */
	unsigned long limit = memblock_end_of_DRAM();

	if (start >= limit)
		return 0;		/* wholly above the limit: discard */

	if (start + size > limit)
		return limit - start;	/* truncate at the limit */

	return size;			/* fits entirely below the limit */
}
/*
* Reads the counter for a given entry in
* linux,drconf-usable-memory property
*/
static inline int __init read_usm_ranges(const u32 **usm)
{
/*
* For each lmb in ibm,dynamic-memory a corresponding
* entry in linux,drconf-usable-memory property contains
* a counter followed by that many (base, size) duple.
* read the counter from linux,drconf-usable-memory
*/
/* advances *usm past the counter as a side effect */
return read_n_cells(n_mem_size_cells, usm);
}
/*
* Extract NUMA information from the ibm,dynamic-reconfiguration-memory
* node. This assumes n_mem_{addr,size}_cells have been set.
*/
static void __init parse_drconf_memory(struct device_node *memory)
{
const u32 *dm, *usm;
unsigned int n, rc, ranges, is_kexec_kdump = 0;
unsigned long lmb_size, base, size, sz;
int nid;
struct assoc_arrays aa;
n = of_get_drconf_memory(memory, &dm);
if (!n)
return;
lmb_size = of_get_lmb_size(memory);
if (!lmb_size)
return;
rc = of_get_assoc_arrays(memory, &aa);
if (rc)
return;
/* check if this is a kexec/kdump kernel */
usm = of_get_usable_memory(memory);
if (usm != NULL)
is_kexec_kdump = 1;
for (; n != 0; --n) {
struct of_drconf_cell drmem;
read_drconf_cell(&drmem, &dm);
/* skip this block if the reserved bit is set in flags (0x80)
or if the block is not assigned to this partition (0x8) */
if ((drmem.flags & DRCONF_MEM_RESERVED)
|| !(drmem.flags & DRCONF_MEM_ASSIGNED))
continue;
/* normal case: one range covering the whole LMB */
base = drmem.base_addr;
size = lmb_size;
ranges = 1;
if (is_kexec_kdump) {
/* kdump: the usable-memory property narrows each LMB to
 * zero or more (base, size) sub-ranges */
ranges = read_usm_ranges(&usm);
if (!ranges) /* there are no (base, size) duple */
continue;
}
do {
if (is_kexec_kdump) {
base = read_n_cells(n_mem_addr_cells, &usm);
size = read_n_cells(n_mem_size_cells, &usm);
}
nid = of_drconf_to_nid_single(&drmem, &aa);
fake_numa_create_new_node(
((base + size) >> PAGE_SHIFT),
&nid);
node_set_online(nid);
sz = numa_enforce_memory_limit(base, size);
if (sz)
memblock_set_node(base, sz, nid);
} while (--ranges);
}
}
/* Discover the NUMA topology from the device tree: determine the common
 * associativity depth, online the nodes of all present CPUs, then assign
 * every memory range (including drconf memory) to its node.
 * Returns 0 on success, negative when NUMA is disabled or undiscoverable. */
static int __init parse_numa_properties(void)
{
struct device_node *memory;
int default_nid = 0;
unsigned long i;
if (numa_enabled == 0) {
printk(KERN_WARNING "NUMA disabled by user\n");
return -1;
}
min_common_depth = find_min_common_depth();
if (min_common_depth < 0)
return min_common_depth;
dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
/*
* Even though we connect cpus to numa domains later in SMP
* init, we need to know the node ids now. This is because
* each node to be onlined must have NODE_DATA etc backing it.
*/
for_each_present_cpu(i) {
struct device_node *cpu;
int nid;
cpu = of_get_cpu_node(i, NULL);
BUG_ON(!cpu);
nid = of_node_to_nid_single(cpu);
of_node_put(cpu);
/*
* Don't fall back to default_nid yet -- we will plug
* cpus into nodes once the memory scan has discovered
* the topology.
*/
if (nid < 0)
continue;
node_set_online(nid);
}
get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
for_each_node_by_type(memory, "memory") {
unsigned long start;
unsigned long size;
int nid;
int ranges;
const unsigned int *memcell_buf;
unsigned int len;
/* prefer the kdump-trimmed ranges when present */
memcell_buf = of_get_property(memory,
"linux,usable-memory", &len);
if (!memcell_buf || len <= 0)
memcell_buf = of_get_property(memory, "reg", &len);
if (!memcell_buf || len <= 0)
continue;
/* ranges in cell */
ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
/* these are order-sensitive, and modify the buffer pointer */
start = read_n_cells(n_mem_addr_cells, &memcell_buf);
size = read_n_cells(n_mem_size_cells, &memcell_buf);
/*
* Assumption: either all memory nodes or none will
* have associativity properties. If none, then
* everything goes to default_nid.
*/
nid = of_node_to_nid_single(memory);
if (nid < 0)
nid = default_nid;
fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
node_set_online(nid);
/* drop ranges wholly above the memory limit */
if (!(size = numa_enforce_memory_limit(start, size))) {
if (--ranges)
goto new_range;
else
continue;
}
memblock_set_node(start, size, nid);
if (--ranges)
goto new_range;
}
/*
* Now do the same thing for each MEMBLOCK listed in the
* ibm,dynamic-memory property in the
* ibm,dynamic-reconfiguration-memory node.
*/
memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (memory)
parse_drconf_memory(memory);
return 0;
}
/* Fallback when no NUMA information exists: put every memblock region on
 * node 0 (unless "fake=" boundaries split it into fake nodes). */
static void __init setup_nonnuma(void)
{
unsigned long top_of_ram = memblock_end_of_DRAM();
unsigned long total_ram = memblock_phys_mem_size();
unsigned long start_pfn, end_pfn;
unsigned int nid = 0;
struct memblock_region *reg;
printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram);
printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(top_of_ram - total_ram) >> 20);
for_each_memblock(memory, reg) {
start_pfn = memblock_region_memory_base_pfn(reg);
end_pfn = memblock_region_memory_end_pfn(reg);
/* may bump nid when a fake= boundary is crossed */
fake_numa_create_new_node(end_pfn, &nid);
memblock_set_node(PFN_PHYS(start_pfn),
PFN_PHYS(end_pfn - start_pfn), nid);
node_set_online(nid);
}
}
/* Print each online node's CPUs, compressing consecutive ids to "a-b". */
void __init dump_numa_cpu_topology(void)
{
unsigned int node;
unsigned int cpu, count;
if (min_common_depth == -1 || !numa_enabled)
return;
for_each_online_node(node) {
printk(KERN_DEBUG "Node %d CPUs:", node);
count = 0;
/*
* If we used a CPU iterator here we would miss printing
* the holes in the cpumap.
*/
for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
if (cpumask_test_cpu(cpu,
node_to_cpumask_map[node])) {
/* count tracks the length of the current run;
 * only the run's first cpu is printed here */
if (count == 0)
printk(" %u", cpu);
++count;
} else {
/* run ended: print its last member for runs of >= 2 */
if (count > 1)
printk("-%u", cpu - 1);
count = 0;
}
}
/* close a run that extends to the last cpu id */
if (count > 1)
printk("-%u", nr_cpu_ids - 1);
printk("\n");
}
}
/* Print each online node's memory, walking DRAM in SECTION_SIZE_BITS
 * steps and compressing consecutive sections into "start-end" runs. */
static void __init dump_numa_memory_topology(void)
{
unsigned int node;
unsigned int count;
if (min_common_depth == -1 || !numa_enabled)
return;
for_each_online_node(node) {
unsigned long i;
printk(KERN_DEBUG "Node %d Memory:", node);
count = 0;
for (i = 0; i < memblock_end_of_DRAM();
i += (1 << SECTION_SIZE_BITS)) {
if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
/* start of a run of sections on this node */
if (count == 0)
printk(" 0x%lx", i);
++count;
} else {
if (count > 0)
printk("-0x%lx", i);
count = 0;
}
}
/* close a run extending to the end of DRAM */
if (count > 0)
printk("-0x%lx", i);
printk("\n");
}
}
/*
 * Allocate some zeroed memory, satisfying the memblock or bootmem
 * allocator where required. @nid is the preferred node and @end_pfn is
 * the page frame number of the highest address in that node (note: a
 * pfn, not a physical address — it is shifted by PAGE_SHIFT below).
 *
 * Returns the virtual address of the memory; panics on failure.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	/* First try to stay below the node's limit ... */
	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the MEMBLOCK allocator to the
	 * bootmem allocator.  If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the MEMBLOCK allocator.
	 *
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
	 * since it would be useless.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	/* Callers rely on zeroed memory regardless of which path we took. */
	memset(ret, 0, size);
	return ret;
}
/* CPU hotplug notifier used to keep the node<->cpumask tables current. */
static struct notifier_block ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};
/*
 * Propagate memblock.reserved regions into @nid's bootmem allocator.
 * A reserved region may span several of the node's active regions, so
 * each overlap is walked and reserved piecewise.
 */
static void __init mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	struct memblock_region *reg;

	for_each_memblock(reserved, reg) {
		unsigned long physbase = reg->base;
		unsigned long size = reg->size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this memblock.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
			node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * if reserved region is contained in the active region
			 * then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region extends past the active region
			 *   get next active region that contains this
			 *   reserved region
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}
/*
 * Parse NUMA topology (or fall back to a single node), then set up a
 * per-node bootmem allocator, reserved regions and sparsemem sections.
 * Node order matters here: careful_zallocation() assumes all lower-
 * numbered nodes have already been handed over to bootmem.
 */
void __init do_init_bootmem(void)
{
	int nid;

	min_low_pfn = 0;
	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		/* Memoryless node: nothing further to set up. */
		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around.  Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;

	/*
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	 */
	setup_node_to_cpumask_map();

	register_cpu_notifier(&ppc64_numa_nb);
	/* Seed the map with the boot cpu before secondaries come up. */
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
}
/*
 * Set up the zone page-frame limits and build the free lists.  All of
 * memory is placed in ZONE_DMA on this platform.
 */
void __init paging_init(void)
{
	unsigned long zone_limits[MAX_NR_ZONES] = { 0 };

	zone_limits[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(zone_limits);
}
/*
 * Parse the "numa=" early command-line option.  Recognised substrings:
 * "off" disables NUMA, "debug" enables debug output, and "fake=..."
 * records the fake-node specification for later parsing.
 */
static int __init early_numa(char *opt)
{
	char *fake;

	if (!opt)
		return 0;

	/* The tokens may appear anywhere in the option string. */
	if (strstr(opt, "off"))
		numa_enabled = 0;
	if (strstr(opt, "debug"))
		numa_debug = 1;

	fake = strstr(opt, "fake=");
	if (fake)
		cmdline = fake + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Find the node associated with a hot added memory section for
* memory represented in the device tree by the property
* ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
*/
/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 *
 * Returns the node id, or -1 if it cannot be determined.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		/* Advances dm to the next cell as a side effect. */
		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}
/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 *
 * Returns the node id, or -1 if no memory node covers @scn_addr.
 */
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	/* Drop the reference still held when we broke out of the loop. */
	of_node_put(memory);

	return nid;
}
/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not an MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 *
 * Always returns an online node with spanned pages; BUGs if none exists.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	/* Prefer the dynamic-reconfiguration representation if present. */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	/* Fall back to any online node that actually has memory. */
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}
/*
 * Return the maximum physical address that drconfig memory hotplug can
 * reach: LMB size times the number of LMB cells, or 0 when the
 * dynamic-reconfiguration-memory node is absent.
 */
static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *dn;
	unsigned int cell_cnt = 0;
	u64 lmb_size = 0;
	const u32 *dm = NULL;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (dn) {
		cell_cnt = of_get_drconf_memory(dn, &dm);
		lmb_size = of_get_lmb_size(dn);
		of_node_put(dn);
	}

	return lmb_size * cell_cnt;
}
/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.  Falls back to the end of DRAM when drconfig gives nothing.
 */
u64 memory_hotplug_max(void)
{
        return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static void set_topology_timer(void);
/*
 * Snapshot the hypervisor's current associativity change counters into
 * vphn_cpu_change_counts[] so later polls can detect changes.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		/* volatile: the hypervisor updates the VPA behind our back */
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}
/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property.  When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.  The cached counters are
 * refreshed as a side effect.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu, nr_cpus = 0;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	cpumask_clear(changes);

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		/* volatile: updated by the hypervisor, not by us */
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_set_cpu(cpu, changes);
			nr_cpus++;
		}
	}

	return nr_cpus;
}
/*
* 6 64-bit registers unpacked into 12 32-bit associativity values. To form
* the complete property we have to add the length in the first cell.
*/
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 *
 * The hypervisor packs domain numbers into 16-bit fields: a field with
 * the MSB set carries a whole (15-bit) domain number; a field with the
 * MSB clear is the upper half of a 32-bit domain number spanning two
 * fields; 0xffff marks unused trailing fields.
 *
 * NOTE(review): the u16/u32 reinterpretation of the hcall return buffer
 * assumes the packed halves appear in property order in memory — this
 * looks big-endian specific; confirm before reusing on LE.
 *
 * Returns the number of associativity domains unpacked (also stored in
 * unpacked[0] as the property length cell).
 */
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
{
	int i, nr_assoc_doms = 0;
	const u16 *field = (const u16*) packed;

#define VPHN_FIELD_UNUSED	(0xffff)
#define VPHN_FIELD_MSB		(0x8000)
#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)

	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			/* All significant fields processed, and remaining
			 * fields contain the reserved value of all 1's.
			 * Just store them.
			 */
			unpacked[i] = *((u32*)field);
			field += 2;
		} else if (*field & VPHN_FIELD_MSB) {
			/* Data is in the lower 15 bits of this field */
			unpacked[i] = *field & VPHN_FIELD_MASK;
			field++;
			nr_assoc_doms++;
		} else {
			/* Data is in the lower 15 bits of this field
			 * concatenated with the next 16 bit field
			 */
			unpacked[i] = *((u32*)field);
			field += 2;
			nr_assoc_doms++;
		}
	}

	/* The first cell contains the length of the property */
	unpacked[0] = nr_assoc_doms;

	return nr_assoc_doms;
}
/*
 * Retrieve the new associativity information for a virtual processor's
 * home node via the H_HOME_NODE_ASSOCIATIVITY hcall.
 *
 * NOTE(review): the return buffer is unpacked even when the hcall
 * fails; retbuf is zero-initialised so this is deterministic, but the
 * caller must still check the returned rc before trusting
 * @associativity — verify all callers do.
 */
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}
/*
 * Fetch a cpu's home-node associativity, permanently disabling VPHN
 * polling if the hypervisor reports the feature unsupported or a
 * hardware fault.  Returns the hcall return code.
 */
static long vphn_get_associativity(unsigned long cpu,
					unsigned int *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
	}

	return rc;
}
/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed (as flagged in cpu_associativity_changes_mask).
 *
 * Returns 1 unconditionally, signalling the scheduler that the
 * topology may have changed.
 */
int arch_update_cpu_topology(void)
{
	int cpu, nid, old_nid;
	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
	struct device *dev;

	for_each_cpu(cpu,&cpu_associativity_changes_mask) {
		vphn_get_associativity(cpu, associativity);
		nid = associativity_to_nid(associativity);

		if (nid < 0 || !node_online(nid))
			nid = first_online_node;

		old_nid = numa_cpu_lookup_table[cpu];

		/* Disable hotplug while we update the cpu
		 * masks and sysfs.
		 */
		get_online_cpus();
		unregister_cpu_under_node(cpu, old_nid);
		unmap_cpu_from_node(cpu);
		map_cpu_to_node(cpu, nid);
		register_cpu_under_node(cpu, nid);
		put_online_cpus();

		/* Let userspace know the cpu's topology changed. */
		dev = get_cpu_device(cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}

	return 1;
}
/* Deferred work: rebuild scheduler domains after a topology change. */
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);
/* Queue a scheduler-domain rebuild; safe to call from timer context. */
void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}
/*
 * Periodic poll: if VPHN is still enabled, check for associativity
 * changes, schedule an update when any are pending, and re-arm.
 */
static void topology_timer_fn(unsigned long ignored)
{
	if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		set_topology_timer();
	}
}
static struct timer_list topology_timer =
TIMER_INITIALIZER(topology_timer_fn, 0, 0);
/* Re-arm the VPHN poll timer to fire 60 seconds from now. */
static void set_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	add_timer(&topology_timer);
}
/*
 * Start polling for VPHN associativity changes.
 *
 * Returns 1 when polling was started, 0 otherwise.  Registered as an
 * initcall via __initcall() below.
 */
int start_topology_update(void)
{
	int rc = 0;

	/* Disabled until races with load balancing are fixed */
	if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
	    get_lppaca()->shared_proc) {
		vphn_enabled = 1;
		setup_cpu_associativity_change_counters();
		init_timer_deferrable(&topology_timer);
		set_topology_timer();
		rc = 1;
	}

	return rc;
}
__initcall(start_topology_update);
/*
 * Disable polling for VPHN associativity changes.
 *
 * Returns the result of del_timer_sync() (non-zero if the timer was
 * pending).
 */
int stop_topology_update(void)
{
	vphn_enabled = 0;
	return del_timer_sync(&topology_timer);
}
#endif /* CONFIG_PPC_SPLPAR */
| gpl-2.0 |
Orange-OpenSource/linux | kernel/irq/generic-chip.c | 572 | 15129 | /*
* Library implementing the most common irq chip callback functions
*
* Copyright (C) 2011, Thomas Gleixner
*/
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>
#include "internals.h"
static LIST_HEAD(gc_list);
static DEFINE_RAW_SPINLOCK(gc_lock);
/**
 * irq_gc_noop - NOOP function
 * @d: irq_data
 *
 * Placeholder callback for chips that need no action on a given hook.
 */
void irq_gc_noop(struct irq_data *d)
{
}
/**
 * irq_gc_mask_disable_reg - Mask chip via disable register
 * @d:	irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.  Writing the irq's bit to the disable register masks it;
 * the shared mask cache is updated under gc->lock.
 */
void irq_gc_mask_disable_reg(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(mask, gc->reg_base + ct->regs.disable);
	*ct->mask_cache &= ~mask;
	irq_gc_unlock(gc);
}
/**
 * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
 * @d:	irq_data
 *
 * Chip has a single mask register.  Values of this register are cached
 * and protected by gc->lock; the cache is updated first, then written
 * back wholesale.
 */
void irq_gc_mask_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	*ct->mask_cache |= mask;
	irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);
/**
 * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
 * @d:	irq_data
 *
 * Chip has a single mask register (active-low masking: a clear bit
 * masks the irq).  Values of this register are cached and protected by
 * gc->lock.
 */
void irq_gc_mask_clr_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	*ct->mask_cache &= ~mask;
	irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
/**
 * irq_gc_unmask_enable_reg - Unmask chip via enable register
 * @d:	irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.  Writing the irq's bit to the enable register unmasks it;
 * the shared mask cache is updated under gc->lock.
 */
void irq_gc_unmask_enable_reg(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(mask, gc->reg_base + ct->regs.enable);
	*ct->mask_cache |= mask;
	irq_gc_unlock(gc);
}
/**
 * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
 * @d:	irq_data
 *
 * For chips that acknowledge by writing a 1 to the irq's bit in the
 * ack register.
 */
void irq_gc_ack_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);
/**
 * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
 * @d:	irq_data
 *
 * For chips that acknowledge by writing a 0 to the irq's bit: the
 * inverted mask (~d->mask) is written, so every bit but this irq's is
 * set.
 */
void irq_gc_ack_clr_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = ~d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
	irq_gc_unlock(gc);
}
/**
 * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
 * @d:	irq_data
 *
 * NOTE(review): unlike irq_gc_mask_disable_reg(), this writes the bit
 * to ct->regs.mask rather than ct->regs.disable, and it never updates
 * *ct->mask_cache.  That only behaves as "disable" on chips whose mask
 * register happens to be a write-one-to-disable register.  Upstream
 * later deprecated this helper for exactly this ambiguity — confirm
 * against each user before relying on it, and don't "fix" it blindly:
 * existing callers may depend on the current register offset.
 */
void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(mask, gc->reg_base + ct->regs.mask);
	irq_reg_writel(mask, gc->reg_base + ct->regs.ack);
	irq_gc_unlock(gc);
}
/**
 * irq_gc_eoi - EOI interrupt
 * @d:	irq_data
 *
 * Signal end-of-interrupt by writing the irq's bit to the eoi register.
 */
void irq_gc_eoi(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(mask, gc->reg_base + ct->regs.eoi);
	irq_gc_unlock(gc);
}
/**
 * irq_gc_set_wake - Set/clr wake bit for an interrupt
 * @d:	irq_data
 * @on:	Indicates whether the wake bit should be set or cleared
 *
 * For chips where the wake from suspend functionality is not
 * configured in a separate register and the wakeup active state is
 * just stored in a bitmask.  Returns -EINVAL if the line is not
 * wake-capable.
 */
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 bit = d->mask;

	/* Only lines marked wake-capable at setup may be configured. */
	if (!(bit & gc->wake_enabled))
		return -EINVAL;

	irq_gc_lock(gc);
	if (on)
		gc->wake_active |= bit;
	else
		gc->wake_active &= ~bit;
	irq_gc_unlock(gc);

	return 0;
}
/*
 * Common field initialization shared by the chip allocators.  The
 * primary (index 0) chip type receives the name and default flow
 * handler.
 */
static void
irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
		      int num_ct, unsigned int irq_base,
		      void __iomem *reg_base, irq_flow_handler_t handler)
{
	raw_spin_lock_init(&gc->lock);
	gc->reg_base = reg_base;
	gc->irq_base = irq_base;
	gc->num_ct = num_ct;
	gc->chip_types[0].chip.name = name;
	gc->chip_types[0].handler = handler;
}
/**
 * irq_alloc_generic_chip - Allocate a generic chip and initialize it
 * @name:	Name of the irq chip
 * @num_ct:	Number of irq_chip_type instances associated with this
 * @irq_base:	Interrupt base nr for this chip
 * @reg_base:	Register base address (virtual)
 * @handler:	Default flow handler associated with this chip
 *
 * Returns an initialized irq_chip_generic structure (with @num_ct
 * irq_chip_type slots allocated inline after it), or NULL on
 * allocation failure.  The chip defaults to the primary (index 0)
 * irq_chip_type and @handler.
 */
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
		       void __iomem *reg_base, irq_flow_handler_t handler)
{
	struct irq_chip_generic *gc;

	gc = kzalloc(sizeof(*gc) + num_ct * sizeof(struct irq_chip_type),
		     GFP_KERNEL);
	if (!gc)
		return NULL;

	irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base, handler);
	return gc;
}
EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);
/*
 * Wire up each chip type's mask_cache pointer.  Without
 * IRQ_GC_MASK_CACHE_PER_TYPE all types share gc->mask_cache (mskptr
 * deliberately keeps its value across iterations); with it, each type
 * caches its own mask register.  IRQ_GC_INIT_MASK_CACHE seeds the
 * cache from the hardware.
 */
static void
irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
{
	struct irq_chip_type *ct = gc->chip_types;
	u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
	int i;

	for (i = 0; i < gc->num_ct; i++) {
		if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
			mskptr = &ct[i].mask_cache_priv;
			mskreg = ct[i].regs.mask;
		}
		ct[i].mask_cache = mskptr;
		if (flags & IRQ_GC_INIT_MASK_CACHE)
			*mskptr = irq_reg_readl(gc->reg_base + mskreg);
	}
}
/**
 * irq_alloc_domain_generic_chip - Allocate generic chips for an irq domain
 * @d:			irq domain for which to allocate chips
 * @irqs_per_chip:	Number of interrupts each chip handles
 * @num_ct:		Number of irq_chip_type instances associated with this
 * @name:		Name of the irq chip
 * @handler:		Default flow handler associated with these chips
 * @clr:		IRQ_* bits to clear in the mapping function
 * @set:		IRQ_* bits to set in the mapping function
 * @gcflags:		Generic chip specific setup flags
 *
 * Allocates the domain chip descriptor, the per-chip pointer array and
 * all generic chips (with their chip types) in one allocation, laid
 * out as: dgc | gc pointers | gc0 + its chip types | gc1 + ... .
 * Returns 0 on success, -EBUSY if the domain already has chips,
 * -EINVAL for an empty domain, -ENOMEM on allocation failure.
 */
int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
				   int num_ct, const char *name,
				   irq_flow_handler_t handler,
				   unsigned int clr, unsigned int set,
				   enum irq_gc_flags gcflags)
{
	struct irq_domain_chip_generic *dgc;
	struct irq_chip_generic *gc;
	int numchips, sz, i;
	unsigned long flags;
	void *tmp;

	if (d->gc)
		return -EBUSY;

	numchips = DIV_ROUND_UP(d->revmap_size, irqs_per_chip);
	if (!numchips)
		return -EINVAL;

	/* Allocate a pointer, generic chip and chiptypes for each chip */
	sz = sizeof(*dgc) + numchips * sizeof(gc);
	sz += numchips * (sizeof(*gc) + num_ct * sizeof(struct irq_chip_type));

	tmp = dgc = kzalloc(sz, GFP_KERNEL);
	if (!dgc)
		return -ENOMEM;
	dgc->irqs_per_chip = irqs_per_chip;
	dgc->num_chips = numchips;
	dgc->irq_flags_to_set = set;
	dgc->irq_flags_to_clear = clr;
	dgc->gc_flags = gcflags;
	d->gc = dgc;

	/* Calc pointer to the first generic chip */
	tmp += sizeof(*dgc) + numchips * sizeof(gc);
	for (i = 0; i < numchips; i++) {
		/* Store the pointer to the generic chip */
		dgc->gc[i] = gc = tmp;
		irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
				      NULL, handler);
		gc->domain = d;
		raw_spin_lock_irqsave(&gc_lock, flags);
		list_add_tail(&gc->list, &gc_list);
		raw_spin_unlock_irqrestore(&gc_lock, flags);
		/* Calc pointer to the next generic chip */
		tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
	}
	/* NOTE(review): overwrites any name the domain already had —
	 * confirm no caller relies on the previous d->name. */
	d->name = name;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_domain_generic_chips);
/**
 * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
 * @d:		irq domain pointer
 * @hw_irq:	Hardware interrupt number
 *
 * Returns the generic chip covering @hw_irq, or NULL when the domain
 * has no generic chips or @hw_irq is out of range.
 */
struct irq_chip_generic *
irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
	struct irq_domain_chip_generic *dgc = d->gc;
	unsigned int index;

	if (!dgc)
		return NULL;

	index = hw_irq / dgc->irqs_per_chip;
	return index < dgc->num_chips ? dgc->gc[index] : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
/*
 * Separate lockdep class for interrupt chip which can nest irq_desc
 * lock.
 */
static struct lock_class_key irq_nested_lock_class;

/*
 * irq_map_generic_chip - Map a generic chip for an irq domain
 *
 * Domain .map callback: looks up the generic chip for @hw_irq, rejects
 * unused or already-installed lines, computes the irq's mask bit and
 * installs chip, handler and chip data on @virq.
 */
static int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
				irq_hw_number_t hw_irq)
{
	struct irq_data *data = irq_get_irq_data(virq);
	struct irq_domain_chip_generic *dgc = d->gc;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	struct irq_chip *chip;
	unsigned long flags;
	int idx;

	if (!d->gc)
		return -ENODEV;

	idx = hw_irq / dgc->irqs_per_chip;
	if (idx >= dgc->num_chips)
		return -EINVAL;
	gc = dgc->gc[idx];

	/* idx is now the bit position within the chip. */
	idx = hw_irq % dgc->irqs_per_chip;

	if (test_bit(idx, &gc->unused))
		return -ENOTSUPP;

	if (test_bit(idx, &gc->installed))
		return -EBUSY;

	ct = gc->chip_types;
	chip = &ct->chip;

	/* We only init the cache for the first mapping of a generic chip */
	if (!gc->installed) {
		raw_spin_lock_irqsave(&gc->lock, flags);
		irq_gc_init_mask_cache(gc, dgc->gc_flags);
		raw_spin_unlock_irqrestore(&gc->lock, flags);
	}

	/* Mark the interrupt as installed */
	set_bit(idx, &gc->installed);

	if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
		irq_set_lockdep_class(virq, &irq_nested_lock_class);

	if (chip->irq_calc_mask)
		chip->irq_calc_mask(data);
	else
		data->mask = 1 << idx;

	irq_set_chip_and_handler(virq, chip, ct->handler);
	irq_set_chip_data(virq, gc);
	irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
	return 0;
}
/* Default domain ops for domains backed by generic chips. */
struct irq_domain_ops irq_generic_chip_ops = {
	.map	= irq_map_generic_chip,
	.xlate	= irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
/**
 * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
 * @gc:		Generic irq chip holding all data
 * @msk:	Bitmask holding the irqs to initialize relative to gc->irq_base
 * @flags:	Flags for initialization
 * @clr:	IRQ_* bits to clear
 * @set:	IRQ_* bits to set
 *
 * Set up max. 32 interrupts starting from gc->irq_base. Note, this
 * initializes all interrupts to the primary irq_chip_type and its
 * associated handler.
 */
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
			    enum irq_gc_flags flags, unsigned int clr,
			    unsigned int set)
{
	struct irq_chip_type *ct = gc->chip_types;
	struct irq_chip *chip = &ct->chip;
	unsigned int i;

	raw_spin_lock(&gc_lock);
	list_add_tail(&gc->list, &gc_list);
	raw_spin_unlock(&gc_lock);

	irq_gc_init_mask_cache(gc, flags);

	/* msk is shifted out one bit per irq; i tracks the irq number. */
	for (i = gc->irq_base; msk; msk >>= 1, i++) {
		if (!(msk & 0x01))
			continue;

		if (flags & IRQ_GC_INIT_NESTED_LOCK)
			irq_set_lockdep_class(i, &irq_nested_lock_class);

		if (!(flags & IRQ_GC_NO_MASK)) {
			struct irq_data *d = irq_get_irq_data(i);

			if (chip->irq_calc_mask)
				chip->irq_calc_mask(d);
			else
				d->mask = 1 << (i - gc->irq_base);
		}
		irq_set_chip_and_handler(i, chip, ct->handler);
		irq_set_chip_data(i, gc);
		irq_modify_status(i, clr, set);
	}
	/* i stopped one past the highest bit that was set in msk. */
	gc->irq_cnt = i - gc->irq_base;
}
EXPORT_SYMBOL_GPL(irq_setup_generic_chip);
/**
 * irq_setup_alt_chip - Switch to alternative chip
 * @d:		irq_data for this interrupt
 * @type:	Flow type to be initialized
 *
 * Only to be called from chip->irq_set_type() callbacks.  Selects the
 * first chip type whose type mask matches @type and installs its chip
 * and handler.  Returns 0 on success, -EINVAL if no type matches.
 */
int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct;

	for (ct = gc->chip_types; ct < gc->chip_types + gc->num_ct; ct++) {
		if (!(ct->type & type))
			continue;
		d->chip = &ct->chip;
		irq_data_to_desc(d)->handle_irq = ct->handler;
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
/**
 * irq_remove_generic_chip - Remove a chip
 * @gc:		Generic irq chip holding all data
 * @msk:	Bitmask holding the irqs to initialize relative to gc->irq_base
 * @clr:	IRQ_* bits to clear
 * @set:	IRQ_* bits to set
 *
 * Remove up to 32 interrupts starting from gc->irq_base.  Reverses
 * irq_setup_generic_chip(): unlinks the chip from the global list and
 * detaches handler, chip and chip data from each selected irq.
 */
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
			     unsigned int clr, unsigned int set)
{
	unsigned int i = gc->irq_base;

	raw_spin_lock(&gc_lock);
	list_del(&gc->list);
	raw_spin_unlock(&gc_lock);

	for (; msk; msk >>= 1, i++) {
		if (!(msk & 0x01))
			continue;

		/* Remove handler first. That will mask the irq line */
		irq_set_handler(i, NULL);
		irq_set_chip(i, &no_irq_chip);
		irq_set_chip_data(i, NULL);
		irq_modify_status(i, clr, set);
	}
}
EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
/*
 * Return an irq_data representative for @gc, for the PM callbacks.
 * Legacy (non-domain) chips use gc->irq_base directly; domain-based
 * chips use the lowest installed irq, or NULL when none is mapped.
 */
static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
{
	unsigned int virq;

	if (!gc->domain)
		return irq_get_irq_data(gc->irq_base);

	/*
	 * We don't know which of the irqs has been actually
	 * installed. Use the first one.
	 */
	if (!gc->installed)
		return NULL;

	virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
	if (!virq)
		return NULL;
	return irq_get_irq_data(virq);
}
#ifdef CONFIG_PM
/* Syscore suspend: invoke each registered chip's irq_suspend hook. */
static int irq_gc_suspend(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;
		struct irq_data *data;

		if (!ct->chip.irq_suspend)
			continue;

		data = irq_gc_get_irq_data(gc);
		if (data)
			ct->chip.irq_suspend(data);
	}
	return 0;
}
/* Syscore resume: invoke each registered chip's irq_resume hook. */
static void irq_gc_resume(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;
		struct irq_data *data;

		if (!ct->chip.irq_resume)
			continue;

		data = irq_gc_get_irq_data(gc);
		if (data)
			ct->chip.irq_resume(data);
	}
}
#else
#define irq_gc_suspend NULL
#define irq_gc_resume NULL
#endif
/* Syscore shutdown: invoke each registered chip's irq_pm_shutdown hook. */
static void irq_gc_shutdown(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;
		struct irq_data *data;

		if (!ct->chip.irq_pm_shutdown)
			continue;

		data = irq_gc_get_irq_data(gc);
		if (data)
			ct->chip.irq_pm_shutdown(data);
	}
}
/* System-core PM hooks for all registered generic chips. */
static struct syscore_ops irq_gc_syscore_ops = {
	.suspend = irq_gc_suspend,
	.resume = irq_gc_resume,
	.shutdown = irq_gc_shutdown,
};
/* Register the generic-chip PM callbacks at device_initcall time. */
static int __init irq_gc_init_ops(void)
{
	register_syscore_ops(&irq_gc_syscore_ops);
	return 0;
}
device_initcall(irq_gc_init_ops);
| gpl-2.0 |
kimjh-sane/imx6sane-linux-3.14.28 | drivers/gpu/host1x/hw/debug_hw.c | 828 | 9172 | /*
* Copyright (C) 2010 Google, Inc.
* Author: Erik Gilling <konkers@android.com>
*
* Copyright (C) 2011-2013 NVIDIA Corporation
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "../dev.h"
#include "../debug.h"
#include "../cdma.h"
#include "../channel.h"
#define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400
/* Host1x command-stream opcodes (top 4 bits of each command word). */
enum {
	HOST1X_OPCODE_SETCLASS	= 0x00,
	HOST1X_OPCODE_INCR	= 0x01,
	HOST1X_OPCODE_NONINCR	= 0x02,
	HOST1X_OPCODE_MASK	= 0x03,
	HOST1X_OPCODE_IMM	= 0x04,
	HOST1X_OPCODE_RESTART	= 0x05,
	HOST1X_OPCODE_GATHER	= 0x06,
	HOST1X_OPCODE_EXTEND	= 0x0e,
};

/* Sub-opcodes of HOST1X_OPCODE_EXTEND (bits 27:24). */
enum {
	HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK	= 0x00,
	HOST1X_OPCODE_EXTEND_RELEASE_MLOCK	= 0x01,
};
/*
 * Decode and print one host1x command word.  Returns the number of
 * data words that follow this opcode (the caller prints those and
 * closes the bracket); 0 means the command is self-contained.
 */
static unsigned int show_channel_command(struct output *o, u32 val)
{
	unsigned mask;
	unsigned subop;

	switch (val >> 28) {
	case HOST1X_OPCODE_SETCLASS:
		mask = val & 0x3f;
		if (mask) {
			host1x_debug_output(o, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
					    val >> 6 & 0x3ff,
					    val >> 16 & 0xfff, mask);
			/* one data word per set bit in the write mask */
			return hweight8(mask);
		} else {
			host1x_debug_output(o, "SETCL(class=%03x)\n",
					    val >> 6 & 0x3ff);
			return 0;
		}

	case HOST1X_OPCODE_INCR:
		host1x_debug_output(o, "INCR(offset=%03x, [",
				    val >> 16 & 0xfff);
		return val & 0xffff;

	case HOST1X_OPCODE_NONINCR:
		host1x_debug_output(o, "NONINCR(offset=%03x, [",
				    val >> 16 & 0xfff);
		return val & 0xffff;

	case HOST1X_OPCODE_MASK:
		mask = val & 0xffff;
		host1x_debug_output(o, "MASK(offset=%03x, mask=%03x, [",
				    val >> 16 & 0xfff, mask);
		return hweight16(mask);

	case HOST1X_OPCODE_IMM:
		host1x_debug_output(o, "IMM(offset=%03x, data=%03x)\n",
				    val >> 16 & 0xfff, val & 0xffff);
		return 0;

	case HOST1X_OPCODE_RESTART:
		host1x_debug_output(o, "RESTART(offset=%08x)\n", val << 4);
		return 0;

	case HOST1X_OPCODE_GATHER:
		host1x_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
				    val >> 16 & 0xfff, val >> 15 & 0x1,
				    val >> 14 & 0x1, val & 0x3fff);
		/* one data word: the gather's base address */
		return 1;

	case HOST1X_OPCODE_EXTEND:
		subop = val >> 24 & 0xf;
		if (subop == HOST1X_OPCODE_EXTEND_ACQUIRE_MLOCK)
			host1x_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n",
					    val & 0xff);
		else if (subop == HOST1X_OPCODE_EXTEND_RELEASE_MLOCK)
			host1x_debug_output(o, "RELEASE_MLOCK(index=%d)\n",
					    val & 0xff);
		else
			host1x_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val);
		return 0;

	default:
		return 0;
	}
}
/*
 * Dump @words words of a gather buffer to @o, decoding them as a host1x
 * command stream.
 *
 * @phys_addr: bus address of the first word to dump
 * @words:     number of 32-bit words to dump
 * @cdma:      owning CDMA context (not referenced here)
 * @pin_addr:  bus address the buffer was pinned at
 * @map_addr:  CPU mapping corresponding to @pin_addr
 */
static void show_gather(struct output *o, phys_addr_t phys_addr,
			unsigned int words, struct host1x_cdma *cdma,
			phys_addr_t pin_addr, u32 *map_addr)
{
	/* Map dmaget cursor to corresponding mem handle */
	u32 offset = phys_addr - pin_addr;
	unsigned int data_count = 0, i;

	/*
	 * Sometimes we're given different hardware address to the same
	 * page - in these cases the offset will get an invalid number and
	 * we just have to bail out.
	 */
	if (offset > HOST1X_DEBUG_MAX_PAGE_OFFSET) {
		host1x_debug_output(o, "[address mismatch]\n");
		return;
	}

	for (i = 0; i < words; i++) {
		u32 addr = phys_addr + i * 4;
		u32 val = *(map_addr + offset / 4 + i);

		if (!data_count) {
			/* Start of a new command word. */
			host1x_debug_output(o, "%08x: %08x:", addr, val);
			data_count = show_channel_command(o, val);
		} else {
			/*
			 * Payload word.  data_count is unsigned and always
			 * non-zero in this branch, so the separator test
			 * must be against 1 - not 0 - otherwise the final
			 * "])" terminator is dead code and the open bracket
			 * from show_channel_command() is never closed.
			 */
			host1x_debug_output(o, "%08x%s", val,
					    data_count > 1 ? ", " : "])\n");
			data_count--;
		}
	}
}
/*
 * Dump every gather of every job currently queued on the channel's CDMA
 * sync queue.
 *
 * When the job's gathers were consolidated into a single contiguous copy
 * (job->gather_copy_mapped), that one mapping is reused for all gathers;
 * otherwise each gather's BO is temporarily mmap'ed and unmapped again.
 */
static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
{
	struct host1x_job *job;

	list_for_each_entry(job, &cdma->sync_queue, list) {
		int i;
		host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
				    job, job->syncpt_id, job->syncpt_end,
				    job->first_get, job->timeout,
				    job->num_slots, job->num_unpins);
		for (i = 0; i < job->num_gathers; i++) {
			struct host1x_job_gather *g = &job->gathers[i];
			u32 *mapped;
			if (job->gather_copy_mapped)
				mapped = (u32 *)job->gather_copy_mapped;
			else
				mapped = host1x_bo_mmap(g->bo);
			if (!mapped) {
				host1x_debug_output(o, "[could not mmap]\n");
				continue;
			}
			host1x_debug_output(o, " GATHER at %#llx+%04x, %d words\n",
					    (u64)g->base, g->offset, g->words);
			/* NOTE(review): in the gather_copy_mapped case the
			 * same mapping is used for every gather while the
			 * offset is still derived from g->base/g->offset -
			 * confirm the copy layout matches that addressing. */
			show_gather(o, g->base + g->offset, g->words, cdma,
				    g->base, mapped);
			if (!job->gather_copy_mapped)
				host1x_bo_munmap(g->bo, mapped);
		}
	}
}
/*
 * Debugfs callback: print the CDMA state of one channel.
 *
 * Reads the channel's DMA put/get/control registers and the class-bus
 * read/status registers, reports whether the channel is inactive,
 * waiting on a syncpoint (with or without a wait base), or actively
 * executing, then dumps all queued gathers via show_channel_gathers().
 */
static void host1x_debug_show_channel_cdma(struct host1x *host,
					   struct host1x_channel *ch,
					   struct output *o)
{
	struct host1x_cdma *cdma = &ch->cdma;
	u32 dmaput, dmaget, dmactrl;
	u32 cbstat, cbread;
	u32 val, base, baseval;

	dmaput = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAPUT);
	dmaget = host1x_ch_readl(ch, HOST1X_CHANNEL_DMAGET);
	dmactrl = host1x_ch_readl(ch, HOST1X_CHANNEL_DMACTRL);
	cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
	cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));

	host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev));

	/* DMA stopped or push buffer not mapped: nothing to show. */
	if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
	    !ch->cdma.push_buffer.mapped) {
		host1x_debug_output(o, "inactive\n\n");
		return;
	}

	if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
	    HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
	    HOST1X_UCLASS_WAIT_SYNCPT)
		/* Plain syncpoint wait: id in bits 31:24, value below. */
		host1x_debug_output(o, "waiting on syncpt %d val %d\n",
				    cbread >> 24, cbread & 0xffffff);
	else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
	    HOST1X_CLASS_HOST1X &&
	    HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
	    HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
		/* Wait relative to a syncpoint base register. */
		base = (cbread >> 16) & 0xff;
		baseval =
			host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
		val = cbread & 0xffff;
		host1x_debug_output(o, "waiting on syncpt %d val %d (base %d = %d; offset = %d)\n",
				    cbread >> 24, baseval + val, base,
				    baseval, val);
	} else
		host1x_debug_output(o, "active class %02x, offset %04x, val %08x\n",
				    HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat),
				    HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat),
				    cbread);

	host1x_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
			    dmaput, dmaget, dmactrl);
	host1x_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);

	show_channel_gathers(o, cdma);
	host1x_debug_output(o, "\n");
}
/*
 * Debugfs callback: dump the contents of one channel's command FIFO.
 *
 * Uses the CFPEEK window registers to non-destructively walk the FIFO
 * from the read pointer to the write pointer, decoding each word with
 * show_channel_command().
 */
static void host1x_debug_show_channel_fifo(struct host1x *host,
					   struct host1x_channel *ch,
					   struct output *o)
{
	u32 val, rd_ptr, wr_ptr, start, end;
	unsigned int data_count = 0;

	host1x_debug_output(o, "%d: fifo:\n", ch->id);

	val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
	host1x_debug_output(o, "FIFOSTAT %08x\n", val);
	if (HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(val)) {
		host1x_debug_output(o, "[empty]\n");
		return;
	}

	/* Enable FIFO peeking for this channel and fetch the pointers. */
	host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
	host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
			   HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id),
			   HOST1X_SYNC_CFPEEK_CTRL);
	val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_PTRS);
	rd_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(val);
	wr_ptr = HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(val);
	val = host1x_sync_readl(host, HOST1X_SYNC_CF_SETUP(ch->id));
	start = HOST1X_SYNC_CF_SETUP_BASE_V(val);
	end = HOST1X_SYNC_CF_SETUP_LIMIT_V(val);

	do {
		/* Re-arm the peek window at the current read pointer. */
		host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
		host1x_sync_writel(host, HOST1X_SYNC_CFPEEK_CTRL_ENA_F(1) |
				   HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(ch->id) |
				   HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(rd_ptr),
				   HOST1X_SYNC_CFPEEK_CTRL);
		val = host1x_sync_readl(host, HOST1X_SYNC_CFPEEK_READ);

		if (!data_count) {
			host1x_debug_output(o, "%08x:", val);
			data_count = show_channel_command(o, val);
		} else {
			/*
			 * Payload word.  data_count is unsigned and always
			 * non-zero in this branch, so compare against 1 -
			 * not 0 - otherwise the "])" terminator is dead
			 * code and the open bracket never gets closed.
			 */
			host1x_debug_output(o, "%08x%s", val,
					    data_count > 1 ? ", " : "])\n");
			data_count--;
		}

		/* The FIFO is circular between start and end. */
		if (rd_ptr == end)
			rd_ptr = start;
		else
			rd_ptr++;
	} while (rd_ptr != wr_ptr);

	/* Command truncated by the FIFO contents: close the bracket. */
	if (data_count)
		host1x_debug_output(o, ", ...])\n");
	host1x_debug_output(o, "\n");

	host1x_sync_writel(host, 0x0, HOST1X_SYNC_CFPEEK_CTRL);
}
/*
 * Debugfs callback: report the owner (channel, CPU, or none) of every
 * module lock supported by this host1x instance.
 */
static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
{
	int idx;

	host1x_debug_output(o, "---- mlocks ----\n");

	for (idx = 0; idx < host1x_syncpt_nb_mlocks(host); idx++) {
		u32 val = host1x_sync_readl(host,
					    HOST1X_SYNC_MLOCK_OWNER(idx));

		if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(val))
			host1x_debug_output(o, "%d: locked by channel %d\n",
				idx, HOST1X_SYNC_MLOCK_OWNER_CHID_F(val));
		else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(val))
			host1x_debug_output(o, "%d: locked by cpu\n", idx);
		else
			host1x_debug_output(o, "%d: unlocked\n", idx);
	}

	host1x_debug_output(o, "\n");
}
/* Debug callbacks handed to the host1x core debugfs machinery. */
static const struct host1x_debug_ops host1x_debug_ops = {
	.show_channel_cdma = host1x_debug_show_channel_cdma,
	.show_channel_fifo = host1x_debug_show_channel_fifo,
	.show_mlocks = host1x_debug_show_mlocks,
};
| gpl-2.0 |
dpuyosa/android_kernel_wiko_l5460 | fs/jffs2/malloc.c | 1596 | 7716 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/jffs2.h>
#include "nodelist.h"
/* These are initialised to NULL in the kernel startup code.
   If you're porting to other operating systems, beware */

/* Slab caches for JFFS2's fixed-size in-core and on-media structures.
 * Created by jffs2_create_slab_caches(), torn down by
 * jffs2_destroy_slab_caches(). */
static struct kmem_cache *full_dnode_slab;
static struct kmem_cache *raw_dirent_slab;
static struct kmem_cache *raw_inode_slab;
static struct kmem_cache *tmp_dnode_info_slab;
static struct kmem_cache *raw_node_ref_slab;
static struct kmem_cache *node_frag_slab;
static struct kmem_cache *inode_cache_slab;
#ifdef CONFIG_JFFS2_FS_XATTR
static struct kmem_cache *xattr_datum_cache;
static struct kmem_cache *xattr_ref_cache;
#endif
/*
 * Create every slab cache JFFS2 uses.
 *
 * Returns 0 on success or -ENOMEM after destroying any caches that were
 * already created.  Caches holding on-media structures (raw dirent/inode)
 * are hardware-cacheline aligned.
 */
int __init jffs2_create_slab_caches(void)
{
	full_dnode_slab = kmem_cache_create("jffs2_full_dnode",
					    sizeof(struct jffs2_full_dnode),
					    0, 0, NULL);
	if (!full_dnode_slab)
		goto err;
	raw_dirent_slab = kmem_cache_create("jffs2_raw_dirent",
					    sizeof(struct jffs2_raw_dirent),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (!raw_dirent_slab)
		goto err;
	raw_inode_slab = kmem_cache_create("jffs2_raw_inode",
					   sizeof(struct jffs2_raw_inode),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (!raw_inode_slab)
		goto err;
	tmp_dnode_info_slab = kmem_cache_create("jffs2_tmp_dnode",
						sizeof(struct jffs2_tmp_dnode_info),
						0, 0, NULL);
	if (!tmp_dnode_info_slab)
		goto err;
	/* One extra slot per block holds the REF_LINK_NODE sentinel used
	 * to chain refblocks together (see jffs2_alloc_refblock()). */
	raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
					      sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
					      0, 0, NULL);
	if (!raw_node_ref_slab)
		goto err;
	node_frag_slab = kmem_cache_create("jffs2_node_frag",
					   sizeof(struct jffs2_node_frag),
					   0, 0, NULL);
	if (!node_frag_slab)
		goto err;
	inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
					     sizeof(struct jffs2_inode_cache),
					     0, 0, NULL);
	if (!inode_cache_slab)
		goto err;
#ifdef CONFIG_JFFS2_FS_XATTR
	xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
					      sizeof(struct jffs2_xattr_datum),
					      0, 0, NULL);
	if (!xattr_datum_cache)
		goto err;
	xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref",
					    sizeof(struct jffs2_xattr_ref),
					    0, 0, NULL);
	if (!xattr_ref_cache)
		goto err;
#endif
	return 0;
err:
	/* Partial failure: release whatever was created so far. */
	jffs2_destroy_slab_caches();
	return -ENOMEM;
}
/*
 * Tear down every slab cache created by jffs2_create_slab_caches().
 *
 * Caches that were never created (or whose creation failed) are still
 * NULL and are skipped, so this is also safe to call from the error
 * path of jffs2_create_slab_caches().
 */
void jffs2_destroy_slab_caches(void)
{
	if (full_dnode_slab)
		kmem_cache_destroy(full_dnode_slab);
	if (raw_dirent_slab)
		kmem_cache_destroy(raw_dirent_slab);
	if (raw_inode_slab)
		kmem_cache_destroy(raw_inode_slab);
	if (tmp_dnode_info_slab)
		kmem_cache_destroy(tmp_dnode_info_slab);
	if (raw_node_ref_slab)
		kmem_cache_destroy(raw_node_ref_slab);
	if (node_frag_slab)
		kmem_cache_destroy(node_frag_slab);
	if (inode_cache_slab)
		kmem_cache_destroy(inode_cache_slab);
#ifdef CONFIG_JFFS2_FS_XATTR
	if (xattr_datum_cache)
		kmem_cache_destroy(xattr_datum_cache);
	if (xattr_ref_cache)
		kmem_cache_destroy(xattr_ref_cache);
#endif
}
/*
 * Allocate a full_dirent with room for a @namesize-byte name appended.
 * Variable-sized, so it comes from kmalloc rather than a slab cache.
 */
struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
{
	struct jffs2_full_dirent *fd =
		kmalloc(sizeof(struct jffs2_full_dirent) + namesize,
			GFP_KERNEL);

	dbg_memalloc("%p\n", fd);
	return fd;
}
/* Free a dirent obtained from jffs2_alloc_full_dirent(). */
void jffs2_free_full_dirent(struct jffs2_full_dirent *x)
{
	dbg_memalloc("%p\n", x);
	kfree(x);
}
/* Allocate a full_dnode from its dedicated slab cache. */
struct jffs2_full_dnode *jffs2_alloc_full_dnode(void)
{
	struct jffs2_full_dnode *fn =
		kmem_cache_alloc(full_dnode_slab, GFP_KERNEL);

	dbg_memalloc("%p\n", fn);
	return fn;
}
/* Return a full_dnode to its slab cache. */
void jffs2_free_full_dnode(struct jffs2_full_dnode *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(full_dnode_slab, x);
}
/* Allocate an on-media raw dirent from its slab cache. */
struct jffs2_raw_dirent *jffs2_alloc_raw_dirent(void)
{
	struct jffs2_raw_dirent *rd =
		kmem_cache_alloc(raw_dirent_slab, GFP_KERNEL);

	dbg_memalloc("%p\n", rd);
	return rd;
}
/* Return a raw dirent to its slab cache. */
void jffs2_free_raw_dirent(struct jffs2_raw_dirent *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_dirent_slab, x);
}
/* Allocate an on-media raw inode from its slab cache. */
struct jffs2_raw_inode *jffs2_alloc_raw_inode(void)
{
	struct jffs2_raw_inode *ri =
		kmem_cache_alloc(raw_inode_slab, GFP_KERNEL);

	dbg_memalloc("%p\n", ri);
	return ri;
}
/* Return a raw inode to its slab cache. */
void jffs2_free_raw_inode(struct jffs2_raw_inode *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_inode_slab, x);
}
/* Allocate a temporary dnode-info record from its slab cache. */
struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void)
{
	struct jffs2_tmp_dnode_info *td =
		kmem_cache_alloc(tmp_dnode_info_slab, GFP_KERNEL);

	dbg_memalloc("%p\n", td);
	return td;
}
/* Return a temporary dnode-info record to its slab cache. */
void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(tmp_dnode_info_slab, x);
}
/*
 * Allocate one block of REFS_PER_BLOCK raw node refs plus a trailing
 * REF_LINK_NODE sentinel.
 *
 * All usable slots are marked REF_EMPTY_NODE; the sentinel's
 * next_in_ino pointer is used to chain further refblocks (see
 * jffs2_prealloc_raw_node_refs()).
 */
static struct jffs2_raw_node_ref *jffs2_alloc_refblock(void)
{
	struct jffs2_raw_node_ref *ret;

	ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
	if (ret) {
		int i = 0;
		for (i=0; i < REFS_PER_BLOCK; i++) {
			ret[i].flash_offset = REF_EMPTY_NODE;
			ret[i].next_in_ino = NULL;
		}
		/* Terminating sentinel slot (i == REFS_PER_BLOCK here). */
		ret[i].flash_offset = REF_LINK_NODE;
		ret[i].next_in_ino = NULL;
	}
	return ret;
}
/*
 * Guarantee that at least @nr empty raw-node-ref slots are available in
 * @jeb's chained refblock list, allocating new refblocks as needed.
 *
 * Walks from jeb->last_node, following REF_LINK_NODE sentinels into the
 * next refblock, and counts empty slots until @nr are accounted for.
 * Returns 0 on success or -ENOMEM if a refblock allocation fails.
 */
int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb, int nr)
{
	struct jffs2_raw_node_ref **p, *ref;
	int i = nr;

	dbg_memalloc("%d\n", nr);

	p = &jeb->last_node;
	ref = *p;

	dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset);

	/* If jeb->last_node is really a valid node then skip over it */
	if (ref && ref->flash_offset != REF_EMPTY_NODE)
		ref++;

	while (i) {
		if (!ref) {
			/* End of chain: link in a fresh refblock. */
			dbg_memalloc("Allocating new refblock linked from %p\n", p);
			ref = *p = jffs2_alloc_refblock();
			if (!ref)
				return -ENOMEM;
		}
		if (ref->flash_offset == REF_LINK_NODE) {
			/* Sentinel: follow the link to the next refblock. */
			p = &ref->next_in_ino;
			ref = *p;
			continue;
		}
		i--;
		ref++;
	}
	jeb->allocated_refs = nr;

	dbg_memalloc("Reserved %d refs for block @0x%08x, last_node is %p (%08x,%p)\n",
		     nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset,
		     jeb->last_node->next_in_ino);

	return 0;
}
/* Return a whole refblock (see jffs2_alloc_refblock()) to its slab. */
void jffs2_free_refblock(struct jffs2_raw_node_ref *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(raw_node_ref_slab, x);
}
/* Allocate a node fragment from its slab cache. */
struct jffs2_node_frag *jffs2_alloc_node_frag(void)
{
	struct jffs2_node_frag *frag =
		kmem_cache_alloc(node_frag_slab, GFP_KERNEL);

	dbg_memalloc("%p\n", frag);
	return frag;
}
/* Return a node fragment to its slab cache. */
void jffs2_free_node_frag(struct jffs2_node_frag *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(node_frag_slab, x);
}
/* Allocate an in-core inode cache entry from its slab cache. */
struct jffs2_inode_cache *jffs2_alloc_inode_cache(void)
{
	struct jffs2_inode_cache *ic =
		kmem_cache_alloc(inode_cache_slab, GFP_KERNEL);

	dbg_memalloc("%p\n", ic);
	return ic;
}
/* Return an inode cache entry to its slab cache. */
void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
{
	dbg_memalloc("%p\n", x);
	kmem_cache_free(inode_cache_slab, x);
}
#ifdef CONFIG_JFFS2_FS_XATTR
/*
 * Allocate and initialise an xattr datum: zeroed, classed as
 * RAWNODE_CLASS_XATTR_DATUM, self-referencing node pointer and an
 * empty xindex list.  Returns NULL on allocation failure.
 */
struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
{
	struct jffs2_xattr_datum *xd;
	xd = kmem_cache_zalloc(xattr_datum_cache, GFP_KERNEL);
	dbg_memalloc("%p\n", xd);
	if (!xd)
		return NULL;

	xd->class = RAWNODE_CLASS_XATTR_DATUM;
	/* Points at itself until a real raw node ref is attached. */
	xd->node = (void *)xd;
	INIT_LIST_HEAD(&xd->xindex);
	return xd;
}
/* Return an xattr datum to its slab cache. */
void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd)
{
	dbg_memalloc("%p\n", xd);
	kmem_cache_free(xattr_datum_cache, xd);
}
/*
 * Allocate and initialise an xattr reference: zeroed, classed as
 * RAWNODE_CLASS_XATTR_REF with a self-referencing node pointer.
 * Returns NULL on allocation failure.
 */
struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
{
	struct jffs2_xattr_ref *ref;
	ref = kmem_cache_zalloc(xattr_ref_cache, GFP_KERNEL);
	dbg_memalloc("%p\n", ref);
	if (!ref)
		return NULL;

	ref->class = RAWNODE_CLASS_XATTR_REF;
	/* Points at itself until a real raw node ref is attached. */
	ref->node = (void *)ref;
	return ref;
}
/* Return an xattr reference to its slab cache. */
void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref)
{
	dbg_memalloc("%p\n", ref);
	kmem_cache_free(xattr_ref_cache, ref);
}
#endif
| gpl-2.0 |
zeeshanhussain/inazuma-msm8916 | drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | 1852 | 2478 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <subdev/mc.h>
/* Per-device state for the NV50 master-control (MC) subdev; currently
 * just wraps the common nouveau_mc base. */
struct nv50_mc_priv {
	struct nouveau_mc base;
};
/* NV50 interrupt-status bitmask to engine/subdev dispatch table,
 * terminated by an empty entry. */
static const struct nouveau_mc_intr
nv50_mc_intr[] = {
	{ 0x00000001, NVDEV_ENGINE_MPEG },
	{ 0x00000100, NVDEV_ENGINE_FIFO },
	{ 0x00001000, NVDEV_ENGINE_GR },
	{ 0x00004000, NVDEV_ENGINE_CRYPT },	/* NV84- */
	{ 0x00008000, NVDEV_ENGINE_BSP },	/* NV84- */
	{ 0x00100000, NVDEV_SUBDEV_TIMER },
	{ 0x00200000, NVDEV_SUBDEV_GPIO },
	{ 0x04000000, NVDEV_ENGINE_DISP },
	{ 0x10000000, NVDEV_SUBDEV_BUS },
	{ 0x80000000, NVDEV_ENGINE_SW },
	{ 0x0000d101, NVDEV_SUBDEV_FB },
	{},
};
/*
 * Constructor for the NV50 MC subdev object.
 *
 * Creates the common MC base with the NV50 interrupt table.  *pobject
 * is published before the error check - presumably so the core can tear
 * down a partially constructed object; matches other nouveau ctors.
 */
static int
nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	     struct nouveau_oclass *oclass, void *data, u32 size,
	     struct nouveau_object **pobject)
{
	struct nv50_mc_priv *priv;
	int ret;

	ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	return 0;
}
/*
 * Initialise the NV50 MC: ungate every engine via the master enable
 * register (0x200), then run the common MC init.
 */
int
nv50_mc_init(struct nouveau_object *object)
{
	struct nv50_mc_priv *priv = (void *)object;
	nv_wr32(priv, 0x000200, 0xffffffff); /* everything on */
	return nouveau_mc_init(&priv->base);
}
/* Object class description binding the NV50 MC subdev (handle 0x50) to
 * its constructor/init and the shared MC dtor/fini. */
struct nouveau_oclass
nv50_mc_oclass = {
	.handle = NV_SUBDEV(MC, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_mc_ctor,
		.dtor = _nouveau_mc_dtor,
		.init = nv50_mc_init,
		.fini = _nouveau_mc_fini,
	},
};
| gpl-2.0 |
mericon/Xperia-S-msm8660 | drivers/net/wireless/iwlegacy/iwl4965-base.c | 2364 | 104245 | /******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <net/mac80211.h>
#include <asm/div64.h>
#define DRV_NAME "iwl4965"
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-sta.h"
#include "iwl-4965-calib.h"
#include "iwl-4965.h"
#include "iwl-4965-led.h"
/******************************************************************************
*
* module boiler plate
*
******************************************************************************/
/*
* module name, copyright, version, etc.
*/
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
#define VD "d"
#else
#define VD
#endif
#define DRV_VERSION IWLWIFI_VERSION VD
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("iwl4965");
/*
 * Recompute the RX chain selection for every RXON context and commit
 * any context whose active chain mask no longer matches its staging
 * value.  No-op when the device ops provide no set_rxon_chain hook.
 */
void iwl4965_update_chain_flags(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;

	if (!priv->cfg->ops->hcmd->set_rxon_chain)
		return;

	for_each_context(priv, ctx) {
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
		if (ctx->active.rx_chain != ctx->staging.rx_chain)
			iwl_legacy_commit_rxon(priv, ctx);
	}
}
/*
 * Free every frame on the driver's free-frame list and reset the
 * outstanding-frame counter, warning if any frames are unaccounted for
 * (i.e. still in use when the list is cleared).
 */
static void iwl4965_clear_free_frames(struct iwl_priv *priv)
{
	struct list_head *element;

	IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
		       priv->frames_count);

	while (!list_empty(&priv->free_frames)) {
		element = priv->free_frames.next;
		list_del(element);
		kfree(list_entry(element, struct iwl_frame, list));
		priv->frames_count--;
	}

	/* Anything left was never returned via iwl4965_free_frame(). */
	if (priv->frames_count) {
		IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
			 priv->frames_count);
		priv->frames_count = 0;
	}
}
/*
 * Take a frame from the free list, or allocate a fresh zeroed one when
 * the list is empty (bumping frames_count).  Returns NULL only on
 * allocation failure.
 */
static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
{
	struct list_head *node;
	struct iwl_frame *frame;

	if (!list_empty(&priv->free_frames)) {
		node = priv->free_frames.next;
		list_del(node);
		return list_entry(node, struct iwl_frame, list);
	}

	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
	if (!frame) {
		IWL_ERR(priv, "Could not allocate frame!\n");
		return NULL;
	}

	priv->frames_count++;
	return frame;
}
/* Scrub a frame and put it back on the free list for reuse. */
static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &priv->free_frames);
}
static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,
int left)
{
lockdep_assert_held(&priv->mutex);
if (!priv->beacon_skb)
return 0;
if (priv->beacon_skb->len > left)
return 0;
memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
return priv->beacon_skb->len;
}
/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
				   struct iwl_tx_beacon_cmd *tx_beacon_cmd,
				   u8 *beacon, u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The index is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
	/* Each IE is (id, length, data...); hop length+2 bytes at a time. */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx+1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx+1];
	} else
		IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
}
/*
 * Build a REPLY_TX_BEACON command in @frame from the cached beacon.
 *
 * Fills the TX command fields, locates the TIM element, and picks the
 * lowest PLCP rate with a toggled TX antenna.  Returns the total
 * command size in bytes, or 0 on failure (no beacon context, oversized
 * or missing beacon template).  Caller must hold priv->mutex.
 */
static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
				       struct iwl_frame *frame)
{
	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;

	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents */
	frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
	tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields */
	iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
			frame_size);

	/* Set up packet rate and flags */
	rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
	priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					      priv->hw_params.valid_tx_ant);
	rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
	/* CCK rates need the CCK modulation flag set. */
	if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
			rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
/*
 * Build and send a REPLY_TX_BEACON command to the uCode.
 *
 * Returns 0 on success, -ENOMEM if no frame buffer could be obtained,
 * -EINVAL if the beacon command could not be configured, or the error
 * from the command transport.  The frame buffer is recycled on all
 * paths after submission.
 */
int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
{
	struct iwl_frame *frame;
	unsigned int frame_size;
	int rc;

	frame = iwl4965_get_free_frame(priv);
	if (!frame) {
		IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
			  "command.\n");
		return -ENOMEM;
	}

	frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
	if (!frame_size) {
		IWL_ERR(priv, "Error configuring the beacon command\n");
		iwl4965_free_frame(priv, frame);
		return -EINVAL;
	}

	rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
			      &frame->u.cmd[0]);

	iwl4965_free_frame(priv, frame);

	return rc;
}
/*
 * Extract the (up to 36-bit) DMA address of transfer buffer @idx from a
 * TFD.  The low 32 bits live in tb->lo; on configs with wide dma_addr_t
 * the top 4 bits come from the low nibble of hi_n_len.
 */
static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}
/* Length of transfer buffer @idx: the upper 12 bits of hi_n_len. */
static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	return le16_to_cpu(tfd->tbs[idx].hi_n_len) >> 4;
}
/*
 * Store transfer buffer @idx in a TFD: low 32 address bits in tb->lo,
 * address bits 35:32 in the low nibble of hi_n_len, and the 12-bit
 * length in its upper bits.  Also bumps num_tbs to cover @idx.
 */
static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}
/* Number of valid transfer buffers in a TFD (low 5 bits of num_tbs). */
static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}
/**
 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Unmaps the TX command (buffer 0, bidirectional) and all data chunks
 * (to-device), then frees the associated skb if one is attached.
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
	struct iwl_tfd *tfd;
	struct pci_dev *dev = priv->pci_dev;
	int index = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[index];

	/* Sanity check on number of chunks */
	num_tbs = iwl4965_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev,
				dma_unmap_addr(&txq->meta[index], mapping),
				dma_unmap_len(&txq->meta[index], len),
				PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
				iwl4965_tfd_tb_get_len(tfd, i),
				PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}
/*
 * Append one DMA chunk (@addr, @len) to the TFD at the queue's write
 * pointer, optionally zeroing the TFD first (@reset).
 *
 * Returns 0 on success or -EINVAL when the TFD already holds the
 * maximum number of buffers.  @addr must fit in 36 bits; unaligned
 * addresses are reported but still attached.  @pad is unused here.
 */
int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset, u8 pad)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct iwl_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl4965_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}
/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 *
 * Always returns 0.
 */
int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
			 struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address */
	/* The register takes the address in units of 256 bytes (>> 8). */
	iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			txq->q.dma_addr >> 8);

	return 0;
}
/******************************************************************************
*
* Generic RX handler implementations
*
******************************************************************************/
/*
 * RX handler for the uCode ALIVE notification.
 *
 * Copies the alive response into the matching card_alive structure
 * (init vs. runtime image, chosen by ver_subtype) and, when the status
 * is valid, schedules the corresponding alive-start work after a short
 * delay.
 */
static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
		       "0x%01X 0x%01X\n",
		       palive->is_valid, palive->ver_type,
		       palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
		memcpy(&priv->card_alive_init,
		       &pkt->u.alive_frame,
		       sizeof(struct iwl_init_alive_resp));
		pwork = &priv->init_alive_start;
	} else {
		IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
		memcpy(&priv->card_alive, &pkt->u.alive_frame,
		       sizeof(struct iwl_alive_resp));
		pwork = &priv->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(priv->workqueue, pwork,
				   msecs_to_jiffies(5));
	else
		IWL_WARN(priv, "uCode did not respond OK.\n");
}
/**
 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
 *
 * This callback is provided in order to send a statistics request.
 *
 * This timer function is continually reset to execute within
 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
 * was received.  We need to ensure we receive the statistics in order
 * to update the temperature used for calibrating the TXPOWER.
 */
static void iwl4965_bg_statistics_periodic(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* dont send host command if rf-kill is on */
	if (!iwl_legacy_is_ready_rf(priv))
		return;

	/* Async request: timer context cannot sleep waiting for a reply. */
	iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
}
/*
 * Read @num_events entries of the uCode event log from SRAM, starting
 * at index @start_idx, and emit them as tracepoints.
 *
 * @mode selects the log entry layout: 0 = two words per entry (event,
 * data - no timestamp), otherwise three words (event, timestamp, data).
 * Grabs NIC access around the SRAM reads so the device stays powered.
 */
static void iwl4965_print_cont_event_trace(struct iwl_priv *priv, u32 base,
					u32 start_idx, u32 num_events,
					u32 mode)
{
	u32 i;
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */
	unsigned long reg_flags;

	/* Skip the 4-word log header, then index by entry size. */
	if (mode == 0)
		ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
	else
		ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));

	/* Make sure device is powered up for SRAM reads */
	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (iwl_grab_nic_access(priv)) {
		spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
		return;
	}

	/* Set starting address; reads will auto-increment */
	_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
	rmb();

	/*
	 * "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing.
	 */
	for (i = 0; i < num_events; i++) {
		ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
							0, time, ev);
		} else {
			data = _iwl_legacy_read_direct32(priv,
						HBUS_TARG_MEM_RDAT);
			trace_iwlwifi_legacy_dev_ucode_cont_event(priv,
						time, data, ev);
		}
	}
	/* Allow device to power down */
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
/*
 * Emit all uCode event-log entries written since the last invocation.
 *
 * Reads the log header (capacity, mode, wrap count, next write index)
 * from SRAM and dumps the new region.  When the log wrapped since last
 * time, the tail of the ring is dumped first, then the head up to the
 * current write index; wrap statistics are updated either way.
 */
static void iwl4965_continuous_event_trace(struct iwl_priv *priv)
{
	u32 capacity;   /* event log capacity in # entries */
	u32 base;       /* SRAM byte address of event log header */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */

	if (priv->ucode_type == UCODE_INIT)
		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		capacity = iwl_legacy_read_targ_mem(priv, base);
		num_wraps = iwl_legacy_read_targ_mem(priv,
						base + (2 * sizeof(u32)));
		mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
		next_entry = iwl_legacy_read_targ_mem(priv,
						base + (3 * sizeof(u32)));
	} else
		return;

	if (num_wraps == priv->event_log.num_wraps) {
		/* No wrap: dump the new entries in one contiguous run. */
		iwl4965_print_cont_event_trace(priv,
				       base, priv->event_log.next_entry,
				       next_entry - priv->event_log.next_entry,
				       mode);
		priv->event_log.non_wraps_count++;
	} else {
		if ((num_wraps - priv->event_log.num_wraps) > 1)
			priv->event_log.wraps_more_count++;
		else
			priv->event_log.wraps_once_count++;
		trace_iwlwifi_legacy_dev_ucode_wrap_event(priv,
				num_wraps - priv->event_log.num_wraps,
				next_entry, priv->event_log.next_entry);
		if (next_entry < priv->event_log.next_entry) {
			/* Tail first (old cursor to end), then head. */
			iwl4965_print_cont_event_trace(priv, base,
					priv->event_log.next_entry,
					capacity - priv->event_log.next_entry,
					mode);

			iwl4965_print_cont_event_trace(priv, base, 0,
						next_entry, mode);
		} else {
			iwl4965_print_cont_event_trace(priv, base,
					next_entry, capacity - next_entry,
					mode);

			iwl4965_print_cont_event_trace(priv, base, 0,
						next_entry, mode);
		}
	}
	priv->event_log.num_wraps = num_wraps;
	priv->event_log.next_entry = next_entry;
}
/**
 * iwl4965_bg_ucode_trace - Timer callback to log ucode event
 *
 * The timer is continually set to execute every
 * UCODE_TRACE_PERIOD milliseconds after the last timer expired
 * this function is to perform continuous uCode event logging operation
 * if enabled
 */
static void iwl4965_bg_ucode_trace(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (priv->event_log.ucode_trace) {
		iwl4965_continuous_event_trace(priv);
		/* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
		mod_timer(&priv->ucode_trace,
			 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
	}
}
/*
 * Handle a BEACON_NOTIFICATION from the uCode: record whether we are
 * currently the IBSS beacon manager (used when deciding who beacons).
 */
static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl4965_beacon_notif *beacon =
		(struct iwl4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	/* rate/status dump is debug-only; 'rate' would be unused otherwise */
	u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
		"tsf %d %d rate %d\n",
		le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif
	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
/*
 * React to a critical-temperature (CT) kill from the uCode: stop all mac80211
 * queues and tell the device to exit to a safe low-power state.
 */
static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
{
	unsigned long flags;

	IWL_DEBUG_POWER(priv, "Stop all queues\n");

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	/* read back to make sure the write reached the device */
	iwl_read32(priv, CSR_UCODE_DRV_GP1);

	/* grab/release cycle — NOTE(review): presumably forces the NIC
	 * awake long enough to latch the CT_KILL_EXIT bit; confirm against
	 * the CSR programming docs */
	spin_lock_irqsave(&priv->reg_lock, flags);
	if (!iwl_grab_nic_access(priv))
		iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}
/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
					struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	/* snapshot of status taken before we update STATUS_RF_KILL_HW,
	 * used below to detect whether the HW kill state changed */
	unsigned long status = priv->status;

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		/* block host commands while the card is disabled ... */
		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		/* ... but unblock again unless RXON itself is disabled */
		if (!(flags & RXON_CARD_DISABLED)) {
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
	}

	if (flags & CT_CARD_DISABLED)
		iwl4965_perform_ct_kill_task(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	if (!(flags & RXON_CARD_DISABLED))
		iwl_legacy_scan_cancel(priv);

	/* Notify rfkill only when the HW kill state actually changed;
	 * otherwise wake anyone blocked waiting on a command */
	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up(&priv->wait_command_queue);
}
/**
 * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host.
 *
 * This function chains into the hardware specific files for them to setup
 * any hardware specific handlers as well.
 */
static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
{
	priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
	priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
	priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
	priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
			iwl_legacy_rx_spectrum_measure_notif;
	priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
	priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
	    iwl_legacy_rx_pm_debug_statistics_notif;
	priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * statistics request from the host as well as for the periodic
	 * statistics notifications (after received beacons) from the uCode.
	 */
	priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
	priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;

	/* scan-related notifications are registered by the common code */
	iwl_legacy_setup_rx_scan_handlers(priv);

	/* status change handler */
	priv->rx_handlers[CARD_STATE_NOTIFICATION] =
					iwl4965_rx_card_state_notif;

	priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
	    iwl4965_rx_missed_beacon_notif;
	/* Rx handlers */
	priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
	priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
	/* block ack */
	priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
	/* Set up hardware specific Rx handlers */
	priv->cfg->ops->lib->rx_handler_setup(priv);
}
/**
 * iwl4965_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 *
 * Walks the Rx queue from the driver's read index up to the device's
 * closed_rb_num write index, dispatching each packet, reclaiming command
 * buffers, and recycling (or retiring) each Rx page.
 */
void iwl4965_rx_handle(struct iwl_priv *priv)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_rx_queue *rxq = &priv->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	/* more than half the queue empty: replenish aggressively mid-loop */
	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		pci_unmap_page(priv->pci_dev, rxb->page_dma,
			       PAGE_SIZE << priv->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_legacy_dev_rx(priv, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
			(pkt->hdr.cmd != REPLY_RX) &&
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		/* Based on type of command response or notification,
		 *   handle those that need handling via function in
		 *   rx_handlers table.  See iwl4965_setup_rx_handlers() */
		if (priv->rx_handlers[pkt->hdr.cmd]) {
			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
				i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
		} else {
			/* No handling needed */
			IWL_DEBUG_RX(priv,
				"r %d i %d No handler needed for %s, 0x%02x\n",
				r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
		}

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some rx_handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking iwl_legacy_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				iwl_legacy_tx_cmd_complete(priv, rxb);
			else
				IWL_WARN(priv, "Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
				0, PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwl4965_rx_replenish_now(priv);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwl4965_rx_replenish_now(priv);
	else
		iwl4965_rx_queue_restock(priv);
}
/* call this function to flush any scheduled tasklet */
static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(priv->pci_dev->irq);
	/* irq is quiesced first so the tasklet cannot be re-scheduled
	 * between the kill and our return */
	tasklet_kill(&priv->irq_tasklet);
}
/*
 * Interrupt bottom half: ack and service all pending CSR_INT and
 * CSR_FH_INT_STATUS bits (HW/SW errors, RF-kill, CT-kill, wakeup,
 * Rx, and uCode-load Tx DMA), then re-enable interrupts.
 */
static void iwl4965_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask, inta_fh);
	}
#endif

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(priv, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_legacy_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_legacy_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		/* early return: interrupts stay disabled; the error
		 * handler is responsible for any restart */
		return;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			priv->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
			priv->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!(iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
				hw_rf_kill ? "disable radio" : "enable radio");

		priv->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(STATUS_ALIVE, &priv->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW, &priv->status);
			else
				clear_bit(STATUS_RF_KILL_HW, &priv->status);
			wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(priv, "Microcode CT kill error detected.\n");
		priv->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(priv, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		priv->isr_stats.sw++;
		iwl_legacy_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
		iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
		for (i = 0; i < priv->hw_params.max_txq_num; i++)
			iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
		priv->isr_stats.wakeup++;
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
		iwl4965_rx_handle(priv);
		priv->isr_stats.rx++;
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
		priv->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		priv->ucode_write_complete = 1;
		wake_up(&priv->wait_command_queue);
	}

	if (inta & ~handled) {
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		priv->isr_stats.unhandled++;
	}

	if (inta & ~(priv->inta_mask)) {
		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~priv->inta_mask);
		IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_legacy_enable_rfkill_int(priv);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
		inta = iwl_read32(priv, CSR_INT);
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(priv,
			"End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}
/*****************************************************************************
*
* sysfs attributes
*
*****************************************************************************/
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/*
* The following adds a new attribute to the sysfs representation
* of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
* used for controlling the debug level.
*
* See the level definitions in iwl for details.
*
* The debug_level being managed using sysfs below is a per device debug
* level that is used instead of the global debug level if it (the per
* device debug level) is set.
*/
/* sysfs read: report the effective per-device debug bitmask as hex. */
static ssize_t iwl4965_show_debug_level(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *iwl_dev = dev_get_drvdata(d);

	return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(iwl_dev));
}
/*
 * sysfs write: set the per-device debug bitmask.
 * NOTE(review): a parse failure is only logged; the write still returns
 * success (strnlen of the input) — intentional "best effort" behavior.
 */
static ssize_t iwl4965_store_debug_level(struct device *d,
				struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 0, &val);
	if (ret)
		IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
	else {
		priv->debug_level = val;
		/* traffic log buffers are sized lazily once debugging is on */
		if (iwl_legacy_alloc_traffic_mem(priv))
			IWL_ERR(priv,
				"Not enough memory to generate traffic log\n");
	}
	return strnlen(buf, count);
}

static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
			iwl4965_show_debug_level, iwl4965_store_debug_level);

#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
/* sysfs read: current chip temperature; -EAGAIN until uCode is alive. */
static ssize_t iwl4965_show_temperature(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);

	return iwl_legacy_is_alive(priv) ?
		sprintf(buf, "%d\n", priv->temperature) : -EAGAIN;
}

static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
/* sysfs read: user tx power limit, or "off" while the radio is down. */
static ssize_t iwl4965_show_tx_power(struct device *d,
			struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);

	if (iwl_legacy_is_ready_rf(priv))
		return sprintf(buf, "%d\n", priv->tx_power_user_lmt);

	return sprintf(buf, "off\n");
}
/*
 * sysfs write: set the user tx power limit (decimal).
 * Returns count on success, a negative errno from the parse or from
 * iwl_legacy_set_tx_power() on failure.
 */
static ssize_t iwl4965_store_tx_power(struct device *d,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		IWL_INFO(priv, "%s is not in decimal form.\n", buf);
	else {
		ret = iwl_legacy_set_tx_power(priv, val, false);
		if (ret)
			IWL_ERR(priv, "failed setting tx power (0x%d).\n",
				ret);
		else
			ret = count;
	}
	return ret;
}

static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
			iwl4965_show_tx_power, iwl4965_store_tx_power);
/* Attributes exposed under the PCI device's sysfs directory. */
static struct attribute *iwl_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL	/* sysfs requires a NULL terminator */
};

static struct attribute_group iwl_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = iwl_sysfs_entries,
};
/******************************************************************************
*
* uCode download functions
*
******************************************************************************/
/* Free every DMA-coherent firmware image buffer allocated for the card. */
static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
{
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
	iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
}
/* Take the NIC out of reset so it can begin running. */
static void iwl4965_nic_start(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}
static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
void *context);
static int iwl4965_mac_setup_register(struct iwl_priv *priv,
u32 max_probe_length);
/*
 * Request the next candidate firmware file asynchronously.
 *
 * On the first call we start at the newest supported API index; each
 * subsequent call steps one API version down.  Fails with -ENOENT once
 * the index drops below the minimum supported API.
 */
static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
{
	const char *name_pre = priv->cfg->fw_name_pre;
	char tag[8];

	if (first)
		priv->fw_index = priv->cfg->ucode_api_max;
	else
		priv->fw_index--;

	if (priv->fw_index < priv->cfg->ucode_api_min) {
		IWL_ERR(priv, "no suitable firmware found!\n");
		return -ENOENT;
	}

	sprintf(tag, "%d", priv->fw_index);
	sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");

	IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
		       priv->firmware_name);

	return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
				       &priv->pci_dev->dev, GFP_KERNEL, priv,
				       iwl4965_ucode_callback);
}
/*
 * Pointers into (and sizes of) the sections of a raw ucode image:
 * runtime instructions/data, init instructions/data, bootstrap code.
 * The pointers reference the firmware blob directly — no copies.
 */
struct iwl4965_firmware_pieces {
	const void *inst, *data, *init, *init_data, *boot;
	size_t inst_size, data_size, init_size, init_data_size, boot_size;
};
/*
 * Parse a raw ucode image header and locate its sections.
 *
 * Validates the total file size against the per-section sizes declared
 * in the header and fills @pieces with pointers into @ucode_raw (the
 * blob must remain alive while @pieces is in use).
 * Returns 0 on success or -EINVAL for a malformed image.
 */
static int iwl4965_load_firmware(struct iwl_priv *priv,
				const struct firmware *ucode_raw,
				struct iwl4965_firmware_pieces *pieces)
{
	struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	priv->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IWL_UCODE_API(priv->ucode_ver);

	switch (api_ver) {
	default:
	case 0:
	case 1:
	case 2:
		/* all supported API versions use the 24-byte v1 header */
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IWL_ERR(priv, "File size too small!\n");
			return -EINVAL;
		}
		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
		pieces->init_data_size =
				le32_to_cpu(ucode->v1.init_data_size);
		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
		src = ucode->v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header */
	if (ucode_raw->size != hdr_size + pieces->inst_size +
			       pieces->data_size + pieces->init_size +
			       pieces->init_data_size + pieces->boot_size) {

		IWL_ERR(priv,
			"uCode file size %d does not match expected size\n",
			(int)ucode_raw->size);
		return -EINVAL;
	}

	/* sections follow the header back to back in this fixed order */
	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;
	pieces->boot = src;
	src += pieces->boot_size;

	return 0;
}
/**
 * iwl4965_ucode_callback - callback when firmware was loaded
 *
 * If loaded successfully, copies the firmware into buffers
 * for the card to fetch (via DMA).
 *
 * Runs asynchronously from request_firmware_nowait().  Validates the
 * image (API version, section sizes), allocates DMA buffers, copies the
 * sections in, and finishes driver probe (mac80211 registration, sysfs,
 * debugfs).  On a validation failure it retries with the next-lower
 * firmware API version; on a fatal failure it unbinds the driver.
 */
static void
iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
{
	struct iwl_priv *priv = context;
	struct iwl_ucode_header *ucode;
	int err;
	struct iwl4965_firmware_pieces pieces;
	const unsigned int api_max = priv->cfg->ucode_api_max;
	const unsigned int api_min = priv->cfg->ucode_api_min;
	u32 api_ver;

	u32 max_probe_length = 200;
	u32 standard_phy_calibration_size =
			IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	memset(&pieces, 0, sizeof(pieces));

	if (!ucode_raw) {
		/* request_firmware itself failed (file absent etc.) */
		if (priv->fw_index <= priv->cfg->ucode_api_max)
			IWL_ERR(priv,
				"request for firmware file '%s' failed.\n",
				priv->firmware_name);
		goto try_again;
	}

	IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
			priv->firmware_name, ucode_raw->size);

	/* Make sure that we got at least the API version number */
	if (ucode_raw->size < 4) {
		IWL_ERR(priv, "File size way too small!\n");
		goto try_again;
	}

	/* Data from ucode file:  header followed by uCode images */
	ucode = (struct iwl_ucode_header *)ucode_raw->data;

	err = iwl4965_load_firmware(priv, ucode_raw, &pieces);

	if (err)
		goto try_again;

	api_ver = IWL_UCODE_API(priv->ucode_ver);

	/*
	 * api_ver should match the api version forming part of the
	 * firmware filename ... but we don't check for that and only rely
	 * on the API version read from firmware header from here on forward
	 */
	if (api_ver < api_min || api_ver > api_max) {
		IWL_ERR(priv,
			"Driver unable to support your firmware API. "
			"Driver supports v%u, firmware is v%u.\n",
			api_max, api_ver);
		goto try_again;
	}

	if (api_ver != api_max)
		IWL_ERR(priv,
			"Firmware has old API version. Expected v%u, "
			"got v%u. New firmware can be obtained "
			"from http://www.intellinuxwireless.org.\n",
			api_max, api_ver);

	IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
		 IWL_UCODE_MAJOR(priv->ucode_ver),
		 IWL_UCODE_MINOR(priv->ucode_ver),
		 IWL_UCODE_API(priv->ucode_ver),
		 IWL_UCODE_SERIAL(priv->ucode_ver));

	snprintf(priv->hw->wiphy->fw_version,
		 sizeof(priv->hw->wiphy->fw_version),
		 "%u.%u.%u.%u",
		 IWL_UCODE_MAJOR(priv->ucode_ver),
		 IWL_UCODE_MINOR(priv->ucode_ver),
		 IWL_UCODE_API(priv->ucode_ver),
		 IWL_UCODE_SERIAL(priv->ucode_ver));

	/*
	 * For any of the failures below (before allocating pci memory)
	 * we will try to load a version with a smaller API -- maybe the
	 * user just got a corrupted version of the latest API.
	 */

	IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
		       priv->ucode_ver);
	IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
		       pieces.inst_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
		       pieces.data_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
		       pieces.init_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
		       pieces.init_data_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
		       pieces.boot_size);

	/* Verify that uCode images will fit in card's SRAM */
	if (pieces.inst_size > priv->hw_params.max_inst_size) {
		IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
			pieces.inst_size);
		goto try_again;
	}

	if (pieces.data_size > priv->hw_params.max_data_size) {
		IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
			pieces.data_size);
		goto try_again;
	}

	if (pieces.init_size > priv->hw_params.max_inst_size) {
		IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
			pieces.init_size);
		goto try_again;
	}

	if (pieces.init_data_size > priv->hw_params.max_data_size) {
		IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
			pieces.init_data_size);
		goto try_again;
	}

	if (pieces.boot_size > priv->hw_params.max_bsm_size) {
		IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
			pieces.boot_size);
		goto try_again;
	}

	/* Allocate ucode buffers for card's bus-master loading ... */

	/* Runtime instructions and 2 copies of data:
	 * 1) unmodified from disk
	 * 2) backup cache for save/restore during power-downs */
	priv->ucode_code.len = pieces.inst_size;
	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);

	priv->ucode_data.len = pieces.data_size;
	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);

	priv->ucode_data_backup.len = pieces.data_size;
	iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);

	if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
	    !priv->ucode_data_backup.v_addr)
		goto err_pci_alloc;

	/* Initialization instructions and data */
	if (pieces.init_size && pieces.init_data_size) {
		priv->ucode_init.len = pieces.init_size;
		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);

		priv->ucode_init_data.len = pieces.init_data_size;
		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);

		if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
			goto err_pci_alloc;
	}

	/* Bootstrap (instructions only, no data) */
	if (pieces.boot_size) {
		priv->ucode_boot.len = pieces.boot_size;
		iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);

		if (!priv->ucode_boot.v_addr)
			goto err_pci_alloc;
	}

	/* Now that we can no longer fail, copy information */

	priv->sta_key_max_num = STA_KEY_MAX_NUM;

	/* Copy images into buffers for card's bus-master reads ... */

	/* Runtime instructions (first block of data in file) */
	IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
			pieces.inst_size);
	memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);

	IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
		priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);

	/*
	 * Runtime data
	 * NOTE:  Copy into backup buffer will be done in iwl_up()
	 */
	IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
			pieces.data_size);
	memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
	memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);

	/* Initialization instructions */
	if (pieces.init_size) {
		IWL_DEBUG_INFO(priv,
				"Copying (but not loading) init instr len %Zd\n",
				pieces.init_size);
		memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
	}

	/* Initialization data */
	if (pieces.init_data_size) {
		IWL_DEBUG_INFO(priv,
				"Copying (but not loading) init data len %Zd\n",
				pieces.init_data_size);
		memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
		       pieces.init_data_size);
	}

	/* Bootstrap instructions */
	IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
			pieces.boot_size);
	memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);

	/*
	 * figure out the offset of chain noise reset and gain commands
	 * base on the size of standard phy calibration commands table size
	 */
	priv->_4965.phy_calib_chain_noise_reset_cmd =
		standard_phy_calibration_size;
	priv->_4965.phy_calib_chain_noise_gain_cmd =
		standard_phy_calibration_size + 1;

	/**************************************************
	 * This is still part of probe() in a sense...
	 *
	 * 9. Setup and register with mac80211 and debugfs
	 **************************************************/
	err = iwl4965_mac_setup_register(priv, max_probe_length);
	if (err)
		goto out_unbind;

	err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
	if (err)
		/* debugfs failure is non-fatal: log and continue */
		IWL_ERR(priv,
		"failed to create debugfs files. Ignoring error: %d\n", err);

	err = sysfs_create_group(&priv->pci_dev->dev.kobj,
					&iwl_attribute_group);
	if (err) {
		IWL_ERR(priv, "failed to create sysfs device attributes\n");
		goto out_unbind;
	}

	/* We have our copies now, allow OS release its copies */
	release_firmware(ucode_raw);
	complete(&priv->_4965.firmware_loading_complete);
	return;

 try_again:
	/* try next, if any */
	if (iwl4965_request_firmware(priv, false))
		goto out_unbind;
	release_firmware(ucode_raw);
	return;

 err_pci_alloc:
	IWL_ERR(priv, "failed to allocate pci memory\n");
	iwl4965_dealloc_ucode_pci(priv);
 out_unbind:
	/* fatal: signal the waiter and detach the driver from the device */
	complete(&priv->_4965.firmware_loading_complete);
	device_release_driver(&priv->pci_dev->dev);
	release_firmware(ucode_raw);
}
/*
 * Human-readable names for uCode error codes, indexed directly by code.
 *
 * Fix: the original table was missing the comma after
 * "NMI_INTERRUPT_BREAK_POINT", so C adjacent-string-literal
 * concatenation silently merged it with "DEBUG_0" into one entry,
 * shrinking the table by one and shifting the DEBUG_1..DEBUG_3 indices.
 * Every entry must end with its own comma.
 */
static const char * const desc_lookup_text[] = {
	"OK",
	"FAIL",
	"BAD_PARAM",
	"BAD_CHECKSUM",
	"NMI_INTERRUPT_WDG",
	"SYSASSERT",
	"FATAL_ERROR",
	"BAD_COMMAND",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"VCC_NOT_STABLE",
	"FH_ERROR",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_TRM_HW_ER",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
	"DEBUG_0",
	"DEBUG_1",
	"DEBUG_2",
	"DEBUG_3",
};
/*
 * Sparse name table for "advanced" uCode error codes that are too large
 * to index desc_lookup_text directly.  The final "ADVANCED_SYSASSERT"
 * entry is a catch-all: iwl4965_desc_lookup() returns it when no code
 * matches, so it must stay last.
 */
static struct { char *name; u8 num; } advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
/*
 * iwl4965_desc_lookup - map a uCode error code to a printable name
 *
 * Small codes index desc_lookup_text directly; larger codes are searched
 * in advanced_lookup, whose terminating "ADVANCED_SYSASSERT" entry is
 * returned as the catch-all when nothing matches.
 */
static const char *iwl4965_desc_lookup(u32 num)
{
	int idx;
	int n_basic = ARRAY_SIZE(desc_lookup_text);
	int n_adv = ARRAY_SIZE(advanced_lookup) - 1;

	if (num < n_basic)
		return desc_lookup_text[num];

	for (idx = 0; idx < n_adv; idx++)
		if (advanced_lookup[idx].num == num)
			return advanced_lookup[idx].name;

	/* not found: fall back to the terminating catch-all entry */
	return advanced_lookup[n_adv].name;
}
#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))

/*
 * Dump the uCode error event table from device SRAM to the kernel log.
 * The word offsets read below (desc at +1, pc at +2, ..., time at +11,
 * hcmd at +22) follow the uCode's error table layout — NOTE(review):
 * offset 10 is deliberately skipped and +11/+22 are non-contiguous;
 * confirm against the uCode error_event_table definition.
 */
void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
{
	u32 data2, line;
	u32 desc, time, count, base, data1;
	u32 blink1, blink2, ilink1, ilink2;
	u32 pc, hcmd;

	/* pick the table pointer matching the currently running uCode image */
	if (priv->ucode_type == UCODE_INIT) {
		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
	} else {
		base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
	}

	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return;
	}

	count = iwl_legacy_read_targ_mem(priv, base);

	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IWL_ERR(priv, "Start IWL Error Log Dump:\n");
		IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
			priv->status, count);
	}

	desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
	priv->isr_stats.err_code = desc;
	pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
	blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
	blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
	ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
	ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
	data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
	data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
	line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
	time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
	hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));

	trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
					time, data1, data2, line,
				      blink1, blink2, ilink1, ilink2);

	IWL_ERR(priv, "Desc                                  Time       "
		"data1      data2      line\n");
	IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
		iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
	IWL_ERR(priv, "pc      blink1  blink2  ilink1  ilink2  hcmd\n");
	IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
		pc, blink1, blink2, ilink1, ilink2, hcmd);
}
#define EVENT_START_OFFSET  (4 * sizeof(u32))

/**
 * iwl4965_print_event_log - Dump error event log to syslog
 *
 * Reads @num_events entries starting at @start_idx from the uCode event
 * log in device SRAM.  @mode selects the entry size: 0 = (data, ev) pairs,
 * non-zero = (time, data, ev) triples.  When @bufsz is non-zero the output
 * is appended to *@buf at @pos instead of being logged; returns the new
 * position in the buffer.
 */
static int iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
			       u32 num_events, u32 mode,
			       int pos, char **buf, size_t bufsz)
{
	u32 i;
	u32 base;       /* SRAM byte address of event log header */
	u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */
	unsigned long reg_flags;

	if (num_events == 0)
		return pos;

	/* pick the log pointer matching the currently running uCode image */
	if (priv->ucode_type == UCODE_INIT) {
		base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
	} else {
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	}

	if (mode == 0)
		event_size = 2 * sizeof(u32);
	else
		event_size = 3 * sizeof(u32);

	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);

	/* Make sure device is powered up for SRAM reads */
	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	iwl_grab_nic_access(priv);

	/* Set starting address; reads will auto-increment */
	_iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
	rmb();

	/* "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing. */
	for (i = 0; i < num_events; i++) {
		ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			/* data, ev */
			if (bufsz) {
				pos += scnprintf(*buf + pos, bufsz - pos,
						"EVT_LOG:0x%08x:%04u\n",
						time, ev);
			} else {
				trace_iwlwifi_legacy_dev_ucode_event(priv, 0,
					time, ev);
				IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
					time, ev);
			}
		} else {
			data = _iwl_legacy_read_direct32(priv,
						HBUS_TARG_MEM_RDAT);
			if (bufsz) {
				pos += scnprintf(*buf + pos, bufsz - pos,
						"EVT_LOGT:%010u:0x%08x:%04u\n",
						 time, data, ev);
			} else {
				IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
					time, data, ev);
				trace_iwlwifi_legacy_dev_ucode_event(priv, time,
					data, ev);
			}
		}
	}

	/* Allow device to power down */
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
	return pos;
}
/**
 * iwl4965_print_last_event_logs - Dump the newest @size event log entries
 *
 * Prints only the entries written just before the next one that uCode
 * would fill, handling the case where the circular log has wrapped.
 * Returns the updated write position within *@buf.
 */
static int iwl4965_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
				u32 num_wraps, u32 next_entry,
				u32 size, u32 mode,
				int pos, char **buf, size_t bufsz)
{
	/* Newest entries sit contiguously just below next_entry. */
	if (next_entry >= size)
		return iwl4965_print_event_log(priv, next_entry - size,
					size, mode, pos, buf, bufsz);

	/* Log never wrapped: fewer than @size entries exist yet. */
	if (!num_wraps)
		return iwl4965_print_event_log(priv, 0, next_entry,
					mode, pos, buf, bufsz);

	/*
	 * Wrapped log: the newest entries span the end of the buffer
	 * and then the beginning, up to (but excluding) next_entry.
	 */
	pos = iwl4965_print_event_log(priv,
				capacity - (size - next_entry),
				size - next_entry, mode,
				pos, buf, bufsz);
	return iwl4965_print_event_log(priv, 0, next_entry,
				mode, pos, buf, bufsz);
}
#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
/*
 * iwl4965_dump_nic_event_log - dump the device event log to syslog or a buffer
 *
 * Reads the event log header from device SRAM, decides how many entries
 * to print (all of them with full_log or the FW_ERRORS debug level, else
 * the newest DEFAULT_DUMP_EVENT_LOG_ENTRIES), and prints them.  When
 * @display is set (debug builds only) a buffer is allocated into *@buf;
 * the caller owns and must free it.  Returns the number of bytes written
 * to the buffer, or a negative errno.
 */
int iwl4965_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
				char **buf, bool display)
{
	u32 base;	/* SRAM byte address of event log header */
	u32 capacity;	/* event log capacity in # entries */
	u32 mode;	/* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;	/* # times uCode wrapped to top of log */
	u32 next_entry;	/* index of next entry to be written by uCode */
	u32 size;	/* # entries that we'll print */
	int pos = 0;
	size_t bufsz = 0;

	/* Init and runtime uCode images keep separate event logs. */
	if (priv->ucode_type == UCODE_INIT) {
		base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
	} else {
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	}

	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Invalid event log pointer 0x%08X for %s uCode\n",
			base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
		return -EINVAL;
	}

	/* event log header */
	capacity = iwl_legacy_read_targ_mem(priv, base);
	mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32)));
	num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32)));
	next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32)));

	/* Once wrapped, every slot in the log holds a valid entry. */
	size = num_wraps ? capacity : next_entry;

	/* bail out if nothing in log */
	if (size == 0) {
		IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
		return pos;
	}

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	/* Clamp to the default count unless full dumps were requested. */
	if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
		size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
			? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#else
	size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
		? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#endif
	IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
		size);

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (display) {
		/* 48 bytes is a generous upper bound per formatted entry. */
		if (full_log)
			bufsz = capacity * 48;
		else
			bufsz = size * 48;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
	}
	if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
		/*
		 * if uCode has wrapped back to top of log,
		 * start at the oldest entry,
		 * i.e the next one that uCode would fill.
		 */
		if (num_wraps)
			pos = iwl4965_print_event_log(priv, next_entry,
					capacity - next_entry, mode,
					pos, buf, bufsz);
		/* (then/else) start at top of log */
		pos = iwl4965_print_event_log(priv, 0,
					next_entry, mode, pos, buf, bufsz);
	} else
		pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
					next_entry, size, mode,
					pos, buf, bufsz);
#else
	pos = iwl4965_print_last_event_logs(priv, capacity, num_wraps,
					next_entry, size, mode,
					pos, buf, bufsz);
#endif
	return pos;
}
/*
 * iwl4965_rf_kill_ct_config - program the critical-temperature kill threshold
 *
 * Clears the CT-kill exit bit, then sends REPLY_CT_KILL_CONFIG_CMD with
 * the threshold from hw_params so the uCode can shut the radio down on
 * overheat.  Failure is only logged; the driver keeps running.
 */
static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
{
	struct iwl_ct_kill_config cmd;
	unsigned long flags;
	int ret = 0;

	/*
	 * Zero the whole command first: only critical_temperature_R is
	 * filled in below, and any other fields of the struct must not
	 * carry uninitialized kernel stack bytes to the device.
	 */
	memset(&cmd, 0, sizeof(cmd));

	spin_lock_irqsave(&priv->lock, flags);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	spin_unlock_irqrestore(&priv->lock, flags);

	cmd.critical_temperature_R =
		cpu_to_le32(priv->hw_params.ct_kill_threshold);

	ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
			       sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
	else
		IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
				"succeeded, "
				"critical temperature is %d\n",
				priv->hw_params.ct_kill_threshold);
}
/*
 * Tx-queue index -> Tx FIFO mapping used by iwl4965_alive_notify():
 * queues 0-3 map to the four EDCA FIFOs, queue 4 is the command FIFO,
 * queues 5-6 are unused (skipped when activating queues).
 */
static const s8 default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
	IWL49_CMD_FIFO_NUM,
	IWL_TX_FIFO_UNUSED,
	IWL_TX_FIFO_UNUSED,
};
/*
 * iwl4965_alive_notify - set up Tx scheduler and DMA after runtime "alive"
 *
 * Clears the scheduler context/translate tables in device SRAM, points
 * the scheduler at the byte-count tables, enables the Tx DMA channels,
 * and configures every Tx queue (read/write pointers, window size,
 * frame limit, FIFO mapping).  Runs entirely under priv->lock.
 * Returns 0.
 */
static int iwl4965_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&priv->lock, flags);

	/* Clear 4965's internal Tx Scheduler data base */
	priv->scd_base_addr = iwl_legacy_read_prph(priv,
					IWL49_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
	IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_legacy_write_targ_mem(priv, a, 0);

	/* Tell 4965 where to find Tx byte count tables */
	iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
			priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
		iwl_legacy_write_direct32(priv,
				FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Disable chain mode for all queues */
	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write indexes */
		iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
		iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				(SCD_WIN_SIZE <<
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				(SCD_FRAME_LIMIT <<
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	/* Interrupt on every queue the hardware exposes. */
	iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
				 (1 << priv->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));

	iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);

	/* make sure all queue are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queue first */
	priv->txq_ctx_active_msk = 0;
	/* Map each Tx/cmd queue to its corresponding fifo */
	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);

	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];

		iwl_txq_ctx_activate(priv, i);

		/* Unused queues are activated above but get no FIFO. */
		if (ac == IWL_TX_FIFO_UNUSED)
			continue;

		iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
/**
 * iwl4965_alive_start - called after REPLY_ALIVE notification received
 *                       from protocol/runtime uCode (initialization uCode's
 *                       Alive gets handled by iwl_init_alive_start()).
 *
 * Verifies the runtime image, programs scheduler/DMA, marks the driver
 * ALIVE and READY, and commits an unassociated RXON.  On any failure it
 * queues priv->restart to tear everything down and retry.
 */
static void iwl4965_alive_start(struct iwl_priv *priv)
{
	int ret = 0;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");

	if (priv->card_alive.is_valid != UCODE_VALID_OK) {
		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Alive failed.\n");
		goto restart;
	}

	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
	 * This is a paranoid check, because we would not have gotten the
	 * "runtime" alive if code weren't properly loaded. */
	if (iwl4965_verify_ucode(priv)) {
		/* Runtime instruction load was bad;
		 * take it all the way back down so we can try again */
		IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
		goto restart;
	}

	/* Program the Tx scheduler, DMA channels and queues. */
	ret = iwl4965_alive_notify(priv);
	if (ret) {
		IWL_WARN(priv,
			"Could not complete ALIVE transition [ntf]: %d\n", ret);
		goto restart;
	}


	/* After the ALIVE response, we can send host commands to the uCode */
	set_bit(STATUS_ALIVE, &priv->status);

	/* Enable watchdog to monitor the driver tx queues */
	iwl_legacy_setup_watchdog(priv);

	/* With RF kill engaged, stop here; queues stay stopped. */
	if (iwl_legacy_is_rfkill(priv))
		return;

	ieee80211_wake_queues(priv->hw);

	priv->active_rate = IWL_RATES_MASK;

	if (iwl_legacy_is_associated_ctx(ctx)) {
		struct iwl_legacy_rxon_cmd *active_rxon =
				(struct iwl_legacy_rxon_cmd *)&ctx->active;
		/* apply any changes in staging */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		struct iwl_rxon_context *tmp;
		/* Initialize our rx_config data */
		for_each_context(priv, tmp)
			iwl_legacy_connection_init_rx_config(priv, tmp);

		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	/* Configure bluetooth coexistence if enabled */
	iwl_legacy_send_bt_config(priv);

	iwl4965_reset_run_time_calib(priv);

	set_bit(STATUS_READY, &priv->status);

	/* Configure the adapter for unassociated operation */
	iwl_legacy_commit_rxon(priv, ctx);

	/* At this point, the NIC is initialized and operational */
	iwl4965_rf_kill_ct_config(priv);

	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
	/* Unblock iwl4965_mac_start(), which waits on STATUS_READY. */
	wake_up(&priv->wait_command_queue);

	iwl_legacy_power_update_mode(priv, true);
	IWL_DEBUG_INFO(priv, "Updated power mode\n");

	return;

restart:
	queue_work(priv->workqueue, &priv->restart);
}
static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
/*
 * __iwl4965_down - tear the NIC down; caller must hold priv->mutex
 *
 * Cancels scans, stops the watchdog, clears stations, resets the
 * on-board processor, disables interrupts, stops Tx/Rx machinery and
 * puts the device into low power.  All status bits except a small
 * preserved set (RF kill, geo-configured, fw-error, exit-pending) are
 * cleared.
 */
static void __iwl4965_down(struct iwl_priv *priv)
{
	unsigned long flags;
	int exit_pending;

	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");

	iwl_legacy_scan_cancel_timeout(priv, 200);

	/* Remember whether EXIT_PENDING was already set before we set it. */
	exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);

	/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&priv->watchdog);

	iwl_legacy_clear_ucode_stations(priv, NULL);
	iwl_legacy_dealloc_bcast_stations(priv);
	iwl_legacy_clear_driver_stations(priv);

	/* Unblock any waiting calls */
	wake_up_all(&priv->wait_command_queue);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	iwl4965_synchronize_irq(priv);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* If we have not previously called iwl_init() then
	 * clear all bits but the RF Kill bit and return */
	if (!iwl_legacy_is_init(priv)) {
		/* NOTE: '<<' binds tighter than '|', so each term below
		 * is (bit-value << position); the '|' combines them. */
		priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
					STATUS_RF_KILL_HW |
			       test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
					STATUS_GEO_CONFIGURED |
			       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
					STATUS_EXIT_PENDING;
		goto exit;
	}

	/* ...otherwise clear out all the status bits but the RF Kill
	 * bit and continue taking the NIC down. */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
				STATUS_GEO_CONFIGURED |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR |
		       test_bit(STATUS_EXIT_PENDING, &priv->status) <<
				STATUS_EXIT_PENDING;

	iwl4965_txq_ctx_stop(priv);
	iwl4965_rxq_stop(priv);

	/* Power-down device's busmaster DMA clocks */
	iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(5);

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_legacy_apm_stop(priv);

exit:
	memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));

	dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = NULL;

	/* clear out any free frames */
	iwl4965_clear_free_frames(priv);
}
/*
 * iwl4965_down - locked wrapper around __iwl4965_down()
 *
 * Takes priv->mutex for the teardown itself, then cancels deferred work
 * outside the lock (the work handlers acquire the mutex themselves, so
 * cancelling under it could deadlock).
 */
static void iwl4965_down(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	__iwl4965_down(priv);
	mutex_unlock(&priv->mutex);

	iwl4965_cancel_deferred_work(priv);
}
#define HW_READY_TIMEOUT (50)
/*
 * iwl4965_set_hw_ready - signal NIC_READY and poll for acknowledgement
 *
 * Sets the NIC_READY bit and polls it back for up to HW_READY_TIMEOUT.
 * Records the outcome in priv->hw_ready (any result other than
 * -ETIMEDOUT counts as ready) and returns the poll result.
 */
static int iwl4965_set_hw_ready(struct iwl_priv *priv)
{
	int ret;

	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
				HW_READY_TIMEOUT);

	/* Assign the boolean directly instead of an if/else ladder. */
	priv->hw_ready = (ret != -ETIMEDOUT);

	IWL_DEBUG_INFO(priv, "hardware %s\n",
		      priv->hw_ready ? "ready" : "not ready");
	return ret;
}
/*
 * iwl4965_prepare_card_hw - bring the NIC to the hardware-ready state
 *
 * First tries iwl4965_set_hw_ready(); if the NIC did not respond,
 * asserts the PREPARE bit, polls for PREPARE_DONE (timeout argument
 * 150000 — presumably microseconds; confirm against iwl_poll_bit), then
 * checks readiness once more.  The final state is left in
 * priv->hw_ready; the poll result is returned.
 */
static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
{
	int ret = 0;

	IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");

	ret = iwl4965_set_hw_ready(priv);
	if (priv->hw_ready)
		return ret;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	/* HW should be ready by now, check again. */
	if (ret != -ETIMEDOUT)
		iwl4965_set_hw_ready(priv);

	return ret;
}
#define MAX_HW_RESTARTS 5
/*
 * __iwl4965_up - bring the NIC up; caller must hold priv->mutex
 *
 * Allocates broadcast stations, checks hardware readiness and the RF
 * kill switch, initializes the NIC, then tries up to MAX_HW_RESTARTS
 * times to load the bootstrap uCode and start the card.  Returns 0 on
 * success (or when blocked only by RF kill), negative errno otherwise.
 */
static int __iwl4965_up(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	int i;
	int ret;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
		IWL_ERR(priv, "ucode not available for device bringup\n");
		return -EIO;
	}

	/* One broadcast station per RXON context; undo all on failure. */
	for_each_context(priv, ctx) {
		ret = iwl4965_alloc_bcast_station(priv, ctx);
		if (ret) {
			iwl_legacy_dealloc_bcast_stations(priv);
			return ret;
		}
	}

	iwl4965_prepare_card_hw(priv);

	if (!priv->hw_ready) {
		IWL_WARN(priv, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv,
		CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	if (iwl_legacy_is_rfkill(priv)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);

		iwl_legacy_enable_interrupts(priv);
		IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
		return 0;
	}

	/* Ack any pending interrupts before (re)starting. */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	/* must be initialised before iwl_hw_nic_init */
	priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;

	ret = iwl4965_hw_nic_init(priv);
	if (ret) {
		IWL_ERR(priv, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_legacy_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Copy original ucode data image from disk into backup cache.
	 * This will be used to initialize the on-board processor's
	 * data SRAM for a clean start when the runtime program first loads. */
	memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
	       priv->ucode_data.len);

	for (i = 0; i < MAX_HW_RESTARTS; i++) {

		/* load bootstrap state machine,
		 * load bootstrap program into processor's memory,
		 * prepare to load the "initialize" uCode */
		ret = priv->cfg->ops->lib->load_ucode(priv);

		if (ret) {
			IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
				ret);
			continue;
		}

		/* start card; "initialize" will load runtime ucode */
		iwl4965_nic_start(priv);

		IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");

		return 0;
	}

	/* All attempts failed: tear back down before reporting the error. */
	set_bit(STATUS_EXIT_PENDING, &priv->status);
	__iwl4965_down(priv);
	clear_bit(STATUS_EXIT_PENDING, &priv->status);

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
/*****************************************************************************
*
* Workqueue callbacks
*
*****************************************************************************/
/*
 * Work handler for the init uCode's "alive" notification: hand off to
 * the library callback unless the driver is already shutting down.
 */
static void iwl4965_bg_init_alive_start(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, init_alive_start.work);

	mutex_lock(&priv->mutex);
	if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
		priv->cfg->ops->lib->init_alive_start(priv);
	mutex_unlock(&priv->mutex);
}
/*
 * Work handler for the runtime uCode's "alive" notification: run the
 * ALIVE processing unless the driver is already shutting down.
 */
static void iwl4965_bg_alive_start(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, alive_start.work);

	mutex_lock(&priv->mutex);
	if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
		iwl4965_alive_start(priv);
	mutex_unlock(&priv->mutex);
}
/*
 * Work handler for run-time calibration: feeds the accumulated
 * statistics into the chain-noise and sensitivity calibrations,
 * skipping entirely while shutting down or scanning.
 */
static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv,
			run_time_calib_work);

	mutex_lock(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status))
		goto out;

	if (priv->start_calib) {
		iwl4965_chain_noise_calibration(priv,
				(void *)&priv->_4965.statistics);
		iwl4965_sensitivity_calibration(priv,
				(void *)&priv->_4965.statistics);
	}

out:
	mutex_unlock(&priv->mutex);
}
/*
 * iwl4965_bg_restart - work handler that restarts the device
 *
 * Two paths: after a firmware error (STATUS_FW_ERROR set) the driver
 * detaches vifs, brings the NIC down and asks mac80211 to restart the
 * hardware; otherwise it does a plain down/up cycle.
 */
static void iwl4965_bg_restart(struct work_struct *data)
{
	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
		struct iwl_rxon_context *ctx;

		mutex_lock(&priv->mutex);
		/* Drop vif references before the restart re-adds them. */
		for_each_context(priv, ctx)
			ctx->vif = NULL;
		priv->is_open = 0;

		__iwl4965_down(priv);

		mutex_unlock(&priv->mutex);
		/* Deferred work is cancelled outside the mutex (handlers
		 * take it themselves). */
		iwl4965_cancel_deferred_work(priv);
		ieee80211_restart_hw(priv->hw);
	} else {
		iwl4965_down(priv);

		mutex_lock(&priv->mutex);
		/* Teardown may have raced with module exit; recheck. */
		if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
			mutex_unlock(&priv->mutex);
			return;
		}

		__iwl4965_up(priv);
		mutex_unlock(&priv->mutex);
	}
}
/*
 * iwl4965_bg_rx_replenish - work handler that refills the Rx queue
 *
 * Skipped entirely once teardown has begun; otherwise replenishes Rx
 * buffers under priv->mutex.
 */
static void iwl4965_bg_rx_replenish(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, rx_replenish);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	mutex_lock(&priv->mutex);
	iwl4965_rx_replenish(priv);
	mutex_unlock(&priv->mutex);
}
/*****************************************************************************
*
* mac80211 entry point functions
*
*****************************************************************************/
#define UCODE_READY_TIMEOUT (4 * HZ)
/*
* Not a mac80211 entry point function, but it fits in with all the
* other mac80211 functions grouped here.
*/
/*
 * iwl4965_mac_setup_register - describe capabilities and register with
 * mac80211
 *
 * Fills in hw flags, wiphy limits and supported bands, then calls
 * ieee80211_register_hw().  @max_probe_length bounds the probe request
 * size; the scan IE budget subtracts the 802.11 header (24 bytes) and a
 * zero-length SSID element (2 bytes).  Returns 0 or the registration
 * error.
 */
static int iwl4965_mac_setup_register(struct iwl_priv *priv,
				  u32 max_probe_length)
{
	int ret;
	struct ieee80211_hw *hw = priv->hw;
	struct iwl_rxon_context *ctx;

	hw->rate_control_algorithm = "iwl-4965-rs";

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_AMPDU_AGGREGATION |
		    IEEE80211_HW_NEED_DTIM_PERIOD |
		    IEEE80211_HW_SPECTRUM_MGMT |
		    IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	/* SMPS is only advertised on 802.11n-capable SKUs. */
	if (priv->cfg->sku & IWL_SKU_N)
		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;

	hw->sta_data_size = sizeof(struct iwl_station_priv);
	hw->vif_data_size = sizeof(struct iwl_vif_priv);

	/* Union of the interface modes of all RXON contexts. */
	for_each_context(priv, ctx) {
		hw->wiphy->interface_modes |= ctx->interface_modes;
		hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
	}

	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
			    WIPHY_FLAG_DISABLE_BEACON_HINTS;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;

	/* Only advertise bands that actually have channels. */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->bands[IEEE80211_BAND_2GHZ];
	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&priv->bands[IEEE80211_BAND_5GHZ];

	iwl_legacy_leds_init(priv);

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
		return ret;
	}
	priv->mac80211_registered = 1;

	return 0;
}
/*
 * iwl4965_mac_start - mac80211 start callback
 *
 * Brings the NIC up and waits (up to UCODE_READY_TIMEOUT) for the
 * runtime uCode to report READY before declaring the interface open.
 * If blocked only by RF kill, the interface is still marked open.
 */
int iwl4965_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&priv->mutex);
	ret = __iwl4965_up(priv);
	mutex_unlock(&priv->mutex);

	if (ret)
		return ret;

	if (iwl_legacy_is_rfkill(priv))
		goto out;

	IWL_DEBUG_INFO(priv, "Start UP work done.\n");

	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
	 * mac80211 will not be run successfully. */
	ret = wait_event_timeout(priv->wait_command_queue,
			test_bit(STATUS_READY, &priv->status),
			UCODE_READY_TIMEOUT);
	if (!ret) {
		if (!test_bit(STATUS_READY, &priv->status)) {
			IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
				jiffies_to_msecs(UCODE_READY_TIMEOUT));
			/* NOTE(review): this error path returns without
			 * setting priv->is_open — confirm the NIC left up
			 * by __iwl4965_up() is torn down elsewhere. */
			return -ETIMEDOUT;
		}
	}

	iwl4965_led_enable(priv);

out:
	priv->is_open = 1;
	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
/*
 * iwl4965_mac_stop - mac80211 stop callback
 *
 * Brings the NIC down and flushes the driver workqueue.  RF-kill
 * interrupts are re-enabled afterwards so user space still sees rfkill
 * state changes while the interface is down.
 */
void iwl4965_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!priv->is_open)
		return;

	priv->is_open = 0;

	iwl4965_down(priv);

	flush_workqueue(priv->workqueue);

	/* User space software may expect getting rfkill changes
	 * even if interface is down */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_legacy_enable_rfkill_int(priv);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
/*
 * iwl4965_mac_tx - mac80211 transmit callback
 *
 * Hands the frame to iwl4965_tx_skb(); on failure the skb is freed here
 * (mac80211 expects .tx to consume the skb either way).
 */
void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MACDUMP(priv, "enter\n");

	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (iwl4965_tx_skb(priv, skb))
		dev_kfree_skb_any(skb);

	IWL_DEBUG_MACDUMP(priv, "leave\n");
}
/*
 * iwl4965_mac_update_tkip_key - mac80211 TKIP phase-1 rekey callback
 *
 * Forwards the new IV32/phase-1 key to the device for the vif's RXON
 * context.
 */
void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_key_conf *keyconf,
				struct ieee80211_sta *sta,
				u32 iv32, u16 *phase1key)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
			    iv32, phase1key);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
/*
 * iwl4965_mac_set_key - mac80211 set_key callback
 *
 * Installs or removes hardware crypto keys.  Group WEP keys with no
 * per-station key mapping use the "default WEP key" host command;
 * everything else goes through the dynamic key path with a station id.
 * Returns -EOPNOTSUPP when software crypto is forced via module param.
 */
int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		    struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		    struct ieee80211_key_conf *key)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *ctx = vif_priv->ctx;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (priv->cfg->mod_params->sw_crypto) {
		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/* NULL sta means a group key: use the context's broadcast station. */
	sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
	if (sta_id == IWL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&priv->mutex);
	iwl_legacy_scan_cancel_timeout(priv, 100);

	/*
	 * If we are getting WEP group key and we didn't receive any key mapping
	 * so far, we are in legacy wep mode (group key only), otherwise we are
	 * in 1X mode.
	 * In legacy wep mode, we use another host command to the uCode.
	 */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	      !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !ctx->key_mapping_keys;
		else
			is_default_wep_key =
					(key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret = iwl4965_set_default_wep_key(priv,
							vif_priv->ctx, key);
		else
			ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
						  key, sta_id);

		IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = iwl4965_remove_default_wep_key(priv, ctx, key);
		else
			ret = iwl4965_remove_dynamic_key(priv, ctx,
							key, sta_id);

		IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}
/*
 * iwl4965_mac_ampdu_action - mac80211 A-MPDU (11n aggregation) callback
 *
 * Starts/stops Rx and Tx aggregation sessions for (sta, tid) and tracks
 * the number of active Tx aggregation TIDs.  Stop requests during
 * driver teardown are reported as success so mac80211 can clean up.
 * Returns -EACCES on non-11n SKUs.
 */
int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
			 struct ieee80211_vif *vif,
			 enum ieee80211_ampdu_mlme_action action,
			 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
			 u8 buf_size)
{
	struct iwl_priv *priv = hw->priv;
	int ret = -EINVAL;

	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
		     sta->addr, tid);

	if (!(priv->cfg->sku & IWL_SKU_N))
		return -EACCES;

	mutex_lock(&priv->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		IWL_DEBUG_HT(priv, "start Rx\n");
		ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		IWL_DEBUG_HT(priv, "stop Rx\n");
		ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
		/* Pretend success while tearing down so mac80211 finishes. */
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		IWL_DEBUG_HT(priv, "start Tx\n");
		ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
		if (ret == 0) {
			priv->_4965.agg_tids_count++;
			IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
				     priv->_4965.agg_tids_count);
		}
		break;
	case IEEE80211_AMPDU_TX_STOP:
		IWL_DEBUG_HT(priv, "stop Tx\n");
		ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
		if ((ret == 0) && (priv->_4965.agg_tids_count > 0)) {
			priv->_4965.agg_tids_count--;
			IWL_DEBUG_HT(priv, "priv->_4965.agg_tids_count = %u\n",
				     priv->_4965.agg_tids_count);
		}
		/* Pretend success while tearing down so mac80211 finishes. */
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* Nothing to program; accept the operational transition. */
		ret = 0;
		break;
	}
	mutex_unlock(&priv->mutex);

	return ret;
}
/*
 * iwl4965_mac_sta_add - mac80211 sta_add callback
 *
 * Adds a station to the device's station table and initializes rate
 * scaling for it.  Returns 0 or the error from the add-station command.
 */
int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	/* NOTE(review): despite the name, this is true when WE are a
	 * station — i.e. the peer being added is our AP; confirm against
	 * iwl_legacy_add_station_common's parameter semantics. */
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
			sta->addr);
	mutex_lock(&priv->mutex);
	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
			sta->addr);
	/* Mark invalid until the add succeeds. */
	sta_priv->common.sta_id = IWL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);

	ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
				     is_ap, sta, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
			sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&priv->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
		       sta->addr);
	iwl4965_rs_rate_init(priv, sta, sta_id);
	mutex_unlock(&priv->mutex);

	return 0;
}
/*
 * iwl4965_mac_channel_switch - mac80211 channel_switch callback
 *
 * Validates the target channel, rebuilds the staging RXON (HT40
 * configuration, channel, band flags, rates) and asks the device to
 * perform the switch.  Bails out silently when not associated, when a
 * scan or another switch is in progress, or under RF kill.  On a
 * device-level failure the pending flag is cleared and mac80211 is told
 * the switch did not complete.
 */
void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
			       struct ieee80211_channel_switch *ch_switch)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->channel;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;

	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u16 ch;
	unsigned long flags = 0;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	if (iwl_legacy_is_rfkill(priv))
		goto out;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status) ||
	    test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
		goto out;

	if (!iwl_legacy_is_associated_ctx(ctx))
		goto out;

	if (priv->cfg->ops->lib->set_channel_switch) {

		ch = channel->hw_value;
		/* Already on the requested channel: nothing to do. */
		if (le16_to_cpu(ctx->active.channel) != ch) {
			ch_info = iwl_legacy_get_channel_info(priv,
						       channel->band,
						       ch);
			if (!iwl_legacy_is_channel_valid(ch_info)) {
				IWL_DEBUG_MAC80211(priv, "invalid channel\n");
				goto out;
			}
			spin_lock_irqsave(&priv->lock, flags);

			priv->current_ht_config.smps = conf->smps_mode;

			/* Configure HT40 channels */
			ctx->ht.enabled = conf_is_ht(conf);
			if (ctx->ht.enabled) {
				if (conf_is_ht40_minus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
					ctx->ht.is_40mhz = true;
				} else if (conf_is_ht40_plus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
					ctx->ht.is_40mhz = true;
				} else {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_NONE;
					ctx->ht.is_40mhz = false;
				}
			} else
				ctx->ht.is_40mhz = false;

			/* Changing channel invalidates the staged flags. */
			if ((le16_to_cpu(ctx->staging.channel) != ch))
				ctx->staging.flags = 0;

			iwl_legacy_set_rxon_channel(priv, channel, ctx);
			iwl_legacy_set_rxon_ht(priv, ht_conf);
			iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
					       ctx->vif);
			spin_unlock_irqrestore(&priv->lock, flags);

			iwl_legacy_set_rate(priv);
			/*
			 * at this point, staging_rxon has the
			 * configuration for channel switch
			 */
			set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
			priv->switch_channel = cpu_to_le16(ch);
			if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
				clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
					  &priv->status);
				priv->switch_channel = 0;
				ieee80211_chswitch_done(ctx->vif, false);
			}
		}
	}
out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}
/*
 * iwl4965_configure_filter - mac80211 configure_filter callback
 *
 * Translates mac80211 FIF_* filter flags into RXON filter-flag bits on
 * every context's staging RXON.  The CHK macro collects, for each
 * supported FIF flag, the RXON bits to set (filter_or) or clear
 * (filter_nand).  Changes are staged only; they get committed on the
 * next RXON commit.  *total_flags is clamped to the flags we support.
 */
void iwl4965_configure_filter(struct ieee80211_hw *hw,
			     unsigned int changed_flags,
			     unsigned int *total_flags,
			     u64 multicast)
{
	struct iwl_priv *priv = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;
	struct iwl_rxon_context *ctx;

#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			changed_flags, *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&priv->mutex);

	for_each_context(priv, ctx) {
		ctx->staging.filter_flags &= ~filter_nand;
		ctx->staging.filter_flags |= filter_or;

		/*
		 * Not committing directly because hardware can perform a scan,
		 * but we'll eventually commit the filter flags change anyway.
		 */
	}

	mutex_unlock(&priv->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in iwl_legacy_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
/*****************************************************************************
*
* driver setup and teardown
*
*****************************************************************************/
/*
 * Deferred work: push the current TX power configuration to the device
 * and remember the temperature it was calibrated at.
 */
static void iwl4965_bg_txpower_work(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv,
					     txpower_work);

	mutex_lock(&priv->mutex);

	/* If a scan happened to start before we got here
	 * then just return; the statistics notification will
	 * kick off another scheduled work to compensate for
	 * any temperature delta we missed here. */
	if (!test_bit(STATUS_EXIT_PENDING, &priv->status) &&
	    !test_bit(STATUS_SCANNING, &priv->status)) {
		/* Regardless of if we are associated, we must reconfigure the
		 * TX power since frames can be sent on non-radar channels
		 * while not associated */
		priv->cfg->ops->lib->send_tx_power(priv);

		/* Update last_temperature to keep is_calib_needed from
		 * running when it isn't needed... */
		priv->last_temperature = priv->temperature;
	}

	mutex_unlock(&priv->mutex);
}
/*
 * Register all deferred work items, timers and the irq tasklet used by
 * the driver.  Called once from probe; undone by
 * iwl4965_cancel_deferred_work() and iwl4965_pci_remove().
 */
static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
{
	/* Private single-threaded workqueue so driver work can be flushed
	 * independently of the system queues.
	 * NOTE(review): the return value is not checked here — confirm
	 * callers tolerate a NULL priv->workqueue on allocation failure. */
	priv->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&priv->wait_command_queue);

	INIT_WORK(&priv->restart, iwl4965_bg_restart);
	INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
	INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
	INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
	INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);

	iwl_legacy_setup_scan_deferred_work(priv);

	INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);

	/* The statistics, ucode-trace and watchdog timers each get priv
	 * back through their ->data cookie. */
	init_timer(&priv->statistics_periodic);
	priv->statistics_periodic.data = (unsigned long)priv;
	priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;

	init_timer(&priv->ucode_trace);
	priv->ucode_trace.data = (unsigned long)priv;
	priv->ucode_trace.function = iwl4965_bg_ucode_trace;

	init_timer(&priv->watchdog);
	priv->watchdog.data = (unsigned long)priv;
	priv->watchdog.function = iwl_legacy_bg_watchdog;

	/* Interrupt bottom half runs in tasklet context. */
	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		iwl4965_irq_tasklet, (unsigned long)priv);
}
/*
 * Cancel the work items and timers registered in
 * iwl4965_setup_deferred_work().
 */
static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->txpower_work);
	cancel_delayed_work_sync(&priv->init_alive_start);
	/* NOTE(review): alive_start is cancelled without _sync while its
	 * siblings wait for completion — confirm this asymmetry is
	 * intentional. */
	cancel_delayed_work(&priv->alive_start);
	cancel_work_sync(&priv->run_time_calib_work);

	iwl_legacy_cancel_scan_deferred_work(priv);

	del_timer_sync(&priv->statistics_periodic);
	del_timer_sync(&priv->ucode_trace);
}
/*
 * Populate the mac80211 legacy rate table from the driver's static rate
 * descriptors.  hw_value is the rate index itself so rate scaling can
 * work on indexes.
 */
static void iwl4965_init_hw_rates(struct iwl_priv *priv,
			      struct ieee80211_rate *rates)
{
	int idx;

	for (idx = 0; idx < IWL_RATE_COUNT_LEGACY; idx++) {
		struct ieee80211_rate *r = &rates[idx];

		/* ieee field is in units of 0.5 Mbps; bitrate wants
		 * units of 0.1 Mbps. */
		r->bitrate = iwlegacy_rates[idx].ieee * 5;
		r->hw_value = idx;	/* Rate scaling will work on indexes */
		r->hw_value_short = idx;
		r->flags = 0;

		/* All CCK rates except 1M support short preamble. */
		if (idx >= IWL_FIRST_CCK_RATE && idx <= IWL_LAST_CCK_RATE &&
		    iwlegacy_rates[idx].plcp != IWL_RATE_1M_PLCP)
			r->flags |= IEEE80211_RATE_SHORT_PREAMBLE;
	}
}
/*
* Acquire priv->lock before calling this function !
*/
/*
 * Point the hardware's write and scheduler read pointers for queue
 * txq_id at the given index.  Caller must hold priv->lock.
 */
void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
{
	u32 wrptr = (index & 0xff) | (txq_id << 8);

	iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, wrptr);
	iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
}
void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
int tx_fifo_id, int scd_retry)
{
int txq_id = txq->q.id;
/* Find out whether to activate Tx queue */
int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
/* Set up and activate */
iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
(active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
(tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
(scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
(scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
IWL49_SCD_QUEUE_STTS_REG_MSK);
txq->sched_retry = scd_retry;
IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
active ? "Activate" : "Deactivate",
scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
/*
 * One-time software-state initialisation: locks, defaults, regulatory
 * channel map and geo/rate tables.  Returns 0 or a negative errno;
 * on failure everything allocated here is unwound.
 */
static int iwl4965_init_drv(struct iwl_priv *priv)
{
	int ret;

	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);

	INIT_LIST_HEAD(&priv->free_frames);

	mutex_init(&priv->mutex);

	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = NL80211_IFTYPE_STATION;
	priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
	priv->_4965.agg_tids_count = 0;

	/* initialize force reset */
	priv->force_reset[IWL_RF_RESET].reset_duration =
		IWL_DELAY_NEXT_FORCE_RF_RESET;
	priv->force_reset[IWL_FW_RESET].reset_duration =
		IWL_DELAY_NEXT_FORCE_FW_RELOAD;

	/* Choose which receivers/antennas to use */
	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv,
				&priv->contexts[IWL_RXON_CTX_BSS]);

	iwl_legacy_init_scan_params(priv);

	ret = iwl_legacy_init_channel_map(priv);
	if (ret) {
		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = iwl_legacy_init_geos(priv);
	if (ret) {
		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	/* ieee_rates is populated by iwl_legacy_init_geos() above. */
	iwl4965_init_hw_rates(priv, priv->ieee_rates);

	return 0;

err_free_channel_map:
	iwl_legacy_free_channel_map(priv);
err:
	return ret;
}
/* Release the software state allocated by iwl4965_init_drv() plus the
 * calibration results and any pending scan command buffer. */
static void iwl4965_uninit_drv(struct iwl_priv *priv)
{
	iwl4965_calib_free_results(priv);
	iwl_legacy_free_geos(priv);
	iwl_legacy_free_channel_map(priv);
	/* kfree(NULL) is safe if no scan command was ever allocated. */
	kfree(priv->scan_cmd);
}
/* Cache hardware and PCI revision identifiers from CSR registers and
 * PCI config space. */
static void iwl4965_hw_detect(struct iwl_priv *priv)
{
	priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
	priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
	priv->rev_id = priv->pci_dev->revision;
	IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
}
static int iwl4965_set_hw_params(struct iwl_priv *priv)
{
priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
if (priv->cfg->mod_params->amsdu_size_8K)
priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
else
priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
if (priv->cfg->mod_params->disable_11n)
priv->cfg->sku &= ~IWL_SKU_N;
/* Device-specific setup */
return priv->cfg->ops->lib->set_hw_params(priv);
}
/* Mapping from mac80211 access category (VO, VI, BE, BK) to tx fifo for
 * the BSS context. */
static const u8 iwl4965_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};

/* Mapping from access category to hw tx queue for the BSS context. */
static const u8 iwl4965_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
/*
 * PCI probe: allocate hw/priv state, bring up the PCI bus and MMIO
 * mapping, read the EEPROM, initialise driver state and interrupts and
 * finally kick off the asynchronous firmware request.  Returns 0 on
 * success or a negative errno, unwinding everything on failure.
 *
 * Fixes vs. previous revision:
 *  - removed a duplicated, dead "if (err) goto out_free_eeprom;"
 *  - the "HW not ready" path now sets err = -EIO instead of possibly
 *    returning 0 after freeing all resources
 *  - the iwl4965_set_hw_params() error code is propagated instead of
 *    being discarded
 */
static int
iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = 0, i;
	struct iwl_priv *priv;
	struct ieee80211_hw *hw;
	struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
	unsigned long flags;
	u16 pci_cmd;

	/************************
	 * 1. Allocating HW data
	 ************************/
	hw = iwl_legacy_alloc_all(cfg);
	if (!hw) {
		err = -ENOMEM;
		goto out;
	}
	priv = hw->priv;
	/* At this point both hw and priv are allocated. */

	/*
	 * The default context is always valid,
	 * more may be discovered when firmware
	 * is loaded.
	 */
	priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);

	for (i = 0; i < NUM_IWL_RXON_CTX; i++)
		priv->contexts[i].ctxid = i;

	priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
	priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
	priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
	priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
	priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
	priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
	priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
	priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
		BIT(NL80211_IFTYPE_ADHOC);
	priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
		BIT(NL80211_IFTYPE_STATION);
	priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
	priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
	priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
	priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;

	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);

	SET_IEEE80211_DEV(hw, &pdev->dev);

	IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
	priv->cfg = cfg;
	priv->pci_dev = pdev;
	priv->inta_mask = CSR_INI_SET_MASK;

	/* Traffic log failure is not fatal; only debugging is affected. */
	if (iwl_legacy_alloc_traffic_mem(priv))
		IWL_ERR(priv, "Not enough memory to generate traffic log\n");

	/**************************
	 * 2. Initializing PCI bus
	 **************************/
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
				PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_ieee80211_free_hw;
	}

	pci_set_master(pdev);

	/* Prefer a 36-bit DMA mask, fall back to 32 bits. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			IWL_WARN(priv, "No suitable DMA available.\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	pci_set_drvdata(pdev, priv);

	/***********************
	 * 3. Read REV register
	 ***********************/
	priv->hw_base = pci_iomap(pdev, 0, 0);
	if (!priv->hw_base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
		(unsigned long long) pci_resource_len(pdev, 0));
	IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);

	/* these spin locks will be used in apm_ops.init and EEPROM access
	 * we should init now
	 */
	spin_lock_init(&priv->reg_lock);
	spin_lock_init(&priv->lock);

	/*
	 * stop and reset the on-board processor just in case it is in a
	 * strange state ... like being left stranded by a primary kernel
	 * and this is now the kdump kernel trying to start up
	 */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	iwl4965_hw_detect(priv);
	IWL_INFO(priv, "Detected %s, REV=0x%X\n",
		priv->cfg->name, priv->hw_rev);

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	iwl4965_prepare_card_hw(priv);
	if (!priv->hw_ready) {
		IWL_WARN(priv, "Failed, HW not ready\n");
		/* err would otherwise still be 0 here, falsely reporting a
		 * successful probe after everything was torn down. */
		err = -EIO;
		goto out_iounmap;
	}

	/*****************
	 * 4. Read EEPROM
	 *****************/
	err = iwl_legacy_eeprom_init(priv);
	if (err) {
		IWL_ERR(priv, "Unable to init EEPROM\n");
		goto out_iounmap;
	}
	err = iwl4965_eeprom_check_version(priv);
	if (err)
		goto out_free_eeprom;

	/* extract MAC Address */
	iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
	priv->hw->wiphy->addresses = priv->addresses;
	priv->hw->wiphy->n_addresses = 1;

	/************************
	 * 5. Setup HW constants
	 ************************/
	err = iwl4965_set_hw_params(priv);
	if (err) {
		IWL_ERR(priv, "failed to set hw parameters\n");
		goto out_free_eeprom;
	}

	/*******************
	 * 6. Setup priv
	 *******************/
	err = iwl4965_init_drv(priv);
	if (err)
		goto out_free_eeprom;
	/* At this point both hw and priv are initialized. */

	/********************
	 * 7. Setup services
	 ********************/
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	pci_enable_msi(priv->pci_dev);

	err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
			  IRQF_SHARED, DRV_NAME, priv);
	if (err) {
		IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
		goto out_disable_msi;
	}

	iwl4965_setup_deferred_work(priv);
	iwl4965_setup_rx_handlers(priv);

	/*********************************************
	 * 8. Enable interrupts and read RFKILL state
	 *********************************************/

	/* enable rfkill interrupt: hw bug w/a */
	pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
	}

	iwl_legacy_enable_rfkill_int(priv);

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	wiphy_rfkill_set_hw_state(priv->hw->wiphy,
		test_bit(STATUS_RF_KILL_HW, &priv->status));

	iwl_legacy_power_initialize(priv);

	init_completion(&priv->_4965.firmware_loading_complete);

	/* Firmware load completes asynchronously; the continuation does
	 * the remaining registration work. */
	err = iwl4965_request_firmware(priv, true);
	if (err)
		goto out_destroy_workqueue;

	return 0;

 out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	free_irq(priv->pci_dev->irq, priv);
 out_disable_msi:
	pci_disable_msi(priv->pci_dev);
	iwl4965_uninit_drv(priv);
 out_free_eeprom:
	iwl_legacy_eeprom_free(priv);
 out_iounmap:
	pci_iounmap(pdev, priv->hw_base);
 out_pci_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
 out_pci_disable_device:
	pci_disable_device(pdev);
 out_ieee80211_free_hw:
	iwl_legacy_free_traffic_mem(priv);
	ieee80211_free_hw(priv->hw);
 out:
	return err;
}
/*
 * PCI remove callback: unwinds everything set up by iwl4965_pci_probe()
 * and its asynchronous firmware-load continuation, in the reverse order
 * required by the dependencies between the pieces.
 */
static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
{
	struct iwl_priv *priv = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!priv)
		return;

	/* Firmware loading finishes asynchronously after probe; wait for
	 * it before tearing anything down. */
	wait_for_completion(&priv->_4965.firmware_loading_complete);

	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");

	iwl_legacy_dbgfs_unregister(priv);
	sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);

	/* ieee80211_unregister_hw will cause iwl_mac_stop and iwl4965_down
	 * to be called; since we are removing the device we need to set
	 * the STATUS_EXIT_PENDING bit first.
	 */
	set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl_legacy_leds_exit(priv);

	if (priv->mac80211_registered) {
		ieee80211_unregister_hw(priv->hw);
		priv->mac80211_registered = 0;
	} else {
		/* Registration never happened; take the device down
		 * directly. */
		iwl4965_down(priv);
	}

	/*
	 * Make sure device is reset to low power before unloading driver.
	 * This may be redundant with iwl4965_down(), but there are paths to
	 * run iwl4965_down() without calling apm_ops.stop(), and there are
	 * paths to avoid running iwl4965_down() at all before leaving driver.
	 * This (inexpensive) call *makes sure* device is reset.
	 */
	iwl_legacy_apm_stop(priv);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_legacy_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl4965_synchronize_irq(priv);

	iwl4965_dealloc_ucode_pci(priv);

	if (priv->rxq.bd)
		iwl4965_rx_queue_free(priv, &priv->rxq);
	iwl4965_hw_txq_ctx_free(priv);

	iwl_legacy_eeprom_free(priv);

	/*netif_stop_queue(dev); */

	flush_workqueue(priv->workqueue);

	/* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now... */
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	iwl_legacy_free_traffic_mem(priv);

	free_irq(priv->pci_dev->irq, priv);
	pci_disable_msi(priv->pci_dev);
	pci_iounmap(pdev, priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	iwl4965_uninit_drv(priv);

	/* dev_kfree_skb(NULL) is safe when no beacon was ever stashed. */
	dev_kfree_skb(priv->beacon_skb);

	ieee80211_free_hw(priv->hw);
}
/*
 * Activate/deactivate Tx DMA/FIFO channels according to the tx fifos
 * mask.  Must be called under priv->lock and with mac access held.
 */
void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
}
/*****************************************************************************
*
* driver and module entry point
*
*****************************************************************************/
/* Hardware specific file defines the PCI IDs table for that hardware
 * module.  4965AGN devices only; empty if 4965 support is not built. */
static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
	{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
	{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
#endif /* CONFIG_IWL4965 */
	{0}
};
MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
/* PCI driver registration table. */
static struct pci_driver iwl4965_driver = {
	.name = DRV_NAME,
	.id_table = iwl4965_hw_card_ids,
	.probe = iwl4965_pci_probe,
	.remove = __devexit_p(iwl4965_pci_remove),
	.driver.pm = IWL_LEGACY_PM_OPS,
};
/*
 * Module entry point: register the rate-control algorithm first, then
 * the PCI driver.  On PCI registration failure the rate-control
 * registration is rolled back.
 */
static int __init iwl4965_init(void)
{
	int ret;

	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
	pr_info(DRV_COPYRIGHT "\n");

	ret = iwl4965_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = pci_register_driver(&iwl4965_driver);
	if (!ret)
		return 0;

	/* PCI registration failed: undo the rate-control registration. */
	pr_err("Unable to initialize PCI module\n");
	iwl4965_rate_control_unregister();
	return ret;
}
/* Module exit: unregister in reverse order of iwl4965_init(). */
static void __exit iwl4965_exit(void)
{
	pci_unregister_driver(&iwl4965_driver);
	iwl4965_rate_control_unregister();
}

module_exit(iwl4965_exit);
module_init(iwl4965_init);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
/* Runtime-writable debug mask, only present in debug builds. */
module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

/* Load-time tunables: crypto offload, queue count, 11n, A-MSDU buffer
 * size and firmware auto-restart behaviour. */
module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
		   int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
| gpl-2.0 |
olegk0/rk3188-kernel | arch/mips/mm/highmem.c | 3132 | 3013 | #include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
static pte_t *kmap_pte;
unsigned long highstart_pfn, highend_pfn;
/*
 * Map a page into kernel virtual address space; may sleep.  Lowmem
 * pages come straight from the direct map, highmem pages go through
 * the persistent kmap pool with a TLB flush of the new mapping.
 */
void *kmap(struct page *page)
{
	void *vaddr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);

	vaddr = kmap_high(page);
	flush_tlb_one((unsigned long)vaddr);

	return vaddr;
}
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kmap);
/*
 * Drop a mapping established by kmap().  Only highmem pages actually
 * have a kmap entry to release; must not be called from interrupt
 * context.
 */
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (PageHighMem(page))
		kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kunmap);
/*
* kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
* no global lock is needed and because the kmap code must perform a global TLB
* invalidation when the kmap pool wraps.
*
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
* kmaps are appropriate for short, tight code paths only.
*/
/*
 * Map a page into one of this CPU's fixmap kmap slots.  Disables
 * pagefaults, so the caller must not sleep until the matching
 * __kunmap_atomic().  Lowmem pages are returned from the direct map.
 */
void *__kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* Claim the next free per-CPU kmap slot. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* Slot must be empty; a live pte here means a missing kunmap. */
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
	/* Slot is per-CPU, so only the local TLB needs invalidating. */
	local_flush_tlb_one((unsigned long)vaddr);

	return (void*) vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kmap_atomic);
/*
 * Tear down a mapping made by __kmap_atomic() and re-enable pagefaults.
 * Addresses below the fixmap were never atomically kmapped (lowmem
 * fast path) and only need the pagefault re-enable.
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		/* The address must match the slot on top of the stack. */
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remap it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);
/*
* This is the same as kmap_atomic() but can map memory that doesn't
* have a struct page associated with it.
*/
/*
 * Like __kmap_atomic() but takes a raw pfn, so it also works for memory
 * without a struct page.  Caller must pair with __kunmap_atomic().
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	int slot;
	unsigned long vaddr;

	pagefault_disable();

	/* Claim the next free per-CPU fixmap slot and install the pte. */
	slot = kmap_atomic_idx_push() + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + slot);
	set_pte(kmap_pte - slot, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}
/*
 * Resolve a kernel virtual address back to its struct page.  Fixmap
 * addresses are looked up through the cached kmap pte array; anything
 * below the fixmap belongs to the direct map.
 */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	pte_t *ptep;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	ptep = kmap_pte - (virt_to_fix(vaddr) - FIX_KMAP_BEGIN);
	return pte_page(*ptep);
}
/* Boot-time init: cache the pte for the first kmap fixmap slot so the
 * atomic kmap paths can index off it directly. */
void __init kmap_init(void)
{
	kmap_pte = kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
}
| gpl-2.0 |
ronasimi/android.googlesource.com-kernel-msm | drivers/mfd/pmic8058.c | 3388 | 22129 | /* Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/*
* Qualcomm PMIC8058 driver
*
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/msm_ssbi.h>
#include <linux/mfd/core.h>
#include <linux/mfd/pmic8058.h>
#include <linux/mfd/pm8xxx/core.h>
#include <linux/msm_adc.h>
#include <linux/module.h>
#define REG_MPP_BASE 0x50
#define REG_IRQ_BASE 0x1BB
/* PMIC8058 Revision */
#define PM8058_REG_REV 0x002 /* PMIC4 revision */
#define PM8058_VERSION_MASK 0xF0
#define PM8058_REVISION_MASK 0x0F
#define PM8058_VERSION_VALUE 0xE0
/* PMIC 8058 Battery Alarm SSBI registers */
#define REG_BATT_ALARM_THRESH 0x023
#define REG_BATT_ALARM_CTRL1 0x024
#define REG_BATT_ALARM_CTRL2 0x0AA
#define REG_BATT_ALARM_PWM_CTRL 0x0A3
#define REG_TEMP_ALRM_CTRL 0x1B
#define REG_TEMP_ALRM_PWM 0x9B
/* PON CNTL 4 register */
#define SSBI_REG_ADDR_PON_CNTL_4 0x98
#define PM8058_PON_RESET_EN_MASK 0x01
/* PON CNTL 5 register */
#define SSBI_REG_ADDR_PON_CNTL_5 0x7B
#define PM8058_HARD_RESET_EN_MASK 0x08
/* GP_TEST1 register */
#define SSBI_REG_ADDR_GP_TEST_1 0x07A
#define PM8058_RTC_BASE 0x1E8
#define PM8058_OTHC_CNTR_BASE0 0xA0
#define PM8058_OTHC_CNTR_BASE1 0x134
#define PM8058_OTHC_CNTR_BASE2 0x137
/* Shorthand for an IORESOURCE_IRQ entry spanning a single interrupt. */
#define SINGLE_IRQ_RESOURCE(_name, _irq) \
{ \
	.name	= _name, \
	.start	= _irq, \
	.end	= _irq, \
	.flags	= IORESOURCE_IRQ, \
}
/*
 * Per-chip driver state.  pdata is a copy of the platform data given at
 * probe time; irq_chip is the shared pm8xxx interrupt-controller handle;
 * revision caches the raw PM8058_REG_REV register value.
 */
struct pm8058_chip {
	struct pm8058_platform_data	pdata;
	struct device			*dev;
	struct pm_irq_chip		*irq_chip;
	struct mfd_cell *mfd_regulators, *mfd_xo_buffers;
	u8				revision;
};
/* Single-byte register read, routed over SSBI via the parent bus
 * device.  Returns 0 or a negative errno from msm_ssbi_read(). */
static int pm8058_readb(const struct device *dev, u16 addr, u8 *val)
{
	const struct pm8xxx_drvdata *drvdata = dev_get_drvdata(dev);
	const struct pm8058_chip *chip = drvdata->pm_chip_data;

	return msm_ssbi_read(chip->dev->parent, addr, val, 1);
}
/* Single-byte register write, routed over SSBI via the parent bus
 * device.  Returns 0 or a negative errno from msm_ssbi_write(). */
static int pm8058_writeb(const struct device *dev, u16 addr, u8 val)
{
	const struct pm8xxx_drvdata *drvdata = dev_get_drvdata(dev);
	const struct pm8058_chip *chip = drvdata->pm_chip_data;

	return msm_ssbi_write(chip->dev->parent, addr, &val, 1);
}
/* Multi-byte register read over SSBI: cnt bytes starting at addr into
 * buf.  Returns 0 or a negative errno from msm_ssbi_read(). */
static int pm8058_read_buf(const struct device *dev, u16 addr, u8 *buf,
									int cnt)
{
	const struct pm8xxx_drvdata *drvdata = dev_get_drvdata(dev);
	const struct pm8058_chip *chip = drvdata->pm_chip_data;

	return msm_ssbi_read(chip->dev->parent, addr, buf, cnt);
}
/* Multi-byte register write over SSBI: cnt bytes from buf starting at
 * addr.  Returns 0 or a negative errno from msm_ssbi_write(). */
static int pm8058_write_buf(const struct device *dev, u16 addr, u8 *buf,
									int cnt)
{
	const struct pm8xxx_drvdata *drvdata = dev_get_drvdata(dev);
	const struct pm8058_chip *chip = drvdata->pm_chip_data;

	return msm_ssbi_write(chip->dev->parent, addr, buf, cnt);
}
/*
 * Query the shared pm8xxx irq core for the latched status of the given
 * irq.  (A stray unreachable "return 0;" that followed the return
 * statement has been removed.)
 */
static int pm8058_read_irq_stat(const struct device *dev, int irq)
{
	const struct pm8xxx_drvdata *pm8058_drvdata = dev_get_drvdata(dev);
	const struct pm8058_chip *pmic = pm8058_drvdata->pm_chip_data;

	return pm8xxx_get_irq_stat(pmic->irq_chip, irq);
}
/*
 * Identify the PMIC model from the upper nibble of the cached revision
 * register.  Returns PM8XXX_VERSION_8058 on a match, -ENODEV otherwise.
 */
static enum pm8xxx_version pm8058_get_version(const struct device *dev)
{
	const struct pm8xxx_drvdata *drvdata = dev_get_drvdata(dev);
	const struct pm8058_chip *chip = drvdata->pm_chip_data;

	if ((chip->revision & PM8058_VERSION_MASK) != PM8058_VERSION_VALUE)
		return -ENODEV;

	return PM8XXX_VERSION_8058;
}
/* Return the silicon revision: the lower nibble of the cached revision
 * register. */
static int pm8058_get_revision(const struct device *dev)
{
	const struct pm8xxx_drvdata *drvdata = dev_get_drvdata(dev);
	const struct pm8058_chip *chip = drvdata->pm_chip_data;

	return chip->revision & PM8058_REVISION_MASK;
}
/* Accessor callbacks exposed to the pm8xxx core through dev drvdata;
 * pm_chip_data is filled in at probe time. */
static struct pm8xxx_drvdata pm8058_drvdata = {
	.pmic_readb		= pm8058_readb,
	.pmic_writeb		= pm8058_writeb,
	.pmic_read_buf		= pm8058_read_buf,
	.pmic_write_buf		= pm8058_write_buf,
	.pmic_read_irq_stat	= pm8058_read_irq_stat,
	.pmic_get_version	= pm8058_get_version,
	.pmic_get_revision	= pm8058_get_revision,
};
/* IRQ resources handed to the pm8058-charger sub-device. */
static const struct resource pm8058_charger_resources[] __devinitconst = {
	SINGLE_IRQ_RESOURCE("CHGVAL", PM8058_CHGVAL_IRQ),
	SINGLE_IRQ_RESOURCE("CHGINVAL", PM8058_CHGINVAL_IRQ),
	SINGLE_IRQ_RESOURCE("CHGILIM", PM8058_CHGILIM_IRQ),
	SINGLE_IRQ_RESOURCE("VCP", PM8058_VCP_IRQ),
	SINGLE_IRQ_RESOURCE("ATC_DONE", PM8058_ATC_DONE_IRQ),
	SINGLE_IRQ_RESOURCE("ATCFAIL", PM8058_ATCFAIL_IRQ),
	SINGLE_IRQ_RESOURCE("AUTO_CHGDONE", PM8058_AUTO_CHGDONE_IRQ),
	SINGLE_IRQ_RESOURCE("AUTO_CHGFAIL", PM8058_AUTO_CHGFAIL_IRQ),
	SINGLE_IRQ_RESOURCE("CHGSTATE", PM8058_CHGSTATE_IRQ),
	SINGLE_IRQ_RESOURCE("FASTCHG", PM8058_FASTCHG_IRQ),
	SINGLE_IRQ_RESOURCE("CHG_END", PM8058_CHG_END_IRQ),
	SINGLE_IRQ_RESOURCE("BATTTEMP", PM8058_BATTTEMP_IRQ),
	SINGLE_IRQ_RESOURCE("CHGHOT", PM8058_CHGHOT_IRQ),
	SINGLE_IRQ_RESOURCE("CHGTLIMIT", PM8058_CHGTLIMIT_IRQ),
	SINGLE_IRQ_RESOURCE("CHG_GONE", PM8058_CHG_GONE_IRQ),
	SINGLE_IRQ_RESOURCE("VCPMAJOR", PM8058_VCPMAJOR_IRQ),
	SINGLE_IRQ_RESOURCE("VBATDET", PM8058_VBATDET_IRQ),
	SINGLE_IRQ_RESOURCE("BATFET", PM8058_BATFET_IRQ),
	SINGLE_IRQ_RESOURCE("BATT_REPLACE", PM8058_BATT_REPLACE_IRQ),
	SINGLE_IRQ_RESOURCE("BATTCONNECT", PM8058_BATTCONNECT_IRQ),
	SINGLE_IRQ_RESOURCE("VBATDET_LOW", PM8058_VBATDET_LOW_IRQ),
};

static struct mfd_cell pm8058_charger_cell __devinitdata = {
	.name = "pm8058-charger",
	.id = -1,
	.resources = pm8058_charger_resources,
	.num_resources = ARRAY_SIZE(pm8058_charger_resources),
};

/* Misc sub-device: exposes the crystal-oscillator-halt interrupt. */
static const struct resource misc_cell_resources[] __devinitconst = {
	SINGLE_IRQ_RESOURCE("pm8xxx_osc_halt_irq", PM8058_OSCHALT_IRQ),
};

static struct mfd_cell misc_cell __devinitdata = {
	.name = PM8XXX_MISC_DEV_NAME,
	.id = -1,
	.resources = misc_cell_resources,
	.num_resources = ARRAY_SIZE(misc_cell_resources),
};

static struct mfd_cell pm8058_pwm_cell __devinitdata = {
	.name = "pm8058-pwm",
	.id = -1,
};

/* XO ADC sub-device with its conversion-done interrupt. */
static struct resource xoadc_resources[] = {
	SINGLE_IRQ_RESOURCE(NULL, PM8058_ADC_IRQ),
};

static struct mfd_cell xoadc_cell __devinitdata = {
	.name = "pm8058-xoadc",
	.id = -1,
	.resources = xoadc_resources,
	.num_resources = ARRAY_SIZE(xoadc_resources),
};

/* Thermal (die over-temperature) alarm sub-device. */
static const struct resource thermal_alarm_cell_resources[] __devinitconst = {
	SINGLE_IRQ_RESOURCE("pm8058_tempstat_irq", PM8058_TEMPSTAT_IRQ),
	SINGLE_IRQ_RESOURCE("pm8058_overtemp_irq", PM8058_OVERTEMP_IRQ),
};

static struct pm8xxx_tm_core_data thermal_alarm_cdata = {
	.adc_channel = CHANNEL_ADC_DIE_TEMP,
	.adc_type = PM8XXX_TM_ADC_PM8058_ADC,
	.reg_addr_temp_alarm_ctrl = REG_TEMP_ALRM_CTRL,
	.reg_addr_temp_alarm_pwm = REG_TEMP_ALRM_PWM,
	.tm_name = "pm8058_tz",
	.irq_name_temp_stat = "pm8058_tempstat_irq",
	.irq_name_over_temp = "pm8058_overtemp_irq",
};

static struct mfd_cell thermal_alarm_cell __devinitdata = {
	.name = PM8XXX_TM_DEV_NAME,
	.id = -1,
	.resources = thermal_alarm_cell_resources,
	.num_resources = ARRAY_SIZE(thermal_alarm_cell_resources),
	.platform_data = &thermal_alarm_cdata,
	.pdata_size = sizeof(struct pm8xxx_tm_core_data),
};

/* Debugfs helper cell; platform data is the debugfs directory name. */
static struct mfd_cell debugfs_cell __devinitdata = {
	.name = "pm8xxx-debug",
	.id = -1,
	.platform_data = "pm8058-dbg",
	.pdata_size = sizeof("pm8058-dbg"),
};
/* OTHC (One-Touch Headset Controller) blocks 0..2: each instance gets
 * its control-register base; instance 1 also gets switch and
 * ir-detect interrupts. */
static const struct resource othc0_cell_resources[] __devinitconst = {
	{
		.name = "othc_base",
		.start = PM8058_OTHC_CNTR_BASE0,
		.end = PM8058_OTHC_CNTR_BASE0,
		.flags = IORESOURCE_IO,
	},
};

static const struct resource othc1_cell_resources[] __devinitconst = {
	SINGLE_IRQ_RESOURCE(NULL, PM8058_SW_1_IRQ),
	SINGLE_IRQ_RESOURCE(NULL, PM8058_IR_1_IRQ),
	{
		.name = "othc_base",
		.start = PM8058_OTHC_CNTR_BASE1,
		.end = PM8058_OTHC_CNTR_BASE1,
		.flags = IORESOURCE_IO,
	},
};

static const struct resource othc2_cell_resources[] __devinitconst = {
	{
		.name = "othc_base",
		.start = PM8058_OTHC_CNTR_BASE2,
		.end = PM8058_OTHC_CNTR_BASE2,
		.flags = IORESOURCE_IO,
	},
};

/* Battery alarm sub-device: threshold-crossing interrupt plus the
 * register map used by the shared pm8xxx batt-alarm core. */
static const struct resource batt_alarm_cell_resources[] __devinitconst = {
	SINGLE_IRQ_RESOURCE("pm8058_batt_alarm_irq", PM8058_BATT_ALARM_IRQ),
};

static struct mfd_cell leds_cell __devinitdata = {
	.name = "pm8058-led",
	.id = -1,
};

static struct mfd_cell othc0_cell __devinitdata = {
	.name = "pm8058-othc",
	.id = 0,
	.resources = othc0_cell_resources,
	.num_resources = ARRAY_SIZE(othc0_cell_resources),
};

static struct mfd_cell othc1_cell __devinitdata = {
	.name = "pm8058-othc",
	.id = 1,
	.resources = othc1_cell_resources,
	.num_resources = ARRAY_SIZE(othc1_cell_resources),
};

static struct mfd_cell othc2_cell __devinitdata = {
	.name = "pm8058-othc",
	.id = 2,
	.resources = othc2_cell_resources,
	.num_resources = ARRAY_SIZE(othc2_cell_resources),
};

static struct pm8xxx_batt_alarm_core_data batt_alarm_cdata = {
	.irq_name = "pm8058_batt_alarm_irq",
	.reg_addr_threshold = REG_BATT_ALARM_THRESH,
	.reg_addr_ctrl1 = REG_BATT_ALARM_CTRL1,
	.reg_addr_ctrl2 = REG_BATT_ALARM_CTRL2,
	.reg_addr_pwm_ctrl = REG_BATT_ALARM_PWM_CTRL,
};

static struct mfd_cell batt_alarm_cell __devinitdata = {
	.name = PM8XXX_BATT_ALARM_DEV_NAME,
	.id = -1,
	.resources = batt_alarm_cell_resources,
	.num_resources = ARRAY_SIZE(batt_alarm_cell_resources),
	.platform_data = &batt_alarm_cdata,
	.pdata_size = sizeof(struct pm8xxx_batt_alarm_core_data),
};

/* User-programmable logic and NFC support blocks. */
static struct mfd_cell upl_cell __devinitdata = {
	.name = PM8XXX_UPL_DEV_NAME,
	.id = -1,
};

static struct mfd_cell nfc_cell __devinitdata = {
	.name = PM8XXX_NFC_DEV_NAME,
	.id = -1,
};
/* RTC sub-device: alarm interrupt plus the RTC register base. */
static const struct resource rtc_cell_resources[] __devinitconst = {
	[0] = SINGLE_IRQ_RESOURCE(NULL, PM8058_RTC_ALARM_IRQ),
	[1] = {
		.name = "pmic_rtc_base",
		.start = PM8058_RTC_BASE,
		.end = PM8058_RTC_BASE,
		.flags = IORESOURCE_IO,
	},
};

static struct mfd_cell rtc_cell __devinitdata = {
	.name = PM8XXX_RTC_DEV_NAME,
	.id = -1,
	.resources = rtc_cell_resources,
	.num_resources = ARRAY_SIZE(rtc_cell_resources),
};

/* Power key: separate release and press interrupts. */
static const struct resource resources_pwrkey[] __devinitconst = {
	SINGLE_IRQ_RESOURCE(NULL, PM8058_PWRKEY_REL_IRQ),
	SINGLE_IRQ_RESOURCE(NULL, PM8058_PWRKEY_PRESS_IRQ),
};

static struct mfd_cell vibrator_cell __devinitdata = {
	.name = PM8XXX_VIBRATOR_DEV_NAME,
	.id = -1,
};

static struct mfd_cell pwrkey_cell __devinitdata = {
	.name = PM8XXX_PWRKEY_DEV_NAME,
	.id = -1,
	.num_resources = ARRAY_SIZE(resources_pwrkey),
	.resources = resources_pwrkey,
};

/* Keypad matrix: key-event and stuck-key interrupts. */
static const struct resource resources_keypad[] = {
	SINGLE_IRQ_RESOURCE(NULL, PM8058_KEYPAD_IRQ),
	SINGLE_IRQ_RESOURCE(NULL, PM8058_KEYSTUCK_IRQ),
};

static struct mfd_cell keypad_cell __devinitdata = {
	.name = PM8XXX_KEYPAD_DEV_NAME,
	.id = -1,
	.num_resources = ARRAY_SIZE(resources_keypad),
	.resources = resources_keypad,
};

/* MPP block: one contiguous interrupt range covering all MPP pins. */
static const struct resource mpp_cell_resources[] __devinitconst = {
	{
		.start = PM8058_IRQ_BLOCK_BIT(PM8058_MPP_BLOCK_START, 0),
		.end = PM8058_IRQ_BLOCK_BIT(PM8058_MPP_BLOCK_START, 0)
			+ PM8058_MPPS - 1,
		.flags = IORESOURCE_IRQ,
	},
};

static struct mfd_cell mpp_cell __devinitdata = {
	.name = PM8XXX_MPP_DEV_NAME,
	.id = 0,
	.resources = mpp_cell_resources,
	.num_resources = ARRAY_SIZE(mpp_cell_resources),
};

/* GPIO block: one contiguous interrupt range covering all GPIO pins. */
static const struct resource gpio_cell_resources[] __devinitconst = {
	[0] = {
		.start = PM8058_IRQ_BLOCK_BIT(PM8058_GPIO_BLOCK_START, 0),
		.end = PM8058_IRQ_BLOCK_BIT(PM8058_GPIO_BLOCK_START, 0)
			+ PM8058_GPIOS - 1,
		.flags = IORESOURCE_IRQ,
	},
};

static struct mfd_cell gpio_cell __devinitdata = {
	.name = PM8XXX_GPIO_DEV_NAME,
	.id = -1,
	.resources = gpio_cell_resources,
	.num_resources = ARRAY_SIZE(gpio_cell_resources),
};
/*
 * pm8058_add_subdevices - register every MFD child of the PM8058 PMIC
 * @pdata: board platform data; a NULL member skips the matching child
 * @pmic:  per-chip state; records the irq chip handle plus the regulator
 *         and XO-buffer cell arrays allocated here (freed in remove)
 *
 * Returns 0 on success or a negative errno.  On failure the interrupt
 * controller brought up here is torn down; cells that were already added
 * are removed by the caller via mfd_remove_devices().
 *
 * Fixes vs. previous revision: size_t values are printed with %zu, and
 * the nfc/xoadc failure messages no longer name the wrong subdevice.
 */
static int __devinit
pm8058_add_subdevices(const struct pm8058_platform_data *pdata,
		      struct pm8058_chip *pmic)
{
	int rc = 0, irq_base = 0, i;
	struct pm_irq_chip *irq_chip;
	static struct mfd_cell *mfd_regulators, *mfd_xo_buffers;

	/* Bring up the PMIC interrupt controller first so the children
	 * below can claim IRQs relative to irq_base. */
	if (pdata->irq_pdata) {
		pdata->irq_pdata->irq_cdata.nirqs = PM8058_NR_IRQS;
		pdata->irq_pdata->irq_cdata.base_addr = REG_IRQ_BASE;
		irq_base = pdata->irq_pdata->irq_base;
		irq_chip = pm8xxx_irq_init(pmic->dev, pdata->irq_pdata);

		if (IS_ERR(irq_chip)) {
			pr_err("Failed to init interrupts ret=%ld\n",
				PTR_ERR(irq_chip));
			return PTR_ERR(irq_chip);
		}
		pmic->irq_chip = irq_chip;
	}

	if (pdata->gpio_pdata) {
		pdata->gpio_pdata->gpio_cdata.ngpios = PM8058_GPIOS;
		gpio_cell.platform_data = pdata->gpio_pdata;
		gpio_cell.pdata_size = sizeof(struct pm8xxx_gpio_platform_data);
		rc = mfd_add_devices(pmic->dev, 0, &gpio_cell, 1,
					NULL, irq_base);
		if (rc) {
			pr_err("Failed to add gpio subdevice ret=%d\n", rc);
			goto bail;
		}
	}

	if (pdata->mpp_pdata) {
		pdata->mpp_pdata->core_data.nmpps = PM8058_MPPS;
		pdata->mpp_pdata->core_data.base_addr = REG_MPP_BASE;
		mpp_cell.platform_data = pdata->mpp_pdata;
		mpp_cell.pdata_size = sizeof(struct pm8xxx_mpp_platform_data);
		rc = mfd_add_devices(pmic->dev, 0, &mpp_cell, 1, NULL,
					irq_base);
		if (rc) {
			pr_err("Failed to add mpp subdevice ret=%d\n", rc);
			goto bail;
		}
	}

	if (pdata->num_regulators > 0 && pdata->regulator_pdatas) {
		/* One cell per regulator; array is kept in pmic and freed
		 * in pm8058_remove(). */
		mfd_regulators = kzalloc(sizeof(struct mfd_cell)
				* (pdata->num_regulators), GFP_KERNEL);
		if (!mfd_regulators) {
			/* %zu: sizeof() product is a size_t (was %d) */
			pr_err("Cannot allocate %zu bytes for pm8058 regulator "
				"mfd cells\n", sizeof(struct mfd_cell)
						* (pdata->num_regulators));
			rc = -ENOMEM;
			goto bail;
		}
		for (i = 0; i < pdata->num_regulators; i++) {
			mfd_regulators[i].name = "pm8058-regulator";
			mfd_regulators[i].id = pdata->regulator_pdatas[i].id;
			mfd_regulators[i].platform_data =
				&(pdata->regulator_pdatas[i]);
			mfd_regulators[i].pdata_size =
				sizeof(struct pm8058_vreg_pdata);
		}
		rc = mfd_add_devices(pmic->dev, 0, mfd_regulators,
				pdata->num_regulators, NULL, irq_base);
		if (rc) {
			pr_err("Failed to add regulator subdevices ret=%d\n",
				rc);
			kfree(mfd_regulators);
			goto bail;
		}
		pmic->mfd_regulators = mfd_regulators;
	}

	if (pdata->num_xo_buffers > 0 && pdata->xo_buffer_pdata) {
		mfd_xo_buffers = kzalloc(sizeof(struct mfd_cell)
				* (pdata->num_xo_buffers), GFP_KERNEL);
		if (!mfd_xo_buffers) {
			/* %zu: sizeof() product is a size_t (was %d) */
			pr_err("Cannot allocate %zu bytes for pm8058 XO buffer "
				"mfd cells\n", sizeof(struct mfd_cell)
						* (pdata->num_xo_buffers));
			rc = -ENOMEM;
			goto bail;
		}
		for (i = 0; i < pdata->num_xo_buffers; i++) {
			mfd_xo_buffers[i].name = PM8058_XO_BUFFER_DEV_NAME;
			mfd_xo_buffers[i].id = pdata->xo_buffer_pdata[i].id;
			mfd_xo_buffers[i].platform_data =
				&(pdata->xo_buffer_pdata[i]);
			mfd_xo_buffers[i].pdata_size =
				sizeof(struct pm8058_xo_pdata);
		}
		rc = mfd_add_devices(pmic->dev, 0, mfd_xo_buffers,
				pdata->num_xo_buffers, NULL, irq_base);
		if (rc) {
			pr_err("Failed to add XO buffer subdevices ret=%d\n",
				rc);
			kfree(mfd_xo_buffers);
			goto bail;
		}
		pmic->mfd_xo_buffers = mfd_xo_buffers;
	}

	if (pdata->keypad_pdata) {
		keypad_cell.platform_data = pdata->keypad_pdata;
		keypad_cell.pdata_size =
			sizeof(struct pm8xxx_keypad_platform_data);
		rc = mfd_add_devices(pmic->dev, 0, &keypad_cell, 1, NULL,
					irq_base);
		if (rc) {
			pr_err("Failed to add keypad subdevice ret=%d\n", rc);
			goto bail;
		}
	}

	if (pdata->rtc_pdata) {
		rtc_cell.platform_data = pdata->rtc_pdata;
		rtc_cell.pdata_size = sizeof(struct pm8xxx_rtc_platform_data);
		rc = mfd_add_devices(pmic->dev, 0, &rtc_cell, 1, NULL,
					irq_base);
		if (rc) {
			pr_err("Failed to add rtc subdevice ret=%d\n", rc);
			goto bail;
		}
	}

	if (pdata->pwrkey_pdata) {
		pwrkey_cell.platform_data = pdata->pwrkey_pdata;
		pwrkey_cell.pdata_size =
			sizeof(struct pm8xxx_pwrkey_platform_data);
		rc = mfd_add_devices(pmic->dev, 0, &pwrkey_cell, 1, NULL,
					irq_base);
		if (rc) {
			pr_err("Failed to add pwrkey subdevice ret=%d\n", rc);
			goto bail;
		}
	}

	if (pdata->vibrator_pdata) {
		vibrator_cell.platform_data = pdata->vibrator_pdata;
		vibrator_cell.pdata_size =
			sizeof(struct pm8xxx_vibrator_platform_data);
		rc = mfd_add_devices(pmic->dev, 0, &vibrator_cell, 1, NULL,
					irq_base);
		if (rc) {
			pr_err("Failed to add vibrator subdevice ret=%d\n",
				rc);
			goto bail;
		}
	}

	if (pdata->leds_pdata) {
		leds_cell.platform_data = pdata->leds_pdata;
		leds_cell.pdata_size =
			sizeof(struct pmic8058_leds_platform_data);
		rc = mfd_add_devices(pmic->dev, 0, &leds_cell, 1, NULL,
					irq_base);
		if (rc) {
			pr_err("Failed to add leds subdevice ret=%d\n", rc);
			goto bail;
		}
	}

	if (pdata->xoadc_pdata) {
		xoadc_cell.platform_data = pdata->xoadc_pdata;
		xoadc_cell.pdata_size =
			sizeof(struct xoadc_platform_data);
		rc = mfd_add_devices(pmic->dev, 0, &xoadc_cell, 1, NULL,
					irq_base);
		if (rc) {
			/* was misreported as "leds" */
			pr_err("Failed to add xoadc subdevice ret=%d\n", rc);
			goto bail;
		}
	}

	/* othc0 and othc2 intentionally pass irq_base 0, matching the
	 * original behaviour; othc1 uses the PMIC irq base. */
	if (pdata->othc0_pdata) {
		othc0_cell.platform_data = pdata->othc0_pdata;
		othc0_cell.pdata_size =
			sizeof(struct pmic8058_othc_config_pdata);
		rc = mfd_add_devices(pmic->dev, 0, &othc0_cell, 1, NULL, 0);
		if (rc) {
			pr_err("Failed to add othc0 subdevice ret=%d\n", rc);
			goto bail;
		}
	}

	if (pdata->othc1_pdata) {
		othc1_cell.platform_data = pdata->othc1_pdata;
		othc1_cell.pdata_size =
			sizeof(struct pmic8058_othc_config_pdata);
		rc = mfd_add_devices(pmic->dev, 0, &othc1_cell, 1, NULL,
					irq_base);
		if (rc) {
			pr_err("Failed to add othc1 subdevice ret=%d\n", rc);
			goto bail;
		}
	}

	if (pdata->othc2_pdata) {
		othc2_cell.platform_data = pdata->othc2_pdata;
		othc2_cell.pdata_size =
			sizeof(struct pmic8058_othc_config_pdata);
		rc = mfd_add_devices(pmic->dev, 0, &othc2_cell, 1, NULL, 0);
		if (rc) {
			pr_err("Failed to add othc2 subdevice ret=%d\n", rc);
			goto bail;
		}
	}

	if (pdata->pwm_pdata) {
		pm8058_pwm_cell.platform_data = pdata->pwm_pdata;
		pm8058_pwm_cell.pdata_size = sizeof(struct pm8058_pwm_pdata);
		rc = mfd_add_devices(pmic->dev, 0, &pm8058_pwm_cell, 1, NULL,
					irq_base);
		if (rc) {
			pr_err("Failed to add pwm subdevice ret=%d\n", rc);
			goto bail;
		}
	}

	if (pdata->misc_pdata) {
		misc_cell.platform_data = pdata->misc_pdata;
		misc_cell.pdata_size = sizeof(struct pm8xxx_misc_platform_data);
		rc = mfd_add_devices(pmic->dev, 0, &misc_cell, 1, NULL,
					irq_base);
		if (rc) {
			pr_err("Failed to add misc subdevice ret=%d\n", rc);
			goto bail;
		}
	}

	/* The remaining children are always present. */
	rc = mfd_add_devices(pmic->dev, 0, &thermal_alarm_cell, 1, NULL,
				irq_base);
	if (rc) {
		pr_err("Failed to add thermal alarm subdevice ret=%d\n",
			rc);
		goto bail;
	}

	rc = mfd_add_devices(pmic->dev, 0, &batt_alarm_cell, 1, NULL,
				irq_base);
	if (rc) {
		pr_err("Failed to add battery alarm subdevice ret=%d\n",
			rc);
		goto bail;
	}

	rc = mfd_add_devices(pmic->dev, 0, &upl_cell, 1, NULL, 0);
	if (rc) {
		pr_err("Failed to add upl subdevice ret=%d\n", rc);
		goto bail;
	}

	rc = mfd_add_devices(pmic->dev, 0, &nfc_cell, 1, NULL, 0);
	if (rc) {
		/* was misreported as "upl" */
		pr_err("Failed to add nfc subdevice ret=%d\n", rc);
		goto bail;
	}

	if (pdata->charger_pdata) {
		pm8058_charger_cell.platform_data = pdata->charger_pdata;
		pm8058_charger_cell.pdata_size = sizeof(struct
						pmic8058_charger_data);
		rc = mfd_add_devices(pmic->dev, 0, &pm8058_charger_cell,
					1, NULL, irq_base);
		if (rc) {
			pr_err("Failed to add charger subdevice ret=%d\n", rc);
			goto bail;
		}
	}

	rc = mfd_add_devices(pmic->dev, 0, &debugfs_cell, 1, NULL, irq_base);
	if (rc) {
		pr_err("Failed to add debugfs subdevice ret=%d\n", rc);
		goto bail;
	}

	return rc;

bail:
	/* Undo the interrupt controller setup; already-added cells are
	 * removed by the caller via mfd_remove_devices(). */
	if (pmic->irq_chip) {
		pm8xxx_irq_exit(pmic->irq_chip);
		pmic->irq_chip = NULL;
	}
	return rc;
}
/*
 * pm8058_probe - bring up the PM8058 core: read the chip revision,
 * then register all child MFD devices described by the platform data.
 *
 * Returns 0 on success or a negative errno; on failure any children
 * already added are removed and the chip state is freed.
 */
static int __devinit pm8058_probe(struct platform_device *pdev)
{
	int rc;
	struct pm8058_platform_data *pdata = pdev->dev.platform_data;
	struct pm8058_chip *pmic;

	if (pdata == NULL) {
		pr_err("%s: No platform_data or IRQ.\n", __func__);
		return -ENODEV;
	}

	pmic = kzalloc(sizeof *pmic, GFP_KERNEL);
	if (pmic == NULL) {
		pr_err("%s: kzalloc() failed.\n", __func__);
		return -ENOMEM;
	}

	pmic->dev = &pdev->dev;
	/* pm8058_drvdata is file-scope; hang the chip state off it so
	 * pm8058_remove() can find it again via platform_get_drvdata(). */
	pm8058_drvdata.pm_chip_data = pmic;
	platform_set_drvdata(pdev, &pm8058_drvdata);

	/* Read PMIC chip revision; a read failure is logged but not fatal. */
	rc = pm8058_readb(pmic->dev, PM8058_REG_REV, &pmic->revision);
	if (rc)
		pr_err("%s: Failed on pm8058_readb for revision: rc=%d.\n",
			__func__, rc);
	pr_info("%s: PMIC revision: %X\n", __func__, pmic->revision);

	/* Keep a private copy of the platform data. */
	(void) memcpy((void *)&pmic->pdata, (const void *)pdata,
			sizeof(pmic->pdata));

	rc = pm8058_add_subdevices(pdata, pmic);
	if (rc) {
		pr_err("Cannot add subdevices rc=%d\n", rc);
		goto err;
	}

	/* Configure PMIC behaviour on a hard reset; failure is non-fatal. */
	rc = pm8xxx_hard_reset_config(PM8XXX_SHUTDOWN_ON_HARD_RESET);
	if (rc < 0)
		pr_err("%s: failed to config shutdown on hard reset: %d\n",
			__func__, rc);

	return 0;

err:
	mfd_remove_devices(pmic->dev);
	platform_set_drvdata(pdev, NULL);
	kfree(pmic);
	return rc;
}
/*
 * pm8058_remove - undo probe: remove the child devices, tear down the
 * interrupt chip, and free the cell arrays allocated at probe time.
 *
 * Fix: also free pmic->mfd_xo_buffers, which pm8058_add_subdevices()
 * allocates alongside mfd_regulators but was previously leaked here.
 */
static int __devexit pm8058_remove(struct platform_device *pdev)
{
	struct pm8xxx_drvdata *drvdata;
	struct pm8058_chip *pmic = NULL;

	drvdata = platform_get_drvdata(pdev);
	if (drvdata)
		pmic = drvdata->pm_chip_data;
	if (pmic) {
		if (pmic->dev)
			mfd_remove_devices(pmic->dev);
		if (pmic->irq_chip)
			pm8xxx_irq_exit(pmic->irq_chip);
		kfree(pmic->mfd_regulators);
		kfree(pmic->mfd_xo_buffers);
		kfree(pmic);
	}
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static struct platform_driver pm8058_driver = {
	.probe		= pm8058_probe,
	.remove		= __devexit_p(pm8058_remove),
	.driver		= {
		.name	= "pm8058-core",
		.owner	= THIS_MODULE,
	},
};

static int __init pm8058_init(void)
{
	return platform_driver_register(&pm8058_driver);
}
/* Registered at postcore so PMIC children are available to later drivers. */
postcore_initcall(pm8058_init);

static void __exit pm8058_exit(void)
{
	platform_driver_unregister(&pm8058_driver);
}
module_exit(pm8058_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PMIC8058 core driver");
MODULE_VERSION("1.0");
MODULE_ALIAS("platform:pmic8058-core");
| gpl-2.0 |
curbthepain/revkernel_ubers5 | arch/s390/kernel/early.c | 4412 | 11727 | /*
* arch/s390/kernel/early.c
*
* Copyright IBM Corp. 2007, 2009
* Author(s): Hongjie Yang <hongjie@us.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <asm/ebcdic.h>
#include <asm/ipl.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sysinfo.h>
#include <asm/cpcmd.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include "entry.h"
/*
* Create a Kernel NSS if the SAVESYS= parameter is defined
*/
#define DEFSYS_CMD_SIZE 128
#define SAVESYS_CMD_SIZE 32
char kernel_nss_name[NSS_NAME_SIZE + 1];
static void __init setup_boot_command_line(void);
/*
* Get the TOD clock running.
*/
/*
 * Get the TOD clock running: if it cannot be read, set it to the Unix
 * epoch (or die in a disabled wait) and seed the scheduler clock base.
 */
static void __init reset_tod_clock(void)
{
	u64 time;

	if (store_clock(&time) == 0)
		return;
	/* TOD clock not running. Set the clock to Unix Epoch. */
	if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
		disabled_wait(0);

	sched_clock_base_cc = TOD_UNIX_EPOCH;
	S390_lowcore.last_update_clock = sched_clock_base_cc;
}
#ifdef CONFIG_SHARED_KERNEL
/*
 * savesys_ipl_nss - hand an EBCDIC CP command string to z/VM.
 *
 * Issues DIAGNOSE 8 (CP command) with the command address/length in the
 * required registers; on 64-bit the CPU is switched to 31-bit addressing
 * mode around the diagnose (sam31/sam64).  Returns CP's response code.
 */
int __init savesys_ipl_nss(char *cmd, const int cmdlen);

asm(
	"	.section .init.text,\"ax\",@progbits\n"
	"	.align 4\n"
	"	.type savesys_ipl_nss, @function\n"
	"savesys_ipl_nss:\n"
#ifdef CONFIG_64BIT
	"	stmg	6,15,48(15)\n"
	"	lgr	14,3\n"
	"	sam31\n"
	"	diag	2,14,0x8\n"
	"	sam64\n"
	"	lgr	2,14\n"
	"	lmg	6,15,48(15)\n"
#else
	"	stm	6,15,24(15)\n"
	"	lr	14,3\n"
	"	diag	2,14,0x8\n"
	"	lr	2,14\n"
	"	lm	6,15,24(15)\n"
#endif
	"	br	14\n"
	"	.size savesys_ipl_nss, .-savesys_ipl_nss\n"
	"	.previous\n");
static __initdata char upper_command_line[COMMAND_LINE_SIZE];

/*
 * If SAVESYS=<name> appears on the command line (and we run under z/VM),
 * build and issue CP DEFSYS/SAVESYS commands to save the kernel (and
 * initrd, if present) as a named saved system, then re-IPL from it.
 * On any CP failure the NSS name is cleared and boot continues normally.
 */
static noinline __init void create_kernel_nss(void)
{
	unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned int sinitrd_pfn, einitrd_pfn;
#endif
	int response;
	int hlen;
	size_t len;
	char *savesys_ptr;
	char defsys_cmd[DEFSYS_CMD_SIZE];
	char savesys_cmd[SAVESYS_CMD_SIZE];

	/* Do nothing if we are not running under VM */
	if (!MACHINE_IS_VM)
		return;

	/* Convert COMMAND_LINE to upper case */
	for (i = 0; i < strlen(boot_command_line); i++)
		upper_command_line[i] = toupper(boot_command_line[i]);

	savesys_ptr = strstr(upper_command_line, "SAVESYS=");
	if (!savesys_ptr)
		return;

	savesys_ptr += 8;	/* Point to the beginning of the NSS name */
	for (i = 0; i < NSS_NAME_SIZE; i++) {
		if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
			break;
		kernel_nss_name[i] = savesys_ptr[i];
	}

	/* Segment layout: read-only text up to _eshared, shared from there. */
	stext_pfn = PFN_DOWN(__pa(&_stext));
	eshared_pfn = PFN_DOWN(__pa(&_eshared));
	end_pfn = PFN_UP(__pa(&_end));
	min_size = end_pfn << 2;	/* pages -> KiB (4K pages) */

	hlen = snprintf(defsys_cmd, DEFSYS_CMD_SIZE,
			"DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
			kernel_nss_name, stext_pfn - 1, stext_pfn,
			eshared_pfn - 1, eshared_pfn, end_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
		einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
		min_size = einitrd_pfn << 2;
		hlen += snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
				 " EW %.5X-%.5X", sinitrd_pfn, einitrd_pfn);
	}
#endif

	snprintf(defsys_cmd + hlen, DEFSYS_CMD_SIZE - hlen,
		 " EW MINSIZE=%.7iK PARMREGS=0-13", min_size);
	defsys_cmd[DEFSYS_CMD_SIZE - 1] = '\0';
	snprintf(savesys_cmd, SAVESYS_CMD_SIZE, "SAVESYS %s \n IPL %s",
		 kernel_nss_name, kernel_nss_name);
	savesys_cmd[SAVESYS_CMD_SIZE - 1] = '\0';

	__cpcmd(defsys_cmd, NULL, 0, &response);

	if (response != 0) {
		pr_err("Defining the Linux kernel NSS failed with rc=%d\n",
			response);
		kernel_nss_name[0] = '\0';
		return;
	}

	len = strlen(savesys_cmd);
	ASCEBC(savesys_cmd, len);
	response = savesys_ipl_nss(savesys_cmd, len);

	/* On success: response is equal to the command size,
	 *	       max SAVESYS_CMD_SIZE
	 * On error: response contains the numeric portion of cp error message.
	 *	     for SAVESYS it will be >= 263
	 *	     for missing privilege class, it will be 1
	 */
	if (response > SAVESYS_CMD_SIZE || response == 1) {
		pr_err("Saving the Linux kernel NSS failed with rc=%d\n",
			response);
		kernel_nss_name[0] = '\0';
		return;
	}

	/* re-initialize cputime accounting. */
	sched_clock_base_cc = get_clock();
	S390_lowcore.last_update_clock = sched_clock_base_cc;
	S390_lowcore.last_update_timer = 0x7fffffffffffffffULL;
	S390_lowcore.user_timer = 0;
	S390_lowcore.system_timer = 0;
	asm volatile("SPT 0(%0)" : : "a" (&S390_lowcore.last_update_timer));

	/* re-setup boot command line with new ipl vm parms */
	ipl_update_parameters();
	setup_boot_command_line();

	ipl_flags = IPL_NSS_VALID;
}
#else /* CONFIG_SHARED_KERNEL */
static inline void create_kernel_nss(void) { }
#endif /* CONFIG_SHARED_KERNEL */
/*
 * Zero the kernel BSS segment; must run before any BSS object is used.
 */
static noinline __init void clear_bss_section(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}
/*
 * Set the default storage key on every page occupied by the kernel image
 * (page frames 0 .. PFN(_end)).
 */
static noinline __init void init_kernel_storage_key(void)
{
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(&_end));

	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT,
				     PAGE_DEFAULT_KEY, 0);
}
static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE);

/*
 * Classify the execution environment (LPAR, z/VM or KVM) and record it
 * in S390_lowcore.machine_flags.
 */
static noinline __init void detect_machine_type(void)
{
	/* Check current-configuration-level */
	if ((stsi(NULL, 0, 0, 0) >> 28) <= 2) {
		S390_lowcore.machine_flags |= MACHINE_FLAG_LPAR;
		return;
	}
	/* Get virtual-machine cpu information. */
	if (stsi(&vmms, 3, 2, 2) == -ENOSYS || !vmms.count)
		return;

	/* Running under KVM? If not we assume z/VM */
	/* "\xd2\xe5\xd4" is "KVM" in EBCDIC. */
	if (!memcmp(vmms.vm[0].cpi, "\xd2\xe5\xd4", 3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
	else
		S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
/*
 * Minimal early program-check handler: if the faulting address has a
 * fixup entry, resume there; otherwise stop in a disabled wait.
 */
static __init void early_pgm_check_handler(void)
{
	unsigned long addr;
	const struct exception_table_entry *fixup;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = search_exception_tables(addr & PSW_ADDR_INSN);
	if (!fixup)
		disabled_wait(0);
	S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
}
/*
 * Install early external-interrupt and program-check PSWs in the lowcore
 * and route program checks to early_pgm_check_handler().
 */
static noinline __init void setup_lowcore_early(void)
{
	psw_t psw;

	psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
	S390_lowcore.external_new_psw = psw;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
	S390_lowcore.program_new_psw = psw;
	s390_base_pgm_handler_fn = early_pgm_check_handler;
}
/* Cache the CPU facility bits (STFLE) in the lowcore for later tests. */
static noinline __init void setup_facility_list(void)
{
	stfle(S390_lowcore.stfle_fac_list,
	      ARRAY_SIZE(S390_lowcore.stfle_fac_list));
}
/*
 * Enable huge-page (EDAT) support when facilities 2 and 8 are present.
 * Disabled under CONFIG_DEBUG_PAGEALLOC, which needs 4K mappings.
 */
static noinline __init void setup_hpage(void)
{
#ifndef CONFIG_DEBUG_PAGEALLOC
	if (!test_facility(2) || !test_facility(8))
		return;
	S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
	__ctl_set_bit(0, 23);	/* control register 0, EDAT enable bit */
#endif
}
/*
 * Probe for the MVPG (move page) instruction on 31-bit machines; an
 * illegal-op program check is absorbed through the exception table.
 */
static __init void detect_mvpg(void)
{
#ifndef CONFIG_64BIT
	int rc;

	asm volatile(
		"	la	0,0\n"
		"	mvpg	%2,%2\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_MVPG;
#endif
}
/*
 * Probe for IEEE floating point (EFPC instruction) on 31-bit machines.
 */
static __init void detect_ieee(void)
{
#ifndef CONFIG_64BIT
	int rc, tmp;

	asm volatile(
		"	efpc	%1,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_IEEE;
#endif
}
/*
 * Probe for the CSP (compare and swap and purge) instruction on 31-bit
 * machines.
 */
static __init void detect_csp(void)
{
#ifndef CONFIG_64BIT
	int rc;

	asm volatile(
		"	la	0,0\n"
		"	la	1,0\n"
		"	la	2,4\n"
		"	csp	0,2\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_CSP;
#endif
}
/*
 * Probe for DIAGNOSE 0x9c (directed yield to a given CPU) by issuing it
 * against our own CPU address.
 */
static __init void detect_diag9c(void)
{
	unsigned int cpu_address;
	int rc;

	cpu_address = stap();
	asm volatile(
		"	diag	%2,0,0x9c\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG9C;
}
/*
 * Probe for DIAGNOSE 0x44 (voluntary time-slice yield) on 64-bit.
 */
static __init void detect_diag44(void)
{
#ifdef CONFIG_64BIT
	int rc;

	asm volatile(
		"	diag	0,0,0x44\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
	if (!rc)
		S390_lowcore.machine_flags |= MACHINE_FLAG_DIAG44;
#endif
}
/*
 * Translate relevant STFLE facility bits into MACHINE_FLAG_* bits
 * (IDTE, PFMF, topology, MVCOS, SPP, STCKF) for fast runtime tests.
 */
static __init void detect_machine_facilities(void)
{
#ifdef CONFIG_64BIT
	if (test_facility(3))
		S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
	if (test_facility(8))
		S390_lowcore.machine_flags |= MACHINE_FLAG_PFMF;
	if (test_facility(11))
		S390_lowcore.machine_flags |= MACHINE_FLAG_TOPOLOGY;
	if (test_facility(27))
		S390_lowcore.machine_flags |= MACHINE_FLAG_MVCOS;
	if (test_facility(40))
		S390_lowcore.machine_flags |= MACHINE_FLAG_SPP;
	if (test_facility(25))
		S390_lowcore.machine_flags |= MACHINE_FLAG_STCKF;
#endif
}
/*
 * Move the initrd out of the way if it starts too close to the kernel.
 */
static __init void rescue_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long min_initrd_addr = (unsigned long) _end + (4UL << 20);
	/*
	 * Just like in case of IPL from VM reader we make sure there is a
	 * gap of 4MB between end of kernel and start of initrd.
	 * That way we can also be sure that saving an NSS will succeed,
	 * which however only requires different segments.
	 */
	if (!INITRD_START || !INITRD_SIZE)
		return;
	if (INITRD_START >= min_initrd_addr)
		return;
	memmove((void *) min_initrd_addr, (void *) INITRD_START, INITRD_SIZE);
	INITRD_START = min_initrd_addr;
#endif
}
/*
 * append_to_cmdline - let an IPL data callback extend boot_command_line.
 *
 * The callback writes its data just past the current terminating '\0'.
 * If the written data starts with '=', it REPLACES the whole command
 * line (the '=' is stripped); otherwise it is appended, the '\0' being
 * turned into a separating space.
 */
static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
{
	char *parm, *delim;
	size_t rc, len;

	len = strlen(boot_command_line);

	delim = boot_command_line + len;	/* '\0' character position */
	parm  = boot_command_line + len + 1;	/* append right after '\0' */

	rc = ipl_data(parm, COMMAND_LINE_SIZE - len - 1);
	if (rc) {
		if (*parm == '=')
			memmove(boot_command_line, parm + 1, rc);
		else
			*delim = ' ';		/* replace '\0' with space */
	}
}
/*
 * Return 1 if any byte of the NUL-terminated string has the top bit set
 * (i.e. the text is likely EBCDIC rather than 7-bit ASCII), 0 otherwise.
 */
static inline int has_ebcdic_char(const char *str)
{
	const char *p;

	for (p = str; *p; p++) {
		if (*p & 0x80)
			return 1;
	}
	return 0;
}
/*
 * Build boot_command_line: take the (possibly EBCDIC) architecture
 * command line, convert it to ASCII if needed, strip surrounding blanks,
 * then append the z/VM IPL PARM and SCP data.
 */
static void __init setup_boot_command_line(void)
{
	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
	/* convert arch command line to ascii if necessary */
	if (has_ebcdic_char(COMMAND_LINE))
		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
	/* copy arch command line */
	strlcpy(boot_command_line, strstrip(COMMAND_LINE),
		ARCH_COMMAND_LINE_SIZE);

	/* append IPL PARM data to the boot command line */
	if (MACHINE_IS_VM)
		append_to_cmdline(append_ipl_vmparm);

	append_to_cmdline(append_ipl_scpdata);
}
/*
 * Early (pre-mm) architecture setup, called once from head.S: get the
 * TOD clock running, save IPL parameters, clear BSS, initialize storage
 * keys, detect the machine and its facilities, and create a kernel NSS
 * if the SAVESYS= parameter is defined.  Ordering here matters — e.g.
 * clear_bss_section() must precede any use of BSS data, and the command
 * line must be set up before create_kernel_nss() parses it.
 */
void __init startup_init(void)
{
	reset_tod_clock();
	ipl_save_parameters();
	rescue_initrd();
	clear_bss_section();
	init_kernel_storage_key();
	lockdep_init();
	lockdep_off();
	sort_main_extable();
	setup_lowcore_early();
	setup_facility_list();
	detect_machine_type();
	ipl_update_parameters();
	setup_boot_command_line();
	create_kernel_nss();
	detect_mvpg();
	detect_ieee();
	detect_csp();
	detect_diag9c();
	detect_diag44();
	detect_machine_facilities();
	setup_hpage();
	sclp_facilities_detect();
	detect_memory_layout(memory_chunk);
#ifdef CONFIG_DYNAMIC_FTRACE
	S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
#endif
	lockdep_on();
}
| gpl-2.0 |
SeKwonLee/pmfs | arch/unicore32/kernel/ksyms.c | 7228 | 2173 | /*
* linux/arch/unicore32/kernel/ksyms.c
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/cryptohash.h>
#include <linux/delay.h>
#include <linux/in6.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/checksum.h>
#include "ksyms.h"
/* bitops */
EXPORT_SYMBOL(find_next_zero_bit);
EXPORT_SYMBOL(find_next_bit);

/* backtrace support */
EXPORT_SYMBOL(__backtrace);

/* platform dependent support */
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__const_udelay);

/* networking checksums */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_from_user);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_ipv6_magic);

/* io: exported only when not provided as macros/inlines by the headers */
#ifndef __raw_readsb
EXPORT_SYMBOL(__raw_readsb);
#endif
#ifndef __raw_readsw
EXPORT_SYMBOL(__raw_readsw);
#endif
#ifndef __raw_readsl
EXPORT_SYMBOL(__raw_readsl);
#endif
#ifndef __raw_writesb
EXPORT_SYMBOL(__raw_writesb);
#endif
#ifndef __raw_writesw
EXPORT_SYMBOL(__raw_writesw);
#endif
#ifndef __raw_writesl
EXPORT_SYMBOL(__raw_writesl);
#endif

/* string / mem functions */
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);

/* user mem (segment) */
EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(__strncpy_from_user);

EXPORT_SYMBOL(copy_page);

EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);

EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
EXPORT_SYMBOL(__get_user_4);

EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);

/* libgcc arithmetic helpers */
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__ucmpdi2);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__bswapsi2);
| gpl-2.0 |
cubieboard/CC-A80-kernel-source | arch/arm/mach-vt8500/gpio.c | 7996 | 6500 | /* linux/arch/arm/mach-vt8500/gpio.c
*
* Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include "devices.h"
/* Recover the driver-private wrapper from a generic gpio_chip pointer. */
#define to_vt8500(__chip) container_of(__chip, struct vt8500_gpio_chip, chip)

/* Byte offsets of the four register banks in the GPIO MMIO window. */
#define ENABLE_REGS	0x0	/* route pin to GPIO instead of peripheral */
#define DIRECTION_REGS	0x20	/* 1 = output, 0 = input */
#define OUTVALUE_REGS	0x40	/* output level */
#define INVALUE_REGS	0x60	/* sampled input level (read-only use) */

/* Offset of the register word serving the dedicated (non-muxed) GPIOs. */
#define EXT_REGOFF	0x1c

/* Virtual base of the GPIO register window, mapped in vt8500_gpio_init(). */
static void __iomem *regbase;

struct vt8500_gpio_chip {
	struct gpio_chip chip;	/* generic gpiolib chip */
	unsigned int shift;	/* first bit of this bank within its register */
	unsigned int regoff;	/* byte offset of the bank's register word */
};

/* IRQ numbers of the 8 external GPIO lines, copied from wmt_gpio_ext_irq. */
static int gpio_to_irq_map[8];
/*
 * Claim a muxed pin as GPIO by setting its bit in the bank's enable
 * register (read-modify-write; no locking, matching the rest of the file).
 */
static int vt8500_muxed_gpio_request(struct gpio_chip *chip,
				     unsigned offset)
{
	struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
	unsigned val = readl(regbase + ENABLE_REGS + vt8500_chip->regoff);

	val |= (1 << vt8500_chip->shift << offset);
	writel(val, regbase + ENABLE_REGS + vt8500_chip->regoff);

	return 0;
}

/* Release a muxed pin: clear its enable bit, returning it to the peripheral. */
static void vt8500_muxed_gpio_free(struct gpio_chip *chip,
				   unsigned offset)
{
	struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
	unsigned val = readl(regbase + ENABLE_REGS + vt8500_chip->regoff);

	val &= ~(1 << vt8500_chip->shift << offset);
	writel(val, regbase + ENABLE_REGS + vt8500_chip->regoff);
}
/* Configure a muxed pin as input by clearing its direction bit. */
static int vt8500_muxed_gpio_direction_input(struct gpio_chip *chip,
					     unsigned offset)
{
	struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
	unsigned val = readl(regbase + DIRECTION_REGS + vt8500_chip->regoff);

	val &= ~(1 << vt8500_chip->shift << offset);
	writel(val, regbase + DIRECTION_REGS + vt8500_chip->regoff);

	return 0;
}

/*
 * Configure a muxed pin as output.  A requested high level is written to
 * the output-value register; a low level relies on the register's
 * existing state (bit left unchanged, mirroring the original behaviour).
 */
static int vt8500_muxed_gpio_direction_output(struct gpio_chip *chip,
					      unsigned offset, int value)
{
	struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
	unsigned val = readl(regbase + DIRECTION_REGS + vt8500_chip->regoff);

	val |= (1 << vt8500_chip->shift << offset);
	writel(val, regbase + DIRECTION_REGS + vt8500_chip->regoff);

	if (value) {
		val = readl(regbase + OUTVALUE_REGS + vt8500_chip->regoff);
		val |= (1 << vt8500_chip->shift << offset);
		writel(val, regbase + OUTVALUE_REGS + vt8500_chip->regoff);
	}
	return 0;
}
/* Return the sampled input level (0/1) of a muxed pin. */
static int vt8500_muxed_gpio_get_value(struct gpio_chip *chip,
				       unsigned offset)
{
	struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);

	return (readl(regbase + INVALUE_REGS + vt8500_chip->regoff)
		>> vt8500_chip->shift >> offset) & 1;
}
static void vt8500_muxed_gpio_set_value(struct gpio_chip *chip,
unsigned offset, int value)
{
struct vt8500_gpio_chip *vt8500_chip = to_vt8500(chip);
unsigned val = readl(regbase + INVALUE_REGS + vt8500_chip->regoff);
if (value)
val |= (1 << vt8500_chip->shift << offset);
else
val &= ~(1 << vt8500_chip->shift << offset);
writel(val, regbase + INVALUE_REGS + vt8500_chip->regoff);
}
/*
 * Describe one bank of muxed GPIOs: __shift/__off locate the bank's bits
 * within the register file, __base/__num give its global GPIO numbering.
 */
#define VT8500_GPIO_BANK(__name, __shift, __off, __base, __num)	\
{								\
	.chip = {						\
		.label			= __name,		\
		.request		= vt8500_muxed_gpio_request, \
		.free			= vt8500_muxed_gpio_free, \
		.direction_input	= vt8500_muxed_gpio_direction_input, \
		.direction_output	= vt8500_muxed_gpio_direction_output, \
		.get			= vt8500_muxed_gpio_get_value, \
		.set			= vt8500_muxed_gpio_set_value, \
		.can_sleep		= 0,			\
		.base			= __base,		\
		.ngpio			= __num,		\
	},							\
	.shift		= __shift,				\
	.regoff		= __off,				\
}

/* All pin-muxed banks on the VT8500; bases follow the 8 external GPIOs. */
static struct vt8500_gpio_chip vt8500_muxed_gpios[] = {
	VT8500_GPIO_BANK("uart0",	0,	0x0,	8,	4),
	VT8500_GPIO_BANK("uart1",	4,	0x0,	12,	4),
	VT8500_GPIO_BANK("spi0",	8,	0x0,	16,	4),
	VT8500_GPIO_BANK("spi1",	12,	0x0,	20,	4),
	VT8500_GPIO_BANK("spi2",	16,	0x0,	24,	4),
	VT8500_GPIO_BANK("pwmout",	24,	0x0,	28,	2),

	VT8500_GPIO_BANK("sdmmc",	0,	0x4,	30,	11),
	VT8500_GPIO_BANK("ms",		16,	0x4,	41,	7),
	VT8500_GPIO_BANK("i2c0",	24,	0x4,	48,	2),
	VT8500_GPIO_BANK("i2c1",	26,	0x4,	50,	2),

	VT8500_GPIO_BANK("mii",		0,	0x8,	52,	20),
	VT8500_GPIO_BANK("see",		20,	0x8,	72,	4),
	VT8500_GPIO_BANK("ide",		24,	0x8,	76,	7),

	VT8500_GPIO_BANK("ccir",	0,	0xc,	83,	19),

	VT8500_GPIO_BANK("ts",		8,	0x10,	102,	11),
	VT8500_GPIO_BANK("lcd",		0,	0x14,	113,	23),
};
/* Configure a dedicated (non-muxed) GPIO as input. */
static int vt8500_gpio_direction_input(struct gpio_chip *chip,
				       unsigned offset)
{
	unsigned val = readl(regbase + DIRECTION_REGS + EXT_REGOFF);

	val &= ~(1 << offset);
	writel(val, regbase + DIRECTION_REGS + EXT_REGOFF);

	return 0;
}

/*
 * Configure a dedicated GPIO as output; a high level is written to the
 * output register, a low level leaves the existing output bit untouched.
 */
static int vt8500_gpio_direction_output(struct gpio_chip *chip,
					unsigned offset, int value)
{
	unsigned val = readl(regbase + DIRECTION_REGS + EXT_REGOFF);

	val |= (1 << offset);
	writel(val, regbase + DIRECTION_REGS + EXT_REGOFF);

	if (value) {
		val = readl(regbase + OUTVALUE_REGS + EXT_REGOFF);
		val |= (1 << offset);
		writel(val, regbase + OUTVALUE_REGS + EXT_REGOFF);
	}
	return 0;
}

/* Return the sampled input level (0/1) of a dedicated GPIO. */
static int vt8500_gpio_get_value(struct gpio_chip *chip,
				 unsigned offset)
{
	return (readl(regbase + INVALUE_REGS + EXT_REGOFF) >> offset) & 1;
}

/* Set the output level of a dedicated GPIO via the output register. */
static void vt8500_gpio_set_value(struct gpio_chip *chip,
				  unsigned offset, int value)
{
	unsigned val = readl(regbase + OUTVALUE_REGS + EXT_REGOFF);

	if (value)
		val |= (1 << offset);
	else
		val &= ~(1 << offset);
	writel(val, regbase + OUTVALUE_REGS + EXT_REGOFF);
}
/*
 * Map one of the 8 dedicated GPIO lines to its interrupt number, or
 * return -EINVAL for offsets outside the external-GPIO range.
 */
static int vt8500_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	return (offset < 8) ? gpio_to_irq_map[offset] : -EINVAL;
}
/*
 * Chip for the 8 dedicated GPIO lines (global numbers 0-7).  These are
 * always GPIOs, so no request/free mux hooks are needed, and they are
 * the only lines with interrupt support (to_irq).
 */
static struct gpio_chip vt8500_external_gpios = {
	.label			= "extgpio",
	.direction_input	= vt8500_gpio_direction_input,
	.direction_output	= vt8500_gpio_direction_output,
	.get			= vt8500_gpio_get_value,
	.set			= vt8500_gpio_set_value,
	.to_irq			= vt8500_gpio_to_irq,
	.can_sleep		= 0,
	.base			= 0,
	.ngpio			= 8,
};
/*
 * Map the GPIO register window, snapshot the external-GPIO IRQ numbers,
 * and register the external chip plus every muxed bank with gpiolib.
 */
void __init vt8500_gpio_init(void)
{
	int i;

	for (i = 0; i < 8; i++)
		gpio_to_irq_map[i] = wmt_gpio_ext_irq[i];

	regbase = ioremap(wmt_gpio_base, SZ_64K);
	if (!regbase) {
		printk(KERN_ERR "Failed to map MMIO registers for GPIO\n");
		return;
	}

	gpiochip_add(&vt8500_external_gpios);

	for (i = 0; i < ARRAY_SIZE(vt8500_muxed_gpios); i++)
		gpiochip_add(&vt8500_muxed_gpios[i].chip);
}
| gpl-2.0 |
tako0910/android_kernel_htc_dlxj | arch/alpha/kernel/sys_wildfire.c | 9020 | 8795 | /*
* linux/arch/alpha/kernel/sys_wildfire.c
*
* Wildfire support.
*
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_wildfire.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
/* Software copy of the per-PCA interrupt enable bits, one bit per IRQ. */
static unsigned long cached_irq_mask[WILDFIRE_NR_IRQS/(sizeof(long)*8)];

/* Serializes cached_irq_mask updates and the hardware writes they drive. */
DEFINE_SPINLOCK(wildfire_irq_lock);

/* Nonzero while wildfire_init_irq_hw() probes PCAs that may not exist. */
static int doing_init_irq_hw = 0;
/*
 * Push the cached enable mask for the PCA owning @irq out to hardware.
 * The QBB and PCA numbers are encoded in bits 8+ and 6-7 of the irq.
 */
static void
wildfire_update_irq_hw(unsigned int irq)
{
	int qbbno = (irq >> 8) & (WILDFIRE_MAX_QBB - 1);
	int pcano = (irq >> 6) & (WILDFIRE_PCA_PER_QBB - 1);
	wildfire_pca *pca;
	volatile unsigned long * enable0;

	if (!WILDFIRE_PCA_EXISTS(qbbno, pcano)) {
		/* Expected during init probing; a real error otherwise. */
		if (!doing_init_irq_hw) {
			printk(KERN_ERR "wildfire_update_irq_hw:"
			       " got irq %d for non-existent PCA %d"
			       " on QBB %d.\n",
			       irq, pcano, qbbno);
		}
		return;
	}

	pca = WILDFIRE_pca(qbbno, pcano);
	enable0 = (unsigned long *) &pca->pca_int[0].enable; /* ??? */

	*enable0 = cached_irq_mask[qbbno * WILDFIRE_PCA_PER_QBB + pcano];
	mb();
	*enable0;	/* read back to flush the posted write */
}
/*
 * Program every possible PCA's enable register from the (all-zero)
 * cached mask; doing_init_irq_hw suppresses warnings for absent PCAs.
 * The #if 0 branch preserves an earlier single-PCA variant.
 */
static void __init
wildfire_init_irq_hw(void)
{
#if 0
	register wildfire_pca * pca = WILDFIRE_pca(0, 0);
	volatile unsigned long * enable0, * enable1, * enable2, *enable3;
	volatile unsigned long * target0, * target1, * target2, *target3;

	enable0 = (unsigned long *) &pca->pca_int[0].enable;
	enable1 = (unsigned long *) &pca->pca_int[1].enable;
	enable2 = (unsigned long *) &pca->pca_int[2].enable;
	enable3 = (unsigned long *) &pca->pca_int[3].enable;

	target0 = (unsigned long *) &pca->pca_int[0].target;
	target1 = (unsigned long *) &pca->pca_int[1].target;
	target2 = (unsigned long *) &pca->pca_int[2].target;
	target3 = (unsigned long *) &pca->pca_int[3].target;

	*enable0 = *enable1 = *enable2 = *enable3 = 0;

	*target0 = (1UL<<8) | WILDFIRE_QBB(0);
	*target1 = *target2 = *target3 = 0;

	mb();

	*enable0; *enable1; *enable2; *enable3;
	*target0; *target1; *target2; *target3;
#else
	int i;

	doing_init_irq_hw = 1;

	/* Need to update only once for every possible PCA. */
	for (i = 0; i < WILDFIRE_NR_IRQS; i+=WILDFIRE_IRQ_PER_PCA)
		wildfire_update_irq_hw(i);

	doing_init_irq_hw = 0;
#endif
}
/* irq_chip unmask hook: set the enable bit and push it to the PCA. */
static void
wildfire_enable_irq(struct irq_data *d)
{
	unsigned int bit = d->irq;

	/* The low 16 irqs are also routed through the i8259 PIC code. */
	if (bit < 16)
		i8259a_enable_irq(d);

	spin_lock(&wildfire_irq_lock);
	set_bit(bit, &cached_irq_mask);
	wildfire_update_irq_hw(bit);
	spin_unlock(&wildfire_irq_lock);
}
/* irq_chip mask hook: clear the enable bit and push it to the PCA. */
static void
wildfire_disable_irq(struct irq_data *d)
{
	unsigned int bit = d->irq;

	/* The low 16 irqs are also routed through the i8259 PIC code. */
	if (bit < 16)
		i8259a_disable_irq(d);

	spin_lock(&wildfire_irq_lock);
	clear_bit(bit, &cached_irq_mask);
	wildfire_update_irq_hw(bit);
	spin_unlock(&wildfire_irq_lock);
}
/* irq_chip mask_ack hook: same masking as disable, with i8259 ack. */
static void
wildfire_mask_and_ack_irq(struct irq_data *d)
{
	unsigned int bit = d->irq;

	/* The low 16 irqs additionally need the i8259 mask-and-ack. */
	if (bit < 16)
		i8259a_mask_and_ack_irq(d);

	spin_lock(&wildfire_irq_lock);
	clear_bit(bit, &cached_irq_mask);
	wildfire_update_irq_hw(bit);
	spin_unlock(&wildfire_irq_lock);
}
/* irq_chip callbacks for interrupts routed through a Wildfire PCA. */
static struct irq_chip wildfire_irq_type = {
	.name		= "WILDFIRE",
	.irq_unmask	= wildfire_enable_irq,
	.irq_mask	= wildfire_disable_irq,
	.irq_mask_ack	= wildfire_mask_and_ack_irq,
};
/*
 * Register chip/handler for one PCA's interrupts.  irq_bias is the
 * base irq number of this (QBB, PCA) pair.  Relative irqs 0-15 are
 * the ISA range (2 is skipped -- presumably the cascade), 36 is the
 * built-in SCSI, and 40-63 are the PCI slot lines (see the summary
 * comment further down).  Relative irq 32, the ISA summary interrupt,
 * is claimed with a no_action handler.
 */
static void __init
wildfire_init_irq_per_pca(int qbbno, int pcano)
{
	int i, irq_bias;
	static struct irqaction isa_enable = {
		.handler	= no_action,
		.name		= "isa_enable",
	};

	irq_bias = qbbno * (WILDFIRE_PCA_PER_QBB * WILDFIRE_IRQ_PER_PCA)
		 + pcano * WILDFIRE_IRQ_PER_PCA;

#if 0
	unsigned long io_bias;

	/* Only need the following for first PCI bus per PCA. */
	io_bias = WILDFIRE_IO(qbbno, pcano<<1) - WILDFIRE_IO_BIAS;

	outb(0, DMA1_RESET_REG + io_bias);
	outb(0, DMA2_RESET_REG + io_bias);
	outb(DMA_MODE_CASCADE, DMA2_MODE_REG + io_bias);
	outb(0, DMA2_MASK_REG + io_bias);
#endif

#if 0
	/* ??? Not sure how to do this, yet... */
	init_i8259a_irqs(); /* ??? */
#endif

	/* ISA-range irqs for this PCA. */
	for (i = 0; i < 16; ++i) {
		if (i == 2)
			continue;
		irq_set_chip_and_handler(i + irq_bias, &wildfire_irq_type,
					 handle_level_irq);
		irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
	}

	/* Built-in SCSI interrupt. */
	irq_set_chip_and_handler(36 + irq_bias, &wildfire_irq_type,
				 handle_level_irq);
	irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);

	/* PCI slot interrupt lines. */
	for (i = 40; i < 64; ++i) {
		irq_set_chip_and_handler(i + irq_bias, &wildfire_irq_type,
					 handle_level_irq);
		irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
	}

	setup_irq(32+irq_bias, &isa_enable);
}
/*
 * Machine-vector init_irq hook: mask everything, set up the i8259,
 * then register handlers for each (QBB, PCA) actually present.
 */
static void __init
wildfire_init_irq(void)
{
	int qbbno, pcano;

#if 1
	wildfire_init_irq_hw();
	init_i8259a_irqs();
#endif

	for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
		if (WILDFIRE_QBB_EXISTS(qbbno)) {
			for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) {
				if (WILDFIRE_PCA_EXISTS(qbbno, pcano)) {
					wildfire_init_irq_per_pca(qbbno, pcano);
				}
			}
		}
	}
}
/* Machine-vector device_interrupt hook: decode the vector and dispatch. */
static void
wildfire_device_interrupt(unsigned long vector)
{
	/*
	 * Vector layout once the 0x800 base and the low nibble are
	 * stripped:
	 *   bits 10-8: source QBB ID
	 *   bits  7-6: PCA
	 *   bits  5-0: irq within the PCA
	 */
	handle_irq((vector - 0x800) >> 4);
}
/*
* PCI Fixup configuration.
*
* Summary per PCA (2 PCI or HIPPI buses):
*
* Bit Meaning
* 0-15 ISA
*
*32 ISA summary
*33 SMI
*34 NMI
*36 builtin QLogic SCSI (or slot 0 if no IO module)
*40 Interrupt Line A from slot 2 PCI0
*41 Interrupt Line B from slot 2 PCI0
*42 Interrupt Line C from slot 2 PCI0
*43 Interrupt Line D from slot 2 PCI0
*44 Interrupt Line A from slot 3 PCI0
*45 Interrupt Line B from slot 3 PCI0
*46 Interrupt Line C from slot 3 PCI0
*47 Interrupt Line D from slot 3 PCI0
*
*48 Interrupt Line A from slot 4 PCI1
*49 Interrupt Line B from slot 4 PCI1
*50 Interrupt Line C from slot 4 PCI1
*51 Interrupt Line D from slot 4 PCI1
*52 Interrupt Line A from slot 5 PCI1
*53 Interrupt Line B from slot 5 PCI1
*54 Interrupt Line C from slot 5 PCI1
*55 Interrupt Line D from slot 5 PCI1
*56 Interrupt Line A from slot 6 PCI1
*57 Interrupt Line B from slot 6 PCI1
*58 Interrupt Line C from slot 6 PCI1
*59 Interrupt Line D from slot 6 PCI1
*60 Interrupt Line A from slot 7 PCI1
*61 Interrupt Line B from slot 7 PCI1
*62 Interrupt Line C from slot 7 PCI1
*63 Interrupt Line D from slot 7 PCI1
*
*
* IdSel
* 0 Cypress Bridge I/O (ISA summary interrupt)
* 1 64 bit PCI 0 option slot 1 (SCSI QLogic builtin)
* 2 64 bit PCI 0 option slot 2
* 3 64 bit PCI 0 option slot 3
* 4 64 bit PCI 1 option slot 4
* 5 64 bit PCI 1 option slot 5
* 6 64 bit PCI 1 option slot 6
* 7 64 bit PCI 1 option slot 7
*/
/*
 * pci_map_irq hook.  The table yields a PCA-relative irq (see the
 * summary comment above); the QBB and PCA numbers recovered from the
 * hose index are then folded into bits 8-10 and 6-7 of the final irq.
 */
static int __init
wildfire_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[8][5] __initdata = {
		/*INT    INTA   INTB   INTC   INTD */
		{ -1,    -1,    -1,    -1,    -1}, /* IdSel 0 ISA Bridge */
		{ 36,    36,    36+1,  36+2,  36+3}, /* IdSel 1 SCSI builtin */
		{ 40,    40,    40+1,  40+2,  40+3}, /* IdSel 2 PCI 0 slot 2 */
		{ 44,    44,    44+1,  44+2,  44+3}, /* IdSel 3 PCI 0 slot 3 */
		{ 48,    48,    48+1,  48+2,  48+3}, /* IdSel 4 PCI 1 slot 4 */
		{ 52,    52,    52+1,  52+2,  52+3}, /* IdSel 5 PCI 1 slot 5 */
		{ 56,    56,    56+1,  56+2,  56+3}, /* IdSel 6 PCI 1 slot 6 */
		{ 60,    60,    60+1,  60+2,  60+3}, /* IdSel 7 PCI 1 slot 7 */
	};
	long min_idsel = 0, max_idsel = 7, irqs_per_slot = 5;

	struct pci_controller *hose = dev->sysdata;
	int irq = COMMON_TABLE_LOOKUP;

	if (irq > 0) {
		/* Bias the PCA-relative irq by the hose's QBB/PCA. */
		int qbbno = hose->index >> 3;
		int pcano = (hose->index >> 1) & 3;
		irq += (qbbno << 8) + (pcano << 6);
	}
	return irq;
}
/*
* The System Vectors
*/
/* Machine vector for the Wildfire platform: EV6 MMU, per-PCA irqs. */
struct alpha_machine_vector wildfire_mv __initmv = {
	.vector_name		= "WILDFIRE",
	DO_EV6_MMU,
	DO_DEFAULT_RTC,
	DO_WILDFIRE_IO,
	.machine_check		= wildfire_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,

	.nr_irqs		= WILDFIRE_NR_IRQS,
	.device_interrupt	= wildfire_device_interrupt,

	.init_arch		= wildfire_init_arch,
	.init_irq		= wildfire_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= common_init_pci,
	.kill_arch		= wildfire_kill_arch,
	.pci_map_irq		= wildfire_map_irq,
	.pci_swizzle		= common_swizzle,

	.pa_to_nid		= wildfire_pa_to_nid,
	.cpuid_to_nid		= wildfire_cpuid_to_nid,
	.node_mem_start		= wildfire_node_mem_start,
	.node_mem_size		= wildfire_node_mem_size,
};
ALIAS_MV(wildfire)
| gpl-2.0 |
ghbhaha/furnace-bacon | arch/alpha/kernel/sys_sio.c | 9020 | 12613 | /*
* linux/arch/alpha/kernel/sys_sio.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code for all boards that route the PCI interrupts through the SIO
* PCI/ISA bridge. This includes Noname (AXPpci33), Multia (UDB),
* Kenetics's Platform 2000, Avanti (AlphaStation), XL, and AlphaBook1.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/screen_info.h>
#include <asm/compiler.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_lca.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
#include "pc873xx.h"
#if defined(ALPHA_RESTORE_SRM_SETUP)
/* Save LCA configuration data as the console had it set up. */
struct
{
	/* SIO PIRQ route register (config offset 0x60) as read in
	   sio_pci_route(); written back by sio_kill_arch(). */
	unsigned int orig_route_tab; /* for SAVE/RESTORE */
} saved_config __attribute((common));
#endif
/*
 * init_irq hook shared by all SIO-based boards: route device
 * interrupts through the SRM handler when running under SRM, then set
 * up the i8259 pair and ISA DMA.
 */
static void __init
sio_init_irq(void)
{
	if (alpha_using_srm)
		alpha_mv.device_interrupt = srm_device_interrupt;
	init_i8259a_irqs();
	common_init_isa_dma();
}
/* init_arch hook for the AlphaBook1: fix up the console screen_info
   for the fixed-mode LCD before running the common LCA setup. */
static inline void __init
alphabook1_init_arch(void)
{
	/* The AlphaBook1 has LCD video fixed at 800x600,
	   37 rows and 100 cols. */
	screen_info.orig_y = 37;
	screen_info.orig_video_cols = 100;
	screen_info.orig_video_lines = 37;

	lca_init_arch();
}
/*
* sio_route_tab selects irq routing in PCI/ISA bridge so that:
* PIRQ0 -> irq 15
* PIRQ1 -> irq 9
* PIRQ2 -> irq 10
* PIRQ3 -> irq 11
*
* This probably ought to be configurable via MILO. For
* example, sound boards seem to like using IRQ 9.
*
* This is NOT how we should do it. PIRQ0-X should have
* their own IRQs, the way intel uses the IO-APIC IRQs.
*/
/*
 * Log the SIO bridge's original PIRQ routing (config offset 0x60 of
 * device 7 on the ISA hose), optionally save it for restoration at
 * kill_arch, then install the board-specific routing from the machine
 * vector (see the sio_route_tab comment above).
 */
static void __init
sio_pci_route(void)
{
	unsigned int orig_route_tab;

	/* First, ALWAYS read and print the original setting. */
	pci_bus_read_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
				  &orig_route_tab);
	printk("%s: PIRQ original 0x%x new 0x%x\n", __func__,
	       orig_route_tab, alpha_mv.sys.sio.route_tab);

#if defined(ALPHA_RESTORE_SRM_SETUP)
	saved_config.orig_route_tab = orig_route_tab;
#endif

	/* Now override with desired setting. */
	pci_bus_write_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
				   alpha_mv.sys.sio.route_tab);
}
/*
 * Walk every PCI device and build a bitmask of the irq lines in use,
 * so those lines can be switched to level-triggered mode.  Bridges
 * other than PCMCIA bridges are skipped.
 */
static unsigned int __init
sio_collect_irq_levels(void)
{
	unsigned int level_bits = 0;
	struct pci_dev *dev = NULL;

	/* Iterate through the devices, collecting IRQ levels. */
	for_each_pci_dev(dev) {
		int is_bridge = (dev->class >> 16) == PCI_BASE_CLASS_BRIDGE;
		int is_pcmcia = (dev->class >> 8) == PCI_CLASS_BRIDGE_PCMCIA;

		if (is_bridge && !is_pcmcia)
			continue;
		if (dev->irq)
			level_bits |= (1 << dev->irq);
	}
	return level_bits;
}
/*
 * Program the level-trigger control ports (0x4d0/0x4d1) so that the
 * irq lines in 'level_bits' become level sensitive.
 */
static void __init
sio_fixup_irq_levels(unsigned int level_bits)
{
	unsigned int old_level_bits;

	/*
	 * Now, make all PCI interrupts level sensitive.  Notice:
	 * these registers must be accessed byte-wise.  inw()/outw()
	 * don't work.
	 *
	 * Make sure to turn off any level bits set for IRQs 9,10,11,15,
	 * so that the only bits getting set are for devices actually found.
	 * Note that we do preserve the remainder of the bits, which we hope
	 * will be set correctly by ARC/SRM.
	 *
	 * Note: we at least preserve any level-set bits on AlphaBook1
	 */
	old_level_bits = inb(0x4d0) | (inb(0x4d1) << 8);

	/* 0x71ff keeps everything except bits 9, 10, 11 and 15. */
	level_bits |= (old_level_bits & 0x71ff);

	outb((level_bits >> 0) & 0xff, 0x4d0);
	outb((level_bits >> 8) & 0xff, 0x4d1);
}
/*
 * pci_map_irq hook for Noname/Avanti/XL.  The table yields a PIRQ
 * index (0-3); the matching byte of route_tab then gives the ISA irq
 * that PIRQ is routed to.
 */
static inline int __init
noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	/*
	 * The Noname board has 5 PCI slots with each of the 4
	 * interrupt pins routed to different pins on the PCI/ISA
	 * bridge (PIRQ0-PIRQ3).  The table below is based on
	 * information available at:
	 *
	 *   http://ftp.digital.com/pub/DEC/axppci/ref_interrupts.txt
	 *
	 * I have no information on the Avanti interrupt routing, but
	 * the routing seems to be identical to the Noname except
	 * that the Avanti has an additional slot whose routing I'm
	 * unsure of.
	 *
	 * pirq_tab[0] is a fake entry to deal with old PCI boards
	 * that have the interrupt pin number hardwired to 0 (meaning
	 * that they use the default INTA line, if they are interrupt
	 * driven at all).
	 */
	static char irq_tab[][5] __initdata = {
		/*INT A   B   C   D */
		{ 3,  3,  3,  3,  3}, /* idsel  6 (53c810) */
		{-1, -1, -1, -1, -1}, /* idsel  7 (SIO: PCI/ISA bridge) */
		{ 2,  2, -1, -1, -1}, /* idsel  8 (Hack: slot closest ISA) */
		{-1, -1, -1, -1, -1}, /* idsel  9 (unused) */
		{-1, -1, -1, -1, -1}, /* idsel 10 (unused) */
		{ 0,  0,  2,  1,  0}, /* idsel 11 KN25_PCI_SLOT0 */
		{ 1,  1,  0,  2,  1}, /* idsel 12 KN25_PCI_SLOT1 */
		{ 2,  2,  1,  0,  2}, /* idsel 13 KN25_PCI_SLOT2 */
		{ 0,  0,  0,  0,  0}, /* idsel 14 AS255 TULIP */
	};
	const long min_idsel = 6, max_idsel = 14, irqs_per_slot = 5;
	int irq = COMMON_TABLE_LOOKUP, tmp;

	/* Extract byte 'irq' of route_tab: the ISA irq for this PIRQ. */
	tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);

	return irq >= 0 ? tmp : -1;
}
/*
 * pci_map_irq hook for Platform2000: same PIRQ-index-then-route_tab
 * scheme as noname_map_irq, with the P2K slot layout.
 */
static inline int __init
p2k_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[][5] __initdata = {
		/*INT A   B   C   D */
		{ 0,  0, -1, -1, -1}, /* idsel  6 (53c810) */
		{-1, -1, -1, -1, -1}, /* idsel  7 (SIO: PCI/ISA bridge) */
		{ 1,  1,  2,  3,  0}, /* idsel  8 (slot A) */
		{ 2,  2,  3,  0,  1}, /* idsel  9 (slot B) */
		{-1, -1, -1, -1, -1}, /* idsel 10 (unused) */
		{-1, -1, -1, -1, -1}, /* idsel 11 (unused) */
		{ 3,  3, -1, -1, -1}, /* idsel 12 (CMD0646) */
	};
	const long min_idsel = 6, max_idsel = 12, irqs_per_slot = 5;
	int irq = COMMON_TABLE_LOOKUP, tmp;

	/* Extract byte 'irq' of route_tab: the ISA irq for this PIRQ. */
	tmp = __kernel_extbl(alpha_mv.sys.sio.route_tab, irq);

	return irq >= 0 ? tmp : -1;
}
/*
 * init_pci hook shared by Noname/Avanti/XL/P2K: common PCI setup,
 * PIRQ routing, level-trigger fixup, then Super IO probe/unblock.
 */
static inline void __init
noname_init_pci(void)
{
	common_init_pci();
	sio_pci_route();
	sio_fixup_irq_levels(sio_collect_irq_levels());

	if (pc873xx_probe() == -1) {
		printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
	} else {
		printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
		       pc873xx_get_model(), pc873xx_get_base());

		/* Enabling things in the Super IO chip doesn't actually
		 * configure and enable things, the legacy drivers still
		 * need to do the actual configuration and enabling.
		 * This only unblocks them.
		 */
#if !defined(CONFIG_ALPHA_AVANTI)
		/* Don't bother on the Avanti family.
		 * None of them had on-board IDE.
		 */
		pc873xx_enable_ide();
#endif
		pc873xx_enable_epp19();
	}
}
/*
 * init_pci hook for the AlphaBook1: common setup and PIRQ routing,
 * then two board-specific quirks -- disable PCI burst on the NCR SCSI
 * (the Cirrus PCMCIA chip can't stand bursts) and force the VGA PR1
 * register to report 1Mb of video memory.
 */
static inline void __init
alphabook1_init_pci(void)
{
	struct pci_dev *dev;
	unsigned char orig, config;

	common_init_pci();
	sio_pci_route();

	/*
	 * On the AlphaBook1, the PCMCIA chip (Cirrus 6729)
	 * is sensitive to PCI bus bursts, so we must DISABLE
	 * burst mode for the NCR 8xx SCSI... :-(
	 *
	 * Note that the NCR810 SCSI driver must preserve the
	 * setting of the bit in order for this to work.  At the
	 * moment (2.0.29), ncr53c8xx.c does NOT do this, but
	 * 53c7,8xx.c DOES.
	 */
	dev = NULL;
	while ((dev = pci_get_device(PCI_VENDOR_ID_NCR, PCI_ANY_ID, dev))) {
		if (dev->device == PCI_DEVICE_ID_NCR_53C810
		    || dev->device == PCI_DEVICE_ID_NCR_53C815
		    || dev->device == PCI_DEVICE_ID_NCR_53C820
		    || dev->device == PCI_DEVICE_ID_NCR_53C825) {
			unsigned long io_port;
			unsigned char ctest4;

			io_port = dev->resource[0].start;
			ctest4 = inb(io_port+0x21);
			if (!(ctest4 & 0x80)) {
				printk("AlphaBook1 NCR init: setting"
				       " burst disable\n");
				outb(ctest4 | 0x80, io_port+0x21);
			}
		}
	}

	/* Do not set *ANY* level triggers for AlphaBook1. */
	sio_fixup_irq_levels(0);

	/* Make sure that register PR1 indicates 1Mb mem */
	outb(0x0f, 0x3ce); orig = inb(0x3cf);   /* read PR5  */
	outb(0x0f, 0x3ce); outb(0x05, 0x3cf);   /* unlock PR0-4 */
	outb(0x0b, 0x3ce); config = inb(0x3cf); /* read PR1 */
	if ((config & 0xc0) != 0xc0) {
		printk("AlphaBook1 VGA init: setting 1Mb memory\n");
		config |= 0xc0;
		outb(0x0b, 0x3ce); outb(config, 0x3cf); /* write PR1 */
	}
	outb(0x0f, 0x3ce); outb(orig, 0x3cf); /* (re)lock PR0-4 */
}
/*
 * kill_arch hook: put back the SIO PIRQ routing the console had set
 * up (saved in sio_pci_route()) before handing control back.
 */
void
sio_kill_arch(int mode)
{
#if defined(ALPHA_RESTORE_SRM_SETUP)
	/* Since we cannot read the PCI DMA Window CSRs, we
	 * cannot restore them here.
	 *
	 * However, we CAN read the PIRQ route register, so restore it
	 * now...
	 */
	pci_bus_write_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
				   saved_config.orig_route_tab);
#endif
}
/*
 * The System Vectors
 */

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_BOOK1)
/* AlphaBook1: LCA core, ISA-style interrupts via the SIO bridge. */
struct alpha_machine_vector alphabook1_mv __initmv = {
	.vector_name		= "AlphaBook1",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_LCA_IO,
	.machine_check		= lca_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs		= 16,
	.device_interrupt	= isa_device_interrupt,

	.init_arch		= alphabook1_init_arch,
	.init_irq		= sio_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= alphabook1_init_pci,
	.kill_arch		= sio_kill_arch,
	.pci_map_irq		= noname_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .sio = {
		/* NCR810 SCSI is 14, PCMCIA controller is 15.  */
		.route_tab	= 0x0e0f0a0a,
	}}
};
ALIAS_MV(alphabook1)
#endif

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_AVANTI)
/* Avanti (AlphaStation): APECS core. */
struct alpha_machine_vector avanti_mv __initmv = {
	.vector_name		= "Avanti",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_APECS_IO,
	.machine_check		= apecs_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs		= 16,
	.device_interrupt	= isa_device_interrupt,

	.init_arch		= apecs_init_arch,
	.init_irq		= sio_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= noname_init_pci,
	.kill_arch		= sio_kill_arch,
	.pci_map_irq		= noname_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .sio = {
		.route_tab	= 0x0b0a050f, /* leave 14 for IDE, 9 for SND */
	}}
};
ALIAS_MV(avanti)
#endif

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_NONAME)
/* Noname (AXPpci33) / UDB: LCA core, SRM-vectored interrupts. */
struct alpha_machine_vector noname_mv __initmv = {
	.vector_name		= "Noname",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_LCA_IO,
	.machine_check		= lca_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs		= 16,
	.device_interrupt	= srm_device_interrupt,

	.init_arch		= lca_init_arch,
	.init_irq		= sio_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= noname_init_pci,
	.kill_arch		= sio_kill_arch,
	.pci_map_irq		= noname_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .sio = {
		/* For UDB, the only available PCI slot must not map to IRQ 9,
		   since that's the builtin MSS sound chip. That PCI slot
		   will map to PIRQ1 (for INTA at least), so we give it IRQ 15
		   instead.
		   Unfortunately we have to do this for NONAME as well, since
		   they are co-indicated when the platform type "Noname" is
		   selected... :-(  */
		.route_tab	= 0x0b0a0f0d,
	}}
};
ALIAS_MV(noname)
#endif

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_P2K)
/* Kenetics Platform 2000: LCA core, P2K slot routing. */
struct alpha_machine_vector p2k_mv __initmv = {
	.vector_name		= "Platform2000",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_LCA_IO,
	.machine_check		= lca_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs		= 16,
	.device_interrupt	= srm_device_interrupt,

	.init_arch		= lca_init_arch,
	.init_irq		= sio_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= noname_init_pci,
	.kill_arch		= sio_kill_arch,
	.pci_map_irq		= p2k_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .sio = {
		.route_tab	= 0x0b0a090f,
	}}
};
ALIAS_MV(p2k)
#endif

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_XL)
/* XL: APECS core with the XL-specific DMA/memory base addresses. */
struct alpha_machine_vector xl_mv __initmv = {
	.vector_name		= "XL",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_APECS_IO,
	.machine_check		= apecs_machine_check,
	.max_isa_dma_address	= ALPHA_XL_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= XL_DEFAULT_MEM_BASE,

	.nr_irqs		= 16,
	.device_interrupt	= isa_device_interrupt,

	.init_arch		= apecs_init_arch,
	.init_irq		= sio_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= noname_init_pci,
	.kill_arch		= sio_kill_arch,
	.pci_map_irq		= noname_map_irq,
	.pci_swizzle		= common_swizzle,

	.sys = { .sio = {
		.route_tab	= 0x0b0a090f,
	}}
};
ALIAS_MV(xl)
#endif
| gpl-2.0 |
codesnake/linux | arch/alpha/kernel/sys_takara.c | 9020 | 8062 | /*
* linux/arch/alpha/kernel/sys_takara.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code supporting the TAKARA.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <asm/ptrace.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
#include "pc873xx.h"
/* Note mask bit is true for DISABLED irqs.  Word 0 covers irqs below
   64, word 1 the rest (see takara_update_irq_hw); all start masked. */
static unsigned long cached_irq_mask[2] = { -1, -1 };
/*
 * Write the 16 cached mask bits covering 'irq' out to the matching
 * interrupt control port (0x510/0x514/0x518/0x51c, one per group of
 * 16 irqs starting at irq 16).
 */
static inline void
takara_update_irq_hw(unsigned long irq, unsigned long mask)
{
	int regaddr;

	/* Shift the relevant 16-bit slice of the mask word so it ends
	   up in bits 16-31 of the outgoing longword -- the hardware
	   takes the bits in the upper half of a 32-bit write. */
	mask = (irq >= 64 ? mask << 16 : mask >> ((irq - 16) & 0x30));
	regaddr = 0x510 + (((irq - 16) >> 2) & 0x0c);
	outl(mask & 0xffff0000UL, regaddr);
}
/* irq_chip unmask hook: clear the (active-high) mask bit and push. */
static inline void
takara_enable_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;
	unsigned long *word = &cached_irq_mask[irq >= 64];

	/* A cleared bit means the interrupt is enabled. */
	*word &= ~(1UL << (irq & 63));
	takara_update_irq_hw(irq, *word);
}
/* irq_chip mask hook: set the (active-high) mask bit and push. */
static void
takara_disable_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;
	unsigned long *word = &cached_irq_mask[irq >= 64];

	/* A set bit masks (disables) the interrupt. */
	*word |= 1UL << (irq & 63);
	takara_update_irq_hw(irq, *word);
}
/* irq_chip for irqs 16-127 behind the Takara interrupt logic;
   masking doubles as the ack. */
static struct irq_chip takara_irq_type = {
	.name		= "TAKARA",
	.irq_unmask	= takara_enable_irq,
	.irq_mask	= takara_disable_irq,
	.irq_mask_ack	= takara_disable_irq,
};
/*
 * device_interrupt hook used when NOT running under SRM: poll the
 * Master Interrupt Register to separate PCI from ISA interrupts.
 */
static void
takara_device_interrupt(unsigned long vector)
{
	unsigned intstatus;

	/*
	 * The PALcode will have passed us vectors 0x800 or 0x810,
	 * which are fairly arbitrary values and serve only to tell
	 * us whether an interrupt has come in on IRQ0 or IRQ1.  If
	 * it's IRQ1 it's a PCI interrupt; if it's IRQ0, it's
	 * probably ISA, but PCI interrupts can come through IRQ0
	 * as well if the interrupt controller isn't in accelerated
	 * mode.
	 *
	 * OTOH, the accelerator thing doesn't seem to be working
	 * overly well, so what we'll do instead is try directly
	 * examining the Master Interrupt Register to see if it's a
	 * PCI interrupt, and if _not_ then we'll pass it on to the
	 * ISA handler.
	 */
	intstatus = inw(0x500) & 15;
	if (intstatus) {
		/*
		 * This is a PCI interrupt.  Check each bit and
		 * despatch an interrupt if it's set.  Status bits
		 * 0-3 map to irqs 16-19.
		 */
		if (intstatus & 8) handle_irq(16+3);
		if (intstatus & 4) handle_irq(16+2);
		if (intstatus & 2) handle_irq(16+1);
		if (intstatus & 1) handle_irq(16+0);
	} else {
		isa_device_interrupt (vector);
	}
}
/* device_interrupt hook under SRM: the firmware vector encodes the
   irq directly (0x800 base, 16 per irq). */
static void
takara_srm_device_interrupt(unsigned long vector)
{
	handle_irq((vector - 0x800) >> 4);
}
/*
 * init_irq hook: set up the i8259, choose SRM or native dispatch,
 * mask all Takara irqs (16-127) and register their chip/handler,
 * then init ISA DMA.
 */
static void __init
takara_init_irq(void)
{
	long i;

	init_i8259a_irqs();

	if (alpha_using_srm) {
		alpha_mv.device_interrupt = takara_srm_device_interrupt;
	} else {
		unsigned int ctlreg = inl(0x500);

		/* Return to non-accelerated mode.  */
		ctlreg &= ~0x8000;
		outl(ctlreg, 0x500);

		/* Enable the PCI interrupt register.  */
		ctlreg = 0x05107c00;
		outl(ctlreg, 0x500);
	}

	/* Mask everything, one 16-irq group per write. */
	for (i = 16; i < 128; i += 16)
		takara_update_irq_hw(i, -1);

	for (i = 16; i < 128; ++i) {
		irq_set_chip_and_handler(i, &takara_irq_type,
					 handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);
	}

	common_init_isa_dma();
}
/*
* The Takara has PCI devices 1, 2, and 3 configured to slots 20,
* 19, and 18 respectively, in the default configuration. They can
* also be jumpered to slots 8, 7, and 6 respectively, which is fun
* because the SIO ISA bridge can also be slot 7. However, the SIO
* doesn't explicitly generate PCI-type interrupts, so we can
* assign it whatever the hell IRQ we like and it doesn't matter.
*/
/*
 * pci_map_irq table used when running under SRM.  Rows 12-15 give the
 * per-INT offsets for devices sitting behind the built-in bridges;
 * such a lookup result (< 16) is then biased by the bridge slot's own
 * first-column entry.
 */
static int __init
takara_map_irq_srm(const struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[15][5] __initdata = {
		{ 16+3, 16+3, 16+3, 16+3, 16+3},   /* slot  6 == device 3 */
		{ 16+2, 16+2, 16+2, 16+2, 16+2},   /* slot  7 == device 2 */
		{ 16+1, 16+1, 16+1, 16+1, 16+1},   /* slot  8 == device 1 */
		{   -1,   -1,   -1,   -1,   -1},   /* slot  9 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 10 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 11 == nothing */
		/* These are behind the bridges.  */
		{   12,   12,   13,   14,   15},   /* slot 12 == nothing */
		/* BUGFIX: the INTC entry here read 19; each bridge row
		   is a consecutive run, so it must be 10. */
		{    8,    8,    9,   10,   11},   /* slot 13 == nothing */
		{    4,    4,    5,    6,    7},   /* slot 14 == nothing */
		{    0,    0,    1,    2,    3},   /* slot 15 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 16 == nothing */
		{64+ 0, 64+0, 64+1, 64+2, 64+3},   /* slot 17= device 4 */
		{48+ 0, 48+0, 48+1, 48+2, 48+3},   /* slot 18= device 3 */
		{32+ 0, 32+0, 32+1, 32+2, 32+3},   /* slot 19= device 2 */
		{16+ 0, 16+0, 16+1, 16+2, 16+3},   /* slot 20= device 1 */
	};
	const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5;
	int irq = COMMON_TABLE_LOOKUP;

	if (irq >= 0 && irq < 16) {
		/* Guess that we are behind a bridge.  */
		unsigned int busslot = PCI_SLOT(dev->bus->self->devfn);

		irq += irq_tab[busslot-min_idsel][0];
	}

	return irq;
}
/*
 * pci_map_irq table used when NOT running under SRM: only the three
 * on-board device slots (18-20, or jumpered to 6-8; see the comment
 * above) get irqs 16+1 .. 16+3.
 */
static int __init
takara_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[15][5] __initdata = {
		{ 16+3, 16+3, 16+3, 16+3, 16+3},   /* slot  6 == device 3 */
		{ 16+2, 16+2, 16+2, 16+2, 16+2},   /* slot  7 == device 2 */
		{ 16+1, 16+1, 16+1, 16+1, 16+1},   /* slot  8 == device 1 */
		{   -1,   -1,   -1,   -1,   -1},   /* slot  9 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 10 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 11 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 12 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 13 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 14 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 15 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 16 == nothing */
		{   -1,   -1,   -1,   -1,   -1},   /* slot 17 == nothing */
		{ 16+3, 16+3, 16+3, 16+3, 16+3},   /* slot 18 == device 3 */
		{ 16+2, 16+2, 16+2, 16+2, 16+2},   /* slot 19 == device 2 */
		{ 16+1, 16+1, 16+1, 16+1, 16+1},   /* slot 20 == device 1 */
	};
	const long min_idsel = 6, max_idsel = 20, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
/*
 * pci_swizzle hook.  For a device behind one of the built-in bridges
 * (detected via bits of the 0x500 control register) the INTA pin is
 * rotated by the bridge's slot so the irq tables still apply; other
 * pins and card-based bridges are only warned about, not handled.
 */
static u8 __init
takara_swizzle(struct pci_dev *dev, u8 *pinp)
{
	int slot = PCI_SLOT(dev->devfn);
	int pin = *pinp;
	unsigned int ctlreg = inl(0x500);
	unsigned int busslot;

	if (!dev->bus->self)
		return slot;

	busslot = PCI_SLOT(dev->bus->self->devfn);
	/* Check for built-in bridges.  */
	if (dev->bus->number != 0
	    && busslot > 16
	    && ((1<<(36-busslot)) & ctlreg)) {
		if (pin == 1)
			pin += (20 - busslot);
		else {
			printk(KERN_WARNING "takara_swizzle: can only "
			       "handle cards with INTA IRQ pin.\n");
		}
	} else {
		/* Must be a card-based bridge.  */
		printk(KERN_WARNING "takara_swizzle: cannot handle "
		       "card-bridge behind builtin bridge yet.\n");
	}

	*pinp = pin;
	return slot;
}
/*
 * init_pci hook: pick the SRM irq map when running under SRM, run the
 * CIA PCI setup, then probe and unblock the PC873xx Super IO chip.
 */
static void __init
takara_init_pci(void)
{
	if (alpha_using_srm)
		alpha_mv.pci_map_irq = takara_map_irq_srm;

	cia_init_pci();

	if (pc873xx_probe() == -1) {
		printk(KERN_ERR "Probing for PC873xx Super IO chip failed.\n");
	} else {
		printk(KERN_INFO "Found %s Super IO chip at 0x%x\n",
		       pc873xx_get_model(), pc873xx_get_base());
		pc873xx_enable_ide();
	}
}
/*
* The System Vector
*/
/* Machine vector for the Takara: EV5 MMU, CIA core, 128 irqs. */
struct alpha_machine_vector takara_mv __initmv = {
	.vector_name		= "Takara",
	DO_EV5_MMU,
	DO_DEFAULT_RTC,
	DO_CIA_IO,
	.machine_check		= cia_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= CIA_DEFAULT_MEM_BASE,

	.nr_irqs		= 128,
	.device_interrupt	= takara_device_interrupt,

	.init_arch		= cia_init_arch,
	.init_irq		= takara_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= takara_init_pci,
	.kill_arch		= cia_kill_arch,
	.pci_map_irq		= takara_map_irq,
	.pci_swizzle		= takara_swizzle,
};
ALIAS_MV(takara)
| gpl-2.0 |
The-Sickness/S6-MM | arch/alpha/kernel/module.c | 9276 | 7757 | /* Kernel module help for Alpha.
Copyright (C) 2002 Richard Henderson.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif
/* Allocate the GOT at the end of the core sections.  */

/* One GOT slot request: a (symbol, addend) pair plus the byte offset
   of the slot assigned to it within the module's .got section. */
struct got_entry {
	struct got_entry *next;
	Elf64_Sxword r_addend;
	int got_offset;
};
/*
 * First pass over one relocation: for an R_ALPHA_LITERAL, make sure a
 * GOT slot exists for its (symbol, addend) pair, growing *poffset
 * (the running GOT size) when a new slot is created.  The assigned
 * offset is stashed in the otherwise-unused high bits of r_info for
 * the second pass in apply_relocate_add().
 */
static inline void
process_reloc_for_got(Elf64_Rela *rela,
		      struct got_entry *chains, Elf64_Xword *poffset)
{
	unsigned long r_sym = ELF64_R_SYM (rela->r_info);
	unsigned long r_type = ELF64_R_TYPE (rela->r_info);
	Elf64_Sxword r_addend = rela->r_addend;
	struct got_entry *g;

	if (r_type != R_ALPHA_LITERAL)
		return;

	/* Reuse an existing slot for this (symbol, addend) if any. */
	for (g = chains + r_sym; g ; g = g->next)
		if (g->r_addend == r_addend) {
			if (g->got_offset == 0) {
				g->got_offset = *poffset;
				*poffset += 8;
			}
			goto found_entry;
		}

	/* NOTE(review): kmalloc() result is dereferenced unchecked;
	   an allocation failure here would oops.  There is no error
	   path out of this void helper -- flagged, not fixed. */
	g = kmalloc (sizeof (*g), GFP_KERNEL);
	g->next = chains[r_sym].next;
	g->r_addend = r_addend;
	g->got_offset = *poffset;
	*poffset += 8;
	chains[r_sym].next = g;

found_entry:
	/* Trick: most of the ELF64_R_TYPE field is unused.  There are
	   42 valid relocation types, and a 32-bit field.  Co-opt the
	   bits above 256 to store the got offset for this reloc.  */
	rela->r_info |= g->got_offset << 8;
}
/*
 * Arch hook run before section layout: locate the symbol table and
 * the .got section, then size the GOT by scanning every RELA section
 * for LITERAL relocations.  Returns 0 or -ENOEXEC/-ENOMEM.
 */
int
module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs,
			  char *secstrings, struct module *me)
{
	struct got_entry *chains;
	Elf64_Rela *rela;
	Elf64_Shdr *esechdrs, *symtab, *s, *got;
	unsigned long nsyms, nrela, i;

	esechdrs = sechdrs + hdr->e_shnum;
	symtab = got = NULL;

	/* Find out how large the symbol table is.  Allocate one got_entry
	   head per symbol.  Normally this will be enough, but not always.
	   We'll chain different offsets for the symbol down each head.  */
	for (s = sechdrs; s < esechdrs; ++s)
		if (s->sh_type == SHT_SYMTAB)
			symtab = s;
		else if (!strcmp(".got", secstrings + s->sh_name)) {
			got = s;
			me->arch.gotsecindex = s - sechdrs;
		}

	if (!symtab) {
		printk(KERN_ERR "module %s: no symbol table\n", me->name);
		return -ENOEXEC;
	}
	if (!got) {
		printk(KERN_ERR "module %s: no got section\n", me->name);
		return -ENOEXEC;
	}

	nsyms = symtab->sh_size / sizeof(Elf64_Sym);
	chains = kcalloc(nsyms, sizeof(struct got_entry), GFP_KERNEL);
	if (!chains) {
		printk(KERN_ERR
		       "module %s: no memory for symbol chain buffer\n",
		       me->name);
		return -ENOMEM;
	}

	/* Start the GOT empty; process_reloc_for_got grows sh_size. */
	got->sh_size = 0;
	got->sh_addralign = 8;
	got->sh_type = SHT_NOBITS;

	/* Examine all LITERAL relocations to find out what GOT entries
	   are required.  This sizes the GOT section as well.  */
	for (s = sechdrs; s < esechdrs; ++s)
		if (s->sh_type == SHT_RELA) {
			nrela = s->sh_size / sizeof(Elf64_Rela);
			rela = (void *)hdr + s->sh_offset;
			for (i = 0; i < nrela; ++i)
				process_reloc_for_got(rela+i, chains,
						      &got->sh_size);
		}

	/* Free the memory we allocated.  */
	for (i = 0; i < nsyms; ++i) {
		struct got_entry *g, *n;
		for (g = chains[i].next; g ; g = n) {
			n = g->next;
			kfree(g);
		}
	}
	kfree(chains);

	return 0;
}
/*
 * Second pass: apply one RELA section's relocations to the module
 * image.  GOT slot offsets come from the high bits of r_info, planted
 * by process_reloc_for_got().  Returns 0 or -ENOEXEC on an unknown
 * relocation type or a range overflow.
 */
int
apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
		   unsigned int symindex, unsigned int relsec,
		   struct module *me)
{
	Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
	unsigned long i, n = sechdrs[relsec].sh_size / sizeof(*rela);
	Elf64_Sym *symtab, *sym;
	void *base, *location;
	unsigned long got, gp;

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);

	base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr;
	symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr;

	/* The small sections were sorted to the end of the segment.
	   The following should definitely cover them.  */
	gp = (u64)me->module_core + me->core_size - 0x8000;
	got = sechdrs[me->arch.gotsecindex].sh_addr;

	for (i = 0; i < n; i++) {
		unsigned long r_sym = ELF64_R_SYM (rela[i].r_info);
		unsigned long r_type = ELF64_R_TYPE (rela[i].r_info);
		unsigned long r_got_offset = r_type >> 8;
		unsigned long value, hi, lo;
		r_type &= 0xff;

		/* This is where to make the change.  */
		location = base + rela[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   unresolved symbols have been resolved.  */
		sym = symtab + r_sym;
		value = sym->st_value + rela[i].r_addend;

		switch (r_type) {
		case R_ALPHA_NONE:
			break;
		case R_ALPHA_REFQUAD:
			/* BUG() can produce misaligned relocations,
			   so store the quad as two unaligned longwords. */
			((u32 *)location)[0] = value;
			((u32 *)location)[1] = value >> 32;
			break;
		case R_ALPHA_GPREL32:
			value -= gp;
			if ((int)value != value)
				goto reloc_overflow;
			*(u32 *)location = value;
			break;
		case R_ALPHA_LITERAL:
			/* Fill the GOT slot and point the 16-bit
			   displacement at it. */
			hi = got + r_got_offset;
			lo = hi - gp;
			if ((short)lo != lo)
				goto reloc_overflow;
			*(u16 *)location = lo;
			*(u64 *)hi = value;
			break;
		case R_ALPHA_LITUSE:
			break;
		case R_ALPHA_GPDISP:
			/* Split gp - pc into the ldah/lda pair. */
			value = gp - (u64)location;
			lo = (short)value;
			hi = (int)(value - lo);
			if (hi + lo != value)
				goto reloc_overflow;
			*(u16 *)location = hi >> 16;
			*(u16 *)(location + rela[i].r_addend) = lo;
			break;
		case R_ALPHA_BRSGP:
			/* BRSGP is only allowed to bind to local symbols.
			   If the section is undef, this means that the
			   value was resolved from somewhere else.  */
			if (sym->st_shndx == SHN_UNDEF)
				goto reloc_overflow;
			if ((sym->st_other & STO_ALPHA_STD_GPLOAD) ==
			    STO_ALPHA_STD_GPLOAD)
				/* Omit the prologue. */
				value += 8;
			/* FALLTHRU */
		case R_ALPHA_BRADDR:
			value -= (u64)location + 4;
			if (value & 3)
				goto reloc_overflow;
			value = (long)value >> 2;
			if (value + (1<<21) >= 1<<22)
				goto reloc_overflow;
			value &= 0x1fffff;
			value |= *(u32 *)location & ~0x1fffff;
			*(u32 *)location = value;
			break;
		case R_ALPHA_HINT:
			break;
		case R_ALPHA_SREL32:
			value -= (u64)location;
			if ((int)value != value)
				goto reloc_overflow;
			*(u32 *)location = value;
			break;
		case R_ALPHA_SREL64:
			value -= (u64)location;
			*(u64 *)location = value;
			break;
		case R_ALPHA_GPRELHIGH:
			value = (long)(value - gp + 0x8000) >> 16;
			if ((short) value != value)
				goto reloc_overflow;
			*(u16 *)location = value;
			break;
		case R_ALPHA_GPRELLOW:
			value -= gp;
			*(u16 *)location = value;
			break;
		case R_ALPHA_GPREL16:
			value -= gp;
			if ((short) value != value)
				goto reloc_overflow;
			*(u16 *)location = value;
			break;
		default:
			printk(KERN_ERR "module %s: Unknown relocation: %lu\n",
			       me->name, r_type);
			return -ENOEXEC;
		reloc_overflow:
			if (ELF64_ST_TYPE (sym->st_info) == STT_SECTION)
				printk(KERN_ERR
				       "module %s: Relocation (type %lu) overflow vs section %d\n",
				       me->name, r_type, sym->st_shndx);
			else
				printk(KERN_ERR
				       "module %s: Relocation (type %lu) overflow vs %s\n",
				       me->name, r_type, strtab + sym->st_name);
			return -ENOEXEC;
		}
	}

	return 0;
}
| gpl-2.0 |
wolverine2k/htc7x30-3.0 | drivers/net/pch_gbe/pch_gbe_phy.c | 10300 | 9519 | /*
* Copyright (C) 1999 - 2010 Intel Corporation.
* Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
*
* This code was derived from the Intel e1000e Linux driver.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*/
#include "pch_gbe.h"
#include "pch_gbe_phy.h"
#define PHY_MAX_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
/* PHY 1000 MII Register/Bit Definitions */
/* PHY Registers defined by IEEE */
#define PHY_CONTROL 0x00 /* Control Register */
#define PHY_STATUS 0x01 /* Status Regiser */
#define PHY_ID1 0x02 /* Phy Id Register (word 1) */
#define PHY_ID2 0x03 /* Phy Id Register (word 2) */
#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */
#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */
#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Register */
#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Register */
#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Register */
#define PHY_EXT_STATUS 0x0F /* Extended Status Register */
#define PHY_PHYSP_CONTROL 0x10 /* PHY Specific Control Register */
#define PHY_EXT_PHYSP_CONTROL 0x14 /* Extended PHY Specific Control Register */
#define PHY_LED_CONTROL 0x18 /* LED Control Register */
#define PHY_EXT_PHYSP_STATUS 0x1B /* Extended PHY Specific Status Register */
/* PHY Control Register */
#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
#define MII_CR_SPEED_1000 0x0040
#define MII_CR_SPEED_100 0x2000
#define MII_CR_SPEED_10 0x0000
/* PHY Status Register */
#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
/* Phy Id Register (word 2) */
#define PHY_REVISION_MASK 0x000F
/* PHY Specific Control Register */
#define PHYSP_CTRL_ASSERT_CRS_TX 0x0800
/* Default value of PHY register */
#define PHY_CONTROL_DEFAULT 0x1140 /* Control Register */
#define PHY_AUTONEG_ADV_DEFAULT 0x01e0 /* Autoneg Advertisement */
#define PHY_NEXT_PAGE_TX_DEFAULT 0x2001 /* Next Page TX */
#define PHY_1000T_CTRL_DEFAULT 0x0300 /* 1000Base-T Control Register */
#define PHY_PHYSP_CONTROL_DEFAULT 0x01EE /* PHY Specific Control Register */
/**
* pch_gbe_phy_get_id - Retrieve the PHY ID and revision
* @hw: Pointer to the HW structure
* Returns
* 0: Successful.
* Negative value: Failed.
*/
s32 pch_gbe_phy_get_id(struct pch_gbe_hw *hw)
{
struct pch_gbe_phy_info *phy = &hw->phy;
s32 ret;
u16 phy_id1;
u16 phy_id2;
ret = pch_gbe_phy_read_reg_miic(hw, PHY_ID1, &phy_id1);
if (ret)
return ret;
ret = pch_gbe_phy_read_reg_miic(hw, PHY_ID2, &phy_id2);
if (ret)
return ret;
/*
* PHY_ID1: [bit15-0:ID(21-6)]
* PHY_ID2: [bit15-10:ID(5-0)][bit9-4:Model][bit3-0:revision]
*/
phy->id = (u32)phy_id1;
phy->id = ((phy->id << 6) | ((phy_id2 & 0xFC00) >> 10));
phy->revision = (u32) (phy_id2 & 0x000F);
pr_debug("phy->id : 0x%08x phy->revision : 0x%08x\n",
phy->id, phy->revision);
return 0;
}
/**
* pch_gbe_phy_read_reg_miic - Read MII control register
* @hw: Pointer to the HW structure
* @offset: Register offset to be read
* @data: Pointer to the read data
* Returns
* 0: Successful.
* -EINVAL: Invalid argument.
*/
s32 pch_gbe_phy_read_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 *data)
{
	struct pch_gbe_phy_info *phy = &hw->phy;

	/* PHY register offsets occupy a 5-bit address space (0-0x1F). */
	if (offset > PHY_MAX_REG_ADDRESS) {
		/* %u: offset is unsigned (u32); %d was a format mismatch */
		pr_err("PHY Address %u is out of range\n", offset);
		return -EINVAL;
	}
	/* Read is performed through the MAC's MII management interface. */
	*data = pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_READ,
				      offset, (u16)0);
	return 0;
}
/**
* pch_gbe_phy_write_reg_miic - Write MII control register
* @hw: Pointer to the HW structure
* @offset: Register offset to be read
* @data: data to write to register at offset
* Returns
* 0: Successful.
* -EINVAL: Invalid argument.
*/
s32 pch_gbe_phy_write_reg_miic(struct pch_gbe_hw *hw, u32 offset, u16 data)
{
	struct pch_gbe_phy_info *phy = &hw->phy;

	/* PHY register offsets occupy a 5-bit address space (0-0x1F). */
	if (offset > PHY_MAX_REG_ADDRESS) {
		/* %u: offset is unsigned (u32); %d was a format mismatch */
		pr_err("PHY Address %u is out of range\n", offset);
		return -EINVAL;
	}
	/* Write is performed through the MAC's MII management interface. */
	pch_gbe_mac_ctrl_miim(hw, phy->addr, PCH_GBE_HAL_MIIM_WRITE,
			      offset, data);
	return 0;
}
/**
* pch_gbe_phy_sw_reset - PHY software reset
* @hw: Pointer to the HW structure
*/
void pch_gbe_phy_sw_reset(struct pch_gbe_hw *hw)
{
	u16 ctrl_word;

	/* Read-modify-write PHY_CONTROL to assert the self-clearing
	 * reset bit. */
	pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &ctrl_word);
	ctrl_word |= MII_CR_RESET;
	pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, ctrl_word);
	/* Brief settle time after asserting reset. */
	udelay(1);
}
/**
* pch_gbe_phy_hw_reset - PHY hardware reset
* @hw: Pointer to the HW structure
*/
void pch_gbe_phy_hw_reset(struct pch_gbe_hw *hw)
{
pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, PHY_CONTROL_DEFAULT);
pch_gbe_phy_write_reg_miic(hw, PHY_AUTONEG_ADV,
PHY_AUTONEG_ADV_DEFAULT);
pch_gbe_phy_write_reg_miic(hw, PHY_NEXT_PAGE_TX,
PHY_NEXT_PAGE_TX_DEFAULT);
pch_gbe_phy_write_reg_miic(hw, PHY_1000T_CTRL, PHY_1000T_CTRL_DEFAULT);
pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL,
PHY_PHYSP_CONTROL_DEFAULT);
}
/**
* pch_gbe_phy_power_up - restore link in case the phy was powered down
* @hw: Pointer to the HW structure
*/
void pch_gbe_phy_power_up(struct pch_gbe_hw *hw)
{
	u16 ctrl = 0;

	/*
	 * Clearing the power-down bit is sufficient to wake the PHY:
	 * according to the manual the PHY retains its settings across
	 * a power-down/up cycle.
	 */
	pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &ctrl);
	ctrl &= ~MII_CR_POWER_DOWN;
	pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, ctrl);
}
/**
* pch_gbe_phy_power_down - Power down PHY
* @hw: Pointer to the HW structure
*/
void pch_gbe_phy_power_down(struct pch_gbe_hw *hw)
{
	u16 ctrl = 0;

	/*
	 * Power down the PHY so no link is implied while the interface
	 * is down.  NOTE(review): per the original comment the PHY must
	 * not be powered down while WoL is enabled or AMT is active.
	 */
	pch_gbe_phy_read_reg_miic(hw, PHY_CONTROL, &ctrl);
	ctrl |= MII_CR_POWER_DOWN;
	pch_gbe_phy_write_reg_miic(hw, PHY_CONTROL, ctrl);
	/* Give the PHY time to enter the power-down state. */
	mdelay(1);
}
/**
* pch_gbe_phy_set_rgmii - RGMII interface setting
* @hw: Pointer to the HW structure
*/
inline void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
{
	/* A software reset is all that is performed here; presumably the
	 * PHY comes back up in RGMII mode by default — confirm against
	 * the PHY datasheet. */
	pch_gbe_phy_sw_reset(hw);
}
/**
* pch_gbe_phy_init_setting - PHY initial setting
* @hw: Pointer to the HW structure
*/
void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
{
	struct pch_gbe_adapter *adapter;
	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
	int ret;
	u16 mii_reg;
	/* Recover the adapter that embeds this hw structure. */
	adapter = container_of(hw, struct pch_gbe_adapter, hw);
	/* Query current MII settings; on failure we still proceed with
	 * the cached values below (best-effort, only logged). */
	ret = mii_ethtool_gset(&adapter->mii, &cmd);
	if (ret)
		pr_err("Error: mii_ethtool_gset\n");
	/* Override the queried settings with the values cached in hw. */
	ethtool_cmd_speed_set(&cmd, hw->mac.link_speed);
	cmd.duplex = hw->mac.link_duplex;
	cmd.advertising = hw->phy.autoneg_advertised;
	cmd.autoneg = hw->mac.autoneg;
	/* Reset the PHY before applying the new settings. */
	pch_gbe_phy_write_reg_miic(hw, MII_BMCR, BMCR_RESET);
	ret = mii_ethtool_sset(&adapter->mii, &cmd);
	if (ret)
		pr_err("Error: mii_ethtool_sset\n");
	pch_gbe_phy_sw_reset(hw);
	/* Enable "assert CRS on transmit" in the PHY-specific control
	 * register (survives the sw reset since it is set afterwards). */
	pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg);
	mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX;
	pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg);
}
| gpl-2.0 |
ndmsystems/linux-2.6.22-tc | arch/powerpc/platforms/cell/spufs/spu_save.c | 61 | 4969 | /*
* spu_save.c
*
* (C) Copyright IBM Corp. 2005
*
* SPU-side context save sequence outlined in
* Synergistic Processor Element Book IV
*
* Author: Mark Nutter <mnutter@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#ifndef LS_SIZE
#define LS_SIZE 0x40000 /* 256K (in bytes) */
#endif
typedef unsigned int u32;
typedef unsigned long long u64;
#include <spu_intrinsics.h>
#include <asm/spu_csa.h>
#include "spu_utils.h"
static inline void save_event_mask(void)
{
	/* Save, Step 2:
	 * Read the SPU_RdEventMsk channel and spill it into the
	 * event_mask slot of the LSCSA.
	 */
	regs_spill[LSCSA_QW_OFFSET(event_mask)].slot[0] =
		spu_readch(SPU_RdEventStatMask);
}
static inline void save_tag_mask(void)
{
	/* Save, Step 3:
	 * Read the MFC tag-mask channel and spill it into the
	 * tag_mask slot of the LSCSA.
	 */
	regs_spill[LSCSA_QW_OFFSET(tag_mask)].slot[0] =
		spu_readch(MFC_RdTagMask);
}
static inline void save_upper_240kb(addr64 lscsa_ea)
{
	unsigned int ls = 16384;	/* LS offset 16 KB: skip the lower 16 KB */
	unsigned int list = (unsigned int)&dma_list[0];	/* LS addr of DMA list */
	unsigned int size = sizeof(dma_list);
	unsigned int tag_id = 0;
	unsigned int cmd = 0x24;	/* PUTL */
	/* Save, Step 7:
	 * Enqueue the PUTL command (tag 0) to the MFC SPU command
	 * queue to transfer the remaining 240 kb of LS to CSA.
	 * NOTE: channel writes must occur in exactly this order;
	 * writing MFC_Cmd kicks off the transfer.
	 */
	spu_writech(MFC_LSA, ls);
	spu_writech(MFC_EAH, lscsa_ea.ui[0]);
	spu_writech(MFC_EAL, list);
	spu_writech(MFC_Size, size);
	spu_writech(MFC_TagID, tag_id);
	spu_writech(MFC_Cmd, cmd);
}
static inline void save_fpcr(void)
{
	/* Save, Step 9:
	 * Read the floating-point status and control register via
	 * spu_mffpscr() and store the full quadword into the LSCSA.
	 */
	regs_spill[LSCSA_QW_OFFSET(fpcr)].v = spu_mffpscr();
}
static inline void save_decr(void)
{
	/* Save, Step 10:
	 * Read the decrementer channel (SPU_RdDec) and store its
	 * value into the decr slot of the LSCSA.
	 */
	regs_spill[LSCSA_QW_OFFSET(decr)].slot[0] = spu_readch(SPU_RdDec);
}
static inline void save_srr0(void)
{
	unsigned int offset;
	/* Save, Step 11:
	 * Read the SPU_RdSRR0 channel (saved return address) and
	 * store its value into the LSCSA.
	 */
	offset = LSCSA_QW_OFFSET(srr0);
	regs_spill[offset].slot[0] = spu_readch(SPU_RdSRR0);
}
static inline void spill_regs_to_mem(addr64 lscsa_ea)
{
	/* Fixed mis-encoded "&regs_spill" (was corrupted to an HTML
	 * entity), which did not compile. */
	unsigned int ls = (unsigned int)&regs_spill[0];
	unsigned int size = sizeof(regs_spill);
	unsigned int tag_id = 0;
	unsigned int cmd = 0x20;	/* PUT */
	/* Save, Step 13:
	 * Enqueue a PUT command (tag 0) to send the LSCSA
	 * to the CSA.  Channel writes must be in this order;
	 * writing MFC_Cmd starts the transfer.
	 */
	spu_writech(MFC_LSA, ls);
	spu_writech(MFC_EAH, lscsa_ea.ui[0]);
	spu_writech(MFC_EAL, lscsa_ea.ui[1]);
	spu_writech(MFC_Size, size);
	spu_writech(MFC_TagID, tag_id);
	spu_writech(MFC_Cmd, cmd);
}
static inline void enqueue_sync(addr64 lscsa_ea)
{
	/* NOTE(review): lscsa_ea is unused here; the parameter is kept
	 * for symmetry with the other enqueue helpers. */
	unsigned int tag_id = 0;
	unsigned int cmd = 0xCC;	/* MFC_SYNC command opcode */
	/* Save, Step 14:
	 * Enqueue an MFC_SYNC command (tag 0) so all preceding
	 * tag-0 transfers complete before the save finishes.
	 */
	spu_writech(MFC_TagID, tag_id);
	spu_writech(MFC_Cmd, cmd);
}
static inline void save_complete(void)
{
	/* Save, Step 18:
	 * Issue a stop-and-signal instruction indicating
	 * "save complete".  Note: This function will not
	 * return!!  The PPE-side code resumes control here.
	 */
	spu_stop(SPU_SAVE_COMPLETE);
}
/**
* main - entry point for SPU-side context save.
*
* This code deviates from the documented sequence as follows:
*
* 1. The EA for LSCSA is passed from PPE in the
* signal notification channels.
* 2. All 128 registers are saved by crt0.o.
*/
int main()
{
	addr64 lscsa_ea;
	/* The 64-bit effective address of the LSCSA arrives from the
	 * PPE via the two 32-bit signal-notification channels. */
	lscsa_ea.ui[0] = spu_readch(SPU_RdSigNotify1);
	lscsa_ea.ui[1] = spu_readch(SPU_RdSigNotify2);
	/* The step numbers below follow the Book IV save sequence;
	 * the order is mandatory and must not be rearranged. */
	/* Step 1: done by exit(). */
	save_event_mask();	/* Step 2.  */
	save_tag_mask();	/* Step 3.  */
	set_event_mask();	/* Step 4.  */
	set_tag_mask();		/* Step 5.  */
	build_dma_list(lscsa_ea);	/* Step 6. */
	save_upper_240kb(lscsa_ea);	/* Step 7. */
	/* Step 8: done by exit(). */
	save_fpcr();		/* Step 9.  */
	save_decr();		/* Step 10. */
	save_srr0();		/* Step 11. */
	enqueue_putllc(lscsa_ea);	/* Step 12. */
	spill_regs_to_mem(lscsa_ea);	/* Step 13. */
	enqueue_sync(lscsa_ea);	/* Step 14. */
	set_tag_update();	/* Step 15. */
	read_tag_status();	/* Step 16. */
	read_llar_status();	/* Step 17. */
	save_complete();	/* Step 18: stops the SPU; not reached. */
	return 0;
}
| gpl-2.0 |
vkomenda/linux-sunxi | sound/core/pcm_native.c | 61 | 100937 | /*
* Digital Audio (PCM) abstract layer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/pm_qos.h>
#include <linux/aio.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/timer.h>
#include <sound/minors.h>
#include <asm/io.h>
/*
* Compatibility
*/
/* Pre-2.0.x layout of struct snd_pcm_hw_params, kept so old ioctl
 * numbers (SNDRV_PCM_IOCTL_*_OLD) can still be serviced. */
struct snd_pcm_hw_params_old {
	unsigned int flags;
	/* one mask per mask-type hw parameter (access..subformat) */
	unsigned int masks[SNDRV_PCM_HW_PARAM_SUBFORMAT -
			   SNDRV_PCM_HW_PARAM_ACCESS + 1];
	/* one interval per interval-type hw parameter */
	struct snd_interval intervals[SNDRV_PCM_HW_PARAM_TICK_TIME -
					SNDRV_PCM_HW_PARAM_SAMPLE_BITS + 1];
	unsigned int rmask;		/* requested-change mask (in) */
	unsigned int cmask;		/* changed mask (out) */
	unsigned int info;		/* SNDRV_PCM_INFO_* flags */
	unsigned int msbits;		/* used most-significant bits */
	unsigned int rate_num;		/* rate numerator */
	unsigned int rate_den;		/* rate denominator */
	snd_pcm_uframes_t fifo_size;	/* chip FIFO size in frames */
	unsigned char reserved[64];	/* padding for future use */
};
#ifdef CONFIG_SND_SUPPORT_OLD_API
#define SNDRV_PCM_IOCTL_HW_REFINE_OLD _IOWR('A', 0x10, struct snd_pcm_hw_params_old)
#define SNDRV_PCM_IOCTL_HW_PARAMS_OLD _IOWR('A', 0x11, struct snd_pcm_hw_params_old)
static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params_old __user * _oparams);
static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params_old __user * _oparams);
#endif
static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
/*
*
*/
static DEFINE_RWLOCK(snd_pcm_link_rwlock);
static DECLARE_RWSEM(snd_pcm_link_rwsem);
/**
* snd_pcm_stream_lock - Lock the PCM stream
* @substream: PCM substream
*
* This locks the PCM stream's spinlock or mutex depending on the nonatomic
* flag of the given substream. This also takes the global link rw lock
* (or rw sem), too, for avoiding the race with linked streams.
*/
void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
{
	/* Atomic streams use the rwlock/spinlock pair; nonatomic ones
	 * the rwsem/mutex pair.  The link lock is always taken first. */
	if (!substream->pcm->nonatomic) {
		read_lock(&snd_pcm_link_rwlock);
		spin_lock(&substream->self_group.lock);
	} else {
		down_read(&snd_pcm_link_rwsem);
		mutex_lock(&substream->self_group.mutex);
	}
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
/**
 * snd_pcm_stream_unlock - Unlock the PCM stream
 * @substream: PCM substream
 *
 * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
 */
void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
{
	/* Release in the reverse order of snd_pcm_stream_lock():
	 * per-stream lock first, then the global link lock. */
	if (!substream->pcm->nonatomic) {
		spin_unlock(&substream->self_group.lock);
		read_unlock(&snd_pcm_link_rwlock);
	} else {
		mutex_unlock(&substream->self_group.mutex);
		up_read(&snd_pcm_link_rwsem);
	}
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
/**
* snd_pcm_stream_lock_irq - Lock the PCM stream
* @substream: PCM substream
*
* This locks the PCM stream like snd_pcm_stream_lock() and disables the local
* IRQ (only when nonatomic is false). In nonatomic case, this is identical
* as snd_pcm_stream_lock().
*/
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
{
	/* IRQs are disabled only for the atomic (spinlock) scheme,
	 * and must be disabled before the spinlock is taken. */
	if (!substream->pcm->nonatomic)
		local_irq_disable();
	snd_pcm_stream_lock(substream);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
/**
* snd_pcm_stream_unlock_irq - Unlock the PCM stream
* @substream: PCM substream
*
* This is a counter-part of snd_pcm_stream_lock_irq().
*/
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
{
	/* Drop the stream lock first, then re-enable IRQs (only the
	 * atomic scheme disabled them in the first place). */
	snd_pcm_stream_unlock(substream);
	if (!substream->pcm->nonatomic)
		local_irq_enable();
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
/* irqsave variant of snd_pcm_stream_lock(); returns the saved IRQ
 * flags to be passed back to snd_pcm_stream_unlock_irqrestore(). */
unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
{
	unsigned long flags = 0;
	/* Only the atomic scheme touches the IRQ state; flags stays 0
	 * for nonatomic streams. */
	if (!substream->pcm->nonatomic)
		local_irq_save(flags);
	snd_pcm_stream_lock(substream);
	return flags;
}
EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
/**
* snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
* @substream: PCM substream
* @flags: irq flags
*
* This is a counter-part of snd_pcm_stream_lock_irqsave().
*/
void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
				      unsigned long flags)
{
	/* Unlock first, then restore the IRQ state that was saved by
	 * _snd_pcm_stream_lock_irqsave() (atomic scheme only). */
	snd_pcm_stream_unlock(substream);
	if (!substream->pcm->nonatomic)
		local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
/* Temporarily widen the address-space limit so kernel buffers can be
 * passed to functions expecting user pointers; returns the previous
 * segment so snd_leave_user() can restore it. */
static inline mm_segment_t snd_enter_user(void)
{
	mm_segment_t fs = get_fs();
	set_fs(get_ds());
	return fs;
}
/* Restore the address-space limit saved by snd_enter_user(). */
static inline void snd_leave_user(mm_segment_t fs)
{
	set_fs(fs);
}
/* Fill @info with the identity of @substream's PCM device/stream. */
int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
{
	struct snd_pcm *pcm = substream->pcm;
	struct snd_pcm_str *pstr = substream->pstr;
	struct snd_pcm_runtime *runtime = substream->runtime;

	memset(info, 0, sizeof(*info));
	info->card = pcm->card->number;
	info->device = pcm->device;
	info->stream = substream->stream;
	info->subdevice = substream->number;
	info->dev_class = pcm->dev_class;
	info->dev_subclass = pcm->dev_subclass;
	info->subdevices_count = pstr->substream_count;
	info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
	strlcpy(info->id, pcm->id, sizeof(info->id));
	strlcpy(info->name, pcm->name, sizeof(info->name));
	strlcpy(info->subname, substream->name, sizeof(info->subname));
	/* AB: FIXME!!! This is definitely nonsense */
	if (runtime) {
		info->sync = runtime->sync;
		substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_INFO, info);
	}
	return 0;
}
/* ioctl wrapper for snd_pcm_info(): marshals the result to userspace. */
int snd_pcm_info_user(struct snd_pcm_substream *substream,
		      struct snd_pcm_info __user * _info)
{
	struct snd_pcm_info *info;
	int err;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		return -ENOMEM;
	err = snd_pcm_info(substream, info);
	if (err >= 0 && copy_to_user(_info, info, sizeof(*info)))
		err = -EFAULT;
	kfree(info);
	return err;
}
/* Return true if mmap of the PCM buffer can be offered for this
 * substream, taking architecture dma_mmap_coherent() quirks into
 * account. */
static bool hw_support_mmap(struct snd_pcm_substream *substream)
{
	if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
		return false;
	/* check architectures that return -EINVAL from dma_mmap_coherent() */
	/* FIXME: this should be some global flag */
#if defined(CONFIG_C6X) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) ||\
	defined(CONFIG_PARISC) || defined(CONFIG_XTENSA)
	/* On these archs a driver-provided mmap op is required for
	 * SNDRV_DMA_TYPE_DEV buffers. */
	if (!substream->ops->mmap &&
	    substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
		return false;
#endif
	return true;
}
/* Define RULES_DEBUG (instead of the #undef below) to trace the
 * hw_params constraint refinement in snd_pcm_hw_refine(). */
#undef RULES_DEBUG
#ifdef RULES_DEBUG
#define HW_PARAM(v) [SNDRV_PCM_HW_PARAM_##v] = #v
/* Human-readable names for each hw parameter, used only by the
 * RULES_DEBUG tracing in snd_pcm_hw_refine(). */
static const char * const snd_pcm_hw_param_names[] = {
	HW_PARAM(ACCESS),
	HW_PARAM(FORMAT),
	HW_PARAM(SUBFORMAT),
	HW_PARAM(SAMPLE_BITS),
	HW_PARAM(FRAME_BITS),
	HW_PARAM(CHANNELS),
	HW_PARAM(RATE),
	HW_PARAM(PERIOD_TIME),
	HW_PARAM(PERIOD_SIZE),
	HW_PARAM(PERIOD_BYTES),
	HW_PARAM(PERIODS),
	HW_PARAM(BUFFER_TIME),
	HW_PARAM(BUFFER_SIZE),
	HW_PARAM(BUFFER_BYTES),
	HW_PARAM(TICK_TIME),
};
#endif
/**
 * snd_pcm_hw_refine - narrow the given hw_params against all constraints
 * @substream: PCM substream
 * @params: hw_params container, refined in place
 *
 * Intersects every parameter flagged in @params->rmask with the runtime
 * constraint masks/intervals, then repeatedly re-runs the registered
 * dependency rules until a fixed point is reached.  Parameters that
 * changed are flagged in @params->cmask.  Returns 0 on success or a
 * negative error code if any parameter becomes empty.
 */
int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
		      struct snd_pcm_hw_params *params)
{
	unsigned int k;
	struct snd_pcm_hardware *hw;
	struct snd_interval *i = NULL;
	struct snd_mask *m = NULL;
	struct snd_pcm_hw_constraints *constrs = &substream->runtime->hw_constraints;
	/* Per-rule / per-variable timestamps to detect "dependency
	 * changed since this rule last ran".  NOTE(review): rstamps is
	 * a VLA on the kernel stack, sized by the rule count. */
	unsigned int rstamps[constrs->rules_num];
	unsigned int vstamps[SNDRV_PCM_HW_PARAM_LAST_INTERVAL + 1];
	unsigned int stamp = 2;
	int changed, again;
	params->info = 0;
	params->fifo_size = 0;
	if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_SAMPLE_BITS))
		params->msbits = 0;
	if (params->rmask & (1 << SNDRV_PCM_HW_PARAM_RATE)) {
		params->rate_num = 0;
		params->rate_den = 0;
	}
	/* Pass 1: intersect each requested mask parameter with its
	 * constraint mask. */
	for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
		m = hw_param_mask(params, k);
		if (snd_mask_empty(m))
			return -EINVAL;
		if (!(params->rmask & (1 << k)))
			continue;
#ifdef RULES_DEBUG
		pr_debug("%s = ", snd_pcm_hw_param_names[k]);
		pr_cont("%04x%04x%04x%04x -> ", m->bits[3], m->bits[2], m->bits[1], m->bits[0]);
#endif
		changed = snd_mask_refine(m, constrs_mask(constrs, k));
#ifdef RULES_DEBUG
		pr_cont("%04x%04x%04x%04x\n", m->bits[3], m->bits[2], m->bits[1], m->bits[0]);
#endif
		if (changed)
			params->cmask |= 1 << k;
		if (changed < 0)
			return changed;
	}
	/* Pass 2: the same intersection for interval parameters. */
	for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
		i = hw_param_interval(params, k);
		if (snd_interval_empty(i))
			return -EINVAL;
		if (!(params->rmask & (1 << k)))
			continue;
#ifdef RULES_DEBUG
		pr_debug("%s = ", snd_pcm_hw_param_names[k]);
		if (i->empty)
			pr_cont("empty");
		else
			pr_cont("%c%u %u%c",
				i->openmin ? '(' : '[', i->min,
				i->max, i->openmax ? ')' : ']');
		pr_cont(" -> ");
#endif
		changed = snd_interval_refine(i, constrs_interval(constrs, k));
#ifdef RULES_DEBUG
		if (i->empty)
			pr_cont("empty\n");
		else
			pr_cont("%c%u %u%c\n",
				i->openmin ? '(' : '[', i->min,
				i->max, i->openmax ? ')' : ']');
#endif
		if (changed)
			params->cmask |= 1 << k;
		if (changed < 0)
			return changed;
	}
	/* Pass 3: run dependency rules to a fixed point.  A rule is
	 * re-evaluated whenever one of its dependencies carries a newer
	 * stamp than the rule's own last-run stamp. */
	for (k = 0; k < constrs->rules_num; k++)
		rstamps[k] = 0;
	for (k = 0; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++)
		vstamps[k] = (params->rmask & (1 << k)) ? 1 : 0;
	do {
		again = 0;
		for (k = 0; k < constrs->rules_num; k++) {
			struct snd_pcm_hw_rule *r = &constrs->rules[k];
			unsigned int d;
			int doit = 0;
			if (r->cond && !(r->cond & params->flags))
				continue;
			for (d = 0; r->deps[d] >= 0; d++) {
				if (vstamps[r->deps[d]] > rstamps[k]) {
					doit = 1;
					break;
				}
			}
			if (!doit)
				continue;
#ifdef RULES_DEBUG
			pr_debug("Rule %d [%p]: ", k, r->func);
			if (r->var >= 0) {
				pr_cont("%s = ", snd_pcm_hw_param_names[r->var]);
				if (hw_is_mask(r->var)) {
					m = hw_param_mask(params, r->var);
					pr_cont("%x", *m->bits);
				} else {
					i = hw_param_interval(params, r->var);
					if (i->empty)
						pr_cont("empty");
					else
						pr_cont("%c%u %u%c",
							i->openmin ? '(' : '[', i->min,
							i->max, i->openmax ? ')' : ']');
				}
			}
#endif
			changed = r->func(params, r);
#ifdef RULES_DEBUG
			if (r->var >= 0) {
				pr_cont(" -> ");
				if (hw_is_mask(r->var))
					pr_cont("%x", *m->bits);
				else {
					if (i->empty)
						pr_cont("empty");
					else
						pr_cont("%c%u %u%c",
							i->openmin ? '(' : '[', i->min,
							i->max, i->openmax ? ')' : ']');
				}
			}
			pr_cont("\n");
#endif
			rstamps[k] = stamp;
			if (changed && r->var >= 0) {
				params->cmask |= (1 << r->var);
				vstamps[r->var] = stamp;
				again = 1;
			}
			if (changed < 0)
				return changed;
			stamp++;
		}
	} while (again);
	/* Derive the scalar outputs (msbits, rate fraction, info flags,
	 * fifo size) once the parameters have settled. */
	if (!params->msbits) {
		i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS);
		if (snd_interval_single(i))
			params->msbits = snd_interval_value(i);
	}
	if (!params->rate_den) {
		i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
		if (snd_interval_single(i)) {
			params->rate_num = snd_interval_value(i);
			params->rate_den = 1;
		}
	}
	hw = &substream->runtime->hw;
	if (!params->info) {
		params->info = hw->info & ~SNDRV_PCM_INFO_FIFO_IN_FRAMES;
		if (!hw_support_mmap(substream))
			params->info &= ~(SNDRV_PCM_INFO_MMAP |
					  SNDRV_PCM_INFO_MMAP_VALID);
	}
	if (!params->fifo_size) {
		m = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
		i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
		/* FIFO size can only be queried once format and channel
		 * count are both pinned to a single value. */
		if (snd_mask_min(m) == snd_mask_max(m) &&
		    snd_interval_min(i) == snd_interval_max(i)) {
			changed = substream->ops->ioctl(substream,
					SNDRV_PCM_IOCTL1_FIFO_SIZE, params);
			if (changed < 0)
				return changed;
		}
	}
	params->rmask = 0;
	return 0;
}
EXPORT_SYMBOL(snd_pcm_hw_refine);
/* ioctl wrapper for snd_pcm_hw_refine(): copies params in and out. */
static int snd_pcm_hw_refine_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params __user * _params)
{
	struct snd_pcm_hw_params *params;
	int err;

	params = memdup_user(_params, sizeof(*params));
	if (IS_ERR(params))
		return PTR_ERR(params);
	err = snd_pcm_hw_refine(substream, params);
	/* A failed copy-out only overrides a success return. */
	if (copy_to_user(_params, params, sizeof(*params)) && !err)
		err = -EFAULT;
	kfree(params);
	return err;
}
/* Return 75% of one period expressed in microseconds (the PM-QoS
 * deadline), or -1 when the rate is not yet known. */
static int period_to_usecs(struct snd_pcm_runtime *runtime)
{
	unsigned int quot, rem;

	if (runtime->rate == 0)
		return -1; /* invalid */

	/* period_size * 750000 / rate, split into quotient and
	 * remainder parts to limit intermediate overflow. */
	quot = 750000 / runtime->rate;
	rem = 750000 % runtime->rate;
	return quot * runtime->period_size +
	       (rem * runtime->period_size) / runtime->rate;
}
static void snd_pcm_set_state(struct snd_pcm_substream *substream, int state)
{
snd_pcm_stream_lock_irq(substream);
if (substream->runtime->status->state != SNDRV_PCM_STATE_DISCONNECTED)
substream->runtime->status->state = state;
snd_pcm_stream_unlock_irq(substream);
}
/* Apply hardware parameters to the substream: refine/choose the final
 * values, hand them to the driver's hw_params op, cache them into the
 * runtime, and install default sw params.  Moves the stream to SETUP
 * on success, back to OPEN on failure. */
static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime;
	int err, usecs;
	unsigned int bits;
	snd_pcm_uframes_t frames;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	/* Only legal from OPEN/SETUP/PREPARED. */
	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_SETUP:
	case SNDRV_PCM_STATE_PREPARED:
		break;
	default:
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);
	/* Refuse while the buffer is mmapped (except via the OSS layer). */
#if IS_ENABLED(CONFIG_SND_PCM_OSS)
	if (!substream->oss.oss)
#endif
		if (atomic_read(&substream->mmap_count))
			return -EBADFD;
	/* Refine all parameters, then collapse them to single values. */
	params->rmask = ~0U;
	err = snd_pcm_hw_refine(substream, params);
	if (err < 0)
		goto _error;
	err = snd_pcm_hw_params_choose(substream, params);
	if (err < 0)
		goto _error;
	if (substream->ops->hw_params != NULL) {
		err = substream->ops->hw_params(substream, params);
		if (err < 0)
			goto _error;
	}
	/* Cache the chosen configuration into the runtime. */
	runtime->access = params_access(params);
	runtime->format = params_format(params);
	runtime->subformat = params_subformat(params);
	runtime->channels = params_channels(params);
	runtime->rate = params_rate(params);
	runtime->period_size = params_period_size(params);
	runtime->periods = params_periods(params);
	runtime->buffer_size = params_buffer_size(params);
	runtime->info = params->info;
	runtime->rate_num = params->rate_num;
	runtime->rate_den = params->rate_den;
	runtime->no_period_wakeup =
			(params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
			(params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
	bits = snd_pcm_format_physical_width(runtime->format);
	runtime->sample_bits = bits;
	bits *= runtime->channels;
	runtime->frame_bits = bits;
	/* Find the smallest frame multiple that is byte-aligned. */
	frames = 1;
	while (bits % 8 != 0) {
		bits *= 2;
		frames *= 2;
	}
	runtime->byte_align = bits / 8;
	runtime->min_align = frames;
	/* Default sw params */
	runtime->tstamp_mode = SNDRV_PCM_TSTAMP_NONE;
	runtime->period_step = 1;
	runtime->control->avail_min = runtime->period_size;
	runtime->start_threshold = 1;
	runtime->stop_threshold = runtime->buffer_size;
	runtime->silence_threshold = 0;
	runtime->silence_size = 0;
	/* Largest buffer-size multiple that still fits in a long. */
	runtime->boundary = runtime->buffer_size;
	while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size)
		runtime->boundary *= 2;
	snd_pcm_timer_resolution_change(substream);
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
	/* Re-register the CPU latency request for the new period time. */
	if (pm_qos_request_active(&substream->latency_pm_qos_req))
		pm_qos_remove_request(&substream->latency_pm_qos_req);
	if ((usecs = period_to_usecs(runtime)) >= 0)
		pm_qos_add_request(&substream->latency_pm_qos_req,
				   PM_QOS_CPU_DMA_LATENCY, usecs);
	return 0;
 _error:
	/* hardware might be unusable from this time,
	   so we force application to retry to set
	   the correct hardware parameter settings */
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
	if (substream->ops->hw_free != NULL)
		substream->ops->hw_free(substream);
	return err;
}
/* ioctl wrapper for snd_pcm_hw_params(): copies params in and out. */
static int snd_pcm_hw_params_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params __user * _params)
{
	struct snd_pcm_hw_params *params;
	int err;

	params = memdup_user(_params, sizeof(*params));
	if (IS_ERR(params))
		return PTR_ERR(params);
	err = snd_pcm_hw_params(substream, params);
	/* A failed copy-out only overrides a success return. */
	if (copy_to_user(_params, params, sizeof(*params)) && !err)
		err = -EFAULT;
	kfree(params);
	return err;
}
static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime;
int result = 0;
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_SETUP:
case SNDRV_PCM_STATE_PREPARED:
break;
default:
snd_pcm_stream_unlock_irq(substream);
return -EBADFD;
}
snd_pcm_stream_unlock_irq(substream);
if (atomic_read(&substream->mmap_count))
return -EBADFD;
if (substream->ops->hw_free)
result = substream->ops->hw_free(substream);
snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
pm_qos_remove_request(&substream->latency_pm_qos_req);
return result;
}
/* Validate and install software parameters (timestamps, thresholds,
 * silence handling) into the runtime.  Validation happens outside the
 * lock; installation under it. */
static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_sw_params *params)
{
	struct snd_pcm_runtime *runtime;
	int err;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	/* sw params require hw params to have been set first. */
	snd_pcm_stream_lock_irq(substream);
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN) {
		snd_pcm_stream_unlock_irq(substream);
		return -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);
	if (params->tstamp_mode > SNDRV_PCM_TSTAMP_LAST)
		return -EINVAL;
	/* tstamp_type exists only from protocol 2.0.12 onwards. */
	if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12) &&
	    params->tstamp_type > SNDRV_PCM_TSTAMP_TYPE_LAST)
		return -EINVAL;
	if (params->avail_min == 0)
		return -EINVAL;
	/* silence_size >= boundary means "fill the whole buffer", which
	 * is incompatible with a nonzero threshold. */
	if (params->silence_size >= runtime->boundary) {
		if (params->silence_threshold != 0)
			return -EINVAL;
	} else {
		if (params->silence_size > params->silence_threshold)
			return -EINVAL;
		if (params->silence_threshold > runtime->buffer_size)
			return -EINVAL;
	}
	err = 0;
	snd_pcm_stream_lock_irq(substream);
	runtime->tstamp_mode = params->tstamp_mode;
	if (params->proto >= SNDRV_PROTOCOL_VERSION(2, 0, 12))
		runtime->tstamp_type = params->tstamp_type;
	runtime->period_step = params->period_step;
	runtime->control->avail_min = params->avail_min;
	runtime->start_threshold = params->start_threshold;
	runtime->stop_threshold = params->stop_threshold;
	runtime->silence_threshold = params->silence_threshold;
	runtime->silence_size = params->silence_size;
	/* Report the boundary back to the caller. */
	params->boundary = runtime->boundary;
	if (snd_pcm_running(substream)) {
		/* Apply the new silence policy immediately and re-check
		 * the stream state against the new thresholds. */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
		    runtime->silence_size > 0)
			snd_pcm_playback_silence(substream, ULONG_MAX);
		err = snd_pcm_update_state(substream, runtime);
	}
	snd_pcm_stream_unlock_irq(substream);
	return err;
}
/* ioctl wrapper for snd_pcm_sw_params(): copies params in and out.
 * Fixed mis-encoded "&params" (was corrupted to an HTML entity),
 * which did not compile. */
static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
				  struct snd_pcm_sw_params __user * _params)
{
	struct snd_pcm_sw_params params;
	int err;

	if (copy_from_user(&params, _params, sizeof(params)))
		return -EFAULT;
	err = snd_pcm_sw_params(substream, &params);
	/* Copy back even on error: params->boundary may have been set. */
	if (copy_to_user(_params, &params, sizeof(params)))
		return -EFAULT;
	return err;
}
/* Fill @status with a consistent snapshot of the stream state,
 * pointers, timestamps and avail/delay figures, all under the stream
 * lock.  Always returns 0. */
int snd_pcm_status(struct snd_pcm_substream *substream,
		   struct snd_pcm_status *status)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_stream_lock_irq(substream);
	status->state = runtime->status->state;
	status->suspended_state = runtime->status->suspended_state;
	/* No meaningful pointers/timestamps before hw params are set. */
	if (status->state == SNDRV_PCM_STATE_OPEN)
		goto _end;
	status->trigger_tstamp = runtime->trigger_tstamp;
	if (snd_pcm_running(substream)) {
		snd_pcm_update_hw_ptr(substream);
		if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) {
			/* Use the driver-maintained timestamps. */
			status->tstamp = runtime->status->tstamp;
			status->audio_tstamp =
				runtime->status->audio_tstamp;
			goto _tstamp_end;
		}
	}
	/* Otherwise fall back to the current system time. */
	snd_pcm_gettime(runtime, &status->tstamp);
 _tstamp_end:
	status->appl_ptr = runtime->control->appl_ptr;
	status->hw_ptr = runtime->status->hw_ptr;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		status->avail = snd_pcm_playback_avail(runtime);
		/* Playback delay = queued frames plus extra device delay. */
		if (runtime->status->state == SNDRV_PCM_STATE_RUNNING ||
		    runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
			status->delay = runtime->buffer_size - status->avail;
			status->delay += runtime->delay;
		} else
			status->delay = 0;
	} else {
		status->avail = snd_pcm_capture_avail(runtime);
		/* Capture delay = captured-but-unread frames plus device
		 * delay. */
		if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
			status->delay = status->avail + runtime->delay;
		else
			status->delay = 0;
	}
	status->avail_max = runtime->avail_max;
	status->overrange = runtime->overrange;
	/* Reading resets the high-water/overrange accumulators. */
	runtime->avail_max = 0;
	runtime->overrange = 0;
 _end:
 	snd_pcm_stream_unlock_irq(substream);
	return 0;
}
/*
 * ioctl wrapper for SNDRV_PCM_IOCTL_STATUS: fill a zeroed status record
 * and copy it out to user space.
 */
static int snd_pcm_status_user(struct snd_pcm_substream *substream,
			       struct snd_pcm_status __user * _status)
{
	struct snd_pcm_status stat;
	int err;

	memset(&stat, 0, sizeof(stat));
	err = snd_pcm_status(substream, &stat);
	if (err < 0)
		return err;
	return copy_to_user(_status, &stat, sizeof(stat)) ? -EFAULT : 0;
}
/*
 * Query the per-channel buffer layout for the channel given in
 * info->channel.  Delegates to the driver's CHANNEL_INFO ioctl after
 * validating that the stream is configured and the channel is in range.
 */
static int snd_pcm_channel_info(struct snd_pcm_substream *substream,
				struct snd_pcm_channel_info * info)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int chan = info->channel;
	int is_open;

	/* channels are only known after hw_params, i.e. not in OPEN state */
	snd_pcm_stream_lock_irq(substream);
	is_open = (runtime->status->state == SNDRV_PCM_STATE_OPEN);
	snd_pcm_stream_unlock_irq(substream);
	if (is_open)
		return -EBADFD;
	if (chan >= runtime->channels)
		return -EINVAL;
	memset(info, 0, sizeof(*info));
	info->channel = chan;
	return substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_CHANNEL_INFO, info);
}
/*
 * ioctl wrapper for SNDRV_PCM_IOCTL_CHANNEL_INFO: copy the request in,
 * resolve the channel info, and copy the result back out.
 */
static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
				     struct snd_pcm_channel_info __user * _info)
{
	struct snd_pcm_channel_info info;
	int err;

	if (copy_from_user(&info, _info, sizeof(info)))
		return -EFAULT;
	err = snd_pcm_channel_info(substream, &info);
	if (err < 0)
		return err;
	return copy_to_user(_info, &info, sizeof(info)) ? -EFAULT : 0;
}
/*
 * Record the trigger timestamp for this substream.  The stream that drove
 * the trigger (trigger_master) takes the current time; every other linked
 * stream copies the master's timestamp (resolving it recursively first).
 * trigger_master is cleared afterwards so the timestamp is taken only
 * once per trigger.
 */
static void snd_pcm_trigger_tstamp(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime->trigger_master == NULL)
		return; /* already resolved */
	if (runtime->trigger_master == substream) {
		snd_pcm_gettime(runtime, &runtime->trigger_tstamp);
	} else {
		/* resolve the master first, then inherit its timestamp */
		snd_pcm_trigger_tstamp(runtime->trigger_master);
		runtime->trigger_tstamp = runtime->trigger_master->runtime->trigger_tstamp;
	}
	runtime->trigger_master = NULL;
}
/*
 * Callback set describing one PCM state-change "action" (start, stop,
 * pause, ...).  pre_action validates and prepares; do_action performs the
 * hardware trigger; undo_action (optional) reverts a successful do_action
 * when a later linked stream fails; post_action commits the new state.
 */
struct action_ops {
	int (*pre_action)(struct snd_pcm_substream *substream, int state);
	int (*do_action)(struct snd_pcm_substream *substream, int state);
	void (*undo_action)(struct snd_pcm_substream *substream, int state);
	void (*post_action)(struct snd_pcm_substream *substream, int state);
};
/*
 * Core handler for an action over all streams linked in a group.
 * Runs pre_action on every stream, then do_action on every stream (undoing
 * already-done streams on failure), then post_action on every stream.
 * Note: the stream state might be changed also on failure.
 * Note2: call with calling stream lock + link lock held.
 * With do_lock != 0, each sibling stream's own lock is additionally taken
 * (nested, with increasing lockdep depth) for the duration of the action.
 */
static int snd_pcm_action_group(struct action_ops *ops,
				struct snd_pcm_substream *substream,
				int state, int do_lock)
{
	struct snd_pcm_substream *s = NULL;
	struct snd_pcm_substream *s1;
	int res = 0, depth = 1;

	snd_pcm_group_for_each_entry(s, substream) {
		if (do_lock && s != substream) {
			/* nonatomic streams use a mutex, atomic a spinlock */
			if (s->pcm->nonatomic)
				mutex_lock_nested(&s->self_group.mutex, depth);
			else
				spin_lock_nested(&s->self_group.lock, depth);
			depth++;
		}
		res = ops->pre_action(s, state);
		if (res < 0)
			goto _unlock; /* s marks the last locked stream */
	}
	snd_pcm_group_for_each_entry(s, substream) {
		res = ops->do_action(s, state);
		if (res < 0) {
			if (ops->undo_action) {
				/* revert the streams triggered before s */
				snd_pcm_group_for_each_entry(s1, substream) {
					if (s1 == s) /* failed stream */
						break;
					ops->undo_action(s1, state);
				}
			}
			s = NULL; /* unlock all */
			goto _unlock;
		}
	}
	snd_pcm_group_for_each_entry(s, substream) {
		ops->post_action(s, state);
	}
 _unlock:
	if (do_lock) {
		/* unlock streams; s (if non-NULL) is the last one locked */
		snd_pcm_group_for_each_entry(s1, substream) {
			if (s1 != substream) {
				if (s1->pcm->nonatomic)
					mutex_unlock(&s1->self_group.mutex);
				else
					spin_unlock(&s1->self_group.lock);
			}
			if (s1 == s)	/* end */
				break;
		}
	}
	return res;
}
/*
 * Run one action (pre/do/post, with undo on failure) on a single,
 * unlinked substream.
 * Note: call with stream lock held.
 */
static int snd_pcm_action_single(struct action_ops *ops,
				 struct snd_pcm_substream *substream,
				 int state)
{
	int err = ops->pre_action(substream, state);

	if (err < 0)
		return err;
	err = ops->do_action(substream, state);
	if (err == 0) {
		ops->post_action(substream, state);
	} else if (ops->undo_action) {
		ops->undo_action(substream, state);
	}
	return err;
}
/*
 * Dispatch an action on the substream: single path when unlinked, group
 * path when linked.
 * Note: call with stream lock held.
 * For the linked case the group lock must be taken while the self lock is
 * already held; to keep a consistent lock order, on trylock failure the
 * self lock is dropped and both are reacquired group-first.
 */
static int snd_pcm_action(struct action_ops *ops,
			  struct snd_pcm_substream *substream,
			  int state)
{
	int res;

	if (!snd_pcm_stream_linked(substream))
		return snd_pcm_action_single(ops, substream, state);
	if (substream->pcm->nonatomic) {
		if (!mutex_trylock(&substream->group->mutex)) {
			/* reorder: group mutex first, then self mutex */
			mutex_unlock(&substream->self_group.mutex);
			mutex_lock(&substream->group->mutex);
			mutex_lock(&substream->self_group.mutex);
		}
		res = snd_pcm_action_group(ops, substream, state, 1);
		mutex_unlock(&substream->group->mutex);
	} else {
		if (!spin_trylock(&substream->group->lock)) {
			/* reorder: group lock first, then self lock */
			spin_unlock(&substream->self_group.lock);
			spin_lock(&substream->group->lock);
			spin_lock(&substream->self_group.lock);
		}
		res = snd_pcm_action_group(ops, substream, state, 1);
		spin_unlock(&substream->group->lock);
	}
	return res;
}
/*
 * Run an action with the stream lock taken internally.
 * Note: don't use any locks before calling this.
 */
static int snd_pcm_action_lock_irq(struct action_ops *ops,
				   struct snd_pcm_substream *substream,
				   int state)
{
	int err;

	snd_pcm_stream_lock_irq(substream);
	err = snd_pcm_action(ops, substream, state);
	snd_pcm_stream_unlock_irq(substream);
	return err;
}
/*
 * Run an action in nonatomic (sleepable) context: only the link rwsem is
 * taken, no stream spinlocks, so the callbacks may sleep.
 */
static int snd_pcm_action_nonatomic(struct action_ops *ops,
				    struct snd_pcm_substream *substream,
				    int state)
{
	int err;

	down_read(&snd_pcm_link_rwsem);
	if (!snd_pcm_stream_linked(substream))
		err = snd_pcm_action_single(ops, substream, state);
	else
		err = snd_pcm_action_group(ops, substream, state, 0);
	up_read(&snd_pcm_link_rwsem);
	return err;
}
/*
 * start callbacks
 */
/* validate that the stream may be started: it must be PREPARED, and a
 * playback stream must already have queued data */
static int snd_pcm_pre_start(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime->status->state != SNDRV_PCM_STATE_PREPARED)
		return -EBADFD;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    !snd_pcm_playback_data(substream))
		return -EPIPE;
	runtime->trigger_master = substream;
	return 0;
}

/* issue the START trigger, but only on the trigger-master stream */
static int snd_pcm_do_start(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master != substream)
		return 0;
	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_START);
}

/* revert a successful START with a STOP trigger */
static void snd_pcm_undo_start(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master == substream)
		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
}

/* commit the new state, prefill silence for playback, notify the timer */
static void snd_pcm_post_start(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	snd_pcm_trigger_tstamp(substream);
	runtime->hw_ptr_jiffies = jiffies;
	/* jiffies it takes to play one full buffer; used for hw_ptr checks */
	runtime->hw_ptr_buffer_jiffies = (runtime->buffer_size * HZ) /
							    runtime->rate;
	runtime->status->state = state;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, ULONG_MAX);
	if (substream->timer)
		snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSTART,
				 &runtime->trigger_tstamp);
}

static struct action_ops snd_pcm_action_start = {
	.pre_action = snd_pcm_pre_start,
	.do_action = snd_pcm_do_start,
	.undo_action = snd_pcm_undo_start,
	.post_action = snd_pcm_post_start
};

/**
 * snd_pcm_start - start all linked streams
 * @substream: the PCM substream instance
 *
 * Return: Zero if successful, or a negative error code.
 */
int snd_pcm_start(struct snd_pcm_substream *substream)
{
	return snd_pcm_action(&snd_pcm_action_start, substream,
			      SNDRV_PCM_STATE_RUNNING);
}
/*
 * stop callbacks
 */
/* a stream in any state except OPEN may be stopped */
static int snd_pcm_pre_stop(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	runtime->trigger_master = substream;
	return 0;
}

/* issue the STOP trigger on the master if it is actually running */
static int snd_pcm_do_stop(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master == substream &&
	    snd_pcm_running(substream))
		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP);
	return 0; /* unconditionally stop all substreams */
}

/* commit the new state (if it changed), notify the timer, and wake any
 * sleepers waiting for buffer space/data */
static void snd_pcm_post_stop(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime->status->state != state) {
		snd_pcm_trigger_tstamp(substream);
		runtime->status->state = state;
		if (substream->timer)
			snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSTOP,
					 &runtime->trigger_tstamp);
	}
	wake_up(&runtime->sleep);
	wake_up(&runtime->tsleep);
}

static struct action_ops snd_pcm_action_stop = {
	.pre_action = snd_pcm_pre_stop,
	.do_action = snd_pcm_do_stop,
	.post_action = snd_pcm_post_stop
};

/**
 * snd_pcm_stop - try to stop all running streams in the substream group
 * @substream: the PCM substream instance
 * @state: PCM state after stopping the stream
 *
 * The state of each stream is then changed to the given state unconditionally.
 *
 * Return: Zero if successful, or a negative error code.
 */
int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t state)
{
	return snd_pcm_action(&snd_pcm_action_stop, substream, state);
}
EXPORT_SYMBOL(snd_pcm_stop);
/**
 * snd_pcm_drain_done - stop the DMA only when the given stream is playback
 * @substream: the PCM substream
 *
 * After stopping, the state is changed to SETUP.
 * Unlike snd_pcm_stop(), this affects only the given stream.
 *
 * Return: Zero if successful, or a negative error code.
 */
int snd_pcm_drain_done(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_single(&snd_pcm_action_stop, substream,
				     SNDRV_PCM_STATE_SETUP);
}
/**
 * snd_pcm_stop_xrun - stop the running streams as XRUN
 * @substream: the PCM substream instance
 *
 * Stops the given substream (and all streams linked with it) in the XRUN
 * state.  In contrast to snd_pcm_stop(), the substream lock is taken and
 * released internally here, so no lock may be held by the caller.
 *
 * Return: Zero if successful, or a negative error code.
 */
int snd_pcm_stop_xrun(struct snd_pcm_substream *substream)
{
	unsigned long flags;
	int err = 0;

	snd_pcm_stream_lock_irqsave(substream, flags);
	if (snd_pcm_running(substream))
		err = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	snd_pcm_stream_unlock_irqrestore(substream, flags);
	return err;
}
EXPORT_SYMBOL_GPL(snd_pcm_stop_xrun);
/*
 * pause callbacks: the @push argument selects the direction -
 * non-zero = pause (push), zero = resume from pause (release).
 */
static int snd_pcm_pre_pause(struct snd_pcm_substream *substream, int push)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	/* only hardware advertising PAUSE support can be paused */
	if (!(runtime->info & SNDRV_PCM_INFO_PAUSE))
		return -ENOSYS;
	if (push) {
		if (runtime->status->state != SNDRV_PCM_STATE_RUNNING)
			return -EBADFD;
	} else if (runtime->status->state != SNDRV_PCM_STATE_PAUSED)
		return -EBADFD;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push)
{
	if (substream->runtime->trigger_master != substream)
		return 0;
	/* some drivers might use hw_ptr to recover from the pause -
	   update the hw_ptr now */
	if (push)
		snd_pcm_update_hw_ptr(substream);
	/* The jiffies check in snd_pcm_update_hw_ptr*() is done by
	 * a delta between the current jiffies, this gives a large enough
	 * delta, effectively to skip the check once.
	 */
	substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
	return substream->ops->trigger(substream,
				       push ? SNDRV_PCM_TRIGGER_PAUSE_PUSH :
					      SNDRV_PCM_TRIGGER_PAUSE_RELEASE);
}

/* revert by triggering the opposite pause direction */
static void snd_pcm_undo_pause(struct snd_pcm_substream *substream, int push)
{
	if (substream->runtime->trigger_master == substream)
		substream->ops->trigger(substream,
					push ? SNDRV_PCM_TRIGGER_PAUSE_RELEASE :
					       SNDRV_PCM_TRIGGER_PAUSE_PUSH);
}

/* commit PAUSED or RUNNING state, notify the timer; on pause also wake
 * sleepers so blocked read/write calls can bail out */
static void snd_pcm_post_pause(struct snd_pcm_substream *substream, int push)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	snd_pcm_trigger_tstamp(substream);
	if (push) {
		runtime->status->state = SNDRV_PCM_STATE_PAUSED;
		if (substream->timer)
			snd_timer_notify(substream->timer,
					 SNDRV_TIMER_EVENT_MPAUSE,
					 &runtime->trigger_tstamp);
		wake_up(&runtime->sleep);
		wake_up(&runtime->tsleep);
	} else {
		runtime->status->state = SNDRV_PCM_STATE_RUNNING;
		if (substream->timer)
			snd_timer_notify(substream->timer,
					 SNDRV_TIMER_EVENT_MCONTINUE,
					 &runtime->trigger_tstamp);
	}
}

static struct action_ops snd_pcm_action_pause = {
	.pre_action = snd_pcm_pre_pause,
	.do_action = snd_pcm_do_pause,
	.undo_action = snd_pcm_undo_pause,
	.post_action = snd_pcm_post_pause
};

/*
 * Push/release the pause for all linked streams.
 */
static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
{
	return snd_pcm_action(&snd_pcm_action_pause, substream, push);
}
#ifdef CONFIG_PM
/* suspend */
/* a stream already suspended cannot be suspended again */
static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
		return -EBUSY;
	runtime->trigger_master = substream;
	return 0;
}

/* issue the SUSPEND trigger on the master if it is currently running */
static int snd_pcm_do_suspend(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime->trigger_master != substream)
		return 0;
	if (! snd_pcm_running(substream))
		return 0;
	substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
	return 0; /* suspend unconditionally */
}

/* remember the pre-suspend state (for resume), switch to SUSPENDED,
 * notify the timer, and wake any sleepers */
static void snd_pcm_post_suspend(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	snd_pcm_trigger_tstamp(substream);
	runtime->status->suspended_state = runtime->status->state;
	runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
	if (substream->timer)
		snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSUSPEND,
				 &runtime->trigger_tstamp);
	wake_up(&runtime->sleep);
	wake_up(&runtime->tsleep);
}

static struct action_ops snd_pcm_action_suspend = {
	.pre_action = snd_pcm_pre_suspend,
	.do_action = snd_pcm_do_suspend,
	.post_action = snd_pcm_post_suspend
};

/**
 * snd_pcm_suspend - trigger SUSPEND to all linked streams
 * @substream: the PCM substream
 *
 * After this call, all streams are changed to SUSPENDED state.
 *
 * Return: Zero if successful (or @substream is %NULL), or a negative error
 * code.
 */
int snd_pcm_suspend(struct snd_pcm_substream *substream)
{
	int err;
	unsigned long flags;

	if (! substream)
		return 0;
	snd_pcm_stream_lock_irqsave(substream, flags);
	err = snd_pcm_action(&snd_pcm_action_suspend, substream, 0);
	snd_pcm_stream_unlock_irqrestore(substream, flags);
	return err;
}
EXPORT_SYMBOL(snd_pcm_suspend);
/**
 * snd_pcm_suspend_all - trigger SUSPEND to all substreams in the given pcm
 * @pcm: the PCM instance
 *
 * After this call, all streams are changed to SUSPENDED state.
 *
 * Return: Zero if successful (or @pcm is %NULL), or a negative error code.
 */
int snd_pcm_suspend_all(struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	int stream, err = 0;

	if (! pcm)
		return 0;
	/* iterate both directions (0 = playback, 1 = capture) */
	for (stream = 0; stream < 2; stream++) {
		for (substream = pcm->streams[stream].substream;
		     substream; substream = substream->next) {
			/* FIXME: the open/close code should lock this as well */
			if (substream->runtime == NULL)
				continue;
			err = snd_pcm_suspend(substream);
			/* -EBUSY just means it was already suspended */
			if (err < 0 && err != -EBUSY)
				return err;
		}
	}
	return 0;
}
EXPORT_SYMBOL(snd_pcm_suspend_all);
/* resume */
/* only hardware advertising RESUME support can do a full resume */
static int snd_pcm_pre_resume(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (!(runtime->info & SNDRV_PCM_INFO_RESUME))
		return -ENOSYS;
	runtime->trigger_master = substream;
	return 0;
}

static int snd_pcm_do_resume(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime->trigger_master != substream)
		return 0;
	/* DMA not running previously?  Only restart the trigger when the
	 * stream was RUNNING, or a playback stream was DRAINING. */
	if (runtime->status->suspended_state != SNDRV_PCM_STATE_RUNNING &&
	    (runtime->status->suspended_state != SNDRV_PCM_STATE_DRAINING ||
	     substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
		return 0;
	return substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_RESUME);
}

/* revert a successful resume by suspending again */
static void snd_pcm_undo_resume(struct snd_pcm_substream *substream, int state)
{
	if (substream->runtime->trigger_master == substream &&
	    snd_pcm_running(substream))
		substream->ops->trigger(substream, SNDRV_PCM_TRIGGER_SUSPEND);
}

/* restore the state saved at suspend time and notify the timer */
static void snd_pcm_post_resume(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	snd_pcm_trigger_tstamp(substream);
	runtime->status->state = runtime->status->suspended_state;
	if (substream->timer)
		snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MRESUME,
				 &runtime->trigger_tstamp);
}

static struct action_ops snd_pcm_action_resume = {
	.pre_action = snd_pcm_pre_resume,
	.do_action = snd_pcm_do_resume,
	.undo_action = snd_pcm_undo_resume,
	.post_action = snd_pcm_post_resume
};
/*
 * Resume the substream (and its linked group) after system suspend.
 * Waits for the card to reach full power before dispatching the resume
 * action under the stream lock.
 */
static int snd_pcm_resume(struct snd_pcm_substream *substream)
{
	struct snd_card *card = substream->pcm->card;
	int err;

	snd_power_lock(card);
	err = snd_power_wait(card, SNDRV_CTL_POWER_D0);
	if (err >= 0)
		err = snd_pcm_action_lock_irq(&snd_pcm_action_resume,
					      substream, 0);
	snd_power_unlock(card);
	return err;
}
#else
/* without CONFIG_PM there is nothing to resume from */
static int snd_pcm_resume(struct snd_pcm_substream *substream)
{
	return -ENOSYS;
}
#endif /* CONFIG_PM */
/*
 * xrun ioctl
 *
 * Change the RUNNING stream(s) to XRUN state.
 */
static int snd_pcm_xrun(struct snd_pcm_substream *substream)
{
	struct snd_card *card = substream->pcm->card;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int result;

	snd_power_lock(card);
	/* a suspended stream must first wait for the card to power up */
	if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
		result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
		if (result < 0)
			goto _unlock;
	}

	snd_pcm_stream_lock_irq(substream);
	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_XRUN:
		result = 0;	/* already there */
		break;
	case SNDRV_PCM_STATE_RUNNING:
		result = snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
		break;
	default:
		result = -EBADFD;
	}
	snd_pcm_stream_unlock_irq(substream);
 _unlock:
	snd_power_unlock(card);
	return result;
}
/*
 * reset ioctl
 */
/* reset is only valid for a configured, non-open stream */
static int snd_pcm_pre_reset(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_RUNNING:
	case SNDRV_PCM_STATE_PREPARED:
	case SNDRV_PCM_STATE_PAUSED:
	case SNDRV_PCM_STATE_SUSPENDED:
		return 0;
	default:
		return -EBADFD;
	}
}

/* let the driver reset its position, then realign the software pointers */
static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err = substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_RESET, NULL);

	if (err < 0)
		return err;
	runtime->hw_ptr_base = 0;
	/* next period interrupt expected at the period boundary below hw_ptr */
	runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
		runtime->status->hw_ptr % runtime->period_size;
	runtime->silence_start = runtime->status->hw_ptr;
	runtime->silence_filled = 0;
	return 0;
}

/* move the application pointer to hw_ptr and refill playback silence */
static void snd_pcm_post_reset(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	runtime->control->appl_ptr = runtime->status->hw_ptr;
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    runtime->silence_size > 0)
		snd_pcm_playback_silence(substream, ULONG_MAX);
}

static struct action_ops snd_pcm_action_reset = {
	.pre_action = snd_pcm_pre_reset,
	.do_action = snd_pcm_do_reset,
	.post_action = snd_pcm_post_reset
};

/* ioctl entry: runs in nonatomic context since drivers may sleep here */
static int snd_pcm_reset(struct snd_pcm_substream *substream)
{
	return snd_pcm_action_nonatomic(&snd_pcm_action_reset, substream, 0);
}
/*
 * prepare ioctl
 */
/* we use the second argument for updating f_flags */
static int snd_pcm_pre_prepare(struct snd_pcm_substream *substream,
			       int f_flags)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED)
		return -EBADFD;
	/* a running stream must be stopped before it can be re-prepared */
	if (snd_pcm_running(substream))
		return -EBUSY;
	substream->f_flags = f_flags;
	return 0;
}

/* driver prepare, then the same pointer realignment as the reset action */
static int snd_pcm_do_prepare(struct snd_pcm_substream *substream, int state)
{
	int err;

	err = substream->ops->prepare(substream);
	if (err < 0)
		return err;
	return snd_pcm_do_reset(substream, 0);
}

static void snd_pcm_post_prepare(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	runtime->control->appl_ptr = runtime->status->hw_ptr;
	snd_pcm_set_state(substream, SNDRV_PCM_STATE_PREPARED);
}

static struct action_ops snd_pcm_action_prepare = {
	.pre_action = snd_pcm_pre_prepare,
	.do_action = snd_pcm_do_prepare,
	.post_action = snd_pcm_post_prepare
};

/**
 * snd_pcm_prepare - prepare the PCM substream to be triggerable
 * @substream: the PCM substream instance
 * @file: file to refer f_flags; may be NULL, then the stored
 *        substream->f_flags are reused
 *
 * Return: Zero if successful, or a negative error code.
 */
static int snd_pcm_prepare(struct snd_pcm_substream *substream,
			   struct file *file)
{
	int res;
	struct snd_card *card = substream->pcm->card;
	int f_flags;

	if (file)
		f_flags = file->f_flags;
	else
		f_flags = substream->f_flags;

	snd_power_lock(card);
	if ((res = snd_power_wait(card, SNDRV_CTL_POWER_D0)) >= 0)
		res = snd_pcm_action_nonatomic(&snd_pcm_action_prepare,
					       substream, f_flags);
	snd_power_unlock(card);
	return res;
}
/*
 * drain ioctl
 */
/* drain is invalid for unconfigured, disconnected or suspended streams */
static int snd_pcm_pre_drain_init(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	switch (runtime->status->state) {
	case SNDRV_PCM_STATE_OPEN:
	case SNDRV_PCM_STATE_DISCONNECTED:
	case SNDRV_PCM_STATE_SUSPENDED:
		return -EBADFD;
	}
	runtime->trigger_master = substream;
	return 0;
}

/* move each stream toward its drained state:
 * playback: PREPARED with data -> start in DRAINING; RUNNING -> DRAINING;
 *           XRUN -> SETUP
 * capture: RUNNING -> stop; goes to DRAINING while unread data remains,
 *          otherwise straight to SETUP */
static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		switch (runtime->status->state) {
		case SNDRV_PCM_STATE_PREPARED:
			/* start playback stream if possible */
			if (! snd_pcm_playback_empty(substream)) {
				snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
				snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
			}
			break;
		case SNDRV_PCM_STATE_RUNNING:
			runtime->status->state = SNDRV_PCM_STATE_DRAINING;
			break;
		case SNDRV_PCM_STATE_XRUN:
			runtime->status->state = SNDRV_PCM_STATE_SETUP;
			break;
		default:
			break;
		}
	} else {
		/* stop running stream */
		if (runtime->status->state == SNDRV_PCM_STATE_RUNNING) {
			int new_state = snd_pcm_capture_avail(runtime) > 0 ?
				SNDRV_PCM_STATE_DRAINING : SNDRV_PCM_STATE_SETUP;
			snd_pcm_do_stop(substream, new_state);
			snd_pcm_post_stop(substream, new_state);
		}
	}
	return 0;
}

/* nothing to commit; state changes are done in do_drain_init itself */
static void snd_pcm_post_drain_init(struct snd_pcm_substream *substream, int state)
{
}

static struct action_ops snd_pcm_action_drain_init = {
	.pre_action = snd_pcm_pre_drain_init,
	.do_action = snd_pcm_do_drain_init,
	.post_action = snd_pcm_post_drain_init
};
static int snd_pcm_drop(struct snd_pcm_substream *substream);

/*
 * Drain the stream(s).
 * When the substream is linked, sync until the draining of all playback streams
 * is finished.
 * After this call, all streams are supposed to be either SETUP or DRAINING
 * (capture only) state.
 */
static int snd_pcm_drain(struct snd_pcm_substream *substream,
			 struct file *file)
{
	struct snd_card *card;
	struct snd_pcm_runtime *runtime;
	struct snd_pcm_substream *s;
	wait_queue_t wait;
	int result = 0;
	int nonblock = 0;

	card = substream->pcm->card;
	runtime = substream->runtime;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;

	snd_power_lock(card);
	if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
		result = snd_power_wait(card, SNDRV_CTL_POWER_D0);
		if (result < 0) {
			snd_power_unlock(card);
			return result;
		}
	}

	/* O_NONBLOCK comes either from the file or from the stored flags */
	if (file) {
		if (file->f_flags & O_NONBLOCK)
			nonblock = 1;
	} else if (substream->f_flags & O_NONBLOCK)
		nonblock = 1;

	down_read(&snd_pcm_link_rwsem);
	snd_pcm_stream_lock_irq(substream);
	/* resume pause */
	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
		snd_pcm_pause(substream, 0);

	/* pre-start/stop - all running streams are changed to DRAINING state */
	result = snd_pcm_action(&snd_pcm_action_drain_init, substream, 0);
	if (result < 0)
		goto unlock;
	/* in non-blocking, we don't wait in ioctl but let caller poll */
	if (nonblock) {
		result = -EAGAIN;
		goto unlock;
	}

	for (;;) {
		long tout;
		struct snd_pcm_runtime *to_check;

		if (signal_pending(current)) {
			result = -ERESTARTSYS;
			break;
		}
		/* find a substream to drain; note this also leaves the local
		 * 'runtime' pointing at the stream being waited on */
		to_check = NULL;
		snd_pcm_group_for_each_entry(s, substream) {
			if (s->stream != SNDRV_PCM_STREAM_PLAYBACK)
				continue;
			runtime = s->runtime;
			if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
				to_check = runtime;
				break;
			}
		}
		if (!to_check)
			break; /* all drained */
		init_waitqueue_entry(&wait, current);
		add_wait_queue(&to_check->sleep, &wait);
		/* drop all locks before sleeping; reacquired below */
		snd_pcm_stream_unlock_irq(substream);
		up_read(&snd_pcm_link_rwsem);
		snd_power_unlock(card);
		if (runtime->no_period_wakeup)
			tout = MAX_SCHEDULE_TIMEOUT;
		else {
			/* timeout of at least 10 s, or two period times */
			tout = 10;
			if (runtime->rate) {
				long t = runtime->period_size * 2 / runtime->rate;
				tout = max(t, tout);
			}
			tout = msecs_to_jiffies(tout * 1000);
		}
		tout = schedule_timeout_interruptible(tout);
		snd_power_lock(card);
		down_read(&snd_pcm_link_rwsem);
		snd_pcm_stream_lock_irq(substream);
		remove_wait_queue(&to_check->sleep, &wait);
		if (card->shutdown) {
			result = -ENODEV;
			break;
		}
		if (tout == 0) {
			if (substream->runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
				result = -ESTRPIPE;
			else {
				dev_dbg(substream->pcm->card->dev,
					"playback drain error (DMA or IRQ trouble?)\n");
				snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
				result = -EIO;
			}
			break;
		}
	}

 unlock:
	snd_pcm_stream_unlock_irq(substream);
	up_read(&snd_pcm_link_rwsem);
	snd_power_unlock(card);

	return result;
}
/*
 * drop ioctl
 *
 * Immediately put all linked substreams into SETUP state.
 */
static int snd_pcm_drop(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime;
	int result = 0;

	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;

	if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
	    runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED ||
	    runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
		return -EBADFD;

	snd_pcm_stream_lock_irq(substream);
	/* resume pause */
	if (runtime->status->state == SNDRV_PCM_STATE_PAUSED)
		snd_pcm_pause(substream, 0);

	snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
	/* runtime->control->appl_ptr = runtime->status->hw_ptr; */
	snd_pcm_stream_unlock_irq(substream);

	return result;
}
static bool is_pcm_file(struct file *file)
{
struct inode *inode = file_inode(file);
unsigned int minor;
if (!S_ISCHR(inode->i_mode) || imajor(inode) != snd_major)
return false;
minor = iminor(inode);
return snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_PLAYBACK) ||
snd_lookup_minor_data(minor, SNDRV_DEVICE_TYPE_PCM_CAPTURE);
}
/*
 * PCM link handling
 */
/* SNDRV_PCM_IOCTL_LINK: link the substream behind file descriptor @fd into
 * this substream's group so that triggers act on all members at once */
static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
{
	int res = 0;
	struct snd_pcm_file *pcm_file;
	struct snd_pcm_substream *substream1;
	struct snd_pcm_group *group;
	struct fd f = fdget(fd);

	if (!f.file)
		return -EBADFD;
	if (!is_pcm_file(f.file)) {
		res = -EBADFD;
		goto _badf;
	}
	pcm_file = f.file->private_data;
	substream1 = pcm_file->substream;
	/* allocate the group up front, outside the locks */
	group = kmalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		res = -ENOMEM;
		goto _nolock;
	}
	down_write(&snd_pcm_link_rwsem);
	write_lock_irq(&snd_pcm_link_rwlock);
	/* both streams must be configured, in identical state, and with the
	 * same atomicity model */
	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
	    substream->runtime->status->state != substream1->runtime->status->state ||
	    substream->pcm->nonatomic != substream1->pcm->nonatomic) {
		res = -EBADFD;
		goto _end;
	}
	if (snd_pcm_stream_linked(substream1)) {
		res = -EALREADY;
		goto _end;
	}
	if (!snd_pcm_stream_linked(substream)) {
		/* first link: promote this stream to the new shared group
		 * (group is set to NULL so it is not freed below) */
		substream->group = group;
		group = NULL;
		spin_lock_init(&substream->group->lock);
		mutex_init(&substream->group->mutex);
		INIT_LIST_HEAD(&substream->group->substreams);
		list_add_tail(&substream->link_list, &substream->group->substreams);
		substream->group->count = 1;
	}
	list_add_tail(&substream1->link_list, &substream->group->substreams);
	substream->group->count++;
	substream1->group = substream->group;
 _end:
	write_unlock_irq(&snd_pcm_link_rwlock);
	up_write(&snd_pcm_link_rwsem);
 _nolock:
	/* NOTE(review): drops the card ref presumably taken during the
	 * minor-data lookup in is_pcm_file() - verify the pairing */
	snd_card_unref(substream1->pcm->card);
	kfree(group); /* no-op when the group was installed above */
 _badf:
	fdput(f);
	return res;
}
static void relink_to_local(struct snd_pcm_substream *substream)
{
substream->group = &substream->self_group;
INIT_LIST_HEAD(&substream->self_group.substreams);
list_add_tail(&substream->link_list, &substream->self_group.substreams);
}
/* SNDRV_PCM_IOCTL_UNLINK: remove the substream from its group; when only
 * one other member remains, it is detached too and the shared group freed */
static int snd_pcm_unlink(struct snd_pcm_substream *substream)
{
	struct snd_pcm_substream *s;
	int res = 0;

	down_write(&snd_pcm_link_rwsem);
	write_lock_irq(&snd_pcm_link_rwlock);
	if (!snd_pcm_stream_linked(substream)) {
		res = -EALREADY;
		goto _end;
	}
	list_del(&substream->link_list);
	substream->group->count--;
	if (substream->group->count == 1) {	/* detach the last stream, too */
		snd_pcm_group_for_each_entry(s, substream) {
			relink_to_local(s);
			break;
		}
		kfree(substream->group);
	}
	relink_to_local(substream);
 _end:
	write_unlock_irq(&snd_pcm_link_rwlock);
	up_write(&snd_pcm_link_rwsem);
	return res;
}
/*
* hw configurator
*/
static int snd_pcm_hw_rule_mul(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_interval t;
snd_interval_mul(hw_param_interval_c(params, rule->deps[0]),
hw_param_interval_c(params, rule->deps[1]), &t);
return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
static int snd_pcm_hw_rule_div(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_interval t;
snd_interval_div(hw_param_interval_c(params, rule->deps[0]),
hw_param_interval_c(params, rule->deps[1]), &t);
return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
static int snd_pcm_hw_rule_muldivk(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_interval t;
snd_interval_muldivk(hw_param_interval_c(params, rule->deps[0]),
hw_param_interval_c(params, rule->deps[1]),
(unsigned long) rule->private, &t);
return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
static int snd_pcm_hw_rule_mulkdiv(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_interval t;
snd_interval_mulkdiv(hw_param_interval_c(params, rule->deps[0]),
(unsigned long) rule->private,
hw_param_interval_c(params, rule->deps[1]), &t);
return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
/* rule: restrict the FORMAT mask to formats whose physical sample width
 * falls inside the SAMPLE_BITS interval given in deps[0] */
static int snd_pcm_hw_rule_format(struct snd_pcm_hw_params *params,
				  struct snd_pcm_hw_rule *rule)
{
	unsigned int k;
	struct snd_interval *i = hw_param_interval(params, rule->deps[0]);
	struct snd_mask m;
	struct snd_mask *mask = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);

	snd_mask_any(&m);
	for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
		int bits;
		if (! snd_mask_test(mask, k))
			continue;
		bits = snd_pcm_format_physical_width(k);
		if (bits <= 0)
			continue; /* ignore invalid formats */
		/* drop formats whose width lies outside the interval */
		if ((unsigned)bits < i->min || (unsigned)bits > i->max)
			snd_mask_reset(&m, k);
	}
	return snd_mask_refine(mask, &m);
}
/* rule: restrict SAMPLE_BITS to the min..max physical sample width over
 * all formats still present in the FORMAT mask */
static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
				       struct snd_pcm_hw_rule *rule)
{
	struct snd_interval t;
	unsigned int k;

	/* start with an inverted interval and widen it per format */
	t.min = UINT_MAX;
	t.max = 0;
	t.openmin = 0;
	t.openmax = 0;
	for (k = 0; k <= SNDRV_PCM_FORMAT_LAST; ++k) {
		int bits;
		if (! snd_mask_test(hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT), k))
			continue;
		bits = snd_pcm_format_physical_width(k);
		if (bits <= 0)
			continue; /* ignore invalid formats */
		if (t.min > (unsigned)bits)
			t.min = bits;
		if (t.max < (unsigned)bits)
			t.max = bits;
	}
	t.integer = 1;
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
#if SNDRV_PCM_RATE_5512 != 1 << 0 || SNDRV_PCM_RATE_192000 != 1 << 12
#error "Change this table"
#endif

/* rates[i] must correspond to the SNDRV_PCM_RATE_* bit (1 << i); the
 * preprocessor guard above catches a drift of the bit definitions */
static unsigned int rates[] = { 5512, 8000, 11025, 16000, 22050, 32000, 44100,
                                 48000, 64000, 88200, 96000, 176400, 192000 };

const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
	.count = ARRAY_SIZE(rates),
	.list = rates,
};
/*
 * Constraint rule: restrict the RATE interval to the standard rates
 * whose bits are set in the hardware's rates bitmap (rule->private
 * points at the driver's struct snd_pcm_hardware).
 */
static int snd_pcm_hw_rule_rate(struct snd_pcm_hw_params *params,
				struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hardware *hw = rule->private;
	struct snd_interval *rate_iv = hw_param_interval(params, rule->var);

	return snd_interval_list(rate_iv, snd_pcm_known_rates.count,
				 snd_pcm_known_rates.list, hw->rates);
}
static int snd_pcm_hw_rule_buffer_bytes_max(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
struct snd_interval t;
struct snd_pcm_substream *substream = rule->private;
t.min = 0;
t.max = substream->buffer_bytes_max;
t.openmin = 0;
t.openmax = 0;
t.integer = 1;
return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}
/*
 * Install the generic, hardware-independent hw_params constraints on a
 * substream: reset all masks/intervals to "any", force the integer
 * parameters to integer-only, then register the web of arithmetic rules
 * that tie channels, sample/frame bits, period/buffer sizes, bytes,
 * times and rate to each other.  Returns 0 or a negative error from
 * snd_pcm_hw_rule_add().
 */
int snd_pcm_hw_constraints_init(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_pcm_hw_constraints *constrs = &runtime->hw_constraints;
int k, err;
/* Start from fully unconstrained masks and intervals. */
for (k = SNDRV_PCM_HW_PARAM_FIRST_MASK; k <= SNDRV_PCM_HW_PARAM_LAST_MASK; k++) {
snd_mask_any(constrs_mask(constrs, k));
}
for (k = SNDRV_PCM_HW_PARAM_FIRST_INTERVAL; k <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; k++) {
snd_interval_any(constrs_interval(constrs, k));
}
/* These parameters can only take integer values. */
snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_CHANNELS));
snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_SIZE));
snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_BUFFER_BYTES));
snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_SAMPLE_BITS));
snd_interval_setinteger(constrs_interval(constrs, SNDRV_PCM_HW_PARAM_FRAME_BITS));
/* FORMAT <-> SAMPLE_BITS coupling. */
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
snd_pcm_hw_rule_format, NULL,
SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
if (err < 0)
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
snd_pcm_hw_rule_sample_bits, NULL,
SNDRV_PCM_HW_PARAM_FORMAT,
SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
if (err < 0)
return err;
/* sample_bits = frame_bits / channels */
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
snd_pcm_hw_rule_div, NULL,
SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
if (err < 0)
return err;
/* frame_bits = sample_bits * channels = bytes * 8 / size */
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
snd_pcm_hw_rule_mul, NULL,
SNDRV_PCM_HW_PARAM_SAMPLE_BITS, SNDRV_PCM_HW_PARAM_CHANNELS, -1);
if (err < 0)
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
snd_pcm_hw_rule_mulkdiv, (void*) 8,
SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
if (err < 0)
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FRAME_BITS,
snd_pcm_hw_rule_mulkdiv, (void*) 8,
SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
if (err < 0)
return err;
/* channels = frame_bits / sample_bits */
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
snd_pcm_hw_rule_div, NULL,
SNDRV_PCM_HW_PARAM_FRAME_BITS, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, -1);
if (err < 0)
return err;
/* rate = size * 1000000 / time (periods and buffers). */
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_TIME, -1);
if (err < 0)
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_BUFFER_TIME, -1);
if (err < 0)
return err;
/* periods = buffer_size / period_size, and the inverse. */
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
snd_pcm_hw_rule_div, NULL,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
if (err < 0)
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
snd_pcm_hw_rule_div, NULL,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
if (err < 0)
return err;
/* period_size from bytes/frame_bits and from time/rate. */
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
snd_pcm_hw_rule_mulkdiv, (void*) 8,
SNDRV_PCM_HW_PARAM_PERIOD_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
if (err < 0)
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
snd_pcm_hw_rule_muldivk, (void*) 1000000,
SNDRV_PCM_HW_PARAM_PERIOD_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
if (err < 0)
return err;
/* buffer_size from period_size*periods, bytes/frame_bits, time/rate. */
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
snd_pcm_hw_rule_mul, NULL,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_PERIODS, -1);
if (err < 0)
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
snd_pcm_hw_rule_mulkdiv, (void*) 8,
SNDRV_PCM_HW_PARAM_BUFFER_BYTES, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
if (err < 0)
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
snd_pcm_hw_rule_muldivk, (void*) 1000000,
SNDRV_PCM_HW_PARAM_BUFFER_TIME, SNDRV_PCM_HW_PARAM_RATE, -1);
if (err < 0)
return err;
/* bytes = size * frame_bits / 8 (periods and buffers). */
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
snd_pcm_hw_rule_muldivk, (void*) 8,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
if (err < 0)
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
snd_pcm_hw_rule_muldivk, (void*) 8,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_FRAME_BITS, -1);
if (err < 0)
return err;
/* time (usecs) = size * 1000000 / rate (periods and buffers). */
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_TIME,
snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
if (err < 0)
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
snd_pcm_hw_rule_mulkdiv, (void*) 1000000,
SNDRV_PCM_HW_PARAM_BUFFER_SIZE, SNDRV_PCM_HW_PARAM_RATE, -1);
if (err < 0)
return err;
return 0;
}
/*
 * Apply the hardware-specific limits from runtime->hw on top of the
 * generic constraints: access modes derived from hw->info, format and
 * subformat masks, and min/max ranges for channels, rate, period bytes,
 * periods and buffer bytes.  Called after the driver's open callback
 * has filled in runtime->hw.  Returns 0 or a negative error.
 */
int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_pcm_hardware *hw = &runtime->hw;
int err;
unsigned int mask = 0;
/* Build the ACCESS mask from the interleaving/mmap capabilities. */
if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
if (hw_support_mmap(substream)) {
if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
mask |= 1 << SNDRV_PCM_ACCESS_MMAP_NONINTERLEAVED;
if (hw->info & SNDRV_PCM_INFO_COMPLEX)
mask |= 1 << SNDRV_PCM_ACCESS_MMAP_COMPLEX;
}
err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_ACCESS, mask);
if (err < 0)
return err;
err = snd_pcm_hw_constraint_mask64(runtime, SNDRV_PCM_HW_PARAM_FORMAT, hw->formats);
if (err < 0)
return err;
err = snd_pcm_hw_constraint_mask(runtime, SNDRV_PCM_HW_PARAM_SUBFORMAT, 1 << SNDRV_PCM_SUBFORMAT_STD);
if (err < 0)
return err;
/* Clamp the interval parameters to the hardware's advertised ranges. */
err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_CHANNELS,
hw->channels_min, hw->channels_max);
if (err < 0)
return err;
err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
hw->rate_min, hw->rate_max);
if (err < 0)
return err;
err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
hw->period_bytes_min, hw->period_bytes_max);
if (err < 0)
return err;
err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
hw->periods_min, hw->periods_max);
if (err < 0)
return err;
/* Note: lower bound for BUFFER_BYTES is period_bytes_min (>= 1 period). */
err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
hw->period_bytes_min, hw->buffer_bytes_max);
if (err < 0)
return err;
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
snd_pcm_hw_rule_buffer_bytes_max, substream,
SNDRV_PCM_HW_PARAM_BUFFER_BYTES, -1);
if (err < 0)
return err;
/* FIXME: remove */
if (runtime->dma_bytes) {
err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, runtime->dma_bytes);
if (err < 0)
return err;
}
/* Without KNOT/CONTINUOUS, only the standard rate table applies. */
if (!(hw->rates & (SNDRV_PCM_RATE_KNOT | SNDRV_PCM_RATE_CONTINUOUS))) {
err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
snd_pcm_hw_rule_rate, hw,
SNDRV_PCM_HW_PARAM_RATE, -1);
if (err < 0)
return err;
}
/* FIXME: this belong to lowlevel */
snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
return 0;
}
/*
 * Default pcm_release callback installed for the first opener of a
 * substream: detach it from any linked-stream group on release.
 */
static void pcm_release_private(struct snd_pcm_substream *substream)
{
snd_pcm_unlink(substream);
}
/*
 * Drop one reference to a substream; on the last reference, tear it
 * down: stop the stream, free hw resources and call the driver's close
 * callback, drop the PM QoS request, run the pcm_release hook, and
 * finally detach the substream from the PCM device.
 */
void snd_pcm_release_substream(struct snd_pcm_substream *substream)
{
substream->ref_count--;
if (substream->ref_count > 0)
return;
/* Last reference: stop any in-flight stream first. */
snd_pcm_drop(substream);
if (substream->hw_opened) {
if (substream->ops->hw_free != NULL)
substream->ops->hw_free(substream);
substream->ops->close(substream);
substream->hw_opened = 0;
}
if (pm_qos_request_active(&substream->latency_pm_qos_req))
pm_qos_remove_request(&substream->latency_pm_qos_req);
if (substream->pcm_release) {
substream->pcm_release(substream);
substream->pcm_release = NULL;
}
snd_pcm_detach_substream(substream);
}
EXPORT_SYMBOL(snd_pcm_release_substream);
/*
 * Attach and fully open a substream on @pcm for @stream direction.
 * If the substream was already open (ref_count > 1) just return it;
 * otherwise initialize the generic constraints, call the driver open
 * callback, and complete the hardware constraints.  On any failure the
 * reference taken by attach is released.  Returns 0 with *rsubstream
 * set, or a negative error.
 */
int snd_pcm_open_substream(struct snd_pcm *pcm, int stream,
struct file *file,
struct snd_pcm_substream **rsubstream)
{
struct snd_pcm_substream *substream;
int err;
err = snd_pcm_attach_substream(pcm, stream, file, &substream);
if (err < 0)
return err;
if (substream->ref_count > 1) {
/* Already opened by someone else; share the existing setup. */
*rsubstream = substream;
return 0;
}
err = snd_pcm_hw_constraints_init(substream);
if (err < 0) {
pcm_dbg(pcm, "snd_pcm_hw_constraints_init failed\n");
goto error;
}
if ((err = substream->ops->open(substream)) < 0)
goto error;
substream->hw_opened = 1;
err = snd_pcm_hw_constraints_complete(substream);
if (err < 0) {
pcm_dbg(pcm, "snd_pcm_hw_constraints_complete failed\n");
goto error;
}
*rsubstream = substream;
return 0;
error:
snd_pcm_release_substream(substream);
return err;
}
EXPORT_SYMBOL(snd_pcm_open_substream);
/*
 * Open a substream and bind it to @file via a freshly allocated
 * struct snd_pcm_file stored in file->private_data.  The first opener
 * also installs the pcm_release_private unlink hook.  Returns 0 or a
 * negative error (the substream is released on allocation failure).
 */
static int snd_pcm_open_file(struct file *file,
struct snd_pcm *pcm,
int stream)
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
int err;
err = snd_pcm_open_substream(pcm, stream, file, &substream);
if (err < 0)
return err;
pcm_file = kzalloc(sizeof(*pcm_file), GFP_KERNEL);
if (pcm_file == NULL) {
snd_pcm_release_substream(substream);
return -ENOMEM;
}
pcm_file->substream = substream;
if (substream->ref_count == 1) {
/* First opener owns the file binding and the release hook. */
substream->file = pcm_file;
substream->pcm_release = pcm_release_private;
}
file->private_data = pcm_file;
return 0;
}
/*
 * Character-device open entry for the playback minor: look up the PCM
 * device registered for this minor and open a playback stream on it.
 * snd_pcm_open() itself handles a NULL lookup result (-ENODEV).
 */
static int snd_pcm_playback_open(struct inode *inode, struct file *file)
{
	struct snd_pcm *pcm;
	int ret;

	ret = nonseekable_open(inode, file);
	if (ret < 0)
		return ret;
	pcm = snd_lookup_minor_data(iminor(inode),
				    SNDRV_DEVICE_TYPE_PCM_PLAYBACK);
	ret = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_PLAYBACK);
	/* Drop the card reference taken by the minor lookup. */
	if (pcm)
		snd_card_unref(pcm->card);
	return ret;
}
/*
 * Character-device open entry for the capture minor: look up the PCM
 * device registered for this minor and open a capture stream on it.
 * snd_pcm_open() itself handles a NULL lookup result (-ENODEV).
 */
static int snd_pcm_capture_open(struct inode *inode, struct file *file)
{
	struct snd_pcm *pcm;
	int ret;

	ret = nonseekable_open(inode, file);
	if (ret < 0)
		return ret;
	pcm = snd_lookup_minor_data(iminor(inode),
				    SNDRV_DEVICE_TYPE_PCM_CAPTURE);
	ret = snd_pcm_open(file, pcm, SNDRV_PCM_STREAM_CAPTURE);
	/* Drop the card reference taken by the minor lookup. */
	if (pcm)
		snd_card_unref(pcm->card);
	return ret;
}
/*
 * Common open path shared by playback and capture.  Registers the file
 * with the card and pins the card's module, then tries to open a
 * substream; if all substreams are busy (-EAGAIN) and the file is not
 * O_NONBLOCK, sleep on pcm->open_wait and retry until success, signal,
 * or card shutdown.  On failure all acquired resources are unwound via
 * the __error labels.
 */
static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream)
{
int err;
wait_queue_t wait;
if (pcm == NULL) {
err = -ENODEV;
goto __error1;
}
err = snd_card_file_add(pcm->card, file);
if (err < 0)
goto __error1;
if (!try_module_get(pcm->card->module)) {
/* NOTE(review): -EFAULT for a module_get failure looks odd but
 * matches the long-standing upstream convention here. */
err = -EFAULT;
goto __error2;
}
/* Queue ourselves before taking the mutex so a concurrent release
 * cannot wake the queue between our check and our sleep. */
init_waitqueue_entry(&wait, current);
add_wait_queue(&pcm->open_wait, &wait);
mutex_lock(&pcm->open_mutex);
while (1) {
err = snd_pcm_open_file(file, pcm, stream);
if (err >= 0)
break;
if (err == -EAGAIN) {
if (file->f_flags & O_NONBLOCK) {
err = -EBUSY;
break;
}
} else
break;
set_current_state(TASK_INTERRUPTIBLE);
mutex_unlock(&pcm->open_mutex);
schedule();
mutex_lock(&pcm->open_mutex);
if (pcm->card->shutdown) {
err = -ENODEV;
break;
}
if (signal_pending(current)) {
err = -ERESTARTSYS;
break;
}
}
remove_wait_queue(&pcm->open_wait, &wait);
mutex_unlock(&pcm->open_mutex);
if (err < 0)
goto __error;
return err;
__error:
module_put(pcm->card->module);
__error2:
snd_card_file_remove(pcm->card, file);
__error1:
return err;
}
/*
 * Character-device release: drop the substream reference (tearing it
 * down if last), free the per-file state, wake any opener blocked in
 * snd_pcm_open(), and release the module/card file references taken at
 * open time.
 */
static int snd_pcm_release(struct inode *inode, struct file *file)
{
struct snd_pcm *pcm;
struct snd_pcm_substream *substream;
struct snd_pcm_file *pcm_file;
pcm_file = file->private_data;
substream = pcm_file->substream;
if (snd_BUG_ON(!substream))
return -ENXIO;
pcm = substream->pcm;
mutex_lock(&pcm->open_mutex);
snd_pcm_release_substream(substream);
kfree(pcm_file);
mutex_unlock(&pcm->open_mutex);
/* A substream may now be free; wake blocked openers. */
wake_up(&pcm->open_wait);
module_put(pcm->card->module);
snd_card_file_remove(pcm->card, file);
return 0;
}
/*
 * Move the playback application pointer backwards by up to @frames,
 * clamped to the amount of queued-but-unplayed data (hw_avail).
 * Returns the number of frames actually rewound, 0 if nothing could be
 * rewound, or a negative error depending on the stream state
 * (-EPIPE on xrun, -ESTRPIPE when suspended, -EBADFD otherwise).
 */
static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,
snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_sframes_t appl_ptr;
snd_pcm_sframes_t ret;
snd_pcm_sframes_t hw_avail;
if (frames == 0)
return 0;
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_PREPARED:
break;
case SNDRV_PCM_STATE_DRAINING:
case SNDRV_PCM_STATE_RUNNING:
/* Refresh the hw pointer; a failure means xrun (fall through). */
if (snd_pcm_update_hw_ptr(substream) >= 0)
break;
/* Fall through */
case SNDRV_PCM_STATE_XRUN:
ret = -EPIPE;
goto __end;
case SNDRV_PCM_STATE_SUSPENDED:
ret = -ESTRPIPE;
goto __end;
default:
ret = -EBADFD;
goto __end;
}
hw_avail = snd_pcm_playback_hw_avail(runtime);
if (hw_avail <= 0) {
ret = 0;
goto __end;
}
if (frames > (snd_pcm_uframes_t)hw_avail)
frames = hw_avail;
appl_ptr = runtime->control->appl_ptr - frames;
/* Wrap around the ring boundary if we went negative. */
if (appl_ptr < 0)
appl_ptr += runtime->boundary;
runtime->control->appl_ptr = appl_ptr;
ret = frames;
__end:
snd_pcm_stream_unlock_irq(substream);
return ret;
}
/*
 * Move the capture application pointer backwards by up to @frames,
 * clamped to the capture hw_avail.  Unlike the playback variant,
 * DRAINING is a valid state for capture rewind.  Returns frames
 * rewound, 0, or a negative error (-EPIPE / -ESTRPIPE / -EBADFD).
 */
static snd_pcm_sframes_t snd_pcm_capture_rewind(struct snd_pcm_substream *substream,
snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_sframes_t appl_ptr;
snd_pcm_sframes_t ret;
snd_pcm_sframes_t hw_avail;
if (frames == 0)
return 0;
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_PREPARED:
case SNDRV_PCM_STATE_DRAINING:
break;
case SNDRV_PCM_STATE_RUNNING:
/* Refresh the hw pointer; a failure means xrun (fall through). */
if (snd_pcm_update_hw_ptr(substream) >= 0)
break;
/* Fall through */
case SNDRV_PCM_STATE_XRUN:
ret = -EPIPE;
goto __end;
case SNDRV_PCM_STATE_SUSPENDED:
ret = -ESTRPIPE;
goto __end;
default:
ret = -EBADFD;
goto __end;
}
hw_avail = snd_pcm_capture_hw_avail(runtime);
if (hw_avail <= 0) {
ret = 0;
goto __end;
}
if (frames > (snd_pcm_uframes_t)hw_avail)
frames = hw_avail;
appl_ptr = runtime->control->appl_ptr - frames;
/* Wrap around the ring boundary if we went negative. */
if (appl_ptr < 0)
appl_ptr += runtime->boundary;
runtime->control->appl_ptr = appl_ptr;
ret = frames;
__end:
snd_pcm_stream_unlock_irq(substream);
return ret;
}
/*
 * Move the playback application pointer forwards by up to @frames,
 * clamped to the writable space (playback avail), effectively skipping
 * that much of the buffer.  Returns frames advanced, 0, or a negative
 * error (-EPIPE / -ESTRPIPE / -EBADFD).
 */
static snd_pcm_sframes_t snd_pcm_playback_forward(struct snd_pcm_substream *substream,
snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_sframes_t appl_ptr;
snd_pcm_sframes_t ret;
snd_pcm_sframes_t avail;
if (frames == 0)
return 0;
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_PREPARED:
case SNDRV_PCM_STATE_PAUSED:
break;
case SNDRV_PCM_STATE_DRAINING:
case SNDRV_PCM_STATE_RUNNING:
/* Refresh the hw pointer; a failure means xrun (fall through). */
if (snd_pcm_update_hw_ptr(substream) >= 0)
break;
/* Fall through */
case SNDRV_PCM_STATE_XRUN:
ret = -EPIPE;
goto __end;
case SNDRV_PCM_STATE_SUSPENDED:
ret = -ESTRPIPE;
goto __end;
default:
ret = -EBADFD;
goto __end;
}
avail = snd_pcm_playback_avail(runtime);
if (avail <= 0) {
ret = 0;
goto __end;
}
if (frames > (snd_pcm_uframes_t)avail)
frames = avail;
appl_ptr = runtime->control->appl_ptr + frames;
/* Wrap around the ring boundary if we crossed it. */
if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
appl_ptr -= runtime->boundary;
runtime->control->appl_ptr = appl_ptr;
ret = frames;
__end:
snd_pcm_stream_unlock_irq(substream);
return ret;
}
/*
 * Move the capture application pointer forwards by up to @frames,
 * clamped to the readable data (capture avail), effectively discarding
 * that much captured data.  Returns frames advanced, 0, or a negative
 * error (-EPIPE / -ESTRPIPE / -EBADFD).
 */
static snd_pcm_sframes_t snd_pcm_capture_forward(struct snd_pcm_substream *substream,
snd_pcm_uframes_t frames)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_sframes_t appl_ptr;
snd_pcm_sframes_t ret;
snd_pcm_sframes_t avail;
if (frames == 0)
return 0;
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_PREPARED:
case SNDRV_PCM_STATE_DRAINING:
case SNDRV_PCM_STATE_PAUSED:
break;
case SNDRV_PCM_STATE_RUNNING:
/* Refresh the hw pointer; a failure means xrun (fall through). */
if (snd_pcm_update_hw_ptr(substream) >= 0)
break;
/* Fall through */
case SNDRV_PCM_STATE_XRUN:
ret = -EPIPE;
goto __end;
case SNDRV_PCM_STATE_SUSPENDED:
ret = -ESTRPIPE;
goto __end;
default:
ret = -EBADFD;
goto __end;
}
avail = snd_pcm_capture_avail(runtime);
if (avail <= 0) {
ret = 0;
goto __end;
}
if (frames > (snd_pcm_uframes_t)avail)
frames = avail;
appl_ptr = runtime->control->appl_ptr + frames;
/* Wrap around the ring boundary if we crossed it. */
if (appl_ptr >= (snd_pcm_sframes_t)runtime->boundary)
appl_ptr -= runtime->boundary;
runtime->control->appl_ptr = appl_ptr;
ret = frames;
__end:
snd_pcm_stream_unlock_irq(substream);
return ret;
}
/*
 * Synchronize the hw pointer with the hardware for a running (or
 * playback-draining) stream.  Returns 0 on success (also for PREPARED
 * and SUSPENDED, where there is nothing to sync), -EPIPE on xrun,
 * -EBADFD for invalid states (including capture in DRAINING).
 */
static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_DRAINING:
/* Draining is only a valid hwsync state for playback. */
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
goto __badfd;
/* Fall through */
case SNDRV_PCM_STATE_RUNNING:
if ((err = snd_pcm_update_hw_ptr(substream)) < 0)
break;
/* Fall through */
case SNDRV_PCM_STATE_PREPARED:
case SNDRV_PCM_STATE_SUSPENDED:
err = 0;
break;
case SNDRV_PCM_STATE_XRUN:
err = -EPIPE;
break;
default:
__badfd:
err = -EBADFD;
break;
}
snd_pcm_stream_unlock_irq(substream);
return err;
}
/*
 * SNDRV_PCM_IOCTL_DELAY: report the current delay in frames to user
 * space.  For playback this is the queued-but-unplayed amount
 * (hw_avail); for capture it is the readable amount (avail); both are
 * augmented by the driver-reported runtime->delay.  Writes the result
 * through @res on success; returns 0 or a negative error.
 */
static int snd_pcm_delay(struct snd_pcm_substream *substream,
snd_pcm_sframes_t __user *res)
{
struct snd_pcm_runtime *runtime = substream->runtime;
int err;
snd_pcm_sframes_t n = 0;
snd_pcm_stream_lock_irq(substream);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_DRAINING:
/* Draining is only a valid delay state for playback. */
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
goto __badfd;
/* Fall through */
case SNDRV_PCM_STATE_RUNNING:
if ((err = snd_pcm_update_hw_ptr(substream)) < 0)
break;
/* Fall through */
case SNDRV_PCM_STATE_PREPARED:
case SNDRV_PCM_STATE_SUSPENDED:
err = 0;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
n = snd_pcm_playback_hw_avail(runtime);
else
n = snd_pcm_capture_avail(runtime);
n += runtime->delay;
break;
case SNDRV_PCM_STATE_XRUN:
err = -EPIPE;
break;
default:
__badfd:
err = -EBADFD;
break;
}
snd_pcm_stream_unlock_irq(substream);
/* Copy to user space outside the stream lock. */
if (!err)
if (put_user(n, res))
err = -EFAULT;
return err;
}
/*
 * SNDRV_PCM_IOCTL_SYNC_PTR: exchange mmap status/control records with
 * user space for architectures (or clients) that do not mmap them.
 * Depending on the flags, either the kernel's appl_ptr/avail_min are
 * updated from user space or the user copy is refreshed from the
 * kernel, and the current status record is always copied back.
 */
static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
struct snd_pcm_sync_ptr __user *_sync_ptr)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_pcm_sync_ptr sync_ptr;
volatile struct snd_pcm_mmap_status *status;
volatile struct snd_pcm_mmap_control *control;
int err;
memset(&sync_ptr, 0, sizeof(sync_ptr));
if (get_user(sync_ptr.flags, (unsigned __user *)&(_sync_ptr->flags)))
return -EFAULT;
if (copy_from_user(&sync_ptr.c.control, &(_sync_ptr->c.control), sizeof(struct snd_pcm_mmap_control)))
return -EFAULT;
status = runtime->status;
control = runtime->control;
if (sync_ptr.flags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
err = snd_pcm_hwsync(substream);
if (err < 0)
return err;
}
snd_pcm_stream_lock_irq(substream);
/* Flag clear = user-to-kernel direction; flag set = kernel-to-user. */
if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_APPL))
control->appl_ptr = sync_ptr.c.control.appl_ptr;
else
sync_ptr.c.control.appl_ptr = control->appl_ptr;
if (!(sync_ptr.flags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
control->avail_min = sync_ptr.c.control.avail_min;
else
sync_ptr.c.control.avail_min = control->avail_min;
/* The status record is always reported back to user space. */
sync_ptr.s.status.state = status->state;
sync_ptr.s.status.hw_ptr = status->hw_ptr;
sync_ptr.s.status.tstamp = status->tstamp;
sync_ptr.s.status.suspended_state = status->suspended_state;
snd_pcm_stream_unlock_irq(substream);
if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
return -EFAULT;
return 0;
}
/*
 * SNDRV_PCM_IOCTL_TTSTAMP: set the runtime timestamp type from a
 * user-supplied integer, rejecting values outside
 * [0, SNDRV_PCM_TSTAMP_TYPE_LAST].
 */
static int snd_pcm_tstamp(struct snd_pcm_substream *substream, int __user *_arg)
{
	int type;

	if (get_user(type, _arg))
		return -EFAULT;
	if (type < 0 || type > SNDRV_PCM_TSTAMP_TYPE_LAST)
		return -EINVAL;
	substream->runtime->tstamp_type = type;
	return 0;
}
/*
 * Dispatch the ioctls common to playback and capture streams.  Each
 * case delegates to the corresponding handler; unrecognized commands
 * return -ENOTTY after a debug message.  Called from the
 * direction-specific ioctl1 handlers after they have consumed their
 * own commands.
 */
static int snd_pcm_common_ioctl1(struct file *file,
struct snd_pcm_substream *substream,
unsigned int cmd, void __user *arg)
{
switch (cmd) {
case SNDRV_PCM_IOCTL_PVERSION:
return put_user(SNDRV_PCM_VERSION, (int __user *)arg) ? -EFAULT : 0;
case SNDRV_PCM_IOCTL_INFO:
return snd_pcm_info_user(substream, arg);
case SNDRV_PCM_IOCTL_TSTAMP: /* just for compatibility */
return 0;
case SNDRV_PCM_IOCTL_TTSTAMP:
return snd_pcm_tstamp(substream, arg);
case SNDRV_PCM_IOCTL_HW_REFINE:
return snd_pcm_hw_refine_user(substream, arg);
case SNDRV_PCM_IOCTL_HW_PARAMS:
return snd_pcm_hw_params_user(substream, arg);
case SNDRV_PCM_IOCTL_HW_FREE:
return snd_pcm_hw_free(substream);
case SNDRV_PCM_IOCTL_SW_PARAMS:
return snd_pcm_sw_params_user(substream, arg);
case SNDRV_PCM_IOCTL_STATUS:
return snd_pcm_status_user(substream, arg);
case SNDRV_PCM_IOCTL_CHANNEL_INFO:
return snd_pcm_channel_info_user(substream, arg);
case SNDRV_PCM_IOCTL_PREPARE:
return snd_pcm_prepare(substream, file);
case SNDRV_PCM_IOCTL_RESET:
return snd_pcm_reset(substream);
case SNDRV_PCM_IOCTL_START:
return snd_pcm_action_lock_irq(&snd_pcm_action_start, substream, SNDRV_PCM_STATE_RUNNING);
case SNDRV_PCM_IOCTL_LINK:
/* arg carries the fd of the substream to link with, not a pointer. */
return snd_pcm_link(substream, (int)(unsigned long) arg);
case SNDRV_PCM_IOCTL_UNLINK:
return snd_pcm_unlink(substream);
case SNDRV_PCM_IOCTL_RESUME:
return snd_pcm_resume(substream);
case SNDRV_PCM_IOCTL_XRUN:
return snd_pcm_xrun(substream);
case SNDRV_PCM_IOCTL_HWSYNC:
return snd_pcm_hwsync(substream);
case SNDRV_PCM_IOCTL_DELAY:
return snd_pcm_delay(substream, arg);
case SNDRV_PCM_IOCTL_SYNC_PTR:
return snd_pcm_sync_ptr(substream, arg);
#ifdef CONFIG_SND_SUPPORT_OLD_API
case SNDRV_PCM_IOCTL_HW_REFINE_OLD:
return snd_pcm_hw_refine_old_user(substream, arg);
case SNDRV_PCM_IOCTL_HW_PARAMS_OLD:
return snd_pcm_hw_params_old_user(substream, arg);
#endif
case SNDRV_PCM_IOCTL_DRAIN:
return snd_pcm_drain(substream, file);
case SNDRV_PCM_IOCTL_DROP:
return snd_pcm_drop(substream);
case SNDRV_PCM_IOCTL_PAUSE:
{
int res;
/* arg is a push(1)/release(0) flag encoded in the pointer value. */
snd_pcm_stream_lock_irq(substream);
res = snd_pcm_pause(substream, (int)(unsigned long)arg);
snd_pcm_stream_unlock_irq(substream);
return res;
}
}
pcm_dbg(substream->pcm, "unknown ioctl = 0x%x\n", cmd);
return -ENOTTY;
}
/*
 * Handle playback-specific ioctls (interleaved and non-interleaved
 * write, rewind, forward); everything else falls through to
 * snd_pcm_common_ioctl1().  Each transfer command zeroes the user's
 * result field first, performs the transfer, then writes the frame
 * count (or leaves 0 and returns the negative error).
 */
static int snd_pcm_playback_ioctl1(struct file *file,
struct snd_pcm_substream *substream,
unsigned int cmd, void __user *arg)
{
if (snd_BUG_ON(!substream))
return -ENXIO;
if (snd_BUG_ON(substream->stream != SNDRV_PCM_STREAM_PLAYBACK))
return -EINVAL;
switch (cmd) {
case SNDRV_PCM_IOCTL_WRITEI_FRAMES:
{
struct snd_xferi xferi;
struct snd_xferi __user *_xferi = arg;
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_sframes_t result;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
if (put_user(0, &_xferi->result))
return -EFAULT;
if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
return -EFAULT;
result = snd_pcm_lib_write(substream, xferi.buf, xferi.frames);
__put_user(result, &_xferi->result);
return result < 0 ? result : 0;
}
case SNDRV_PCM_IOCTL_WRITEN_FRAMES:
{
struct snd_xfern xfern;
struct snd_xfern __user *_xfern = arg;
struct snd_pcm_runtime *runtime = substream->runtime;
void __user **bufs;
snd_pcm_sframes_t result;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
/* Sanity cap on the per-channel buffer-pointer array size. */
if (runtime->channels > 128)
return -EINVAL;
if (put_user(0, &_xfern->result))
return -EFAULT;
if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
return -EFAULT;
bufs = memdup_user(xfern.bufs,
sizeof(void *) * runtime->channels);
if (IS_ERR(bufs))
return PTR_ERR(bufs);
result = snd_pcm_lib_writev(substream, bufs, xfern.frames);
kfree(bufs);
__put_user(result, &_xfern->result);
return result < 0 ? result : 0;
}
case SNDRV_PCM_IOCTL_REWIND:
{
snd_pcm_uframes_t frames;
snd_pcm_uframes_t __user *_frames = arg;
snd_pcm_sframes_t result;
if (get_user(frames, _frames))
return -EFAULT;
if (put_user(0, _frames))
return -EFAULT;
result = snd_pcm_playback_rewind(substream, frames);
__put_user(result, _frames);
return result < 0 ? result : 0;
}
case SNDRV_PCM_IOCTL_FORWARD:
{
snd_pcm_uframes_t frames;
snd_pcm_uframes_t __user *_frames = arg;
snd_pcm_sframes_t result;
if (get_user(frames, _frames))
return -EFAULT;
if (put_user(0, _frames))
return -EFAULT;
result = snd_pcm_playback_forward(substream, frames);
__put_user(result, _frames);
return result < 0 ? result : 0;
}
}
return snd_pcm_common_ioctl1(file, substream, cmd, arg);
}
/*
 * Handle capture-specific ioctls (interleaved and non-interleaved
 * read, rewind, forward); everything else falls through to
 * snd_pcm_common_ioctl1().  Mirror image of
 * snd_pcm_playback_ioctl1().
 */
static int snd_pcm_capture_ioctl1(struct file *file,
struct snd_pcm_substream *substream,
unsigned int cmd, void __user *arg)
{
if (snd_BUG_ON(!substream))
return -ENXIO;
if (snd_BUG_ON(substream->stream != SNDRV_PCM_STREAM_CAPTURE))
return -EINVAL;
switch (cmd) {
case SNDRV_PCM_IOCTL_READI_FRAMES:
{
struct snd_xferi xferi;
struct snd_xferi __user *_xferi = arg;
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_sframes_t result;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
if (put_user(0, &_xferi->result))
return -EFAULT;
if (copy_from_user(&xferi, _xferi, sizeof(xferi)))
return -EFAULT;
result = snd_pcm_lib_read(substream, xferi.buf, xferi.frames);
__put_user(result, &_xferi->result);
return result < 0 ? result : 0;
}
case SNDRV_PCM_IOCTL_READN_FRAMES:
{
struct snd_xfern xfern;
struct snd_xfern __user *_xfern = arg;
struct snd_pcm_runtime *runtime = substream->runtime;
void *bufs;
snd_pcm_sframes_t result;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
/* Sanity cap on the per-channel buffer-pointer array size. */
if (runtime->channels > 128)
return -EINVAL;
if (put_user(0, &_xfern->result))
return -EFAULT;
if (copy_from_user(&xfern, _xfern, sizeof(xfern)))
return -EFAULT;
bufs = memdup_user(xfern.bufs,
sizeof(void *) * runtime->channels);
if (IS_ERR(bufs))
return PTR_ERR(bufs);
result = snd_pcm_lib_readv(substream, bufs, xfern.frames);
kfree(bufs);
__put_user(result, &_xfern->result);
return result < 0 ? result : 0;
}
case SNDRV_PCM_IOCTL_REWIND:
{
snd_pcm_uframes_t frames;
snd_pcm_uframes_t __user *_frames = arg;
snd_pcm_sframes_t result;
if (get_user(frames, _frames))
return -EFAULT;
if (put_user(0, _frames))
return -EFAULT;
result = snd_pcm_capture_rewind(substream, frames);
__put_user(result, _frames);
return result < 0 ? result : 0;
}
case SNDRV_PCM_IOCTL_FORWARD:
{
snd_pcm_uframes_t frames;
snd_pcm_uframes_t __user *_frames = arg;
snd_pcm_sframes_t result;
if (get_user(frames, _frames))
return -EFAULT;
if (put_user(0, _frames))
return -EFAULT;
result = snd_pcm_capture_forward(substream, frames);
__put_user(result, _frames);
return result < 0 ? result : 0;
}
}
return snd_pcm_common_ioctl1(file, substream, cmd, arg);
}
/*
 * Character-device unlocked_ioctl entry for playback: reject commands
 * outside the ALSA 'A' ioctl group, then forward to the playback
 * ioctl dispatcher with arg cast to a user pointer.
 */
static long snd_pcm_playback_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	struct snd_pcm_file *pcm_file = file->private_data;

	if (((cmd >> 8) & 0xff) != 'A')
		return -ENOTTY;
	return snd_pcm_playback_ioctl1(file, pcm_file->substream, cmd,
				       (void __user *)arg);
}
/*
 * Character-device unlocked_ioctl entry for capture: reject commands
 * outside the ALSA 'A' ioctl group, then forward to the capture
 * ioctl dispatcher with arg cast to a user pointer.
 */
static long snd_pcm_capture_ioctl(struct file *file, unsigned int cmd,
				  unsigned long arg)
{
	struct snd_pcm_file *pcm_file = file->private_data;

	if (((cmd >> 8) & 0xff) != 'A')
		return -ENOTTY;
	return snd_pcm_capture_ioctl1(file, pcm_file->substream, cmd,
				      (void __user *)arg);
}
/*
 * In-kernel ioctl entry point (e.g. for the OSS emulation layer): the
 * ioctl handlers expect user-space pointers, so temporarily lift the
 * address-space limit with snd_enter_user()/snd_leave_user() around the
 * dispatch so kernel pointers pass the user-copy checks.
 */
int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
{
mm_segment_t fs;
int result;
fs = snd_enter_user();
switch (substream->stream) {
case SNDRV_PCM_STREAM_PLAYBACK:
result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
(void __user *)arg);
break;
case SNDRV_PCM_STREAM_CAPTURE:
result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
(void __user *)arg);
break;
default:
result = -EINVAL;
break;
}
snd_leave_user(fs);
return result;
}
EXPORT_SYMBOL(snd_pcm_kernel_ioctl);
/*
 * read(2) on a capture stream: the byte count must be frame-aligned;
 * it is converted to frames for snd_pcm_lib_read() and the positive
 * result converted back to bytes.  Negative results (errors) are
 * returned unchanged.
 */
static ssize_t snd_pcm_read(struct file *file, char __user *buf, size_t count,
loff_t * offset)
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
snd_pcm_sframes_t result;
pcm_file = file->private_data;
substream = pcm_file->substream;
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
if (!frame_aligned(runtime, count))
return -EINVAL;
count = bytes_to_frames(runtime, count);
result = snd_pcm_lib_read(substream, buf, count);
if (result > 0)
result = frames_to_bytes(runtime, result);
return result;
}
/*
 * write(2) on a playback stream: mirror of snd_pcm_read() — byte count
 * must be frame-aligned, converted to frames for snd_pcm_lib_write(),
 * positive result converted back to bytes.
 */
static ssize_t snd_pcm_write(struct file *file, const char __user *buf,
size_t count, loff_t * offset)
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
snd_pcm_sframes_t result;
pcm_file = file->private_data;
substream = pcm_file->substream;
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
if (!frame_aligned(runtime, count))
return -EINVAL;
count = bytes_to_frames(runtime, count);
result = snd_pcm_lib_write(substream, buf, count);
if (result > 0)
result = frames_to_bytes(runtime, result);
return result;
}
/*
 * Vectored (non-interleaved) read: one iovec per channel, all the same
 * frame-aligned length.  Builds a kernel array of per-channel user
 * buffers and hands it to snd_pcm_lib_readv().
 * NOTE(review): the seg cap here is 1024 while snd_pcm_aio_write()
 * uses 128; both are additionally limited to runtime->channels, but
 * the asymmetry looks accidental — confirm against upstream.
 */
static ssize_t snd_pcm_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
snd_pcm_sframes_t result;
unsigned long i;
void __user **bufs;
snd_pcm_uframes_t frames;
pcm_file = iocb->ki_filp->private_data;
substream = pcm_file->substream;
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
/* Must supply exactly one iovec per channel. */
if (nr_segs > 1024 || nr_segs != runtime->channels)
return -EINVAL;
if (!frame_aligned(runtime, iov->iov_len))
return -EINVAL;
frames = bytes_to_samples(runtime, iov->iov_len);
bufs = kmalloc(sizeof(void *) * nr_segs, GFP_KERNEL);
if (bufs == NULL)
return -ENOMEM;
for (i = 0; i < nr_segs; ++i)
bufs[i] = iov[i].iov_base;
result = snd_pcm_lib_readv(substream, bufs, frames);
if (result > 0)
result = frames_to_bytes(runtime, result);
kfree(bufs);
return result;
}
/*
 * Vectored (non-interleaved) write: one iovec per channel, all the
 * same frame-aligned length.  Mirror of snd_pcm_aio_read(), delegating
 * to snd_pcm_lib_writev().
 */
static ssize_t snd_pcm_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
snd_pcm_sframes_t result;
unsigned long i;
void __user **bufs;
snd_pcm_uframes_t frames;
pcm_file = iocb->ki_filp->private_data;
substream = pcm_file->substream;
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
return -EBADFD;
/* Must supply exactly one frame-aligned iovec per channel. */
if (nr_segs > 128 || nr_segs != runtime->channels ||
!frame_aligned(runtime, iov->iov_len))
return -EINVAL;
frames = bytes_to_samples(runtime, iov->iov_len);
bufs = kmalloc(sizeof(void *) * nr_segs, GFP_KERNEL);
if (bufs == NULL)
return -ENOMEM;
for (i = 0; i < nr_segs; ++i)
bufs[i] = iov[i].iov_base;
result = snd_pcm_lib_writev(substream, bufs, frames);
if (result > 0)
result = frames_to_bytes(runtime, result);
kfree(bufs);
return result;
}
/*
 * poll(2) on a playback stream: report writable (POLLOUT|POLLWRNORM)
 * when at least avail_min frames of buffer space are free in an active
 * state, nothing while running/prepared/paused below the threshold or
 * draining, and POLLERR in addition for all other (error) states.
 */
static unsigned int snd_pcm_playback_poll(struct file *file, poll_table * wait)
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
unsigned int mask;
snd_pcm_uframes_t avail;
pcm_file = file->private_data;
substream = pcm_file->substream;
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
poll_wait(file, &runtime->sleep, wait);
snd_pcm_stream_lock_irq(substream);
avail = snd_pcm_playback_avail(runtime);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_RUNNING:
case SNDRV_PCM_STATE_PREPARED:
case SNDRV_PCM_STATE_PAUSED:
if (avail >= runtime->control->avail_min) {
mask = POLLOUT | POLLWRNORM;
break;
}
/* Fall through */
case SNDRV_PCM_STATE_DRAINING:
mask = 0;
break;
default:
mask = POLLOUT | POLLWRNORM | POLLERR;
break;
}
snd_pcm_stream_unlock_irq(substream);
return mask;
}
/*
 * poll(2) on a capture stream: report readable (POLLIN|POLLRDNORM)
 * when at least avail_min frames of data are available in an active
 * state; while draining, any remaining data is readable; all other
 * (error) states additionally raise POLLERR.
 */
static unsigned int snd_pcm_capture_poll(struct file *file, poll_table * wait)
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
unsigned int mask;
snd_pcm_uframes_t avail;
pcm_file = file->private_data;
substream = pcm_file->substream;
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
runtime = substream->runtime;
poll_wait(file, &runtime->sleep, wait);
snd_pcm_stream_lock_irq(substream);
avail = snd_pcm_capture_avail(runtime);
switch (runtime->status->state) {
case SNDRV_PCM_STATE_RUNNING:
case SNDRV_PCM_STATE_PREPARED:
case SNDRV_PCM_STATE_PAUSED:
if (avail >= runtime->control->avail_min) {
mask = POLLIN | POLLRDNORM;
break;
}
mask = 0;
break;
case SNDRV_PCM_STATE_DRAINING:
/* Draining: whatever data is left may still be read out. */
if (avail > 0) {
mask = POLLIN | POLLRDNORM;
break;
}
/* Fall through */
default:
mask = POLLIN | POLLRDNORM | POLLERR;
break;
}
snd_pcm_stream_unlock_irq(substream);
return mask;
}
/*
* mmap support
*/
/*
* Only on coherent architectures, we can mmap the status and the control records
 * for efficient data transfer. On others, we have to use HWSYNC ioctl...
*/
#if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA)
/*
* mmap status record
*/
/*
 * Page-fault handler for a mmap'ed PCM status record: hand the VM the
 * page backing runtime->status, with an extra reference taken.
 */
static int snd_pcm_mmap_status_fault(struct vm_area_struct *area,
				     struct vm_fault *vmf)
{
	struct snd_pcm_substream *substream = area->vm_private_data;
	struct page *pg;

	if (!substream)
		return VM_FAULT_SIGBUS;

	pg = virt_to_page(substream->runtime->status);
	get_page(pg);
	vmf->page = pg;
	return 0;
}
/* vm_ops for the mmap'ed status record: pages supplied on fault */
static const struct vm_operations_struct snd_pcm_vm_ops_status =
{
	.fault =	snd_pcm_mmap_status_fault,
};
/*
 * Map the snd_pcm_mmap_status record into user space.  The VMA must be
 * readable and exactly one page-aligned record in length.
 */
static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
			       struct vm_area_struct *area)
{
	long len;

	if (!(area->vm_flags & VM_READ))
		return -EINVAL;

	len = area->vm_end - area->vm_start;
	if (len != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
		return -EINVAL;

	area->vm_private_data = substream;
	area->vm_ops = &snd_pcm_vm_ops_status;
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
/*
* mmap control record
*/
/*
 * Page-fault handler for the mmap'ed control record: return the page
 * backing runtime->control with an extra reference for the VM.
 */
static int snd_pcm_mmap_control_fault(struct vm_area_struct *area,
				      struct vm_fault *vmf)
{
	struct snd_pcm_substream *substream = area->vm_private_data;
	struct snd_pcm_runtime *runtime;
	
	if (substream == NULL)
		return VM_FAULT_SIGBUS;
	runtime = substream->runtime;
	vmf->page = virt_to_page(runtime->control);
	get_page(vmf->page);
	return 0;
}
/* vm_ops for the mmap'ed control record: pages supplied on fault */
static const struct vm_operations_struct snd_pcm_vm_ops_control =
{
	.fault =	snd_pcm_mmap_control_fault,
};
/*
 * Map the snd_pcm_mmap_control record into user space.  The VMA must be
 * readable and exactly one page-aligned record in length.
 */
static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
				struct vm_area_struct *area)
{
	long len;

	if (!(area->vm_flags & VM_READ))
		return -EINVAL;

	len = area->vm_end - area->vm_start;
	if (len != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
		return -EINVAL;

	area->vm_private_data = substream;
	area->vm_ops = &snd_pcm_vm_ops_control;
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
#else /* ! coherent mmap */
/*
* don't support mmap for status and control records.
*/
static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
			       struct vm_area_struct *area)
{
	/* no coherent mmap of the status record on this architecture */
	return -ENXIO;
}
static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
				struct vm_area_struct *area)
{
	/* no coherent mmap of the control record on this architecture */
	return -ENXIO;
}
#endif /* coherent mmap */
/* Translate a byte offset within the runtime DMA area to its page. */
static inline struct page *
snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
{
	return virt_to_page(substream->runtime->dma_area + ofs);
}
/*
* fault callback for mmapping a RAM page
*/
/*
 * Page-fault handler for a data-buffer mmap: translate the faulting
 * offset to the page backing the DMA buffer, via the driver's ->page
 * op when one is provided, otherwise via the default linear mapping.
 */
static int snd_pcm_mmap_data_fault(struct vm_area_struct *area,
				   struct vm_fault *vmf)
{
	struct snd_pcm_substream *substream = area->vm_private_data;
	struct snd_pcm_runtime *runtime;
	unsigned long offset;
	struct page * page;
	size_t dma_bytes;
	
	if (substream == NULL)
		return VM_FAULT_SIGBUS;
	runtime = substream->runtime;
	offset = vmf->pgoff << PAGE_SHIFT;
	/* reject faults beyond the (page-aligned) end of the buffer */
	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
	if (offset > dma_bytes - PAGE_SIZE)
		return VM_FAULT_SIGBUS;
	if (substream->ops->page)
		page = substream->ops->page(substream, offset);
	else
		page = snd_pcm_default_page_ops(substream, offset);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}
/* vm_ops for data mappings where the driver maps everything up front;
 * open/close only maintain the substream's mmap refcount */
static const struct vm_operations_struct snd_pcm_vm_ops_data = {
	.open =		snd_pcm_mmap_data_open,
	.close =	snd_pcm_mmap_data_close,
};
/* vm_ops for data mappings populated lazily via the fault handler */
static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
	.open =		snd_pcm_mmap_data_open,
	.close =	snd_pcm_mmap_data_close,
	.fault =	snd_pcm_mmap_data_fault,
};
/*
* mmap the DMA buffer on RAM
*/
/**
 * snd_pcm_lib_default_mmap - Default PCM data mmap function
 * @substream: PCM substream
 * @area: VMA
 *
 * This is the default mmap handler for PCM data. When mmap pcm_ops is NULL,
 * this function is invoked implicitly.
 *
 * Return: zero on success, or a negative error code from the mapping
 * helper that was used.
 */
int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
			     struct vm_area_struct *area)
{
	area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
#ifdef CONFIG_GENERIC_ALLOCATOR
	/* on-chip IRAM buffers are remapped directly, write-combined */
	if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_IRAM) {
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
		return remap_pfn_range(area, area->vm_start,
				substream->dma_buffer.addr >> PAGE_SHIFT,
				area->vm_end - area->vm_start, area->vm_page_prot);
	}
#endif /* CONFIG_GENERIC_ALLOCATOR */
#ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */
	/* coherent device buffers can be mapped in one go by the DMA layer */
	if (!substream->ops->page &&
	    substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
		return dma_mmap_coherent(substream->dma_buffer.dev.dev,
					 area,
					 substream->runtime->dma_area,
					 substream->runtime->dma_addr,
					 area->vm_end - area->vm_start);
#endif /* CONFIG_X86 */
	/* mmap with fault handler */
	area->vm_ops = &snd_pcm_vm_ops_data_fault;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
/*
* mmap the DMA buffer on I/O memory area
*/
#if SNDRV_PCM_INFO_MMAP_IOMEM
/**
* snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
* @substream: PCM substream
* @area: VMA
*
* When your hardware uses the iomapped pages as the hardware buffer and
* wants to mmap it, pass this function as mmap pcm_ops. Note that this
* is supposed to work only on limited architectures.
*/
int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
			   struct vm_area_struct *area)
{
	/* fixed: stray double semicolon on the declaration below */
	struct snd_pcm_runtime *runtime = substream->runtime;

	/* device I/O memory must not be cached by the CPU */
	area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
	return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes);
}
EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem);
#endif /* SNDRV_PCM_INFO_MMAP */
/*
* mmap DMA buffer
*/
/*
 * mmap the PCM data buffer into user space.
 *
 * Validates the VMA protection against the stream direction, checks
 * that the substream supports mmap and uses an mmap (non-RW) access
 * mode, and that the requested window fits inside the DMA buffer, then
 * delegates to the driver's ->mmap op (or the default handler) and
 * bumps the substream's mmap refcount on success.
 */
int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file,
		      struct vm_area_struct *area)
{
	struct snd_pcm_runtime *runtime;
	long size;
	unsigned long offset;
	size_t dma_bytes;
	int err;

	/* playback mappings may be read/write, capture ones read-only */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		if (!(area->vm_flags & (VM_WRITE|VM_READ)))
			return -EINVAL;
	} else {
		if (!(area->vm_flags & VM_READ))
			return -EINVAL;
	}
	runtime = substream->runtime;
	if (runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;
	if (!(runtime->info & SNDRV_PCM_INFO_MMAP))
		return -ENXIO;
	if (runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
	    runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
		return -EINVAL;
	/* the mapped window must lie inside the page-aligned DMA buffer */
	size = area->vm_end - area->vm_start;
	offset = area->vm_pgoff << PAGE_SHIFT;
	dma_bytes = PAGE_ALIGN(runtime->dma_bytes);
	if ((size_t)size > dma_bytes)
		return -EINVAL;
	if (offset > dma_bytes - size)
		return -EINVAL;

	area->vm_ops = &snd_pcm_vm_ops_data;
	area->vm_private_data = substream;
	if (substream->ops->mmap)
		err = substream->ops->mmap(substream, area);
	else
		err = snd_pcm_lib_default_mmap(substream, area);
	if (!err)
		atomic_inc(&substream->mmap_count);
	return err;
}
EXPORT_SYMBOL(snd_pcm_mmap_data);
/*
 * mmap dispatcher for the PCM device node: the page offset selects the
 * status record, the control record, or (default) the data buffer.
 * (The unreachable trailing "return 0;" after the exhaustive switch
 * has been removed.)
 */
static int snd_pcm_mmap(struct file *file, struct vm_area_struct *area)
{
	struct snd_pcm_file * pcm_file;
	struct snd_pcm_substream *substream;
	unsigned long offset;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;

	offset = area->vm_pgoff << PAGE_SHIFT;
	switch (offset) {
	case SNDRV_PCM_MMAP_OFFSET_STATUS:
		/* 32-bit compat clients must use the HWSYNC ioctl instead */
		if (pcm_file->no_compat_mmap)
			return -ENXIO;
		return snd_pcm_mmap_status(substream, file, area);
	case SNDRV_PCM_MMAP_OFFSET_CONTROL:
		if (pcm_file->no_compat_mmap)
			return -ENXIO;
		return snd_pcm_mmap_control(substream, file, area);
	default:
		return snd_pcm_mmap_data(substream, file, area);
	}
}
/* fasync() handler: (un)register the caller for SIGIO on the runtime. */
static int snd_pcm_fasync(int fd, struct file * file, int on)
{
	struct snd_pcm_file * pcm_file;
	struct snd_pcm_substream *substream;
	struct snd_pcm_runtime *runtime;

	pcm_file = file->private_data;
	substream = pcm_file->substream;
	if (PCM_RUNTIME_CHECK(substream))
		return -ENXIO;
	runtime = substream->runtime;
	return fasync_helper(fd, file, on, &runtime->fasync);
}
/*
* ioctl32 compat
*/
#ifdef CONFIG_COMPAT
#include "pcm_compat.c"
#else
#define snd_pcm_ioctl_compat NULL
#endif
/*
* To be removed helpers to keep binary compatibility
*/
#ifdef CONFIG_SND_SUPPORT_OLD_API
#define __OLD_TO_NEW_MASK(x) ((x&7)|((x&0x07fffff8)<<5))
#define __NEW_TO_OLD_MASK(x) ((x&7)|((x&0xffffff00)>>5))
/*
 * Expand an old-layout hw_params structure (single-word masks) into the
 * current layout.  Only bits[0] of each current mask is populated; the
 * rmask/cmask bit positions are remapped via __OLD_TO_NEW_MASK because
 * the mask index space grew between the two ABIs.
 */
static void snd_pcm_hw_convert_from_old_params(struct snd_pcm_hw_params *params,
					       struct snd_pcm_hw_params_old *oparams)
{
	unsigned int i;

	memset(params, 0, sizeof(*params));
	params->flags = oparams->flags;
	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
		params->masks[i].bits[0] = oparams->masks[i];
	memcpy(params->intervals, oparams->intervals, sizeof(oparams->intervals));
	params->rmask = __OLD_TO_NEW_MASK(oparams->rmask);
	params->cmask = __OLD_TO_NEW_MASK(oparams->cmask);
	params->info = oparams->info;
	params->msbits = oparams->msbits;
	params->rate_num = oparams->rate_num;
	params->rate_den = oparams->rate_den;
	params->fifo_size = oparams->fifo_size;
}
/*
 * Shrink a current-layout hw_params structure back into the old layout:
 * only bits[0] of each mask survives, and rmask/cmask are mapped back
 * with __NEW_TO_OLD_MASK (the inverse of the expansion above).
 */
static void snd_pcm_hw_convert_to_old_params(struct snd_pcm_hw_params_old *oparams,
					     struct snd_pcm_hw_params *params)
{
	unsigned int i;

	memset(oparams, 0, sizeof(*oparams));
	oparams->flags = params->flags;
	for (i = 0; i < ARRAY_SIZE(oparams->masks); i++)
		oparams->masks[i] = params->masks[i].bits[0];
	memcpy(oparams->intervals, params->intervals, sizeof(oparams->intervals));
	oparams->rmask = __NEW_TO_OLD_MASK(params->rmask);
	oparams->cmask = __NEW_TO_OLD_MASK(params->cmask);
	oparams->info = params->info;
	oparams->msbits = params->msbits;
	oparams->rate_num = params->rate_num;
	oparams->rate_den = params->rate_den;
	oparams->fifo_size = params->fifo_size;
}
/*
 * HW_REFINE ioctl handler for the obsolete hw_params layout: convert
 * the user's old-format parameters to the current structure, run the
 * refinement, convert back and copy the result out.
 *
 * NOTE(review): the converted parameters are copied back to user space
 * even when snd_pcm_hw_refine() failed -- presumably deliberate so the
 * caller can inspect the partially-refined values; confirm.
 */
static int snd_pcm_hw_refine_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams)
{
	struct snd_pcm_hw_params *params;
	struct snd_pcm_hw_params_old *oparams = NULL;
	int err;

	params = kmalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	oparams = memdup_user(_oparams, sizeof(*oparams));
	if (IS_ERR(oparams)) {
		err = PTR_ERR(oparams);
		goto out;
	}
	snd_pcm_hw_convert_from_old_params(params, oparams);
	err = snd_pcm_hw_refine(substream, params);
	snd_pcm_hw_convert_to_old_params(oparams, params);
	if (copy_to_user(_oparams, oparams, sizeof(*oparams))) {
		/* don't let -EFAULT mask an earlier, more specific error */
		if (!err)
			err = -EFAULT;
	}

	kfree(oparams);
out:
	kfree(params);
	return err;
}
/*
 * HW_PARAMS ioctl handler for the obsolete hw_params layout: same
 * convert / apply / convert-back dance as the refine variant above,
 * but actually committing the configuration via snd_pcm_hw_params().
 */
static int snd_pcm_hw_params_old_user(struct snd_pcm_substream *substream,
				      struct snd_pcm_hw_params_old __user * _oparams)
{
	struct snd_pcm_hw_params *params;
	struct snd_pcm_hw_params_old *oparams = NULL;
	int err;

	params = kmalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	oparams = memdup_user(_oparams, sizeof(*oparams));
	if (IS_ERR(oparams)) {
		err = PTR_ERR(oparams);
		goto out;
	}
	snd_pcm_hw_convert_from_old_params(params, oparams);
	err = snd_pcm_hw_params(substream, params);
	snd_pcm_hw_convert_to_old_params(oparams, params);
	if (copy_to_user(_oparams, oparams, sizeof(*oparams))) {
		/* don't let -EFAULT mask an earlier, more specific error */
		if (!err)
			err = -EFAULT;
	}

	kfree(oparams);
out:
	kfree(params);
	return err;
}
#endif /* CONFIG_SND_SUPPORT_OLD_API */
#ifndef CONFIG_MMU
/*
 * On no-MMU systems mmap cannot create translated mappings, so report
 * the kernel address at which each mmap offset's data already lives.
 */
static unsigned long snd_pcm_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct snd_pcm_file *pcm_file = file->private_data;
	struct snd_pcm_substream *substream = pcm_file->substream;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long offset = pgoff << PAGE_SHIFT;

	switch (offset) {
	case SNDRV_PCM_MMAP_OFFSET_STATUS:
		return (unsigned long)runtime->status;
	case SNDRV_PCM_MMAP_OFFSET_CONTROL:
		return (unsigned long)runtime->control;
	default:
		return (unsigned long)runtime->dma_area + offset;
	}
}
#else
# define snd_pcm_get_unmapped_area NULL
#endif
/*
* Register section
*/
/*
 * File operations for the PCM device nodes: index 0 serves playback
 * devices, index 1 capture devices.  The two entries differ only in
 * transfer direction (write/aio_write vs read/aio_read), the open
 * callback and the ioctl handler.
 */
const struct file_operations snd_pcm_f_ops[2] = {
	{
		.owner =		THIS_MODULE,
		.write =		snd_pcm_write,
		.aio_write =		snd_pcm_aio_write,
		.open =			snd_pcm_playback_open,
		.release =		snd_pcm_release,
		.llseek =		no_llseek,
		.poll =			snd_pcm_playback_poll,
		.unlocked_ioctl =	snd_pcm_playback_ioctl,
		.compat_ioctl = 	snd_pcm_ioctl_compat,
		.mmap =			snd_pcm_mmap,
		.fasync =		snd_pcm_fasync,
		.get_unmapped_area =	snd_pcm_get_unmapped_area,
	},
	{
		.owner =		THIS_MODULE,
		.read =			snd_pcm_read,
		.aio_read =		snd_pcm_aio_read,
		.open =			snd_pcm_capture_open,
		.release =		snd_pcm_release,
		.llseek =		no_llseek,
		.poll =			snd_pcm_capture_poll,
		.unlocked_ioctl =	snd_pcm_capture_ioctl,
		.compat_ioctl = 	snd_pcm_ioctl_compat,
		.mmap =			snd_pcm_mmap,
		.fasync =		snd_pcm_fasync,
		.get_unmapped_area =	snd_pcm_get_unmapped_area,
	}
};
| gpl-2.0 |
bitthunder-toolchain/gcc | gcc/testsuite/gcc.dg/attr-alias-3.c | 61 | 1676 | // { dg-do link }
// { dg-skip-if "" { "powerpc-ibm-aix*" } { "*" } { "" } }
// { dg-require-alias "" }
// { dg-options "-O2 -fno-common" }
// Copyright 2005 Free Software Foundation, Inc.
// Contributed by Alexandre Oliva <aoliva@redhat.com>
// PR middle-end/24295
// The unit-at-a-time call graph code used to fail to emit variables
// without external linkage that were only used indirectly, through
// aliases. Although the PR above is about #pragma weak-introduced
// aliases, the underlying machinery is the same.
#ifndef ATTRIBUTE_USED
# define ATTRIBUTE_USED __attribute__((used))
#endif

/* Four variable cases: the alias is (1) referenced through an external
   pointer, (2) bypassed in favor of the static, (3) referenced only by
   a "used" static pointer, (4) referenced only by a plain static
   pointer that main() keeps alive via inline asm. */
static int lv1;
extern int Av1a __attribute__((alias ("lv1")));
int *pv1a = &Av1a;

static int lv2;
extern int Av2a __attribute__((alias ("lv2")));
int *pv2a = &lv2;

static int lv3;
extern int Av3a __attribute__((alias ("lv3")));
static int *pv3a ATTRIBUTE_USED = &Av3a;

static int lv4;
extern int Av4a __attribute__((alias ("lv4")));
static int *pv4a = &Av4a;

typedef void ftype(void);

/* The same four cases repeated for function aliases. */
static void lf1(void) {}
extern ftype Af1a __attribute__((alias ("lf1")));
ftype *pf1a = &Af1a;

static void lf2(void) {}
extern ftype Af2a __attribute__((alias ("lf2")));
ftype *pf2a = &Af2a;

static void lf3(void) {}
extern ftype Af3a __attribute__((alias ("lf3")));
static ftype *pf3a ATTRIBUTE_USED = &Af3a;

static void lf4(void) {}
extern ftype Af4a __attribute__((alias ("lf4")));
static ftype *pf4a = &Af4a;
/* Reference pv4a/pf4a from asm so the alias-only static pointers (and
   hence the aliased statics) must really be emitted; implicit-int
   "main()" was invalid since C99, so declare the return type. */
int main(void) {
#ifdef __mips
  /* Use real asm for MIPS, to stop the assembler warning about
     orphaned high-part relocations. */
  asm volatile ("lw $2,%0\n\tlw $2,%1" : : "m" (pv4a), "m" (pf4a) : "$2");
#else
  asm volatile ("" : : "m" (pv4a), "m" (pf4a));
#endif
  return 0;
}
| gpl-2.0 |
119/aircam-openwrt | build_dir/target-arm_v5te_uClibc-0.9.32_eabi/busybox-1.18.4/scripts/basic/split-include.c | 317 | 5649 | /*
* split-include.c
*
* Copyright abandoned, Michael Chastain, <mailto:mec@shout.net>.
* This is a C version of syncdep.pl by Werner Almesberger.
*
* This program takes autoconf.h as input and outputs a directory full
* of one-line include files, merging onto the old values.
*
* Think of the configuration options as key-value pairs. Then there
* are five cases:
*
* key old value new value action
*
* KEY-1 VALUE-1 VALUE-1 leave file alone
* KEY-2 VALUE-2A VALUE-2B write VALUE-2B into file
* KEY-3 - VALUE-3 write VALUE-3 into file
* KEY-4 VALUE-4 - write an empty file
* KEY-5 (empty) - leave old empty file alone
*/
#include <sys/stat.h>
#include <sys/types.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/*
 * Report "<progname>: <perror for strExit>" on stderr and terminate.
 * Wrapped in do { } while (0) so the expansion is a single statement
 * and composes safely with if/else (the bare brace block previously
 * left a dangling-else hazard).  Relies on str_my_name being in scope
 * at every use site, as before.
 */
#define ERROR_EXIT(strExit) \
    do { \
	const int errnoSave = errno; \
	fprintf(stderr, "%s: ", str_my_name); \
	errno = errnoSave; \
	perror((strExit)); \
	exit(1); \
    } while (0)
/*
 * Entry: split-include <autoconf.h> <output-dir>
 *
 * Mirrors every "#define CONFIG_..." / "#undef CONFIG_..." line from
 * the autoconf header into one-line files under <output-dir>
 * (CONFIG_FOO_BAR -> foo/bar.h), rewriting only files whose value
 * changed and emptying files whose option disappeared, so make's
 * timestamp-based dependency checking stays minimal.
 */
int main(int argc, const char * argv [])
{
    const char * str_my_name;
    const char * str_file_autoconf;
    const char * str_dir_config;

    FILE * fp_config;
    FILE * fp_target;
    FILE * fp_find;

    int buffer_size;

    char * line;
    char * old_line;
    char * list_target;
    char * ptarget;

    struct stat stat_buf;

    /* Check arg count. */
    if (argc != 3)
    {
        fprintf(stderr, "%s: wrong number of arguments.\n", argv[0]);
        exit(1);
    }

    str_my_name = argv[0];
    str_file_autoconf = argv[1];
    str_dir_config = argv[2];

    /* Find a buffer size: big enough to hold the whole input plus the
     * target-name list built from it. */
    if (stat(str_file_autoconf, &stat_buf) != 0)
        ERROR_EXIT(str_file_autoconf);
    buffer_size = 2 * stat_buf.st_size + 4096;

    /* Allocate buffers. */
    if ( (line = malloc(buffer_size)) == NULL
        || (old_line = malloc(buffer_size)) == NULL
        || (list_target = malloc(buffer_size)) == NULL )
        ERROR_EXIT(str_file_autoconf);

    /* Open autoconfig file. */
    if ((fp_config = fopen(str_file_autoconf, "r")) == NULL)
        ERROR_EXIT(str_file_autoconf);

    /* Make output directory if needed. */
    if (stat(str_dir_config, &stat_buf) != 0)
    {
        if (mkdir(str_dir_config, 0755) != 0)
            ERROR_EXIT(str_dir_config);
    }

    /* Change to output directory. */
    if (chdir(str_dir_config) != 0)
        ERROR_EXIT(str_dir_config);

    /* Put initial separator into target list. */
    ptarget = list_target;
    *ptarget++ = '\n';

    /* Read config lines. */
    while (fgets(line, buffer_size, fp_config))
    {
        const char * str_config;
        int is_same;
        int itarget;

        if (line[0] != '#')
            continue;
        if ((str_config = strstr(line, " CONFIG_")) == NULL)
            continue;

        /* We found #define CONFIG_foo or #undef CONFIG_foo.
         * Make the output file name: CONFIG_FOO_BAR -> foo/bar.h */
        str_config += sizeof(" CONFIG_") - 1;
        for (itarget = 0; !isspace(str_config[itarget]); itarget++)
        {
            int c = (unsigned char) str_config[itarget];
            if (isupper(c)) c = tolower(c);
            if (c == '_') c = '/';
            ptarget[itarget] = c;
        }
        ptarget[itarget++] = '.';
        ptarget[itarget++] = 'h';
        ptarget[itarget++] = '\0';

        /* Check for existing file. */
        is_same = 0;
        if ((fp_target = fopen(ptarget, "r")) != NULL)
        {
            /* Treat an empty (or unreadable) file as "".  The fgets
             * return value was previously ignored, so strcmp could
             * read stale buffer contents for an empty target file. */
            if (fgets(old_line, buffer_size, fp_target) == NULL)
                old_line[0] = '\0';
            if (fclose(fp_target) != 0)
                ERROR_EXIT(ptarget);
            if (!strcmp(line, old_line))
                is_same = 1;
        }

        if (!is_same)
        {
            /* Auto-create directories. */
            int islash;
            for (islash = 0; islash < itarget; islash++)
            {
                if (ptarget[islash] == '/')
                {
                    ptarget[islash] = '\0';
                    if (stat(ptarget, &stat_buf) != 0
                        && mkdir(ptarget, 0755) != 0)
                        ERROR_EXIT( ptarget );
                    ptarget[islash] = '/';
                }
            }

            /* Write the file. */
            if ((fp_target = fopen(ptarget, "w")) == NULL)
                ERROR_EXIT(ptarget);
            fputs(line, fp_target);
            if (ferror(fp_target) || fclose(fp_target) != 0)
                ERROR_EXIT(ptarget);
        }

        /* Update target list: turn the trailing NUL into the '\n'
         * separator that the strstr() filter below relies on. */
        ptarget += itarget;
        *(ptarget-1) = '\n';
    }

    /*
     * Close autoconfig file.
     * Terminate the target list.
     */
    if (fclose(fp_config) != 0)
        ERROR_EXIT(str_file_autoconf);
    *ptarget = '\0';

    /*
     * Fix up existing files which have no new value.
     * This is Case 4 and Case 5.
     *
     * I re-read the tree and filter it against list_target.
     * This is crude. But it avoids data copies. Also, list_target
     * is compact and contiguous, so it easily fits into cache.
     *
     * Notice that list_target contains strings separated by \n,
     * with a \n before the first string and after the last.
     * fgets gives the incoming names a terminating \n.
     * So by having an initial \n, strstr will find exact matches.
     */
    fp_find = popen("find * -type f -name \"*.h\" -print", "r");
    if (fp_find == 0)
        ERROR_EXIT( "find" );

    line[0] = '\n';
    /* Read into line+1; pass buffer_size - 1 so the write cannot run
     * one byte past the buffer for a pathologically long file name. */
    while (fgets(line+1, buffer_size - 1, fp_find))
    {
        if (strstr(list_target, line) == NULL)
        {
            /*
             * This is an old file with no CONFIG_* flag in autoconf.h.
             */

            /* First strip the \n. */
            line[strlen(line)-1] = '\0';

            /* Grab size. */
            if (stat(line+1, &stat_buf) != 0)
                ERROR_EXIT(line);

            /* If file is not empty, make it empty and give it a fresh date. */
            if (stat_buf.st_size != 0)
            {
                if ((fp_target = fopen(line+1, "w")) == NULL)
                    ERROR_EXIT(line);
                if (fclose(fp_target) != 0)
                    ERROR_EXIT(line);
            }
        }
    }

    if (pclose(fp_find) != 0)
        ERROR_EXIT("find");

    return 0;
}
| gpl-2.0 |
uwehermann/easybox-904-xdsl-firmware | linux/linux-2.6.32.32/security/keys/key.c | 573 | 24950 | /* Basic authentication token and access key management
*
* Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include <linux/user_namespace.h>
#include "internal.h"
static struct kmem_cache *key_jar;	/* slab cache for struct key */
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

/* per-user key quotas: separate limits for root and ordinary users */
unsigned int key_quota_root_maxkeys = 200;	/* root's key count quota */
unsigned int key_quota_root_maxbytes = 20000;	/* root's key space quota */
unsigned int key_quota_maxkeys = 200;		/* general key count quota */
unsigned int key_quota_maxbytes = 20000;	/* general key space quota */

/* registered key types, protected by key_types_sem */
static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

/* deferred destruction of dead keys, run from the shared workqueue */
static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);

/* we serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);
/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
	.name = "dead",
};
#ifdef KEY_DEBUGGING
/* report a key whose debug magic has been corrupted, then BUG out */
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif
/*****************************************************************************/
/*
* get the key quota record for a user, allocating a new record if one doesn't
* already exist
*/
/*
 * Find the quota record for a UID in a user namespace, creating one if
 * it doesn't exist yet; returns NULL on allocation failure.  The record
 * is allocated with the tree lock dropped, so the tree must be searched
 * again afterwards in case another task raced in the same record.
 */
struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else if (user_ns < user->user_ns)
			p = &(*p)->rb_left;
		else if (user_ns > user->user_ns)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->user_ns = get_user_ns(user_ns);
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	/* free the candidate left over from a lost race, if any */
	kfree(candidate);
out:
	return user;

} /* end key_user_lookup() */
/*****************************************************************************/
/*
* dispose of a user structure
*/
/*
 * Drop a reference on a key_user quota record; the final put unlinks it
 * from the tree, releases its namespace reference and frees it.
 */
void key_user_put(struct key_user *user)
{
	if (!atomic_dec_and_lock(&user->usage, &key_user_lock))
		return;

	rb_erase(&user->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	put_user_ns(user->user_ns);
	kfree(user);
}
/*****************************************************************************/
/*
* assign a key the next unique serial number
* - these are assigned randomly to avoid security issues through covert
* channel problems
*/
/*
 * Assign the key the next unique serial number.  Serials are chosen
 * randomly (to frustrate covert channels) and kept >= 3; on collision
 * we walk forwards from the colliding node until a gap is found.
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		/* wrapped: restart from the minimum permitted serial */
		if (key->serial < 3) {
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto attempt_insertion;
	}

} /* end key_alloc_serial() */
/*****************************************************************************/
/*
* allocate a key of the specified type
* - update the user's quota to reflect the existence of the key
* - called from a key-type operation with key_types_sem read-locked by
* key_create_or_update()
* - this prevents unregistration of the key type
* - upon return the key is as yet uninstantiated; the caller needs to either
* instantiate the key or discard it before returning
*/
/*
 * Allocate an uninstantiated key of the given type and charge it to the
 * owner's quota (unless KEY_ALLOC_NOT_IN_QUOTA).  Returns the new key
 * or an ERR_PTR: -EINVAL for a missing description, -ENOMEM, -EDQUOT,
 * or the security module's error.
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      uid_t uid, gid_t gid, const struct cred *cred,
		      key_perm_t perm, unsigned long flags)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	desclen = strlen(desc) + 1;
	quotalen = desclen + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid, cred->user->user_ns);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		unsigned maxkeys = (uid == 0) ?
			key_quota_root_maxkeys : key_quota_maxkeys;
		unsigned maxbytes = (uid == 0) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			/* the third test catches qnbytes overflow */
			if (user->qnkeys + 1 >= maxkeys ||
			    user->qnbytes + quotalen >= maxbytes ||
			    user->qnbytes + quotalen < user->qnbytes)
				goto no_quota;
		}

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
	if (!key)
		goto no_memory_2;

	/* NOTE(review): desc was already checked non-NULL above, so this
	 * test is presumably redundant -- confirm before removing */
	if (desc) {
		key->description = kmemdup(desc, desclen, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;
	}

	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	key->type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;
	key->payload.data = NULL;
	key->security = NULL;

	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* let the security module know about the key */
	ret = security_key_alloc(key, cred, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

security_error:
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	/* undo the quota charge taken above */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);
/*****************************************************************************/
/*
* reserve an amount of quota for the key's payload
*/
/*
 * Adjust the key's quota reservation to match a new payload size,
 * failing with -EDQUOT if growth would exceed the owner's byte quota.
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int) datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		unsigned maxbytes = (key->user->uid == 0) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&key->user->lock);

		/* the overflow check mirrors the one in key_alloc() */
		if (delta > 0 &&
		    (key->user->qnbytes + delta >= maxbytes ||
		     key->user->qnbytes + delta < key->user->qnbytes)) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);
/*****************************************************************************/
/*
* instantiate a key and link it into the target keyring atomically
* - called with the target keyring's semaphore writelocked
*/
/*
 * Instantiate a key via its type's ->instantiate() op and link it into
 * the target keyring in one step.  Caller must hold the target
 * keyring's semaphore write-locked; the instantiation state itself is
 * serialised by key_construction_mutex.  Returns -EBUSY if the key was
 * already instantiated.
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *authkey)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				ret = __key_link(keyring, key);

			/* disable the authorisation key */
			if (authkey)
				key_revoke(authkey);
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end __key_instantiate_and_link() */
/*****************************************************************************/
/*
* instantiate a key and link it into the target keyring atomically
*/
/*
 * Instantiate a key and, optionally, link it into a destination keyring
 * while holding that keyring's semaphore write-locked.
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *authkey)
{
	int rc;

	if (keyring)
		down_write(&keyring->sem);

	rc = __key_instantiate_and_link(key, data, datalen, keyring, authkey);

	if (keyring)
		up_write(&keyring->sem);

	return rc;
}

EXPORT_SYMBOL(key_instantiate_and_link);
/*****************************************************************************/
/*
* negatively instantiate a key and link it into the target keyring atomically
*/
/*
 * Mark a key negatively instantiated for `timeout' seconds, link it
 * into the target keyring, revoke the authorisation key and wake any
 * waiters.  Returns -EBUSY if the key was already instantiated.
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *authkey)
{
	struct timespec now;
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		down_write(&keyring->sem);

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		/* schedule expiry and eventual garbage collection */
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;
		key_schedule_gc(key->expiry + key_gc_delay);

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring)
			ret = __key_link(keyring, key);

		/* disable the authorisation key */
		if (authkey)
			key_revoke(authkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring)
		up_write(&keyring->sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);
/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 * - repeatedly scans the serial-number tree for keys whose usage count has
 *   reached zero and destroys them one at a time, restarting the scan after
 *   each removal (the lock is dropped while destroying)
 */
static void key_cleanup(struct work_struct *work)
{
	struct rb_node *_n;
	struct key *key;
go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);
	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);
		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}
	spin_unlock(&key_serial_lock);
	return;
found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);
	key_check(key);
	security_key_free(key);
	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}
	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);
	key_user_put(key->user);
	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);
	kfree(key->description);
#ifdef KEY_DEBUGGING
	/* poison the magic so a use-after-free is detectable */
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);
	/* there may, of course, be more than one key to destroy */
	goto go_again;
} /* end key_cleanup() */
/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 * - NULL is tolerated and ignored
 */
void key_put(struct key *key)
{
	if (!key)
		return;

	key_check(key);

	/* last reference gone: defer actual destruction to the workqueue */
	if (atomic_dec_and_test(&key->usage))
		schedule_work(&key_cleanup_task);
} /* end key_put() */
EXPORT_SYMBOL(key_put);
/*****************************************************************************/
/*
 * find a key by its serial number
 * - returns the key with its usage count incremented, or ERR_PTR(-ENOKEY) if
 *   no live key with that serial exists (keys awaiting deletion are hidden)
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key = NULL;

	spin_lock(&key_serial_lock);

	/* binary-search the serial-number tree for the requested key */
	n = key_serial_tree.rb_node;
	while (n) {
		struct key *candidate = rb_entry(n, struct key, serial_node);

		if (id < candidate->serial) {
			n = n->rb_left;
		} else if (id > candidate->serial) {
			n = n->rb_right;
		} else {
			key = candidate;
			break;
		}
	}

	/* pretend it doesn't exist if it is awaiting deletion */
	if (!key || atomic_read(&key->usage) == 0) {
		key = ERR_PTR(-ENOKEY);
	} else {
		/* this races with key_put(), but that doesn't matter since
		 * key_put() doesn't actually change the key
		 */
		atomic_inc(&key->usage);
	}

	spin_unlock(&key_serial_lock);
	return key;
} /* end key_lookup() */
/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - on success we return with key_types_sem readlocked; the caller must
 *   release it with key_type_put()
 * - returns ERR_PTR(-ENOKEY) (with the sem released) if the type is unknown
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			return ktype;	/* sem stays readlocked */
	}

	up_read(&key_types_sem);
	return ERR_PTR(-ENOKEY);
} /* end key_type_lookup() */
/*****************************************************************************/
/*
 * unlock a key type
 * - releases the read lock taken by key_type_lookup(); @ktype itself is
 *   unused here but kept for interface symmetry
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);
} /* end key_type_put() */
/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 * - on success the pinned key reference is returned unchanged; on failure
 *   the reference is dropped and an ERR_PTR is returned instead
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;
	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;
	/* types without an update op can't be updated in place */
	ret = -EEXIST;
	if (!key->type->update)
		goto error;
	down_write(&key->sem);
	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
	up_write(&key->sem);
	if (ret < 0)
		goto error;
out:
	return key_ref;
error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;
} /* end __key_update() */
/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 * - @perm may be KEY_PERM_UNDEF, in which case a default permission mask is
 *   derived from the key type's capabilities
 * - returns a pinned key reference on success or an ERR_PTR on failure
 *   (-ENODEV unknown type, -EINVAL non-instantiable type, -ENOTDIR target is
 *   not a keyring, or a permission/allocation/link error)
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	const struct cred *cred = current_cred();
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;
	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}
	/* keys of a type that can't be matched or instantiated can't be
	 * created this way */
	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;
	keyring = key_ref_to_ptr(keyring_ref);
	key_check(keyring);
	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;
	down_write(&keyring->sem);
	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}
	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}
	/* if the client doesn't provide, decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;
		/* only readable types get read bits */
		if (ktype->read)
			perm |= KEY_POS_READ | KEY_USR_READ;
		/* only writable/keyring types get user write */
		if (ktype == &key_type_keyring || ktype->update)
			perm |= KEY_USR_WRITE;
	}
	/* allocate a new key */
	key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
			perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_3;
	}
	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}
	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
 error_3:
	up_write(&keyring->sem);
 error_2:
	key_type_put(ktype);
 error:
	return key_ref;
 found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	up_write(&keyring->sem);
	key_type_put(ktype);
	key_ref = __key_update(key_ref, payload, plen);
	goto error;
} /* end key_create_or_update() */
EXPORT_SYMBOL(key_create_or_update);
/*****************************************************************************/
/*
 * update a key
 * - requires write permission on the key and a key type that implements the
 *   update operation (-EOPNOTSUPP otherwise)
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		return ret;

	/* attempt to update it if supported */
	if (!key->type->update)
		return -EOPNOTSUPP;

	down_write(&key->sem);
	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
	up_write(&key->sem);

	return ret;
} /* end key_update() */
EXPORT_SYMBOL(key_update);
/*****************************************************************************/
/*
 * revoke a key
 * - marks the key revoked (calling the type's revoke op exactly once), caps
 *   the revocation time and schedules garbage collection
 */
void key_revoke(struct key *key)
{
	struct timespec now;
	time_t time;
	key_check(key);
	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	/* only the first revoker invokes the type's revoke op */
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);
	/* set the death time to no more than the expiry time */
	now = current_kernel_time();
	time = now.tv_sec;
	if (key->revoked_at == 0 || key->revoked_at > time) {
		key->revoked_at = time;
		key_schedule_gc(key->revoked_at + key_gc_delay);
	}
	up_write(&key->sem);
} /* end key_revoke() */
EXPORT_SYMBOL(key_revoke);
/*****************************************************************************/
/*
 * register a type of key
 * - returns 0 on success or -EEXIST if a type of the same name is already
 *   registered
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret = 0;

	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0) {
			ret = -EEXIST;
			goto out;
		}
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);
out:
	up_write(&key_types_sem);
	return ret;
} /* end register_key_type() */
EXPORT_SYMBOL(register_key_type);
/*****************************************************************************/
/*
 * unregister a type of key
 * - two-pass teardown: first mark every key of this type dead, then after an
 *   RCU grace period destroy their payloads and schedule garbage collection
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;
	down_write(&key_types_sem);
	/* withdraw the key type */
	list_del_init(&ktype->link);
	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);
	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);
		if (key->type == ktype) {
			key->type = &key_type_dead;
			set_bit(KEY_FLAG_DEAD, &key->flags);
		}
	}
	spin_unlock(&key_serial_lock);
	/* make sure everyone revalidates their keys */
	synchronize_rcu();
	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);
	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);
		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			/* poison the payload so stale use is visible */
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}
	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);
	key_schedule_gc(0);
} /* end unregister_key_type() */
EXPORT_SYMBOL(unregister_key_type);
/*****************************************************************************/
/*
 * initialise the key management stuff
 * - creates the key slab, registers the built-in key types and seeds the
 *   per-user tracking tree with the root user's record
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);
	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);
	rb_insert_color(&root_key_user.node,
			&key_user_tree);
} /* end key_init() */
| gpl-2.0 |
int0x19/android_kernel_xiaomi_msm8992 | drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c | 573 | 10286 | /* amr-wbplus audio output device
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/msm_audio_amrwbplus.h>
#include <linux/compat.h>
#include "audio_utils_aio.h"
#ifdef CONFIG_DEBUG_FS
/* debugfs hooks shared with the other qdsp6v2 audio drivers */
static const struct file_operations audio_amrwbplus_debug_fops = {
	.read = audio_aio_debug_read,
	.open = audio_aio_debug_open,
};
/*
 * Create a per-session debugfs node named "msm_amrwbplus_<session>".
 * Failure to create the node is only logged; playback works without it.
 */
static void config_debug_fs(struct q6audio_aio *audio)
{
	if (audio != NULL) {
		char name[sizeof("msm_amrwbplus_") + 5];
		snprintf(name, sizeof(name), "msm_amrwbplus_%04x",
			audio->ac->session);
		audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
						NULL, (void *)audio,
						&audio_amrwbplus_debug_fops);
		if (IS_ERR(audio->dentry))
			pr_debug("debugfs_create_file failed\n");
	}
}
#else
/* no-op stub when debugfs is not built in */
static void config_debug_fs(struct q6audio_aio *audio)
{
}
#endif
/*
 * Handle the ioctls common to the native and compat paths.
 * Currently only AUDIO_START: configures the PCM output block (non-tunnel
 * mode only), pushes the cached AMR-WB+ codec config to the DSP and enables
 * the AIO session.  Returns 0 on success or a negative errno.
 */
static long audio_ioctl_shared(struct file *file, unsigned int cmd,
						void *arg)
{
	struct asm_amrwbplus_cfg q6_amrwbplus_cfg;
	struct msm_audio_amrwbplus_config_v2 *amrwbplus_drv_config;
	struct q6audio_aio *audio = file->private_data;
	int rc = 0;
	switch (cmd) {
	case AUDIO_START: {
		/* NOTE(review): pr_err on the normal start path looks like it
		 * should be pr_debug - confirm intended log level */
		pr_err("%s[%p]: AUDIO_START session_id[%d]\n", __func__,
			audio, audio->ac->session);
		if (audio->feedback == NON_TUNNEL_MODE) {
			/* Configure PCM output block */
			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
			audio->pcm_cfg.sample_rate,
			audio->pcm_cfg.channel_count);
			if (rc < 0) {
				pr_err("pcm output block config failed\n");
				break;
			}
		}
		/* copy the userspace-supplied config (cached in codec_cfg)
		 * into the ASM media-format structure field by field */
		amrwbplus_drv_config =
		(struct msm_audio_amrwbplus_config_v2 *)audio->codec_cfg;
		q6_amrwbplus_cfg.size_bytes =
			amrwbplus_drv_config->size_bytes;
		q6_amrwbplus_cfg.version =
			amrwbplus_drv_config->version;
		q6_amrwbplus_cfg.num_channels =
			amrwbplus_drv_config->num_channels;
		q6_amrwbplus_cfg.amr_band_mode =
			amrwbplus_drv_config->amr_band_mode;
		q6_amrwbplus_cfg.amr_dtx_mode =
			amrwbplus_drv_config->amr_dtx_mode;
		q6_amrwbplus_cfg.amr_frame_fmt =
			amrwbplus_drv_config->amr_frame_fmt;
		q6_amrwbplus_cfg.amr_lsf_idx =
			amrwbplus_drv_config->amr_lsf_idx;
		rc = q6asm_media_format_block_amrwbplus(audio->ac,
							&q6_amrwbplus_cfg);
		if (rc < 0) {
			pr_err("q6asm_media_format_block_amrwb+ failed...\n");
			break;
		}
		rc = audio_aio_enable(audio);
		audio->eos_rsp = 0;
		audio->eos_flag = 0;
		if (!rc) {
			audio->enabled = 1;
		} else {
			audio->enabled = 0;
			pr_err("Audio Start procedure failed rc=%d\n", rc);
			break;
		}
		pr_debug("%s:AUDIO_START sessionid[%d]enable[%d]\n", __func__,
			audio->ac->session,
			audio->enabled);
		if (audio->stopped == 1)
			audio->stopped = 0;
		break;
	}
	default:
		pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd);
		rc = -EINVAL;
		break;
	}
	return rc;
}
/*
 * Native (64-bit layout) ioctl handler.
 * Handles AUDIO_START plus get/set of the AMR-WB+ v2 codec config; anything
 * else is delegated to the generic AIO codec ioctl handler.
 */
static long audio_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	struct q6audio_aio *audio = file->private_data;
	int rc = 0;
	switch (cmd) {
	case AUDIO_START: {
		rc = audio_ioctl_shared(file, cmd, (void *)arg);
		break;
	}
	case AUDIO_GET_AMRWBPLUS_CONFIG_V2: {
		/* return the cached codec config to userspace */
		if ((audio) && (arg) && (audio->codec_cfg)) {
			if (copy_to_user((void *)arg, audio->codec_cfg,
				sizeof(struct msm_audio_amrwbplus_config_v2))) {
				rc = -EFAULT;
				pr_err("%s: copy_to_user for AUDIO_GET_AMRWBPLUS_CONFIG_V2 failed\n",
					 __func__);
				break;
			}
			} else {
				pr_err("%s: wb+ config v2 invalid parameters\n"
					, __func__);
				rc = -EFAULT;
				break;
		}
		break;
	}
	case AUDIO_SET_AMRWBPLUS_CONFIG_V2: {
		/* cache the codec config; it is sent to the DSP at
		 * AUDIO_START time by audio_ioctl_shared() */
		if ((audio) && (arg) && (audio->codec_cfg)) {
			if (copy_from_user(audio->codec_cfg, (void *)arg,
				sizeof(struct msm_audio_amrwbplus_config_v2))) {
				rc = -EFAULT;
				pr_err("%s: copy_from_user for AUDIO_SET_AMRWBPLUS_CONFIG_V2 failed\n",
					__func__);
				break;
			}
			} else {
				pr_err("%s: wb+ config invalid parameters\n",
					__func__);
				rc = -EFAULT;
				break;
		}
		break;
	}
	default: {
		pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio);
		rc = audio->codec_ioctl(file, cmd, arg);
		break;
	}
	}
	return rc;
}
#ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct msm_audio_amrwbplus_config_v2, used by
 * the compat ioctl path to marshal configs field by field */
struct msm_audio_amrwbplus_config_v2_32 {
	u32 size_bytes;
	u32 version;
	u32 num_channels;
	u32 amr_band_mode;
	u32 amr_dtx_mode;
	u32 amr_frame_fmt;
	u32 amr_lsf_idx;
};
/* compat ioctl numbers - same command slots as the native ones, but encoded
 * with the 32-bit structure size so the _IOC numbers match 32-bit callers */
enum {
	AUDIO_GET_AMRWBPLUS_CONFIG_V2_32 =  _IOR(AUDIO_IOCTL_MAGIC,
	(AUDIO_MAX_COMMON_IOCTL_NUM+2),
	struct msm_audio_amrwbplus_config_v2_32),
	AUDIO_SET_AMRWBPLUS_CONFIG_V2_32 =  _IOW(AUDIO_IOCTL_MAGIC,
	(AUDIO_MAX_COMMON_IOCTL_NUM+3),
	struct msm_audio_amrwbplus_config_v2_32)
};
/*
 * Compat (32-bit userspace) ioctl handler.
 * Translates the 32-bit config layout to/from the native structure; all
 * other commands are delegated to the generic AIO compat handler.
 */
static long audio_compat_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	struct q6audio_aio *audio = file->private_data;
	int rc = 0;
	switch (cmd) {
	case AUDIO_START: {
		rc = audio_ioctl_shared(file, cmd, (void *)arg);
		break;
	}
	case AUDIO_GET_AMRWBPLUS_CONFIG_V2_32: {
		if (audio && arg && (audio->codec_cfg)) {
			/* repack the cached native config into the 32-bit
			 * layout before copying out */
			struct msm_audio_amrwbplus_config_v2 *amrwbplus_config;
			struct msm_audio_amrwbplus_config_v2_32
				 amrwbplus_config_32;
			amrwbplus_config =
				(struct msm_audio_amrwbplus_config_v2 *)
				audio->codec_cfg;
			amrwbplus_config_32.size_bytes =
					amrwbplus_config->size_bytes;
			amrwbplus_config_32.version =
					amrwbplus_config->version;
			amrwbplus_config_32.num_channels =
					amrwbplus_config->num_channels;
			amrwbplus_config_32.amr_band_mode =
					amrwbplus_config->amr_band_mode;
			amrwbplus_config_32.amr_dtx_mode =
					amrwbplus_config->amr_dtx_mode;
			amrwbplus_config_32.amr_frame_fmt =
					amrwbplus_config->amr_frame_fmt;
			amrwbplus_config_32.amr_lsf_idx =
					amrwbplus_config->amr_lsf_idx;
			if (copy_to_user((void *)arg, &amrwbplus_config_32,
				sizeof(amrwbplus_config_32))) {
				rc = -EFAULT;
				pr_err("%s: copy_to_user for AUDIO_GET_AMRWBPLUS_CONFIG_V2_32 failed\n"
					, __func__);
			}
		} else {
			pr_err("%s: wb+ Get config v2 invalid parameters\n"
				, __func__);
			rc = -EFAULT;
		}
		break;
	}
	case AUDIO_SET_AMRWBPLUS_CONFIG_V2_32: {
		if ((audio) && (arg) && (audio->codec_cfg)) {
			/* copy in the 32-bit layout, then widen it into the
			 * cached native config structure */
			struct msm_audio_amrwbplus_config_v2 *amrwbplus_config;
			struct msm_audio_amrwbplus_config_v2_32
				amrwbplus_config_32;
			if (copy_from_user(&amrwbplus_config_32, (void *)arg,
				sizeof(struct msm_audio_amrwbplus_config_v2_32))) {
				rc = -EFAULT;
				pr_err("%s: copy_from_user for AUDIO_SET_AMRWBPLUS_CONFIG_V2_32 failed\n"
					, __func__);
				break;
			}
			amrwbplus_config =
				(struct msm_audio_amrwbplus_config_v2 *)
				audio->codec_cfg;
			amrwbplus_config->size_bytes =
					amrwbplus_config_32.size_bytes;
			amrwbplus_config->version =
					amrwbplus_config_32.version;
			amrwbplus_config->num_channels =
					amrwbplus_config_32.num_channels;
			amrwbplus_config->amr_band_mode =
					amrwbplus_config_32.amr_band_mode;
			amrwbplus_config->amr_dtx_mode =
					amrwbplus_config_32.amr_dtx_mode;
			amrwbplus_config->amr_frame_fmt =
					amrwbplus_config_32.amr_frame_fmt;
			amrwbplus_config->amr_lsf_idx =
					amrwbplus_config_32.amr_lsf_idx;
		} else {
			pr_err("%s: wb+ config invalid parameters\n",
				__func__);
			rc = -EFAULT;
		}
		break;
	}
	default: {
		pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio);
		rc = audio->codec_compat_ioctl(file, cmd, arg);
		break;
	}
	}
	return rc;
}
#else
/* no compat support configured */
#define audio_compat_ioctl NULL
#endif
/*
 * Open a decoder session.
 * Allocates the driver state, codec-config buffer and an ASM audio client,
 * then opens the session in non-tunnel (read+write: decode to PCM read back
 * by userspace) or tunnel (write-only: decode straight to output) mode.
 * Any failure unwinds all allocations.  Returns 0 or a negative errno.
 */
static int audio_open(struct inode *inode, struct file *file)
{
	struct q6audio_aio *audio = NULL;
	int rc = 0;
	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
	if (audio == NULL) {
		pr_err("kzalloc failed for amrwb+ decode driver\n");
		return -ENOMEM;
	}
	audio->codec_cfg =
	kzalloc(sizeof(struct msm_audio_amrwbplus_config_v2), GFP_KERNEL);
	if (audio->codec_cfg == NULL) {
		pr_err("%s:failed kzalloc for amrwb+ config structure",
				__func__);
		kfree(audio);
		return -ENOMEM;
	}
	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
	audio->ac =
	q6asm_audio_client_alloc((app_cb) q6_audio_cb, (void *)audio);
	if (!audio->ac) {
		pr_err("Could not allocate memory for audio client\n");
		kfree(audio->codec_cfg);
		kfree(audio);
		return -ENOMEM;
	}
	rc = audio_aio_open(audio, file);
	if (rc < 0) {
		pr_err("%s: audio_aio_open rc=%d\n",
			__func__, rc);
		goto fail;
	}
	/* open in T/NT mode */
	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
		/* non-tunnel: decoder output is read back as linear PCM */
		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
					FORMAT_AMR_WB_PLUS);
		if (rc < 0) {
			pr_err("amrwbplus NT mode Open failed rc=%d\n", rc);
			rc = -ENODEV;
			goto fail;
		}
		audio->feedback = NON_TUNNEL_MODE;
		audio->buf_cfg.frames_per_buf = 0x01;
		audio->buf_cfg.meta_info_enable = 0x01;
	} else if ((file->f_mode & FMODE_WRITE) &&
			!(file->f_mode & FMODE_READ)) {
		/* tunnel: decoder renders directly, no PCM read path */
		rc = q6asm_open_write(audio->ac, FORMAT_AMR_WB_PLUS);
		if (rc < 0) {
			pr_err("wb+ T mode Open failed rc=%d\n", rc);
			rc = -ENODEV;
			goto fail;
		}
		audio->feedback = TUNNEL_MODE;
		audio->buf_cfg.meta_info_enable = 0x00;
	} else {
		pr_err("audio_amrwbplus Not supported mode\n");
		rc = -EACCES;
		goto fail;
	}
	config_debug_fs(audio);
	pr_debug("%s: AMRWBPLUS dec success mode[%d]session[%d]\n", __func__,
		audio->feedback,
		audio->ac->session);
	return 0;
fail:
	/* unwind in reverse order of acquisition */
	q6asm_audio_client_free(audio->ac);
	kfree(audio->codec_cfg);
	kfree(audio);
	return rc;
}
/* file operations for the /dev/msm_amrwbplus misc device */
static const struct file_operations audio_amrwbplus_fops = {
	.owner = THIS_MODULE,
	.open = audio_open,
	.release = audio_aio_release,
	.unlocked_ioctl = audio_ioctl,
	.fsync = audio_aio_fsync,
	.compat_ioctl = audio_compat_ioctl
};
/* misc device node; minor number is assigned dynamically */
struct miscdevice audio_amrwbplus_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "msm_amrwbplus",
	.fops = &audio_amrwbplus_fops,
};
/* register the decoder's misc device at boot */
static int __init audio_amrwbplus_init(void)
{
	return misc_register(&audio_amrwbplus_misc);
}
device_initcall(audio_amrwbplus_init);
| gpl-2.0 |
aqua-project/Linux-Minimal-x86-Reimplementation | block/blk-ioc.c | 1341 | 10441 | /*
* Functions related to io context handling
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk.h"
/*
* For io context allocations
*/
static struct kmem_cache *iocontext_cachep;
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.  The caller must already hold a
 * reference (the count is required to be positive on entry).
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);
/* RCU callback: free an icq after the grace period has elapsed */
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
	/* the cache pointer was stashed by ioc_destroy_icq() because
	 * icq->q (and thus its elevator) may be gone by now */
	kmem_cache_free(icq->__rcu_icq_cache, icq);
}
/* Exit an icq. Called with both ioc and q locked.
 * Notifies the elevator's exit_icq hook exactly once; the ICQ_EXITED flag
 * guards against repeated notification. */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;
	if (icq->flags & ICQ_EXITED)
		return;
	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);
	icq->flags |= ICQ_EXITED;
}
/* Release an icq. Called with both ioc and q locked.
 * Unlinks the icq from the radix tree, the ioc's hlist and the queue's list,
 * notifies the elevator and schedules the actual free via RCU. */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;
	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);
	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);
	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock.  If it's not pointing to @icq now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);
	ioc_exit_icq(icq);
	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 * Lock order is q lock -> ioc lock, but here we start from the ioc, so we
 * only trylock the queue and back off (drop the ioc lock, spin, retry) when
 * that fails, to avoid deadlocking against the normal ordering.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	unsigned long flags;
	/*
	 * Exiting icq may call into put_io_context() through elevator
	 * which will trigger lockdep warning.  The ioc's are guaranteed to
	 * be different, use a different locking subclass here.  Use
	 * irqsave variant as there's no spin_lock_irq_nested().
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;
		if (spin_trylock(q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(q->queue_lock);
		} else {
			/* contended: back off and retry with locks re-taken */
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);
	kmem_cache_free(iocontext_cachep, ioc);
}
/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put (NULL is a no-op)
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.  If icq's remain, teardown is deferred to a workqueue because it
 * needs reverse-order double locking (see ioc_release_fn()).
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;
	if (ioc == NULL)
		return;
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
					&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
	/* no icq's: safe to free directly, outside the ioc lock */
	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);
/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 * Also drops one plain reference via put_io_context().
 */
void put_io_context_active(struct io_context *ioc)
{
	unsigned long flags;
	struct io_cq *icq;
	if (!atomic_dec_and_test(&ioc->active_ref)) {
		put_io_context(ioc);
		return;
	}
	/*
	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
	 * reverse double locking.  Read comment in ioc_release_fn() for
	 * explanation on the nested locking annotation.
	 */
retry:
	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;
		if (spin_trylock(icq->q->queue_lock)) {
			ioc_exit_icq(icq);
			spin_unlock(icq->q->queue_lock);
		} else {
			/* queue lock contended: restart the whole walk */
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_relax();
			goto retry;
		}
	}
	spin_unlock_irqrestore(&ioc->lock, flags);
	put_io_context(ioc);
}
/* Called by the exiting task: detach the io_context from the task and drop
 * both the task count and the active reference. */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;
	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);
	atomic_dec(&ioc->nr_tasks);
	put_io_context_active(ioc);
}
/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 * Here the locks are taken in the natural q -> ioc order, so no trylock
 * dance is needed (contrast with ioc_release_fn()).
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);
	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;
		spin_lock(&ioc->lock);
		ioc_destroy_icq(icq);
		spin_unlock(&ioc->lock);
	}
}
/*
 * Allocate and install a fresh io_context for @task.
 * Returns 0 if @task ends up with an io_context (ours or one installed by a
 * racing thread) and -EBUSY/-ENOMEM otherwise.
 */
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	int ret;
	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return -ENOMEM;
	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);
	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);
	/* report success if an ioc is installed, whoever installed it */
	ret = task->io_context ? 0 : -EBUSY;
	task_unlock(task);
	return ret;
}
/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.  Returns NULL only if creation fails (e.g. -ENOMEM).
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;
	might_sleep_if(gfp_flags & __GFP_WAIT);
	/* loop: a freshly created ioc may be raced away before we re-look */
	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (!create_task_io_context(task, gfp_flags, node));
	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.  Returns the icq or NULL if none is linked.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;
	lockdep_assert_held(q->queue_lock);
	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find a icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	/* fast path: the last-used icq for this ioc */
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;
	/* slow path: radix tree keyed by queue id; refresh the hint */
	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If icq doesn't exist, they
 * will be created using @gfp_mask.  If a racing creator won, its icq is
 * looked up and returned instead.  Returns NULL on allocation failure.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;
	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;
	/* preload so the radix insert below can't fail on allocation */
	if (radix_tree_maybe_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}
	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);
	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);
	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.elevator_init_icq_fn)
			et->ops.elevator_init_icq_fn(icq);
	} else {
		/* lost the race: discard ours and use the winner's icq */
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}
	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
/*
 * Set up the slab cache used for struct io_context allocations.
 * SLAB_PANIC: boot cannot continue without this cache, so failure panics.
 */
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);
| gpl-2.0 |
Dazzozo/android_kernel_huawei_u8815 | drivers/media/video/gspca/stv06xx/stv06xx_vv6410.c | 3133 | 9307 | /*
* Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
* Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
* Copyright (c) 2002, 2003 Tuukka Toivonen
* Copyright (c) 2008 Erik Andrén
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* P/N 861037: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
* P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam
* P/N 861075-0040: Sensor HDCS1000 ASIC
* P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB
* P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
*/
#include "stv06xx_vv6410.h"
/*
 * The single capture mode exposed by the VV6410 sensor:
 * 356x292 raw Bayer (GRBG), one byte per pixel.
 *
 * Use designated initializers throughout; the original mixed
 * positional (width/height/pixelformat/field) and designated members
 * in the same initializer, which is fragile against struct layout
 * changes and harder to read.
 */
static struct v4l2_pix_format vv6410_mode[] = {
	{
		.width = 356,
		.height = 292,
		.pixelformat = V4L2_PIX_FMT_SGRBG8,
		.field = V4L2_FIELD_NONE,
		.sizeimage = 356 * 292,
		.bytesperline = 356,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 0
	}
};
/*
 * V4L2 controls exported by the VV6410 sensor.  The *_IDX macros index
 * both this table and the per-sensor shadow array allocated in
 * vv6410_probe() (sd->sensor_priv), so the two must stay in sync.
 */
static const struct ctrl vv6410_ctrl[] = {
#define HFLIP_IDX 0
	{
		{
			.id = V4L2_CID_HFLIP,
			.type = V4L2_CTRL_TYPE_BOOLEAN,
			.name = "horizontal flip",
			.minimum = 0,
			.maximum = 1,
			.step = 1,
			.default_value = 0
		},
		.set = vv6410_set_hflip,
		.get = vv6410_get_hflip
	},
#define VFLIP_IDX 1
	{
		{
			.id = V4L2_CID_VFLIP,
			.type = V4L2_CTRL_TYPE_BOOLEAN,
			.name = "vertical flip",
			.minimum = 0,
			.maximum = 1,
			.step = 1,
			.default_value = 0
		},
		.set = vv6410_set_vflip,
		.get = vv6410_get_vflip
	},
#define GAIN_IDX 2
	{
		{
			.id = V4L2_CID_GAIN,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "analog gain",
			.minimum = 0,
			.maximum = 15,
			.step = 1,
			.default_value = 10
		},
		.set = vv6410_set_analog_gain,
		.get = vv6410_get_analog_gain
	},
#define EXPOSURE_IDX 3
	{
		{
			.id = V4L2_CID_EXPOSURE,
			.type = V4L2_CTRL_TYPE_INTEGER,
			.name = "exposure",
			.minimum = 0,
			.maximum = 32768,
			.step = 1,
			.default_value = 20000
		},
		.set = vv6410_set_exposure,
		.get = vv6410_get_exposure
	}
};
static int vv6410_probe(struct sd *sd)
{
u16 data;
int err, i;
s32 *sensor_settings;
err = stv06xx_read_sensor(sd, VV6410_DEVICEH, &data);
if (err < 0)
return -ENODEV;
if (data == 0x19) {
info("vv6410 sensor detected");
sensor_settings = kmalloc(ARRAY_SIZE(vv6410_ctrl) * sizeof(s32),
GFP_KERNEL);
if (!sensor_settings)
return -ENOMEM;
sd->gspca_dev.cam.cam_mode = vv6410_mode;
sd->gspca_dev.cam.nmodes = ARRAY_SIZE(vv6410_mode);
sd->desc.ctrls = vv6410_ctrl;
sd->desc.nctrls = ARRAY_SIZE(vv6410_ctrl);
for (i = 0; i < sd->desc.nctrls; i++)
sensor_settings[i] = vv6410_ctrl[i].qctrl.default_value;
sd->sensor_priv = sensor_settings;
return 0;
}
return -ENODEV;
}
/*
 * Initialize the bridge and the VV6410 sensor, then program the cached
 * exposure and gain values.
 *
 * Bug fix: the original only inspected err *after* the whole
 * stv_bridge_init loop, so a failed write in an early iteration was
 * silently overwritten by later successful writes.  Errors are now
 * checked per iteration and propagated immediately.
 *
 * Returns 0 on success or a negative error from the first failing write.
 */
static int vv6410_init(struct sd *sd)
{
	int err = 0, i;
	s32 *sensor_settings = sd->sensor_priv;

	for (i = 0; i < ARRAY_SIZE(stv_bridge_init); i++) {
		/* if NULL then len contains single value */
		if (stv_bridge_init[i].data == NULL) {
			err = stv06xx_write_bridge(sd,
				stv_bridge_init[i].start,
				stv_bridge_init[i].len);
		} else {
			int j;

			for (j = 0; j < stv_bridge_init[i].len; j++) {
				err = stv06xx_write_bridge(sd,
					stv_bridge_init[i].start + j,
					stv_bridge_init[i].data[j]);
				if (err < 0)
					break;
			}
		}
		if (err < 0)
			return err;
	}

	err = stv06xx_write_sensor_bytes(sd, (u8 *) vv6410_sensor_init,
					 ARRAY_SIZE(vv6410_sensor_init));
	if (err < 0)
		return err;

	/* Restore cached control values into the sensor */
	err = vv6410_set_exposure(&sd->gspca_dev,
				  sensor_settings[EXPOSURE_IDX]);
	if (err < 0)
		return err;

	err = vv6410_set_analog_gain(&sd->gspca_dev,
				     sensor_settings[GAIN_IDX]);

	return (err < 0) ? err : 0;
}
/* Tear down per-sensor state: free the control shadow cache and detach. */
static void vv6410_disconnect(struct sd *sd)
{
	kfree(sd->sensor_priv);
	sd->sensor = NULL;
}
/*
 * Start streaming: program the capture window and subsampling for the
 * selected mode, switch on the LED and take the sensor out of low-power
 * mode.
 *
 * Robustness fix: the original ignored the return value of every
 * window/subsample register write and only checked the LED and SETUP0
 * writes, so an I2C failure could leave the sensor misconfigured while
 * reporting success.  Every write is now checked and its error
 * propagated.
 */
static int vv6410_start(struct sd *sd)
{
	int err;
	struct cam *cam = &sd->gspca_dev.cam;
	u32 priv = cam->cam_mode[sd->gspca_dev.curr_mode].priv;

	if (priv & VV6410_CROP_TO_QVGA) {
		PDEBUG(D_CONF, "Cropping to QVGA");
		err = stv06xx_write_sensor(sd, VV6410_XENDH, 320 - 1);
		if (err < 0)
			return err;
		err = stv06xx_write_sensor(sd, VV6410_YENDH, 240 - 1);
	} else {
		err = stv06xx_write_sensor(sd, VV6410_XENDH, 360 - 1);
		if (err < 0)
			return err;
		err = stv06xx_write_sensor(sd, VV6410_YENDH, 294 - 1);
	}
	if (err < 0)
		return err;

	if (priv & VV6410_SUBSAMPLE) {
		PDEBUG(D_CONF, "Enabling subsampling");
		err = stv06xx_write_bridge(sd, STV_Y_CTRL, 0x02);
		if (err < 0)
			return err;
		err = stv06xx_write_bridge(sd, STV_X_CTRL, 0x06);
		if (err < 0)
			return err;
		err = stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x10);
	} else {
		err = stv06xx_write_bridge(sd, STV_Y_CTRL, 0x01);
		if (err < 0)
			return err;
		err = stv06xx_write_bridge(sd, STV_X_CTRL, 0x0a);
		if (err < 0)
			return err;
		err = stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x20);
	}
	if (err < 0)
		return err;

	/* Turn on LED */
	err = stv06xx_write_bridge(sd, STV_LED_CTRL, LED_ON);
	if (err < 0)
		return err;

	/* Clear SETUP0 to leave low-power mode and start streaming */
	err = stv06xx_write_sensor(sd, VV6410_SETUP0, 0);
	if (err < 0)
		return err;

	PDEBUG(D_STREAM, "Starting stream");

	return 0;
}
/*
 * Stop streaming: switch off the LED and drop the sensor into its
 * low-power mode.  Returns 0 on success or the first write error.
 */
static int vv6410_stop(struct sd *sd)
{
	int err;

	/* Turn off LED */
	err = stv06xx_write_bridge(sd, STV_LED_CTRL, LED_OFF);
	if (err < 0)
		return err;

	err = stv06xx_write_sensor(sd, VV6410_SETUP0, VV6410_LOW_POWER_MODE);
	if (err < 0)
		return err;

	PDEBUG(D_STREAM, "Halting stream");
	return 0;
}
/*
 * Debug helper: read and print every sensor register (0x00-0xff).
 *
 * Bug fix: the original used "u8 i" with "i < 0xff", which skipped
 * register 0xff (and could not be written as "<= 0xff" without the u8
 * counter wrapping forever).  Use an int counter and cover the full
 * 8-bit register space.  Stops on the first read error; note the value
 * printed for a failed read is whatever was left in 'data'.
 */
static int vv6410_dump(struct sd *sd)
{
	int i;
	int err = 0;

	info("Dumping all vv6410 sensor registers");
	for (i = 0; i <= 0xff && !err; i++) {
		u16 data;

		err = stv06xx_read_sensor(sd, i, &data);
		info("Register 0x%x contained 0x%x", i, data);
	}
	return (err < 0) ? err : 0;
}
/* Report the cached horizontal-flip setting (no hardware access). */
static int vv6410_get_hflip(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	const s32 *settings = sd->sensor_priv;

	*val = settings[HFLIP_IDX];
	PDEBUG(D_V4L2, "Read horizontal flip %d", *val);

	return 0;
}
/*
 * Set horizontal flip: cache the value, then read-modify-write the
 * HFLIP bit in the sensor's data-format register.
 */
static int vv6410_set_hflip(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *settings = sd->sensor_priv;
	u16 fmt_reg;
	int err;

	settings[HFLIP_IDX] = val;

	err = stv06xx_read_sensor(sd, VV6410_DATAFORMAT, &fmt_reg);
	if (err < 0)
		return err;

	if (val)
		fmt_reg |= VV6410_HFLIP;
	else
		fmt_reg &= ~VV6410_HFLIP;

	PDEBUG(D_V4L2, "Set horizontal flip to %d", val);
	err = stv06xx_write_sensor(sd, VV6410_DATAFORMAT, fmt_reg);
	if (err < 0)
		return err;

	return 0;
}
/* Report the cached vertical-flip setting (no hardware access). */
static int vv6410_get_vflip(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	const s32 *settings = sd->sensor_priv;

	*val = settings[VFLIP_IDX];
	PDEBUG(D_V4L2, "Read vertical flip %d", *val);

	return 0;
}
/*
 * Set vertical flip: cache the value, then read-modify-write the
 * VFLIP bit in the sensor's data-format register.
 */
static int vv6410_set_vflip(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *settings = sd->sensor_priv;
	u16 fmt_reg;
	int err;

	settings[VFLIP_IDX] = val;

	err = stv06xx_read_sensor(sd, VV6410_DATAFORMAT, &fmt_reg);
	if (err < 0)
		return err;

	if (val)
		fmt_reg |= VV6410_VFLIP;
	else
		fmt_reg &= ~VV6410_VFLIP;

	PDEBUG(D_V4L2, "Set vertical flip to %d", val);
	err = stv06xx_write_sensor(sd, VV6410_DATAFORMAT, fmt_reg);
	if (err < 0)
		return err;

	return 0;
}
/* Report the cached analog gain (no hardware access). */
static int vv6410_get_analog_gain(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	const s32 *settings = sd->sensor_priv;

	*val = settings[GAIN_IDX];
	PDEBUG(D_V4L2, "Read analog gain %d", *val);

	return 0;
}
/*
 * Set analog gain: only the low nibble of val is used; the high nibble
 * of the register is always written as 0xf0.
 */
static int vv6410_set_analog_gain(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *settings = sd->sensor_priv;
	int err;

	settings[GAIN_IDX] = val;
	PDEBUG(D_V4L2, "Set analog gain to %d", val);

	err = stv06xx_write_sensor(sd, VV6410_ANALOGGAIN, 0xf0 | (val & 0xf));
	if (err < 0)
		return err;

	return 0;
}
/* Report the cached exposure value (no hardware access). */
static int vv6410_get_exposure(struct gspca_dev *gspca_dev, __s32 *val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	const s32 *settings = sd->sensor_priv;

	*val = settings[EXPOSURE_IDX];
	PDEBUG(D_V4L2, "Read exposure %d", *val);

	return 0;
}
/*
 * Set exposure.  The user-visible control value (0..32768) is first
 * remapped non-linearly (val^2 >> 14 + val/4 — presumably to give a
 * perceptually smoother response at the low end; TODO confirm against
 * the VV6410 datasheet), then split into a coarse part in whole scan
 * lines and a fine part in pixel clocks within a line.
 */
static int vv6410_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
	int err;
	struct sd *sd = (struct sd *) gspca_dev;
	s32 *sensor_settings = sd->sensor_priv;
	unsigned int fine, coarse;

	/* Cache the raw (un-remapped) control value for get_exposure() */
	sensor_settings[EXPOSURE_IDX] = val;

	val = (val * val >> 14) + val / 4;

	fine = val % VV6410_CIF_LINELENGTH;
	/* Coarse exposure is capped at 512 lines (sensor limit — verify) */
	coarse = min(512, val / VV6410_CIF_LINELENGTH);

	PDEBUG(D_V4L2, "Set coarse exposure to %d, fine expsure to %d",
	       coarse, fine);

	err = stv06xx_write_sensor(sd, VV6410_FINEH, fine >> 8);
	if (err < 0)
		goto out;

	err = stv06xx_write_sensor(sd, VV6410_FINEL, fine & 0xff);
	if (err < 0)
		goto out;

	err = stv06xx_write_sensor(sd, VV6410_COARSEH, coarse >> 8);
	if (err < 0)
		goto out;

	err = stv06xx_write_sensor(sd, VV6410_COARSEL, coarse & 0xff);

out:
	return err;
}
| gpl-2.0 |
Silverblade-nz/Alpha15Copy | lib/show_mem.c | 3389 | 1428 | /*
* Generic show_mem() implementation
*
* Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de>
* All code subject to the GPL version 2.
*/
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/quicklist.h>
/*
 * Walk every online node's spanned page range and print a summary of
 * total / highmem / reserved / shared / non-shared pages, after the
 * generic free-area report.
 */
void show_mem(unsigned int filter)
{
	pg_data_t *pgdat;
	unsigned long nr_total = 0, nr_reserved = 0, nr_shared = 0,
		nr_nonshared = 0, nr_highmem = 0;

	printk("Mem-Info:\n");
	show_free_areas(filter);

	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
		return;

	for_each_online_pgdat(pgdat) {
		unsigned long off, flags;

		pgdat_resize_lock(pgdat, &flags);
		for (off = 0; off < pgdat->node_spanned_pages; off++) {
			unsigned long pfn = pgdat->node_start_pfn + off;
			struct page *page;

			/* Scanning all of memory takes a while; pet the NMI watchdog. */
			if (unlikely(!(off % MAX_ORDER_NR_PAGES)))
				touch_nmi_watchdog();

			if (!pfn_valid(pfn))
				continue;

			page = pfn_to_page(pfn);

			if (PageHighMem(page))
				nr_highmem++;

			if (PageReserved(page))
				nr_reserved++;
			else if (page_count(page) == 1)
				nr_nonshared++;
			else if (page_count(page) > 1)
				nr_shared += page_count(page) - 1;

			nr_total++;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk("%lu pages RAM\n", nr_total);
#ifdef CONFIG_HIGHMEM
	printk("%lu pages HighMem\n", nr_highmem);
#endif
	printk("%lu pages reserved\n", nr_reserved);
	printk("%lu pages shared\n", nr_shared);
	printk("%lu pages non-shared\n", nr_nonshared);
#ifdef CONFIG_QUICKLIST
	printk("%lu pages in pagetable cache\n",
		quicklist_total_size());
#endif
}
| gpl-2.0 |
SolidRun/linux-imx6 | drivers/media/video/saa7127.c | 3389 | 27124 | /*
* saa7127 - Philips SAA7127/SAA7129 video encoder driver
*
* Copyright (C) 2003 Roy Bulter <rbulter@hetnet.nl>
*
* Based on SAA7126 video encoder driver by Gillem & Andreas Oberritter
*
* Copyright (C) 2000-2001 Gillem <htoa@gmx.net>
* Copyright (C) 2002 Andreas Oberritter <obi@saftware.de>
*
* Based on Stadis 4:2:2 MPEG-2 Decoder Driver by Nathan Laredo
*
* Copyright (C) 1999 Nathan Laredo <laredo@gnu.org>
*
* This driver is designed for the Hauppauge 250/350 Linux driver
* from the ivtv Project
*
* Copyright (C) 2003 Kevin Thayer <nufan_wfk@yahoo.com>
*
* Dual output support:
* Copyright (C) 2004 Eric Varsanyi
*
* NTSC Tuning and 7.5 IRE Setup
* Copyright (C) 2004 Chris Kennedy <c@groovy.org>
*
* VBI additions & cleanup:
* Copyright (C) 2004, 2005 Hans Verkuil <hverkuil@xs4all.nl>
*
* Note: the saa7126 is identical to the saa7127, and the saa7128 is
* identical to the saa7129, except that the saa7126 and saa7128 have
* macrovision anti-taping support. This driver will almost certainly
* work fine for those chips, except of course for the missing anti-taping
* support.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/saa7127.h>
/* Module parameters: debug message verbosity and color-bar test mode. */
static int debug;
static int test_image;

MODULE_DESCRIPTION("Philips SAA7127/9 video encoder driver");
MODULE_AUTHOR("Kevin Thayer, Chris Kennedy, Hans Verkuil");
MODULE_LICENSE("GPL");

module_param(debug, int, 0644);
module_param(test_image, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
MODULE_PARM_DESC(test_image, "test_image (0-1)");
/*
 * SAA7127 register map (subaddresses on the I2C bus).
 */
#define SAA7127_REG_STATUS                           0x00

/* Widescreen signalling (WSS) and copy-generation (Macrovision) */
#define SAA7127_REG_WIDESCREEN_CONFIG                0x26
#define SAA7127_REG_WIDESCREEN_ENABLE                0x27
#define SAA7127_REG_BURST_START                      0x28
#define SAA7127_REG_BURST_END                        0x29
#define SAA7127_REG_COPYGEN_0                        0x2a
#define SAA7127_REG_COPYGEN_1                        0x2b
#define SAA7127_REG_COPYGEN_2                        0x2c
#define SAA7127_REG_OUTPUT_PORT_CONTROL              0x2d
#define SAA7127_REG_GAIN_LUMINANCE_RGB               0x38
#define SAA7127_REG_GAIN_COLORDIFF_RGB               0x39
#define SAA7127_REG_INPUT_PORT_CONTROL_1             0x3a

/* SAA7129-only fade/key register (used for chip detection in probe) */
#define SAA7129_REG_FADE_KEY_COL2                    0x4f

/* Encoder color / level setup */
#define SAA7127_REG_CHROMA_PHASE                     0x5a
#define SAA7127_REG_GAINU                            0x5b
#define SAA7127_REG_GAINV                            0x5c
#define SAA7127_REG_BLACK_LEVEL                      0x5d
#define SAA7127_REG_BLANKING_LEVEL                   0x5e
#define SAA7127_REG_VBI_BLANKING                     0x5f
#define SAA7127_REG_DAC_CONTROL                      0x61
#define SAA7127_REG_BURST_AMP                        0x62
#define SAA7127_REG_SUBC3                            0x63
#define SAA7127_REG_SUBC2                            0x64
#define SAA7127_REG_SUBC1                            0x65
#define SAA7127_REG_SUBC0                            0x66

/* Closed caption (line 21, odd/even fields) */
#define SAA7127_REG_LINE_21_ODD_0                    0x67
#define SAA7127_REG_LINE_21_ODD_1                    0x68
#define SAA7127_REG_LINE_21_EVEN_0                   0x69
#define SAA7127_REG_LINE_21_EVEN_1                   0x6a
#define SAA7127_REG_RCV_PORT_CONTROL                 0x6b
#define SAA7127_REG_VTRIG                            0x6c
#define SAA7127_REG_HTRIG_HI                         0x6d
#define SAA7127_REG_MULTI                            0x6e
#define SAA7127_REG_CLOSED_CAPTION                   0x6f
#define SAA7127_REG_RCV2_OUTPUT_START                0x70
#define SAA7127_REG_RCV2_OUTPUT_END                  0x71
#define SAA7127_REG_RCV2_OUTPUT_MSBS                 0x72

/* Teletext request timing */
#define SAA7127_REG_TTX_REQUEST_H_START              0x73
#define SAA7127_REG_TTX_REQUEST_H_DELAY_LENGTH       0x74
#define SAA7127_REG_CSYNC_ADVANCE_VSYNC_SHIFT        0x75
#define SAA7127_REG_TTX_ODD_REQ_VERT_START           0x76
#define SAA7127_REG_TTX_ODD_REQ_VERT_END             0x77
#define SAA7127_REG_TTX_EVEN_REQ_VERT_START          0x78
#define SAA7127_REG_TTX_EVEN_REQ_VERT_END            0x79
#define SAA7127_REG_FIRST_ACTIVE                     0x7a
#define SAA7127_REG_LAST_ACTIVE                      0x7b
#define SAA7127_REG_MSB_VERTICAL                     0x7c
#define SAA7127_REG_DISABLE_TTX_LINE_LO_0            0x7e
#define SAA7127_REG_DISABLE_TTX_LINE_LO_1            0x7f
/*
**********************************************************************
*
* Arrays with configuration parameters for the SAA7127
*
**********************************************************************
*/
/* One (register, value) pair; init tables end with a { 0, 0 } sentinel. */
struct i2c_reg_value {
	unsigned char reg;
	unsigned char value;
};
/* Extra register overrides applied only on the SAA7129 variant. */
static const struct i2c_reg_value saa7129_init_config_extra[] = {
	{ SAA7127_REG_OUTPUT_PORT_CONTROL,		0x38 },
	{ SAA7127_REG_VTRIG,				0xfa },
	{ 0, 0 }
};
/* Standard-independent base configuration, written once at probe time. */
static const struct i2c_reg_value saa7127_init_config_common[] = {
	{ SAA7127_REG_WIDESCREEN_CONFIG,		0x0d },
	{ SAA7127_REG_WIDESCREEN_ENABLE,		0x00 },
	{ SAA7127_REG_COPYGEN_0,			0x77 },
	{ SAA7127_REG_COPYGEN_1,			0x41 },
	{ SAA7127_REG_COPYGEN_2,			0x00 },	/* Macrovision enable/disable */
	{ SAA7127_REG_OUTPUT_PORT_CONTROL,		0xbf },
	{ SAA7127_REG_GAIN_LUMINANCE_RGB,		0x00 },
	{ SAA7127_REG_GAIN_COLORDIFF_RGB,		0x00 },
	{ SAA7127_REG_INPUT_PORT_CONTROL_1,		0x80 },	/* for color bars */
	{ SAA7127_REG_LINE_21_ODD_0,			0x77 },
	{ SAA7127_REG_LINE_21_ODD_1,			0x41 },
	{ SAA7127_REG_LINE_21_EVEN_0,			0x88 },
	{ SAA7127_REG_LINE_21_EVEN_1,			0x41 },
	{ SAA7127_REG_RCV_PORT_CONTROL,			0x12 },
	{ SAA7127_REG_VTRIG,				0xf9 },
	{ SAA7127_REG_HTRIG_HI,				0x00 },
	{ SAA7127_REG_RCV2_OUTPUT_START,		0x41 },
	{ SAA7127_REG_RCV2_OUTPUT_END,			0xc3 },
	{ SAA7127_REG_RCV2_OUTPUT_MSBS,			0x00 },
	{ SAA7127_REG_TTX_REQUEST_H_START,		0x3e },
	{ SAA7127_REG_TTX_REQUEST_H_DELAY_LENGTH,	0xb8 },
	{ SAA7127_REG_CSYNC_ADVANCE_VSYNC_SHIFT,	0x03 },
	{ SAA7127_REG_TTX_ODD_REQ_VERT_START,		0x15 },
	{ SAA7127_REG_TTX_ODD_REQ_VERT_END,		0x16 },
	{ SAA7127_REG_TTX_EVEN_REQ_VERT_START,		0x15 },
	{ SAA7127_REG_TTX_EVEN_REQ_VERT_END,		0x16 },
	{ SAA7127_REG_FIRST_ACTIVE,			0x1a },
	{ SAA7127_REG_LAST_ACTIVE,			0x01 },
	{ SAA7127_REG_MSB_VERTICAL,			0xc0 },
	{ SAA7127_REG_DISABLE_TTX_LINE_LO_0,		0x00 },
	{ SAA7127_REG_DISABLE_TTX_LINE_LO_1,		0x00 },
	{ 0, 0 }
};
/* 60 Hz (NTSC) timing/color configuration. */
#define SAA7127_60HZ_DAC_CONTROL 0x15
static const struct i2c_reg_value saa7127_init_config_60hz[] = {
	{ SAA7127_REG_BURST_START,			0x19 },
	/* BURST_END is also used as a chip ID in saa7127_probe */
	{ SAA7127_REG_BURST_END,			0x1d },
	{ SAA7127_REG_CHROMA_PHASE,			0xa3 },
	{ SAA7127_REG_GAINU,				0x98 },
	{ SAA7127_REG_GAINV,				0xd3 },
	{ SAA7127_REG_BLACK_LEVEL,			0x39 },
	{ SAA7127_REG_BLANKING_LEVEL,			0x2e },
	{ SAA7127_REG_VBI_BLANKING,			0x2e },
	{ SAA7127_REG_DAC_CONTROL,			0x15 },
	{ SAA7127_REG_BURST_AMP,			0x4d },
	{ SAA7127_REG_SUBC3,				0x1f },
	{ SAA7127_REG_SUBC2,				0x7c },
	{ SAA7127_REG_SUBC1,				0xf0 },
	{ SAA7127_REG_SUBC0,				0x21 },
	{ SAA7127_REG_MULTI,				0x90 },
	{ SAA7127_REG_CLOSED_CAPTION,			0x11 },
	{ 0, 0 }
};
/*
 * 50 Hz PAL timing/color configuration.
 * NOTE(review): unlike the 60 Hz table this one is not const; nothing in
 * this file writes to it, so it could likely be constified — verify no
 * other code modifies it before doing so.
 */
#define SAA7127_50HZ_PAL_DAC_CONTROL 0x02
static struct i2c_reg_value saa7127_init_config_50hz_pal[] = {
	{ SAA7127_REG_BURST_START,			0x21 },
	/* BURST_END is also used as a chip ID in saa7127_probe */
	{ SAA7127_REG_BURST_END,			0x1d },
	{ SAA7127_REG_CHROMA_PHASE,			0x3f },
	{ SAA7127_REG_GAINU,				0x7d },
	{ SAA7127_REG_GAINV,				0xaf },
	{ SAA7127_REG_BLACK_LEVEL,			0x33 },
	{ SAA7127_REG_BLANKING_LEVEL,			0x35 },
	{ SAA7127_REG_VBI_BLANKING,			0x35 },
	{ SAA7127_REG_DAC_CONTROL,			0x02 },
	{ SAA7127_REG_BURST_AMP,			0x2f },
	{ SAA7127_REG_SUBC3,				0xcb },
	{ SAA7127_REG_SUBC2,				0x8a },
	{ SAA7127_REG_SUBC1,				0x09 },
	{ SAA7127_REG_SUBC0,				0x2a },
	{ SAA7127_REG_MULTI,				0xa0 },
	{ SAA7127_REG_CLOSED_CAPTION,			0x00 },
	{ 0, 0 }
};
/*
 * 50 Hz SECAM timing/color configuration (selected only on SAA7129 —
 * see saa7127_set_std).
 * NOTE(review): could likely be const like the 60 Hz table; verify.
 */
#define SAA7127_50HZ_SECAM_DAC_CONTROL 0x08
static struct i2c_reg_value saa7127_init_config_50hz_secam[] = {
	{ SAA7127_REG_BURST_START,			0x21 },
	/* BURST_END is also used as a chip ID in saa7127_probe */
	{ SAA7127_REG_BURST_END,			0x1d },
	{ SAA7127_REG_CHROMA_PHASE,			0x3f },
	{ SAA7127_REG_GAINU,				0x6a },
	{ SAA7127_REG_GAINV,				0x81 },
	{ SAA7127_REG_BLACK_LEVEL,			0x33 },
	{ SAA7127_REG_BLANKING_LEVEL,			0x35 },
	{ SAA7127_REG_VBI_BLANKING,			0x35 },
	{ SAA7127_REG_DAC_CONTROL,			0x08 },
	{ SAA7127_REG_BURST_AMP,			0x2f },
	{ SAA7127_REG_SUBC3,				0xb2 },
	{ SAA7127_REG_SUBC2,				0x3b },
	{ SAA7127_REG_SUBC1,				0xa3 },
	{ SAA7127_REG_SUBC0,				0x28 },
	{ SAA7127_REG_MULTI,				0x90 },
	{ SAA7127_REG_CLOSED_CAPTION,			0x00 },
	{ 0, 0 }
};
/*
**********************************************************************
*
* Encoder Struct, holds the configuration state of the encoder
*
**********************************************************************
*/
/* Per-device encoder state; also caches (shadows) key register values. */
struct saa7127_state {
	struct v4l2_subdev sd;
	v4l2_std_id std;			/* currently selected TV standard */
	u32 ident;				/* detected chip: SAA7127 or SAA7129 */
	enum saa7127_input_type input_type;	/* normal input or color-bar generator */
	enum saa7127_output_type output_type;
	int video_enable;			/* non-zero when video output is on */
	int wss_enable;
	u16 wss_mode;				/* raw WSS payload: (data[1]&0x3f)<<8 | data[0] */
	int cc_enable;
	u16 cc_data;				/* last closed-caption word written */
	int xds_enable;
	u16 xds_data;				/* last XDS word written */
	int vps_enable;
	u8 vps_data[5];				/* last VPS bytes written */
	u8 reg_2d;				/* shadow of output port control (0x2d) */
	u8 reg_3a;				/* shadow of input port control (0x3a) */
	u8 reg_3a_cb;	/* colorbar bit */
	u8 reg_61;				/* shadow of DAC control (0x61) */
};
/* Map the embedded v4l2_subdev back to its containing saa7127_state. */
static inline struct saa7127_state *to_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct saa7127_state, sd);
}
/* Human-readable names indexed by enum saa7127_output_type. */
static const char * const output_strs[] =
{
	"S-Video + Composite",
	"Composite",
	"S-Video",
	"RGB",
	"YUV C",
	"YUV V"
};
/*
 * WSS aspect-ratio descriptions, indexed by the low 4 bits of the
 * first WSS data byte (16 entries — keep any index masked to 0xf).
 */
static const char * const wss_strs[] = {
	"invalid",
	"letterbox 14:9 center",
	"letterbox 14:9 top",
	"invalid",
	"letterbox 16:9 top",
	"invalid",
	"invalid",
	"16:9 full format anamorphic",
	"4:3 full format",
	"invalid",
	"invalid",
	"letterbox 16:9 center",
	"invalid",
	"letterbox >16:9 center",
	"14:9 full format center",
	"invalid",
};
/* ----------------------------------------------------------------------- */
/* Read one encoder register over SMBus; returns the byte or a negative errno. */
static int saa7127_read(struct v4l2_subdev *sd, u8 reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret = i2c_smbus_read_byte_data(client, reg);

	return ret;
}
/* ----------------------------------------------------------------------- */
/*
 * Write one encoder register over SMBus, retrying up to three times
 * before logging an error.  Returns 0 on success, -1 on failure.
 */
static int saa7127_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int attempts = 3;

	while (attempts--) {
		if (i2c_smbus_write_byte_data(client, reg, val) == 0)
			return 0;
	}
	v4l2_err(sd, "I2C Write Problem\n");
	return -1;
}
/* ----------------------------------------------------------------------- */
/* Write a { 0, 0 }-terminated register/value table to the encoder. */
static int saa7127_write_inittab(struct v4l2_subdev *sd,
				 const struct i2c_reg_value *regs)
{
	for (; regs->reg != 0; regs++)
		saa7127_write(sd, regs->reg, regs->value);
	return 0;
}
/* ----------------------------------------------------------------------- */
/*
 * Enable/disable VPS insertion and program the 5 VPS data bytes.
 * VPS is only valid on field 0, line 16; a zero line disables it.
 * Registers 0x54-0x59 are the VPS enable/data registers (per datasheet —
 * they have no named #define in this file).
 */
static int saa7127_set_vps(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *data)
{
	struct saa7127_state *state = to_state(sd);
	int enable = (data->line != 0);

	if (enable && (data->field != 0 || data->line != 16))
		return -EINVAL;
	if (state->vps_enable != enable) {
		v4l2_dbg(1, debug, sd, "Turn VPS Signal %s\n", enable ? "on" : "off");
		saa7127_write(sd, 0x54, enable << 7);
		state->vps_enable = enable;
	}
	if (!enable)
		return 0;

	/* Only bytes 2 and 8..11 of the sliced payload carry VPS data */
	state->vps_data[0] = data->data[2];
	state->vps_data[1] = data->data[8];
	state->vps_data[2] = data->data[9];
	state->vps_data[3] = data->data[10];
	state->vps_data[4] = data->data[11];
	v4l2_dbg(1, debug, sd, "Set VPS data %02x %02x %02x %02x %02x\n",
		state->vps_data[0], state->vps_data[1],
		state->vps_data[2], state->vps_data[3],
		state->vps_data[4]);
	saa7127_write(sd, 0x55, state->vps_data[0]);
	saa7127_write(sd, 0x56, state->vps_data[1]);
	saa7127_write(sd, 0x57, state->vps_data[2]);
	saa7127_write(sd, 0x58, state->vps_data[3]);
	saa7127_write(sd, 0x59, state->vps_data[4]);
	return 0;
}
/* ----------------------------------------------------------------------- */
/*
 * Enable/disable closed-caption insertion (field 0, line 21) and write
 * the two CC data bytes.  The CC enable shares register 0x6f with the
 * XDS enable bit, so the cached xds_enable is re-written alongside it.
 */
static int saa7127_set_cc(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *data)
{
	struct saa7127_state *state = to_state(sd);
	u16 cc = data->data[1] << 8 | data->data[0];
	int enable = (data->line != 0);

	if (enable && (data->field != 0 || data->line != 21))
		return -EINVAL;
	if (state->cc_enable != enable) {
		v4l2_dbg(1, debug, sd,
			"Turn CC %s\n", enable ? "on" : "off");
		saa7127_write(sd, SAA7127_REG_CLOSED_CAPTION,
			      (state->xds_enable << 7) | (enable << 6) | 0x11);
		state->cc_enable = enable;
	}
	if (!enable)
		return 0;

	v4l2_dbg(2, debug, sd, "CC data: %04x\n", cc);
	saa7127_write(sd, SAA7127_REG_LINE_21_ODD_0, cc & 0xff);
	saa7127_write(sd, SAA7127_REG_LINE_21_ODD_1, cc >> 8);
	state->cc_data = cc;
	return 0;
}
/* ----------------------------------------------------------------------- */
/*
 * Enable/disable XDS insertion (field 1, line 21) and write the two
 * XDS data bytes.  Mirrors saa7127_set_cc but for the even field; the
 * XDS enable shares register 0x6f with the CC enable bit.
 */
static int saa7127_set_xds(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *data)
{
	struct saa7127_state *state = to_state(sd);
	u16 xds = data->data[1] << 8 | data->data[0];
	int enable = (data->line != 0);

	if (enable && (data->field != 1 || data->line != 21))
		return -EINVAL;
	if (state->xds_enable != enable) {
		v4l2_dbg(1, debug, sd, "Turn XDS %s\n", enable ? "on" : "off");
		saa7127_write(sd, SAA7127_REG_CLOSED_CAPTION,
				(enable << 7) | (state->cc_enable << 6) | 0x11);
		state->xds_enable = enable;
	}
	if (!enable)
		return 0;

	v4l2_dbg(2, debug, sd, "XDS data: %04x\n", xds);
	saa7127_write(sd, SAA7127_REG_LINE_21_EVEN_0, xds & 0xff);
	saa7127_write(sd, SAA7127_REG_LINE_21_EVEN_1, xds >> 8);
	state->xds_data = xds;
	return 0;
}
/* ----------------------------------------------------------------------- */
/*
 * Enable/disable widescreen signalling (field 0, line 23) and program
 * the WSS payload.  Note register 0x27 is written twice on enable: the
 * first write toggles the enable bit, the second re-asserts it together
 * with the high payload bits — the order matters.
 */
static int saa7127_set_wss(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *data)
{
	struct saa7127_state *state = to_state(sd);
	int enable = (data->line != 0);

	if (enable && (data->field != 0 || data->line != 23))
		return -EINVAL;
	if (state->wss_enable != enable) {
		v4l2_dbg(1, debug, sd, "Turn WSS %s\n", enable ? "on" : "off");
		saa7127_write(sd, 0x27, enable << 7);
		state->wss_enable = enable;
	}
	if (!enable)
		return 0;

	saa7127_write(sd, 0x26, data->data[0]);
	saa7127_write(sd, 0x27, 0x80 | (data->data[1] & 0x3f));
	v4l2_dbg(1, debug, sd,
		"WSS mode: %s\n", wss_strs[data->data[0] & 0xf]);
	/* Cache the full 14-bit payload, not just the aspect nibble */
	state->wss_mode = (data->data[1] & 0x3f) << 8 | data->data[0];
	return 0;
}
/* ----------------------------------------------------------------------- */
/*
 * Gate video output by rewriting the output-port-control (0x2d) and
 * DAC-control (0x61) registers from their shadows.  Disabling masks
 * the low nibble of 0x2d and sets the top two bits of 0x61 (DACs off).
 */
static int saa7127_set_video_enable(struct v4l2_subdev *sd, int enable)
{
	struct saa7127_state *state = to_state(sd);

	if (enable) {
		v4l2_dbg(1, debug, sd, "Enable Video Output\n");
		saa7127_write(sd, 0x2d, state->reg_2d);
		saa7127_write(sd, 0x61, state->reg_61);
	} else {
		v4l2_dbg(1, debug, sd, "Disable Video Output\n");
		saa7127_write(sd, 0x2d, (state->reg_2d & 0xf0));
		saa7127_write(sd, 0x61, (state->reg_61 | 0xc0));
	}
	state->video_enable = enable;
	return 0;
}
/* ----------------------------------------------------------------------- */
/*
 * Select the output standard: NTSC-timed (60 Hz), SECAM (SAA7129 only),
 * or PAL (the default for other 50 Hz standards), then write the
 * matching init table and remember the DAC control shadow for it.
 */
static int saa7127_set_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct saa7127_state *state = to_state(sd);
	const struct i2c_reg_value *inittab;

	if (std & V4L2_STD_525_60) {
		v4l2_dbg(1, debug, sd, "Selecting 60 Hz video Standard\n");
		inittab = saa7127_init_config_60hz;
		state->reg_61 = SAA7127_60HZ_DAC_CONTROL;
	} else if (state->ident == V4L2_IDENT_SAA7129 &&
			(std & V4L2_STD_SECAM) &&
			!(std & (V4L2_STD_625_50 & ~V4L2_STD_SECAM))) {
		/* If and only if SECAM, with a SAA712[89] */
		v4l2_dbg(1, debug, sd,
			 "Selecting 50 Hz SECAM video Standard\n");
		inittab = saa7127_init_config_50hz_secam;
		state->reg_61 = SAA7127_50HZ_SECAM_DAC_CONTROL;
	} else {
		v4l2_dbg(1, debug, sd, "Selecting 50 Hz PAL video Standard\n");
		inittab = saa7127_init_config_50hz_pal;
		state->reg_61 = SAA7127_50HZ_PAL_DAC_CONTROL;
	}

	/* Write Table */
	saa7127_write_inittab(sd, inittab);
	state->std = std;
	return 0;
}
/* ----------------------------------------------------------------------- */
/*
 * Select the physical output configuration.  Computes new shadows for
 * the output-port-control (0x2d) and input-port-control (0x3a)
 * registers — the SAA7129 uses different 0x2d encodings than the
 * SAA7127 — then writes both registers (0x3a merged with the cached
 * color-bar bit).  Returns -EINVAL for an unknown output type.
 */
static int saa7127_set_output_type(struct v4l2_subdev *sd, int output)
{
	struct saa7127_state *state = to_state(sd);

	switch (output) {
	case SAA7127_OUTPUT_TYPE_RGB:
		state->reg_2d = 0x0f;	/* RGB + CVBS (for sync) */
		state->reg_3a = 0x13;	/* by default switch YUV to RGB-matrix on */
		break;

	case SAA7127_OUTPUT_TYPE_COMPOSITE:
		if (state->ident == V4L2_IDENT_SAA7129)
			state->reg_2d = 0x20;	/* CVBS only */
		else
			state->reg_2d = 0x08;	/* 00001000 CVBS only, RGB DAC's off (high impedance mode) */
		state->reg_3a = 0x13;	/* by default switch YUV to RGB-matrix on */
		break;

	case SAA7127_OUTPUT_TYPE_SVIDEO:
		if (state->ident == V4L2_IDENT_SAA7129)
			state->reg_2d = 0x18;	/* Y + C */
		else
			state->reg_2d = 0xff;	/*11111111  croma -> R, luma -> CVBS + G + B */
		state->reg_3a = 0x13;	/* by default switch YUV to RGB-matrix on */
		break;

	case SAA7127_OUTPUT_TYPE_YUV_V:
		state->reg_2d = 0x4f;	/* reg 2D = 01001111, all DAC's on, RGB + VBS */
		state->reg_3a = 0x0b;	/* reg 3A = 00001011, bypass RGB-matrix */
		break;

	case SAA7127_OUTPUT_TYPE_YUV_C:
		state->reg_2d = 0x0f;	/* reg 2D = 00001111, all DAC's on, RGB + CVBS */
		state->reg_3a = 0x0b;	/* reg 3A = 00001011, bypass RGB-matrix */
		break;

	case SAA7127_OUTPUT_TYPE_BOTH:
		if (state->ident == V4L2_IDENT_SAA7129)
			state->reg_2d = 0x38;
		else
			state->reg_2d = 0xbf;
		state->reg_3a = 0x13;	/* by default switch YUV to RGB-matrix on */
		break;

	default:
		return -EINVAL;
	}
	v4l2_dbg(1, debug, sd,
		"Selecting %s output type\n", output_strs[output]);

	/* Configure Encoder */
	saa7127_write(sd, 0x2d, state->reg_2d);
	saa7127_write(sd, 0x3a, state->reg_3a | state->reg_3a_cb);
	state->output_type = output;
	return 0;
}
/* ----------------------------------------------------------------------- */
/*
 * Select the encoder input: the normal video input or the internal
 * color-bar generator.  Only the color-bar bit (bit 7) of the cached
 * 0x3a shadow changes; the register is rewritten with it merged in.
 */
static int saa7127_set_input_type(struct v4l2_subdev *sd, int input)
{
	struct saa7127_state *state = to_state(sd);

	switch (input) {
	case SAA7127_INPUT_TYPE_NORMAL:	/* avia */
		v4l2_dbg(1, debug, sd, "Selecting Normal Encoder Input\n");
		state->reg_3a_cb = 0;
		break;

	case SAA7127_INPUT_TYPE_TEST_IMAGE:	/* color bar */
		v4l2_dbg(1, debug, sd, "Selecting Color Bar generator\n");
		state->reg_3a_cb = 0x80;
		break;

	default:
		return -EINVAL;
	}
	saa7127_write(sd, 0x3a, state->reg_3a | state->reg_3a_cb);
	state->input_type = input;
	return 0;
}
/* ----------------------------------------------------------------------- */
/* V4L2 callback: switch output standard, skipping redundant reprogramming. */
static int saa7127_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct saa7127_state *state = to_state(sd);

	return (state->std == std) ? 0 : saa7127_set_std(sd, std);
}
/*
 * V4L2 callback: set input and output routing.  Each side is only
 * reprogrammed when it actually changes; the output is skipped if the
 * input update failed.  'config' is unused by this encoder.
 */
static int saa7127_s_routing(struct v4l2_subdev *sd,
			     u32 input, u32 output, u32 config)
{
	struct saa7127_state *state = to_state(sd);
	int rc = 0;

	if (input != state->input_type)
		rc = saa7127_set_input_type(sd, input);
	if (!rc && output != state->output_type)
		rc = saa7127_set_output_type(sd, output);
	return rc;
}
/* V4L2 callback: enable/disable video output, skipping no-op requests. */
static int saa7127_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct saa7127_state *state = to_state(sd);

	return (state->video_enable == enable) ?
		0 : saa7127_set_video_enable(sd, enable);
}
static int saa7127_g_sliced_fmt(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_format *fmt)
{
struct saa7127_state *state = to_state(sd);
memset(fmt, 0, sizeof(*fmt));
if (state->vps_enable)
fmt->service_lines[0][16] = V4L2_SLICED_VPS;
if (state->wss_enable)
fmt->service_lines[0][23] = V4L2_SLICED_WSS_625;
if (state->cc_enable) {
fmt->service_lines[0][21] = V4L2_SLICED_CAPTION_525;
fmt->service_lines[1][21] = V4L2_SLICED_CAPTION_525;
}
fmt->service_set =
(state->vps_enable ? V4L2_SLICED_VPS : 0) |
(state->wss_enable ? V4L2_SLICED_WSS_625 : 0) |
(state->cc_enable ? V4L2_SLICED_CAPTION_525 : 0);
return 0;
}
/*
 * V4L2 callback: route one sliced-VBI packet to the matching service
 * handler.  Closed captions on field 0 go to the CC registers, field 1
 * to XDS.  The switch is exhaustive — the trailing unreachable
 * "return 0" of the original was dead code and has been removed.
 */
static int saa7127_s_vbi_data(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *data)
{
	switch (data->id) {
	case V4L2_SLICED_WSS_625:
		return saa7127_set_wss(sd, data);
	case V4L2_SLICED_VPS:
		return saa7127_set_vps(sd, data);
	case V4L2_SLICED_CAPTION_525:
		if (data->field == 0)
			return saa7127_set_cc(sd, data);
		return saa7127_set_xds(sd, data);
	default:
		return -EINVAL;
	}
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Debug ioctl: read an arbitrary encoder register (CAP_SYS_ADMIN only). */
static int saa7127_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (!v4l2_chip_match_i2c_client(client, &reg->match))
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	reg->size = 1;
	reg->val = saa7127_read(sd, reg->reg & 0xff);
	return 0;
}
/* Debug ioctl: write an arbitrary encoder register (CAP_SYS_ADMIN only). */
static int saa7127_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (!v4l2_chip_match_i2c_client(client, &reg->match))
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	saa7127_write(sd, reg->reg & 0xff, reg->val & 0xff);
	return 0;
}
#endif
/* V4L2 callback: report the chip identity detected at probe time. */
static int saa7127_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
	struct saa7127_state *state = to_state(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return v4l2_chip_ident_i2c_client(client, chip, state->ident, 0);
}
/*
 * V4L2 callback: log the current encoder state.
 *
 * Bug fix: wss_mode caches the full WSS payload
 * ((data[1] & 0x3f) << 8 | data[0], set in saa7127_set_wss), but
 * wss_strs[] has only 16 entries describing the aspect-ratio nibble.
 * Indexing with the raw value could read far past the end of the
 * array; mask to the low 4 bits, matching the debug print in
 * saa7127_set_wss.
 */
static int saa7127_log_status(struct v4l2_subdev *sd)
{
	struct saa7127_state *state = to_state(sd);

	v4l2_info(sd, "Standard: %s\n", (state->std & V4L2_STD_525_60) ? "60 Hz" : "50 Hz");
	v4l2_info(sd, "Input:    %s\n", state->input_type ?  "color bars" : "normal");
	v4l2_info(sd, "Output:   %s\n", state->video_enable ?
			output_strs[state->output_type] : "disabled");
	v4l2_info(sd, "WSS:      %s\n", state->wss_enable ?
			wss_strs[state->wss_mode & 0xf] : "disabled");
	v4l2_info(sd, "VPS:      %s\n", state->vps_enable ? "enabled" : "disabled");
	v4l2_info(sd, "CC:       %s\n", state->cc_enable ? "enabled" : "disabled");
	return 0;
}
/* ----------------------------------------------------------------------- */
/* Core subdev operations (status logging, chip ident, debug registers). */
static const struct v4l2_subdev_core_ops saa7127_core_ops = {
	.log_status = saa7127_log_status,
	.g_chip_ident = saa7127_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = saa7127_g_register,
	.s_register = saa7127_s_register,
#endif
};
/* Video subdev operations (standard, routing, streaming control). */
static const struct v4l2_subdev_video_ops saa7127_video_ops = {
	.s_std_output = saa7127_s_std_output,
	.s_routing = saa7127_s_routing,
	.s_stream = saa7127_s_stream,
};
/* Sliced-VBI subdev operations (WSS/VPS/CC/XDS insertion). */
static const struct v4l2_subdev_vbi_ops saa7127_vbi_ops = {
	.s_vbi_data = saa7127_s_vbi_data,
	.g_sliced_fmt = saa7127_g_sliced_fmt,
};
static const struct v4l2_subdev_ops saa7127_ops = {
.core = &saa7127_core_ops,
.video = &saa7127_video_ops,
.vbi = &saa7127_vbi_ops,
};
/* ----------------------------------------------------------------------- */
/*
 * Probe for an saa7127-family video encoder on @client and register a
 * v4l2 subdevice for it.
 *
 * Presence is verified via two signature register reads; the chip type
 * (saa7127 vs saa7129) is auto-detected when the i2c id requests it.
 * On success the encoder is programmed with safe defaults: NTSC, both
 * outputs enabled, all sliced-VBI insertion disabled.
 *
 * Returns 0 on success, -EIO if the adapter lacks SMBus byte-data
 * support, -ENOMEM on allocation failure, -ENODEV if no chip responds.
 */
static int saa7127_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
{
        struct saa7127_state *state;
        struct v4l2_subdev *sd;
        struct v4l2_sliced_vbi_data vbi = { 0, 0, 0, 0 }; /* set to disabled */

        /* Check if the adapter supports the needed features */
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
                return -EIO;

        v4l_dbg(1, debug, client, "detecting saa7127 client on address 0x%x\n",
                        client->addr << 1);

        state = kzalloc(sizeof(struct saa7127_state), GFP_KERNEL);
        if (state == NULL)
                return -ENOMEM;

        sd = &state->sd;
        v4l2_i2c_subdev_init(sd, client, &saa7127_ops);

        /* First test register 0: Bits 5-7 are a version ID (should be 0),
           and bit 2 should also be 0.
           This is rather general, so the second test is more specific and
           looks at the 'ending point of burst in clock cycles' which is
           0x1d after a reset and not expected to ever change. */
        if ((saa7127_read(sd, 0) & 0xe4) != 0 ||
                        (saa7127_read(sd, 0x29) & 0x3f) != 0x1d) {
                v4l2_dbg(1, debug, sd, "saa7127 not found\n");
                kfree(state);
                return -ENODEV;
        }

        if (id->driver_data) {  /* Chip type is already known */
                state->ident = id->driver_data;
        } else {                /* Needs detection */
                int read_result;

                /* Detect if it's an saa7129: the fade-key register is only
                   writable on that chip, so write a test pattern, read it
                   back, then restore the original value. */
                read_result = saa7127_read(sd, SAA7129_REG_FADE_KEY_COL2);
                saa7127_write(sd, SAA7129_REG_FADE_KEY_COL2, 0xaa);
                if (saa7127_read(sd, SAA7129_REG_FADE_KEY_COL2) == 0xaa) {
                        saa7127_write(sd, SAA7129_REG_FADE_KEY_COL2,
                                        read_result);
                        state->ident = V4L2_IDENT_SAA7129;
                        strlcpy(client->name, "saa7129", I2C_NAME_SIZE);
                } else {
                        state->ident = V4L2_IDENT_SAA7127;
                        strlcpy(client->name, "saa7127", I2C_NAME_SIZE);
                }
        }

        v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
                        client->addr << 1, client->adapter->name);

        v4l2_dbg(1, debug, sd, "Configuring encoder\n");
        /* Load the common init table, then program the default state. */
        saa7127_write_inittab(sd, saa7127_init_config_common);
        saa7127_set_std(sd, V4L2_STD_NTSC);
        saa7127_set_output_type(sd, SAA7127_OUTPUT_TYPE_BOTH);
        /* vbi is all-zero, so these four calls disable VPS/WSS/CC/XDS. */
        saa7127_set_vps(sd, &vbi);
        saa7127_set_wss(sd, &vbi);
        saa7127_set_cc(sd, &vbi);
        saa7127_set_xds(sd, &vbi);
        if (test_image == 1)
                /* The Encoder has an internal Colorbar generator */
                /* This can be used for debugging */
                saa7127_set_input_type(sd, SAA7127_INPUT_TYPE_TEST_IMAGE);
        else
                saa7127_set_input_type(sd, SAA7127_INPUT_TYPE_NORMAL);
        saa7127_set_video_enable(sd, 1);

        /* The saa7129 needs an additional init table on top of the
           common one. */
        if (state->ident == V4L2_IDENT_SAA7129)
                saa7127_write_inittab(sd, saa7129_init_config_extra);
        return 0;
}
/* ----------------------------------------------------------------------- */
/*
 * Remove callback: unregister the subdevice, blank the TV output and
 * free the state allocated in probe.  Always succeeds.
 */
static int saa7127_remove(struct i2c_client *client)
{
        struct v4l2_subdev *sd = i2c_get_clientdata(client);

        v4l2_device_unregister_subdev(sd);
        /* Turn off TV output */
        saa7127_set_video_enable(sd, 0);
        kfree(to_state(sd));
        return 0;
}
/* ----------------------------------------------------------------------- */
/*
 * i2c device name -> chip ident map.  A driver_data of 0 requests
 * probe-time auto-detection; a non-zero value forces the given ident.
 *
 * Fix: made the table const — i2c_driver.id_table is a
 * "const struct i2c_device_id *", and device-id tables are immutable
 * data, so they belong in rodata (standard kernel cleanup).
 */
static const struct i2c_device_id saa7127_id[] = {
        { "saa7127_auto", 0 },  /* auto-detection */
        { "saa7126", V4L2_IDENT_SAA7127 },
        { "saa7127", V4L2_IDENT_SAA7127 },
        { "saa7128", V4L2_IDENT_SAA7129 },
        { "saa7129", V4L2_IDENT_SAA7129 },
        { }
};
MODULE_DEVICE_TABLE(i2c, saa7127_id);
/* i2c driver glue: binds the probe/remove callbacks above to the
 * device names listed in saa7127_id. */
static struct i2c_driver saa7127_driver = {
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "saa7127",
        },
        .probe = saa7127_probe,
        .remove = saa7127_remove,
        .id_table = saa7127_id,
};
static __init int init_saa7127(void)
{
return i2c_add_driver(&saa7127_driver);
}
static __exit void exit_saa7127(void)
{
i2c_del_driver(&saa7127_driver);
}
module_init(init_saa7127);
module_exit(exit_saa7127);
| gpl-2.0 |
CyanogenMod/android_kernel_samsung_klimtwifi | arch/mips/kernel/smp-mt.c | 4413 | 7298 | /*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
* Elizabeth Clarke (beth@mips.com)
* Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/smp.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
/*
 * Copy essential CP0 state from the boot VPE (VPE0) to the currently
 * selected target VPE.  Called from smvp_vpe_init() while the core is
 * in MVPE configuration state, so the write_vpe_* accessors hit the
 * VPE previously selected with settc().
 */
static void __init smvp_copy_vpe_config(void)
{
        /* Kernel mode, interrupts masked/disabled, CP0 accessible. */
        write_vpe_c0_status(
                (read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);

        /* set config to be the same as vpe0, particularly kseg0 coherency alg */
        write_vpe_c0_config( read_c0_config());

        /* make sure there are no software interrupts pending */
        write_vpe_c0_cause(0);

        /* Propagate Config7 */
        write_vpe_c0_config7(read_c0_config7());

        /* Mirror Count so both VPEs' cycle counters start near the same
           value. */
        write_vpe_c0_count(read_c0_count());
}
/*
 * Per-TC VPE setup, run with the TC selected via settc() and the core
 * in configuration state.  Deactivates every VPE except VPE0 (each is
 * re-activated individually by vsmp_boot_secondary() later), records
 * extra VPEs as possible CPUs, and disables multi-threading with TCs.
 *
 * @tc:       TC index currently selected
 * @mvpconf0: cached MVPConf0 register value
 * @ncpu:     number of secondary CPUs found so far
 *
 * Returns the updated secondary CPU count.
 */
static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
        unsigned int ncpu)
{
        /* TCs beyond the last VPE have no VPE of their own; nothing to do. */
        if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
                return ncpu;

        /* Deactivate all but VPE 0 */
        if (tc != 0) {
                unsigned long tmp = read_vpe_c0_vpeconf0();

                tmp &= ~VPECONF0_VPA;

                /* master VPE */
                tmp |= VPECONF0_MVP;
                write_vpe_c0_vpeconf0(tmp);

                /* Record this as available CPU */
                set_cpu_possible(tc, true);
                __cpu_number_map[tc]    = ++ncpu;
                __cpu_logical_map[ncpu] = tc;
        }

        /* Disable multi-threading with TC's */
        write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);

        if (tc != 0)
                smvp_copy_vpe_config();

        return ncpu;
}
/*
 * Per-TC thread-context setup, run with the TC selected via settc().
 * Binds the TC to its VPE (excess TCs all land on the last VPE), marks
 * it unallocated, interrupt-exempt and halted so it sits idle until
 * vsmp_boot_secondary() starts it.  TC 0 (the boot TC) is left alone.
 */
static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
{
        unsigned long tmp;

        if (!tc)
                return;

        /* bind a TC to each VPE, May as well put all excess TC's
           on the last VPE */
        if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1))
                write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
        else {
                write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);

                /* and set XTC */
                write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));
        }

        tmp = read_tc_c0_tcstatus();

        /* mark not allocated and not dynamically allocatable */
        tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
        tmp |= TCSTATUS_IXMT;           /* interrupt exempt */
        write_tc_c0_tcstatus(tmp);

        write_tc_c0_tchalt(TCHALT_H);
}
/*
 * Raise an inter-processor interrupt on @cpu by setting a software
 * interrupt bit in the target VPE's Cause register (relies on the 1:1
 * TC:VPE mapping).  SW1 carries SMP_CALL_FUNCTION; SW0 carries
 * reschedule requests and anything else.  Runs with local interrupts
 * off and all VPEs disabled (dvpe), since cross-VPE register access
 * is not safe while MVPE is enabled.
 */
static void vsmp_send_ipi_single(int cpu, unsigned int action)
{
        int i;
        unsigned long flags;
        int vpflags;

        local_irq_save(flags);

        vpflags = dvpe();       /* can't access the other CPU's registers whilst MVPE enabled */

        switch (action) {
        case SMP_CALL_FUNCTION:
                i = C_SW1;
                break;

        case SMP_RESCHEDULE_YOURSELF:
        default:
                i = C_SW0;
                break;
        }

        /* 1:1 mapping of vpe and tc... */
        settc(cpu);
        write_vpe_c0_cause(read_vpe_c0_cause() | i);
        evpe(vpflags);

        local_irq_restore(flags);
}
/* Deliver an IPI to every CPU named in @mask, one CPU at a time. */
static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
        unsigned int cpu;

        for_each_cpu(cpu, mask)
                vsmp_send_ipi_single(cpu, action);
}
/*
 * Early per-CPU init on a freshly booted secondary: unmask the
 * interrupt lines it will service.  The line selection is
 * Malta-specific (IPI, performance and timer interrupts), and differs
 * depending on whether a GIC is present.
 */
static void __cpuinit vsmp_init_secondary(void)
{
        extern int gic_present;

        /* This is Malta specific: IPI,performance and timer interrupts */
        if (gic_present)
                change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
                                         STATUSF_IP6 | STATUSF_IP7);
        else
                change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
                                         STATUSF_IP6 | STATUSF_IP7);
}
/*
 * Last step of bringing up a secondary: arm its Compare register for a
 * first timer tick, optionally join the FPU-affinity mask, and enable
 * local interrupts.
 */
static void __cpuinit vsmp_smp_finish(void)
{
        /* CDFIXME: remove this? */
        write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
        /* If we have an FPU, enroll ourselves in the FPU-full mask */
        if (cpu_has_fpu)
                cpu_set(smp_processor_id(), mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

        local_irq_enable();
}
/* Nothing to do once all secondaries are up; required by plat_smp_ops. */
static void vsmp_cpus_done(void)
{
}
/*
 * Setup the PC, SP, and GP of a secondary processor and start it
 * running!
 * smp_bootstrap is the place to resume from
 * __KSTK_TOS(idle) is apparently the stack pointer
 * (unsigned long)idle->thread_info the gp
 * assumes a 1:1 mapping of TC => VPE
 */
static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
{
        struct thread_info *gp = task_thread_info(idle);

        /* Enter MVPE configuration state and select the target TC. */
        dvpe();
        set_c0_mvpcontrol(MVPCONTROL_VPC);

        settc(cpu);

        /* restart */
        write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);

        /* enable the tc this vpe/cpu will be running */
        write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A);

        write_tc_c0_tchalt(0);

        /* enable the VPE */
        write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);

        /* stack pointer */
        write_tc_gpr_sp( __KSTK_TOS(idle));

        /* global pointer */
        write_tc_gpr_gp((unsigned long)gp);

        /* NOTE(review): gp is a struct pointer, so "gp + sizeof(...)"
           scales by the struct size and flushes far more than one
           thread_info.  Harmless (over-flush) but likely unintended;
           "gp + 1" would be the exact range. */
        flush_icache_range((unsigned long)gp,
                           (unsigned long)(gp + sizeof(struct thread_info)));

        /* finally out of configuration and into chaos */
        clear_c0_mvpcontrol(MVPCONTROL_VPC);

        evpe(EVPE_ENABLE);
}
/*
 * Common setup before any secondaries are started
 * Make sure all CPU's are in a sensible state before we boot any of the
 * secondaries
 *
 * Reads MVPConf0 to discover how many TCs/VPEs exist, then walks every
 * TC with the core held in configuration state, initialising each TC
 * (smvp_tc_init) and its VPE (smvp_vpe_init).  Secondaries stay parked
 * until vsmp_boot_secondary() releases them.
 */
static void __init vsmp_smp_setup(void)
{
        unsigned int mvpconf0, ntc, tc, ncpu = 0;
        unsigned int nvpe;

#ifdef CONFIG_MIPS_MT_FPAFF
        /* If we have an FPU, enroll ourselves in the FPU-full mask */
        if (cpu_has_fpu)
                cpu_set(0, mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
        if (!cpu_has_mipsmt)
                return;

        /* disable MT so we can configure */
        dvpe();
        dmt();

        /* Put MVPE's into 'configuration state' */
        set_c0_mvpcontrol(MVPCONTROL_VPC);

        mvpconf0 = read_c0_mvpconf0();
        ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;

        nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
        smp_num_siblings = nvpe;

        /* we'll always have more TC's than VPE's, so loop setting everything
           to a sensible state */
        for (tc = 0; tc <= ntc; tc++) {
                settc(tc);

                smvp_tc_init(tc, mvpconf0);
                ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
        }

        /* Release config state */
        clear_c0_mvpcontrol(MVPCONTROL_VPC);

        /* We'll wait until starting the secondaries before starting MVPE */

        printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
}
/* Apply MIPS-MT command-line/CPU-option tuning before CPUs are booted. */
static void __init vsmp_prepare_cpus(unsigned int max_cpus)
{
        mips_mt_set_cpuoptions();
}
/* Platform SMP operations for the MT/VSMP (one CPU per VPE) model. */
struct plat_smp_ops vsmp_smp_ops = {
        .send_ipi_single        = vsmp_send_ipi_single,
        .send_ipi_mask          = vsmp_send_ipi_mask,
        .init_secondary         = vsmp_init_secondary,
        .smp_finish             = vsmp_smp_finish,
        .cpus_done              = vsmp_cpus_done,
        .boot_secondary         = vsmp_boot_secondary,
        .smp_setup              = vsmp_smp_setup,
        .prepare_cpus           = vsmp_prepare_cpus,
};
| gpl-2.0 |
syhost/android_kernel_pantech_ef50l | net/ipv4/netfilter/nf_nat_proto_dccp.c | 4669 | 2832 | /*
* DCCP NAT protocol helper
*
* Copyright (c) 2005, 2006. 2008 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/dccp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
/* Last DCCP port handed out; shared rover so successive NAT mappings
 * continue the port search instead of restarting from the range base. */
static u_int16_t dccp_port_rover;

/*
 * Pick a source/destination port that makes @tuple unique, delegating
 * to the generic port allocator with our rover as state.
 */
static void
dccp_unique_tuple(struct nf_conntrack_tuple *tuple,
                  const struct nf_nat_ipv4_range *range,
                  enum nf_nat_manip_type maniptype,
                  const struct nf_conn *ct)
{
        nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
                                  &dccp_port_rover);
}
/*
 * Rewrite the DCCP port in @skb according to @tuple and fix up the
 * transport checksum.  Only the first 8 bytes of the DCCP header are
 * guaranteed by conntrack; if the packet is too short for a full
 * header, the port is still rewritten but the checksum (which lies
 * beyond those 8 bytes) is left alone.
 *
 * Returns false if the header cannot be made writable, true otherwise.
 */
static bool
dccp_manip_pkt(struct sk_buff *skb,
               unsigned int iphdroff,
               const struct nf_conntrack_tuple *tuple,
               enum nf_nat_manip_type maniptype)
{
        const struct iphdr *iph = (const void *)(skb->data + iphdroff);
        struct dccp_hdr *hdr;
        unsigned int hdroff = iphdroff + iph->ihl * 4;
        __be32 oldip, newip;
        __be16 *portptr, oldport, newport;
        int hdrsize = 8; /* DCCP connection tracking guarantees this much */

        if (skb->len >= hdroff + sizeof(struct dccp_hdr))
                hdrsize = sizeof(struct dccp_hdr);

        if (!skb_make_writable(skb, hdroff + hdrsize))
                return false;

        /* skb_make_writable may have reallocated; refetch pointers. */
        iph = (struct iphdr *)(skb->data + iphdroff);
        hdr = (struct dccp_hdr *)(skb->data + hdroff);

        if (maniptype == NF_NAT_MANIP_SRC) {
                oldip = iph->saddr;
                newip = tuple->src.u3.ip;
                newport = tuple->src.u.dccp.port;
                portptr = &hdr->dccph_sport;
        } else {
                oldip = iph->daddr;
                newip = tuple->dst.u3.ip;
                newport = tuple->dst.u.dccp.port;
                portptr = &hdr->dccph_dport;
        }

        oldport = *portptr;
        *portptr = newport;

        /* Checksum field not present in a truncated header: done. */
        if (hdrsize < sizeof(*hdr))
                return true;

        /* The IP address change affects the pseudo-header (last arg 1);
           the port change does not (last arg 0). */
        inet_proto_csum_replace4(&hdr->dccph_checksum, skb, oldip, newip, 1);
        inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
                                 0);
        return true;
}
/* NAT protocol descriptor for DCCP; in_range reuses the generic
 * port-range helper, and netlink range parsing is included when
 * conntrack netlink support is built. */
static const struct nf_nat_protocol nf_nat_protocol_dccp = {
        .protonum               = IPPROTO_DCCP,
        .manip_pkt              = dccp_manip_pkt,
        .in_range               = nf_nat_proto_in_range,
        .unique_tuple           = dccp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
        .nlattr_to_range        = nf_nat_proto_nlattr_to_range,
#endif
};
/* Register the DCCP NAT protocol handler with the NAT core. */
static int __init nf_nat_proto_dccp_init(void)
{
        return nf_nat_protocol_register(&nf_nat_protocol_dccp);
}

/* Unregister the handler on module unload. */
static void __exit nf_nat_proto_dccp_fini(void)
{
        nf_nat_protocol_unregister(&nf_nat_protocol_dccp);
}

module_init(nf_nat_proto_dccp_init);
module_exit(nf_nat_proto_dccp_fini);

MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("DCCP NAT protocol helper");
MODULE_LICENSE("GPL");
| gpl-2.0 |
GustavoRD78/78Kernel-ZL-569 | drivers/gpu/drm/radeon/radeon_clocks.c | 4925 | 26833 | /*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
/* Read back the current engine (SCLK) frequency, in 10 kHz units.
 * Returns 0 if the reference divider reads as zero. */
uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
{
        struct radeon_pll *spll = &rdev->clock.spll;
        uint32_t feedback, refdiv, postdiv, sclk;

        /* Feedback value is stored halved in the register. */
        feedback = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
        feedback = (feedback >> RADEON_SPLL_FB_DIV_SHIFT) & RADEON_SPLL_FB_DIV_MASK;
        feedback <<= 1;
        feedback *= spll->reference_freq;

        refdiv = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
        if (!refdiv)
                return 0;

        sclk = feedback / refdiv;

        /* Source-select field doubles as the post divider encoding. */
        postdiv = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
        switch (postdiv) {
        case 2:
                sclk >>= 1;
                break;
        case 3:
                sclk >>= 2;
                break;
        case 4:
                sclk >>= 3;
                break;
        }
        return sclk;
}
/* Read back the current memory (MCLK) frequency, in 10 kHz units.
 * Returns 0 if the reference divider reads as zero. */
uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
{
        struct radeon_pll *mpll = &rdev->clock.mpll;
        uint32_t feedback, refdiv, postdiv, mclk;

        /* Feedback value is stored halved in the register. */
        feedback = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
        feedback = (feedback >> RADEON_MPLL_FB_DIV_SHIFT) & RADEON_MPLL_FB_DIV_MASK;
        feedback <<= 1;
        feedback *= mpll->reference_freq;

        refdiv = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
        if (!refdiv)
                return 0;

        mclk = feedback / refdiv;

        postdiv = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
        switch (postdiv) {
        case 2:
                mclk >>= 1;
                break;
        case 3:
                mclk >>= 2;
                break;
        case 4:
                mclk >>= 3;
                break;
        }
        return mclk;
}
#ifdef CONFIG_OF
/*
* Read XTAL (ref clock), SCLK and MCLK from Open Firmware device
* tree. Hopefully, ATI OF driver is kind enough to fill these
*/
/*
 * Fill in PLL reference clock and default SCLK/MCLK from the Open
 * Firmware device tree ("ATY,RefCLK", "ATY,SCLK", "ATY,MCLK"
 * properties, all in Hz*10 — hence the /10 to our 10 kHz units).
 * PLL input/output limits are not in the device tree and are filled
 * from per-family defaults.
 *
 * Returns true if a usable RefCLK property was found, false otherwise.
 */
static bool radeon_read_clocks_OF(struct drm_device *dev)
{
        struct radeon_device *rdev = dev->dev_private;
        struct device_node *dp = rdev->pdev->dev.of_node;
        const u32 *val;
        struct radeon_pll *p1pll = &rdev->clock.p1pll;
        struct radeon_pll *p2pll = &rdev->clock.p2pll;
        struct radeon_pll *spll = &rdev->clock.spll;
        struct radeon_pll *mpll = &rdev->clock.mpll;

        if (dp == NULL)
                return false;
        val = of_get_property(dp, "ATY,RefCLK", NULL);
        if (!val || !*val) {
                printk(KERN_WARNING "radeonfb: No ATY,RefCLK property !\n");
                return false;
        }
        p1pll->reference_freq = p2pll->reference_freq = (*val) / 10;
        /* Reference divider comes from hardware; guard against a bogus
           (too small) readback with the conventional fallback of 12. */
        p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
        if (p1pll->reference_div < 2)
                p1pll->reference_div = 12;
        p2pll->reference_div = p1pll->reference_div;

        /* These aren't in the device-tree */
        if (rdev->family >= CHIP_R420) {
                p1pll->pll_in_min = 100;
                p1pll->pll_in_max = 1350;
                p1pll->pll_out_min = 20000;
                p1pll->pll_out_max = 50000;
                p2pll->pll_in_min = 100;
                p2pll->pll_in_max = 1350;
                p2pll->pll_out_min = 20000;
                p2pll->pll_out_max = 50000;
        } else {
                p1pll->pll_in_min = 40;
                p1pll->pll_in_max = 500;
                p1pll->pll_out_min = 12500;
                p1pll->pll_out_max = 35000;
                p2pll->pll_in_min = 40;
                p2pll->pll_in_max = 500;
                p2pll->pll_out_min = 12500;
                p2pll->pll_out_max = 35000;
        }
        /* not sure what the max should be in all cases */
        rdev->clock.max_pixel_clock = 35000;

        spll->reference_freq = mpll->reference_freq = p1pll->reference_freq;
        spll->reference_div = mpll->reference_div =
                RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
                            RADEON_M_SPLL_REF_DIV_MASK;

        /* Prefer the firmware-provided defaults; fall back to reading
           the current clocks out of the hardware. */
        val = of_get_property(dp, "ATY,SCLK", NULL);
        if (val && *val)
                rdev->clock.default_sclk = (*val) / 10;
        else
                rdev->clock.default_sclk =
                        radeon_legacy_get_engine_clock(rdev);

        val = of_get_property(dp, "ATY,MCLK", NULL);
        if (val && *val)
                rdev->clock.default_mclk = (*val) / 10;
        else
                rdev->clock.default_mclk =
                        radeon_legacy_get_memory_clock(rdev);

        DRM_INFO("Using device-tree clock info\n");

        return true;
}
#else
/* No Open Firmware support in this build: never provides clock info. */
static bool radeon_read_clocks_OF(struct drm_device *dev)
{
        return false;
}
#endif /* CONFIG_OF */
/*
 * Populate rdev->clock: PLL reference frequencies/dividers, divider
 * limits and default engine/memory clocks.
 *
 * Source priority: ATOM BIOS or combios tables, then the Open Firmware
 * device tree, then hard-coded generic defaults.  Whatever the source,
 * suspicious (too small) reference dividers are patched up, fixed
 * per-PLL divider limits are applied, and the current clocks become
 * the power-management baseline.
 */
void radeon_get_clock_info(struct drm_device *dev)
{
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_pll *p1pll = &rdev->clock.p1pll;
        struct radeon_pll *p2pll = &rdev->clock.p2pll;
        struct radeon_pll *dcpll = &rdev->clock.dcpll;
        struct radeon_pll *spll = &rdev->clock.spll;
        struct radeon_pll *mpll = &rdev->clock.mpll;
        int ret;

        /* Try the video BIOS tables first... */
        if (rdev->is_atom_bios)
                ret = radeon_atom_get_clock_info(dev);
        else
                ret = radeon_combios_get_clock_info(dev);
        /* ...then the OF device tree. */
        if (!ret)
                ret = radeon_read_clocks_OF(dev);

        if (ret) {
                /* BIOS/OF data found: sanitize reference dividers that
                   read back too small to be real. */
                if (p1pll->reference_div < 2) {
                        if (!ASIC_IS_AVIVO(rdev)) {
                                u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
                                if (ASIC_IS_R300(rdev))
                                        p1pll->reference_div =
                                                (tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
                                else
                                        p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
                                if (p1pll->reference_div < 2)
                                        p1pll->reference_div = 12;
                        } else
                                p1pll->reference_div = 12;
                }
                if (p2pll->reference_div < 2)
                        p2pll->reference_div = 12;
                if (rdev->family < CHIP_RS600) {
                        if (spll->reference_div < 2)
                                spll->reference_div =
                                        RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
                                        RADEON_M_SPLL_REF_DIV_MASK;
                }
                if (mpll->reference_div < 2)
                        mpll->reference_div = spll->reference_div;
        } else {
                /* No clock info anywhere: fall back to per-family
                   hard-coded defaults and hardware readback. */
                if (ASIC_IS_AVIVO(rdev)) {
                        /* TODO FALLBACK */
                } else {
                        DRM_INFO("Using generic clock info\n");
                        /* may need to be per card */
                        rdev->clock.max_pixel_clock = 35000;
                        if (rdev->flags & RADEON_IS_IGP) {
                                p1pll->reference_freq = 1432;
                                p2pll->reference_freq = 1432;
                                spll->reference_freq = 1432;
                                mpll->reference_freq = 1432;
                        } else {
                                p1pll->reference_freq = 2700;
                                p2pll->reference_freq = 2700;
                                spll->reference_freq = 2700;
                                mpll->reference_freq = 2700;
                        }
                        p1pll->reference_div =
                            RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
                        if (p1pll->reference_div < 2)
                                p1pll->reference_div = 12;
                        p2pll->reference_div = p1pll->reference_div;

                        if (rdev->family >= CHIP_R420) {
                                p1pll->pll_in_min = 100;
                                p1pll->pll_in_max = 1350;
                                p1pll->pll_out_min = 20000;
                                p1pll->pll_out_max = 50000;
                                p2pll->pll_in_min = 100;
                                p2pll->pll_in_max = 1350;
                                p2pll->pll_out_min = 20000;
                                p2pll->pll_out_max = 50000;
                        } else {
                                p1pll->pll_in_min = 40;
                                p1pll->pll_in_max = 500;
                                p1pll->pll_out_min = 12500;
                                p1pll->pll_out_max = 35000;
                                p2pll->pll_in_min = 40;
                                p2pll->pll_in_max = 500;
                                p2pll->pll_out_min = 12500;
                                p2pll->pll_out_max = 35000;
                        }

                        spll->reference_div =
                            RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
                            RADEON_M_SPLL_REF_DIV_MASK;
                        mpll->reference_div = spll->reference_div;
                        rdev->clock.default_sclk =
                            radeon_legacy_get_engine_clock(rdev);
                        rdev->clock.default_mclk =
                            radeon_legacy_get_memory_clock(rdev);
                }
        }

        /* pixel clocks */
        if (ASIC_IS_AVIVO(rdev)) {
                p1pll->min_post_div = 2;
                p1pll->max_post_div = 0x7f;
                p1pll->min_frac_feedback_div = 0;
                p1pll->max_frac_feedback_div = 9;
                p2pll->min_post_div = 2;
                p2pll->max_post_div = 0x7f;
                p2pll->min_frac_feedback_div = 0;
                p2pll->max_frac_feedback_div = 9;
        } else {
                p1pll->min_post_div = 1;
                p1pll->max_post_div = 16;
                p1pll->min_frac_feedback_div = 0;
                p1pll->max_frac_feedback_div = 0;
                p2pll->min_post_div = 1;
                p2pll->max_post_div = 12;
                p2pll->min_frac_feedback_div = 0;
                p2pll->max_frac_feedback_div = 0;
        }

        /* dcpll is DCE4 only */
        dcpll->min_post_div = 2;
        dcpll->max_post_div = 0x7f;
        dcpll->min_frac_feedback_div = 0;
        dcpll->max_frac_feedback_div = 9;
        dcpll->min_ref_div = 2;
        dcpll->max_ref_div = 0x3ff;
        dcpll->min_feedback_div = 4;
        dcpll->max_feedback_div = 0xfff;
        dcpll->best_vco = 0;

        p1pll->min_ref_div = 2;
        p1pll->max_ref_div = 0x3ff;
        p1pll->min_feedback_div = 4;
        p1pll->max_feedback_div = 0x7ff;
        p1pll->best_vco = 0;

        p2pll->min_ref_div = 2;
        p2pll->max_ref_div = 0x3ff;
        p2pll->min_feedback_div = 4;
        p2pll->max_feedback_div = 0x7ff;
        p2pll->best_vco = 0;

        /* system clock */
        spll->min_post_div = 1;
        spll->max_post_div = 1;
        spll->min_ref_div = 2;
        spll->max_ref_div = 0xff;
        spll->min_feedback_div = 4;
        spll->max_feedback_div = 0xff;
        spll->best_vco = 0;

        /* memory clock */
        mpll->min_post_div = 1;
        mpll->max_post_div = 1;
        mpll->min_ref_div = 2;
        mpll->max_ref_div = 0xff;
        mpll->min_feedback_div = 4;
        mpll->max_feedback_div = 0xff;
        mpll->best_vco = 0;

        /* Last resort: ask the asic hooks for the current clocks. */
        if (!rdev->clock.default_sclk)
                rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
        if ((!rdev->clock.default_mclk) && rdev->asic->pm.get_memory_clock)
                rdev->clock.default_mclk = radeon_get_memory_clock(rdev);

        rdev->pm.current_sclk = rdev->clock.default_sclk;
        rdev->pm.current_mclk = rdev->clock.default_mclk;
}
/* 10 khz */
/*
 * Compute SPLL divider settings for a requested engine/memory clock.
 *
 * @req_clock: desired frequency (10 kHz units)
 * @fb_div:    out — 8-bit feedback divider value to program
 * @post_div:  out — post divider (1, 2, 4 or 8) chosen so the VCO runs
 *             in the 60000..(<120000) band
 *
 * Returns the frequency (10 kHz units) actually produced by the
 * computed dividers, which may differ from the request due to
 * quantization.
 */
static uint32_t calc_eng_mem_clock(struct radeon_device *rdev,
                                   uint32_t req_clock,
                                   int *fb_div, int *post_div)
{
        struct radeon_pll *spll = &rdev->clock.spll;
        int ref_div = spll->reference_div;

        /* Fall back to the divider currently programmed in hardware. */
        if (!ref_div)
                ref_div =
                    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
                    RADEON_M_SPLL_REF_DIV_MASK;

        /* Scale the request up into the VCO band and remember by how
           much we will divide back down. */
        if (req_clock < 15000) {
                *post_div = 8;
                req_clock *= 8;
        } else if (req_clock < 30000) {
                *post_div = 4;
                req_clock *= 4;
        } else if (req_clock < 60000) {
                *post_div = 2;
                req_clock *= 2;
        } else
                *post_div = 1;

        /* fb = round(req * ref / (2 * ref_freq)); the hardware doubles
           the feedback value, hence the factor of 2. */
        req_clock *= ref_div;
        req_clock += spll->reference_freq;
        req_clock /= (2 * spll->reference_freq);

        *fb_div = req_clock & 0xff;

        /* Recompute the achievable clock from the quantized dividers. */
        req_clock = (req_clock & 0xffff) << 1;
        req_clock *= spll->reference_freq;
        req_clock /= ref_div;
        req_clock /= *post_div;

        return req_clock;
}
/* 10 khz */
/*
 * Reprogram the SPLL for a new engine clock.
 *
 * Sequence: switch SCLK away from the PLL, put the SPLL to sleep and
 * reset it, program the new feedback divider and pump gain, wake the
 * PLL back up, select the post divider, then switch SCLK back.  The
 * udelay()s between steps give the PLL time to settle; their values
 * are hardware-mandated, so do not reorder or remove steps.
 */
void radeon_legacy_set_engine_clock(struct radeon_device *rdev,
                                    uint32_t eng_clock)
{
        uint32_t tmp;
        int fb_div, post_div;

        /* XXX: wait for idle */

        eng_clock = calc_eng_mem_clock(rdev, eng_clock, &fb_div, &post_div);

        tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
        tmp &= ~RADEON_DONT_USE_XTALIN;
        WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);

        /* Take SCLK off the PLL while it is being reprogrammed. */
        tmp = RREG32_PLL(RADEON_SCLK_CNTL);
        tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
        WREG32_PLL(RADEON_SCLK_CNTL, tmp);

        udelay(10);

        tmp = RREG32_PLL(RADEON_SPLL_CNTL);
        tmp |= RADEON_SPLL_SLEEP;
        WREG32_PLL(RADEON_SPLL_CNTL, tmp);

        udelay(2);

        tmp = RREG32_PLL(RADEON_SPLL_CNTL);
        tmp |= RADEON_SPLL_RESET;
        WREG32_PLL(RADEON_SPLL_CNTL, tmp);

        udelay(200);

        /* New feedback divider, computed above. */
        tmp = RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV);
        tmp &= ~(RADEON_SPLL_FB_DIV_MASK << RADEON_SPLL_FB_DIV_SHIFT);
        tmp |= (fb_div & RADEON_SPLL_FB_DIV_MASK) << RADEON_SPLL_FB_DIV_SHIFT;
        WREG32_PLL(RADEON_M_SPLL_REF_FB_DIV, tmp);

        /* XXX: verify on different asics */
        /* Charge-pump gain depends on the resulting VCO frequency. */
        tmp = RREG32_PLL(RADEON_SPLL_CNTL);
        tmp &= ~RADEON_SPLL_PVG_MASK;
        if ((eng_clock * post_div) >= 90000)
                tmp |= (0x7 << RADEON_SPLL_PVG_SHIFT);
        else
                tmp |= (0x4 << RADEON_SPLL_PVG_SHIFT);
        WREG32_PLL(RADEON_SPLL_CNTL, tmp);

        tmp = RREG32_PLL(RADEON_SPLL_CNTL);
        tmp &= ~RADEON_SPLL_SLEEP;
        WREG32_PLL(RADEON_SPLL_CNTL, tmp);

        udelay(2);

        tmp = RREG32_PLL(RADEON_SPLL_CNTL);
        tmp &= ~RADEON_SPLL_RESET;
        WREG32_PLL(RADEON_SPLL_CNTL, tmp);

        udelay(200);

        /* Encode the post divider into the SCLK source select field. */
        tmp = RREG32_PLL(RADEON_SCLK_CNTL);
        tmp &= ~RADEON_SCLK_SRC_SEL_MASK;
        switch (post_div) {
        case 1:
        default:
                tmp |= 1;
                break;
        case 2:
                tmp |= 2;
                break;
        case 4:
                tmp |= 3;
                break;
        case 8:
                tmp |= 4;
                break;
        }
        WREG32_PLL(RADEON_SCLK_CNTL, tmp);

        udelay(20);

        tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
        tmp |= RADEON_DONT_USE_XTALIN;
        WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);

        udelay(10);
}
void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
{
uint32_t tmp;
if (enable) {
if (rdev->flags & RADEON_SINGLE_CRTC) {
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
if ((RREG32(RADEON_CONFIG_CNTL) &
RADEON_CFG_ATI_REV_ID_MASK) >
RADEON_CFG_ATI_REV_A13) {
tmp &=
~(RADEON_SCLK_FORCE_CP |
RADEON_SCLK_FORCE_RB);
}
tmp &=
~(RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1 |
RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_SE |
RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_RE |
RADEON_SCLK_FORCE_PB | RADEON_SCLK_FORCE_TAM |
RADEON_SCLK_FORCE_TDM);
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
} else if (ASIC_IS_R300(rdev)) {
if ((rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480)) {
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
tmp &=
~(RADEON_SCLK_FORCE_DISP2 |
RADEON_SCLK_FORCE_CP |
RADEON_SCLK_FORCE_HDP |
RADEON_SCLK_FORCE_DISP1 |
RADEON_SCLK_FORCE_TOP |
RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
| RADEON_SCLK_FORCE_IDCT |
RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
| R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
| R300_SCLK_FORCE_US |
RADEON_SCLK_FORCE_TV_SCLK |
R300_SCLK_FORCE_SU |
RADEON_SCLK_FORCE_OV0);
tmp |= RADEON_DYN_STOP_LAT_MASK;
tmp |=
RADEON_SCLK_FORCE_TOP |
RADEON_SCLK_FORCE_VIP;
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
tmp &= ~RADEON_SCLK_MORE_FORCEON;
tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
RADEON_PIXCLK_DAC_ALWAYS_ONb);
WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
RADEON_PIX2CLK_DAC_ALWAYS_ONb |
RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
R300_DVOCLK_ALWAYS_ONb |
RADEON_PIXCLK_BLEND_ALWAYS_ONb |
RADEON_PIXCLK_GV_ALWAYS_ONb |
R300_PIXCLK_DVO_ALWAYS_ONb |
RADEON_PIXCLK_LVDS_ALWAYS_ONb |
RADEON_PIXCLK_TMDS_ALWAYS_ONb |
R300_PIXCLK_TRANS_ALWAYS_ONb |
R300_PIXCLK_TVO_ALWAYS_ONb |
R300_P2G2CLK_ALWAYS_ONb |
R300_P2G2CLK_DAC_ALWAYS_ONb);
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
} else if (rdev->family >= CHIP_RV350) {
tmp = RREG32_PLL(R300_SCLK_CNTL2);
tmp &= ~(R300_SCLK_FORCE_TCL |
R300_SCLK_FORCE_GA |
R300_SCLK_FORCE_CBA);
tmp |= (R300_SCLK_TCL_MAX_DYN_STOP_LAT |
R300_SCLK_GA_MAX_DYN_STOP_LAT |
R300_SCLK_CBA_MAX_DYN_STOP_LAT);
WREG32_PLL(R300_SCLK_CNTL2, tmp);
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
tmp &=
~(RADEON_SCLK_FORCE_DISP2 |
RADEON_SCLK_FORCE_CP |
RADEON_SCLK_FORCE_HDP |
RADEON_SCLK_FORCE_DISP1 |
RADEON_SCLK_FORCE_TOP |
RADEON_SCLK_FORCE_E2 | R300_SCLK_FORCE_VAP
| RADEON_SCLK_FORCE_IDCT |
RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR
| R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX
| R300_SCLK_FORCE_US |
RADEON_SCLK_FORCE_TV_SCLK |
R300_SCLK_FORCE_SU |
RADEON_SCLK_FORCE_OV0);
tmp |= RADEON_DYN_STOP_LAT_MASK;
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
tmp &= ~RADEON_SCLK_MORE_FORCEON;
tmp |= RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
RADEON_PIXCLK_DAC_ALWAYS_ONb);
WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
RADEON_PIX2CLK_DAC_ALWAYS_ONb |
RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
R300_DVOCLK_ALWAYS_ONb |
RADEON_PIXCLK_BLEND_ALWAYS_ONb |
RADEON_PIXCLK_GV_ALWAYS_ONb |
R300_PIXCLK_DVO_ALWAYS_ONb |
RADEON_PIXCLK_LVDS_ALWAYS_ONb |
RADEON_PIXCLK_TMDS_ALWAYS_ONb |
R300_PIXCLK_TRANS_ALWAYS_ONb |
R300_PIXCLK_TVO_ALWAYS_ONb |
R300_P2G2CLK_ALWAYS_ONb |
R300_P2G2CLK_DAC_ALWAYS_ONb);
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
tmp = RREG32_PLL(RADEON_MCLK_MISC);
tmp |= (RADEON_MC_MCLK_DYN_ENABLE |
RADEON_IO_MCLK_DYN_ENABLE);
WREG32_PLL(RADEON_MCLK_MISC, tmp);
tmp = RREG32_PLL(RADEON_MCLK_CNTL);
tmp |= (RADEON_FORCEON_MCLKA |
RADEON_FORCEON_MCLKB);
tmp &= ~(RADEON_FORCEON_YCLKA |
RADEON_FORCEON_YCLKB |
RADEON_FORCEON_MC);
/* Some releases of vbios have set DISABLE_MC_MCLKA
and DISABLE_MC_MCLKB bits in the vbios table. Setting these
bits will cause H/W hang when reading video memory with dynamic clocking
enabled. */
if ((tmp & R300_DISABLE_MC_MCLKA) &&
(tmp & R300_DISABLE_MC_MCLKB)) {
/* If both bits are set, then check the active channels */
tmp = RREG32_PLL(RADEON_MCLK_CNTL);
if (rdev->mc.vram_width == 64) {
if (RREG32(RADEON_MEM_CNTL) &
R300_MEM_USE_CD_CH_ONLY)
tmp &=
~R300_DISABLE_MC_MCLKB;
else
tmp &=
~R300_DISABLE_MC_MCLKA;
} else {
tmp &= ~(R300_DISABLE_MC_MCLKA |
R300_DISABLE_MC_MCLKB);
}
}
WREG32_PLL(RADEON_MCLK_CNTL, tmp);
} else {
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
tmp &= ~(R300_SCLK_FORCE_VAP);
tmp |= RADEON_SCLK_FORCE_CP;
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
mdelay(15);
tmp = RREG32_PLL(R300_SCLK_CNTL2);
tmp &= ~(R300_SCLK_FORCE_TCL |
R300_SCLK_FORCE_GA |
R300_SCLK_FORCE_CBA);
WREG32_PLL(R300_SCLK_CNTL2, tmp);
}
} else {
tmp = RREG32_PLL(RADEON_CLK_PWRMGT_CNTL);
tmp &= ~(RADEON_ACTIVE_HILO_LAT_MASK |
RADEON_DISP_DYN_STOP_LAT_MASK |
RADEON_DYN_STOP_MODE_MASK);
tmp |= (RADEON_ENGIN_DYNCLK_MODE |
(0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
mdelay(15);
tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
tmp |= RADEON_SCLK_DYN_START_CNTL;
WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
mdelay(15);
/* When DRI is enabled, setting DYN_STOP_LAT to zero can cause some R200
to lockup randomly, leave them as set by BIOS.
*/
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
/*tmp &= RADEON_SCLK_SRC_SEL_MASK; */
tmp &= ~RADEON_SCLK_FORCEON_MASK;
/*RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300 */
if (((rdev->family == CHIP_RV250) &&
((RREG32(RADEON_CONFIG_CNTL) &
RADEON_CFG_ATI_REV_ID_MASK) <
RADEON_CFG_ATI_REV_A13))
|| ((rdev->family == CHIP_RV100)
&&
((RREG32(RADEON_CONFIG_CNTL) &
RADEON_CFG_ATI_REV_ID_MASK) <=
RADEON_CFG_ATI_REV_A13))) {
tmp |= RADEON_SCLK_FORCE_CP;
tmp |= RADEON_SCLK_FORCE_VIP;
}
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
if ((rdev->family == CHIP_RV200) ||
(rdev->family == CHIP_RV250) ||
(rdev->family == CHIP_RV280)) {
tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
tmp &= ~RADEON_SCLK_MORE_FORCEON;
/* RV200::A11 A12 RV250::A11 A12 */
if (((rdev->family == CHIP_RV200) ||
(rdev->family == CHIP_RV250)) &&
((RREG32(RADEON_CONFIG_CNTL) &
RADEON_CFG_ATI_REV_ID_MASK) <
RADEON_CFG_ATI_REV_A13)) {
tmp |= RADEON_SCLK_MORE_FORCEON;
}
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
mdelay(15);
}
/* RV200::A11 A12, RV250::A11 A12 */
if (((rdev->family == CHIP_RV200) ||
(rdev->family == CHIP_RV250)) &&
((RREG32(RADEON_CONFIG_CNTL) &
RADEON_CFG_ATI_REV_ID_MASK) <
RADEON_CFG_ATI_REV_A13)) {
tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
tmp |= RADEON_TCL_BYPASS_DISABLE;
WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
}
mdelay(15);
/*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
tmp |= (RADEON_PIX2CLK_ALWAYS_ONb |
RADEON_PIX2CLK_DAC_ALWAYS_ONb |
RADEON_PIXCLK_BLEND_ALWAYS_ONb |
RADEON_PIXCLK_GV_ALWAYS_ONb |
RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
RADEON_PIXCLK_LVDS_ALWAYS_ONb |
RADEON_PIXCLK_TMDS_ALWAYS_ONb);
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
mdelay(15);
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
RADEON_PIXCLK_DAC_ALWAYS_ONb);
WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
mdelay(15);
}
} else {
/* Turn everything OFF (ForceON to everything) */
if (rdev->flags & RADEON_SINGLE_CRTC) {
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_HDP |
RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_TOP
| RADEON_SCLK_FORCE_E2 | RADEON_SCLK_FORCE_SE |
RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_VIP |
RADEON_SCLK_FORCE_RE | RADEON_SCLK_FORCE_PB |
RADEON_SCLK_FORCE_TAM | RADEON_SCLK_FORCE_TDM |
RADEON_SCLK_FORCE_RB);
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
} else if ((rdev->family == CHIP_RS400) ||
(rdev->family == CHIP_RS480)) {
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
| RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
tmp |= RADEON_SCLK_MORE_FORCEON;
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
RADEON_PIXCLK_DAC_ALWAYS_ONb |
R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
RADEON_PIX2CLK_DAC_ALWAYS_ONb |
RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
R300_DVOCLK_ALWAYS_ONb |
RADEON_PIXCLK_BLEND_ALWAYS_ONb |
RADEON_PIXCLK_GV_ALWAYS_ONb |
R300_PIXCLK_DVO_ALWAYS_ONb |
RADEON_PIXCLK_LVDS_ALWAYS_ONb |
RADEON_PIXCLK_TMDS_ALWAYS_ONb |
R300_PIXCLK_TRANS_ALWAYS_ONb |
R300_PIXCLK_TVO_ALWAYS_ONb |
R300_P2G2CLK_ALWAYS_ONb |
R300_P2G2CLK_DAC_ALWAYS_ONb |
R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
} else if (rdev->family >= CHIP_RV350) {
/* for RV350/M10, no delays are required. */
tmp = RREG32_PLL(R300_SCLK_CNTL2);
tmp |= (R300_SCLK_FORCE_TCL |
R300_SCLK_FORCE_GA | R300_SCLK_FORCE_CBA);
WREG32_PLL(R300_SCLK_CNTL2, tmp);
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP |
RADEON_SCLK_FORCE_HDP | RADEON_SCLK_FORCE_DISP1
| RADEON_SCLK_FORCE_TOP | RADEON_SCLK_FORCE_E2 |
R300_SCLK_FORCE_VAP | RADEON_SCLK_FORCE_IDCT |
RADEON_SCLK_FORCE_VIP | R300_SCLK_FORCE_SR |
R300_SCLK_FORCE_PX | R300_SCLK_FORCE_TX |
R300_SCLK_FORCE_US | RADEON_SCLK_FORCE_TV_SCLK |
R300_SCLK_FORCE_SU | RADEON_SCLK_FORCE_OV0);
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
tmp |= RADEON_SCLK_MORE_FORCEON;
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
tmp = RREG32_PLL(RADEON_MCLK_CNTL);
tmp |= (RADEON_FORCEON_MCLKA |
RADEON_FORCEON_MCLKB |
RADEON_FORCEON_YCLKA |
RADEON_FORCEON_YCLKB | RADEON_FORCEON_MC);
WREG32_PLL(RADEON_MCLK_CNTL, tmp);
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
RADEON_PIXCLK_DAC_ALWAYS_ONb |
R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
RADEON_PIX2CLK_DAC_ALWAYS_ONb |
RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
R300_DVOCLK_ALWAYS_ONb |
RADEON_PIXCLK_BLEND_ALWAYS_ONb |
RADEON_PIXCLK_GV_ALWAYS_ONb |
R300_PIXCLK_DVO_ALWAYS_ONb |
RADEON_PIXCLK_LVDS_ALWAYS_ONb |
RADEON_PIXCLK_TMDS_ALWAYS_ONb |
R300_PIXCLK_TRANS_ALWAYS_ONb |
R300_PIXCLK_TVO_ALWAYS_ONb |
R300_P2G2CLK_ALWAYS_ONb |
R300_P2G2CLK_DAC_ALWAYS_ONb |
R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
} else {
tmp = RREG32_PLL(RADEON_SCLK_CNTL);
tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_E2);
tmp |= RADEON_SCLK_FORCE_SE;
if (rdev->flags & RADEON_SINGLE_CRTC) {
tmp |= (RADEON_SCLK_FORCE_RB |
RADEON_SCLK_FORCE_TDM |
RADEON_SCLK_FORCE_TAM |
RADEON_SCLK_FORCE_PB |
RADEON_SCLK_FORCE_RE |
RADEON_SCLK_FORCE_VIP |
RADEON_SCLK_FORCE_IDCT |
RADEON_SCLK_FORCE_TOP |
RADEON_SCLK_FORCE_DISP1 |
RADEON_SCLK_FORCE_DISP2 |
RADEON_SCLK_FORCE_HDP);
} else if ((rdev->family == CHIP_R300) ||
(rdev->family == CHIP_R350)) {
tmp |= (RADEON_SCLK_FORCE_HDP |
RADEON_SCLK_FORCE_DISP1 |
RADEON_SCLK_FORCE_DISP2 |
RADEON_SCLK_FORCE_TOP |
RADEON_SCLK_FORCE_IDCT |
RADEON_SCLK_FORCE_VIP);
}
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
mdelay(16);
if ((rdev->family == CHIP_R300) ||
(rdev->family == CHIP_R350)) {
tmp = RREG32_PLL(R300_SCLK_CNTL2);
tmp |= (R300_SCLK_FORCE_TCL |
R300_SCLK_FORCE_GA |
R300_SCLK_FORCE_CBA);
WREG32_PLL(R300_SCLK_CNTL2, tmp);
mdelay(16);
}
if (rdev->flags & RADEON_IS_IGP) {
tmp = RREG32_PLL(RADEON_MCLK_CNTL);
tmp &= ~(RADEON_FORCEON_MCLKA |
RADEON_FORCEON_YCLKA);
WREG32_PLL(RADEON_MCLK_CNTL, tmp);
mdelay(16);
}
if ((rdev->family == CHIP_RV200) ||
(rdev->family == CHIP_RV250) ||
(rdev->family == CHIP_RV280)) {
tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
tmp |= RADEON_SCLK_MORE_FORCEON;
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
mdelay(16);
}
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb |
RADEON_PIX2CLK_DAC_ALWAYS_ONb |
RADEON_PIXCLK_BLEND_ALWAYS_ONb |
RADEON_PIXCLK_GV_ALWAYS_ONb |
RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
RADEON_PIXCLK_LVDS_ALWAYS_ONb |
RADEON_PIXCLK_TMDS_ALWAYS_ONb);
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
mdelay(16);
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
RADEON_PIXCLK_DAC_ALWAYS_ONb);
WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
}
}
}
| gpl-2.0 |
piccolo-dev/aquaris-M5 | arch/sparc/kernel/btext.c | 7485 | 31895 | /*
* Procedures for drawing on the screen early on in the boot process.
*
* Benjamin Herrenschmidt <benh@kernel.crashing.org>
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/console.h>
#include <asm/btext.h>
#include <asm/oplib.h>
#include <asm/io.h>
/* Scrolling is compiled out: btext_drawchar() wraps back to the top of
 * the screen instead of moving the whole frame buffer. */
#define NO_SCROLL
#ifndef NO_SCROLL
static void scrollscreen(void);
#endif
/* Glyph renderers, one per supported frame-buffer depth. */
static void draw_byte(unsigned char c, long locX, long locY);
static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
/* Place these in .data rather than .bss.  NOTE(review): presumably so
 * they survive / are usable before .bss is cleared in early boot —
 * confirm against the sparc boot sequence. */
#define __force_data __attribute__((__section__(".data")))
static int g_loc_X __force_data;		/* cursor column, in 8-pixel glyph cells */
static int g_loc_Y __force_data;		/* cursor row, in 16-pixel glyph cells */
static int g_max_loc_X __force_data;		/* columns on screen (width / 8) */
static int g_max_loc_Y __force_data;		/* rows on screen (height / 16) */
static int dispDeviceRowBytes __force_data;	/* frame-buffer pitch in bytes */
static int dispDeviceDepth __force_data;	/* bits per pixel (15 is stored as 16) */
static int dispDeviceRect[4] __force_data;	/* x0, y0, width, height */
static unsigned char *dispDeviceBase __force_data;	/* frame-buffer base address */
/* 256 glyphs x 16 bytes each: one byte per 8-pixel-wide scanline. */
#define cmapsz (16*256)
static unsigned char vga_font[cmapsz];
/*
 * Read the display geometry from the PROM node and set up the global
 * text-cell / frame-buffer state.  Returns 0 on success, -EINVAL when a
 * required property is missing or no frame-buffer address is found.
 */
static int __init btext_initialize(phandle node)
{
	unsigned int width, height, depth, pitch;
	unsigned long fb_addr = 0;
	u32 val;

	/* All three geometry properties are mandatory. */
	if (prom_getproperty(node, "width", (char *)&width, 4) < 0 ||
	    prom_getproperty(node, "height", (char *)&height, 4) < 0 ||
	    prom_getproperty(node, "depth", (char *)&depth, 4) < 0)
		return -EINVAL;

	/* Derive the pitch, preferring an explicit "linebytes" property
	 * when present and sane. */
	pitch = width * ((depth + 7) / 8);
	if (prom_getproperty(node, "linebytes", (char *)&val, 4) >= 0 &&
	    val != 0xffffffffu)
		pitch = val;
	if (pitch == 1)
		pitch = 0x1000;

	if (prom_getproperty(node, "address", (char *)&val, 4) >= 0)
		fb_addr = val;

	/* FIXME: Add support for PCI reg properties. Right now, only
	 * reliable on macs
	 */
	if (fb_addr == 0)
		return -EINVAL;

	g_loc_X = 0;
	g_loc_Y = 0;
	g_max_loc_X = width / 8;	/* glyph cells are 8 pixels wide */
	g_max_loc_Y = height / 16;	/* and 16 pixels tall */

	dispDeviceBase = (unsigned char *)fb_addr;
	dispDeviceRowBytes = pitch;
	dispDeviceDepth = (depth == 15) ? 16 : depth;
	dispDeviceRect[0] = dispDeviceRect[1] = 0;
	dispDeviceRect[2] = width;
	dispDeviceRect[3] = height;

	return 0;
}
/* Calc the base address of a given point (x,y) */
static unsigned char * calc_base(int x, int y)
{
	/* Offset within a scanline plus whole scanlines above. */
	return dispDeviceBase
		+ (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3)
		+ (y + dispDeviceRect[1]) * dispDeviceRowBytes;
}
/* Zero every visible scanline of the display, one 32-bit word at a time. */
static void btext_clearscreen(void)
{
	unsigned int *row = (unsigned int *)calc_base(0, 0);
	unsigned long words = ((dispDeviceRect[2] - dispDeviceRect[0]) *
			       (dispDeviceDepth >> 3)) >> 2;
	int y;

	for (y = 0; y < (dispDeviceRect[3] - dispDeviceRect[1]); y++) {
		unsigned int *p = row;
		unsigned long w;

		for (w = 0; w < words; w++)
			*p++ = 0;
		row += dispDeviceRowBytes >> 2;
	}
}
#ifndef NO_SCROLL
/*
 * Shift the frame buffer up by one 16-pixel text row, then blank the
 * newly exposed bottom row.  (Currently compiled out: NO_SCROLL is
 * defined above.)
 */
static void scrollscreen(void)
{
	unsigned int *src = (unsigned int *)calc_base(0, 16);
	unsigned int *dst = (unsigned int *)calc_base(0, 0);
	unsigned long words = ((dispDeviceRect[2] - dispDeviceRect[0]) *
			       (dispDeviceDepth >> 3)) >> 2;
	unsigned long stride = dispDeviceRowBytes >> 2;
	unsigned long w;
	int line;

	/* Copy each scanline 16 rows upward; dst is below src, so a
	 * plain forward copy is safe. */
	for (line = 0; line < (dispDeviceRect[3] - dispDeviceRect[1] - 16); line++) {
		for (w = 0; w < words; w++)
			dst[w] = src[w];
		src += stride;
		dst += stride;
	}
	/* Clear the 16 scanlines of the last text row. */
	for (line = 0; line < 16; line++) {
		for (w = 0; w < words; w++)
			dst[w] = 0;
		dst += stride;
	}
}
#endif /* ndef NO_SCROLL */
/*
 * Draw one character at the current cursor position, interpreting the
 * usual control characters (\b, \t, \r, \n), and advance the cursor.
 * With NO_SCROLL the cursor wraps from the bottom back to the top of
 * the screen and the new line is blanked instead of scrolling.
 */
void btext_drawchar(char c)
{
	int new_line = 0;
#ifdef NO_SCROLL
	int col;
#endif

	switch (c) {
	case '\b':
		if (g_loc_X > 0)
			g_loc_X--;
		break;
	case '\t':
		/* Advance to the next 8-column tab stop. */
		g_loc_X = (g_loc_X & -8) + 8;
		break;
	case '\r':
		g_loc_X = 0;
		break;
	case '\n':
		g_loc_X = 0;
		g_loc_Y++;
		new_line = 1;
		break;
	default:
		draw_byte(c, g_loc_X++, g_loc_Y);
		break;
	}

	/* Wrap to the start of the next line when past the right edge. */
	if (g_loc_X >= g_max_loc_X) {
		g_loc_X = 0;
		g_loc_Y++;
		new_line = 1;
	}
#ifndef NO_SCROLL
	while (g_loc_Y >= g_max_loc_Y) {
		scrollscreen();
		g_loc_Y--;
	}
#else
	/* wrap around from bottom to top of screen so we don't
	   waste time scrolling each line.  -- paulus. */
	if (g_loc_Y >= g_max_loc_Y)
		g_loc_Y = 0;
	if (new_line) {
		for (col = 0; col < g_max_loc_X; col++)
			draw_byte(' ', col, g_loc_Y);
	}
#endif
}
/* Render a (not necessarily NUL-terminated) string of len characters. */
static void btext_drawtext(const char *c, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		btext_drawchar(c[i]);
}
/*
 * Render one glyph at text-cell position (locX, locY), dispatching to
 * the renderer that matches the frame-buffer depth.  Depths other than
 * 8/15/16/24/32 draw nothing.
 */
static void draw_byte(unsigned char c, long locX, long locY)
{
	/* Cells are 8x16 pixels; each glyph is 16 bytes in vga_font. */
	unsigned char *dest = calc_base(locX << 3, locY << 4);
	unsigned char *glyph = &vga_font[((unsigned int)c) * 16];
	int pitch = dispDeviceRowBytes;

	switch (dispDeviceDepth) {
	case 24:
	case 32:
		draw_byte_32(glyph, (unsigned int *)dest, pitch);
		break;
	case 15:
	case 16:
		draw_byte_16(glyph, (unsigned int *)dest, pitch);
		break;
	case 8:
		draw_byte_8(glyph, (unsigned int *)dest, pitch);
		break;
	}
}
/*
 * 8bpp expansion table: maps a 4-bit slice of a font scanline to a
 * 32-bit word holding four pixel bytes, 0xff per set bit.  Bit 3 of the
 * index lands in the most significant byte (e.g. index 8 -> 0xff000000),
 * i.e. the layout matches big-endian word stores into the frame buffer.
 */
static unsigned int expand_bits_8[16] = {
	0x00000000,
	0x000000ff,
	0x0000ff00,
	0x0000ffff,
	0x00ff0000,
	0x00ff00ff,
	0x00ffff00,
	0x00ffffff,
	0xff000000,
	0xff0000ff,
	0xff00ff00,
	0xff00ffff,
	0xffff0000,
	0xffff00ff,
	0xffffff00,
	0xffffffff
};
/*
 * 15/16bpp expansion table: maps 2 font bits to a 32-bit word holding
 * two pixel halfwords, 0xffff per set bit (high bit -> most significant
 * halfword, matching big-endian stores).
 */
static unsigned int expand_bits_16[4] = {
	0x00000000,
	0x0000ffff,
	0xffff0000,
	0xffffffff
};
/*
 * Render one glyph at 24/32 bpp: each font bit becomes one 32-bit pixel,
 * all-ones for foreground and zero for background.
 *
 * font: 16-byte glyph bitmap, one byte (8 pixels, MSB leftmost) per line
 * base: frame-buffer address of the glyph's top-left pixel
 * rb:   frame-buffer pitch in bytes
 */
static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
{
	const unsigned int fg = 0xFFFFFFFFUL;
	const unsigned int bg = 0x00000000UL;
	int row, col;

	for (row = 0; row < 16; row++) {
		unsigned int line = *font++;

		/* Expand bit (7 - col) into pixel col of this scanline. */
		for (col = 0; col < 8; col++)
			base[col] = (-((line >> (7 - col)) & 1) & fg) ^ bg;
		base = (unsigned int *)((char *)base + rb);
	}
}
/*
 * Render one glyph at 15/16 bpp.  Each font byte (one 8-pixel scanline)
 * is expanded two bits at a time through expand_bits_16, producing four
 * 32-bit stores per line (8 pixels x 16 bpp = 4 words).
 *
 * font: 16-byte glyph bitmap, one byte per scanline
 * base: frame-buffer address of the glyph's top-left pixel
 * rb:   frame-buffer pitch in bytes
 *
 * Fix: the table pointer was initialized via a mismatched (int *) cast
 * assigned to an unsigned int * (a constraint violation), and fg/bg were
 * signed ints holding 0xFFFFFFFFUL (implementation-defined conversion).
 * Both now use matching unsigned types; the generated stores are
 * bit-identical.
 */
static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	unsigned int fg = 0xFFFFFFFFUL;		/* foreground: all bits set */
	unsigned int bg = 0x00000000UL;		/* background: black */
	unsigned int *eb = expand_bits_16;

	for (l = 0; l < 16; ++l) {
		bits = *font++;
		base[0] = (eb[bits >> 6] & fg) ^ bg;
		base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
		base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
		base[3] = (eb[bits & 3] & fg) ^ bg;
		base = (unsigned int *)((char *)base + rb);
	}
}
/*
 * Render one glyph at 8 bpp.  Each font byte is expanded a nibble at a
 * time through expand_bits_8, producing two 32-bit stores per scanline
 * (8 pixels x 8 bpp = 2 words).  Foreground pixels get the value 0x0f
 * in each byte — presumably palette entry 15 (white); confirm against
 * the palette setup.
 *
 * font: 16-byte glyph bitmap, one byte per scanline
 * base: frame-buffer address of the glyph's top-left pixel
 * rb:   frame-buffer pitch in bytes
 *
 * Fix: the table pointer was initialized via a mismatched (int *) cast
 * assigned to an unsigned int * (a constraint violation), and fg/bg were
 * signed ints; both now use matching unsigned types with identical
 * generated stores.
 */
static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	unsigned int fg = 0x0F0F0F0FUL;		/* pixel value 0x0f per byte */
	unsigned int bg = 0x00000000UL;		/* background: 0 */
	unsigned int *eb = expand_bits_8;

	for (l = 0; l < 16; ++l) {
		bits = *font++;
		base[0] = (eb[bits >> 4] & fg) ^ bg;
		base[1] = (eb[bits & 0xf] & fg) ^ bg;
		base = (unsigned int *)((char *)base + rb);
	}
}
/* console->write hook: hand the buffer straight to the glyph renderer. */
static void btext_console_write(struct console *con, const char *s,
				unsigned int n)
{
	btext_drawtext(s, n);
}
/*
 * Early boot console backed by the PROM display frame buffer.
 * NOTE(review): flag semantics (CON_BOOT auto-unregister, CON_ANYTIME
 * callable before the CPU is fully up) per struct console documentation
 * — confirm against include/linux/console.h for this kernel version.
 */
static struct console btext_console = {
	.name	= "btext",
	.write	= btext_console_write,
	.flags	= CON_PRINTBUFFER | CON_ENABLED | CON_BOOT | CON_ANYTIME,
	.index	= 0,
};
/*
 * Locate the PROM stdout display device, initialize the text renderer
 * from it, and register the boot console.  Returns 0 on success,
 * -ENODEV when stdout is not a display, or btext_initialize()'s error.
 */
int __init btext_find_display(void)
{
	phandle node;
	char type[32];
	int err;

	node = prom_inst2pkg(prom_stdout);
	if (prom_getproperty(node, "device_type", type, 32) < 0)
		return -ENODEV;
	if (strcmp(type, "display") != 0)
		return -ENODEV;

	err = btext_initialize(node);
	if (err)
		return err;

	btext_clearscreen();
	register_console(&btext_console);
	return 0;
}
static unsigned char vga_font[cmapsz] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
};
| gpl-2.0 |
akkufix/HTC_M7xx_kernel-4.4x | arch/arm/kernel/xscale-cp0.c | 8509 | 4003 | /*
* linux/arch/arm/kernel/xscale-cp0.c
*
* XScale DSP and iWMMXt coprocessor context switching and handling
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/thread_notify.h>
/*
 * Save the XScale DSP accumulator acc0 (accessed through CP0 with the
 * mrrc instruction) into state[0]/state[1].
 */
static inline void dsp_save_state(u32 *state)
{
	__asm__ __volatile__ (
		"mrrc p0, 0, %0, %1, c0\n"
		: "=r" (state[0]), "=r" (state[1]));
}
/*
 * Load the XScale DSP accumulator acc0 (via CP0, mcrr instruction)
 * from state[0]/state[1].
 */
static inline void dsp_load_state(u32 *state)
{
	__asm__ __volatile__ (
		"mcrr p0, 0, %0, %1, c0\n"
		: : "r" (state[0]), "r" (state[1]));
}
/*
 * Thread notifier callback for the XScale DSP accumulator (acc0).
 * On thread flush the saved accumulator words are zeroed; on a
 * context switch the outgoing thread's acc0 is saved and the
 * incoming thread's acc0 is loaded (non-lazy switching).
 */
static int dsp_do(struct notifier_block *self, unsigned long cmd, void *t)
{
	struct thread_info *ti = t;

	if (cmd == THREAD_NOTIFY_FLUSH) {
		/* Fresh thread: start with a zeroed accumulator. */
		ti->cpu_context.extra[0] = 0;
		ti->cpu_context.extra[1] = 0;
	} else if (cmd == THREAD_NOTIFY_SWITCH) {
		/* Save outgoing acc0, restore the incoming thread's. */
		dsp_save_state(current_thread_info()->cpu_context.extra);
		dsp_load_state(ti->cpu_context.extra);
	}

	return NOTIFY_DONE;
}
/* Registered on the thread notifier chain when a DSP coprocessor
   (rather than iWMMXt) is detected at boot. */
static struct notifier_block dsp_notifier_block = {
	.notifier_call	= dsp_do,
};
#ifdef CONFIG_IWMMXT
/*
 * Thread notifier callback for iWMMXt coprocessor state.
 * Context switching itself is lazy (handled by the iWMMXt trap code);
 * here we only release the task's iWMMXt context on flush/exit and
 * run the lazy-switch bookkeeping on a context switch.
 */
static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
{
	struct thread_info *ti = t;

	/*
	 * FLUSH and EXIT are handled identically: flush_thread() zeroes
	 * thread->fpstate already, so releasing the context is all that
	 * is needed, and it also ensures we don't clobber the freshly
	 * initialised state on the first fault after a flush.
	 */
	if (cmd == THREAD_NOTIFY_FLUSH || cmd == THREAD_NOTIFY_EXIT)
		iwmmxt_task_release(ti);
	else if (cmd == THREAD_NOTIFY_SWITCH)
		iwmmxt_task_switch(ti);

	return NOTIFY_DONE;
}
/* Registered on the thread notifier chain when an iWMMXt coprocessor
   is detected at boot (CONFIG_IWMMXT only). */
static struct notifier_block iwmmxt_notifier_block = {
	.notifier_call	= iwmmxt_do,
};
#endif
/* Read the XScale coprocessor access register (CP15 c15/c1). */
static u32 __init xscale_cp_access_read(void)
{
	u32 value;

	__asm__ __volatile__ (
		"mrc p15, 0, %0, c15, c1, 0\n\t"
		: "=r" (value));

	return value;
}
/*
 * Write the XScale coprocessor access register (CP15 c15/c1).
 * The trailing mrc/mov/sub-pc sequence is the XScale CPWAIT idiom,
 * which stalls until the CP15 write has taken effect before any
 * following coprocessor instruction can execute.
 */
static void __init xscale_cp_access_write(u32 value)
{
	u32 temp;

	__asm__ __volatile__ (
		"mcr p15, 0, %1, c15, c1, 0\n\t"
		"mrc p15, 0, %0, c15, c1, 0\n\t"
		"mov %0, %0\n\t"
		"sub pc, pc, #4\n\t"
		: "=r" (temp) : "r" (value));
}
/*
 * Detect whether we have a MAC coprocessor (40 bit register) or an
 * iWMMXt coprocessor (64 bit registers) by loading 00000100:00000000
 * into a coprocessor register and reading it back, and checking
 * whether the upper word survived intact.  Returns nonzero for iWMMXt.
 */
static int __init cpu_has_iwmmxt(void)
{
	u32 lo;
	u32 hi;

	/*
	 * This sequence is interpreted by the DSP coprocessor as:
	 *	mar	acc0, %2, %3
	 *	mra	%0, %1, acc0
	 *
	 * And by the iWMMXt coprocessor as:
	 *	tmcrr	wR0, %2, %3
	 *	tmrrc	%0, %1, wR0
	 *
	 * A 40-bit MAC accumulator truncates the high word to 8 bits,
	 * so 0x100 in the upper word survives only on iWMMXt.
	 */
	__asm__ __volatile__ (
		"mcrr p0, 0, %2, %3, c0\n"
		"mrrc p0, 0, %0, %1, c0\n"
		: "=r" (lo), "=r" (hi)
		: "r" (0), "r" (0x100));

	return !!hi;
}
/*
* If we detect that the CPU has iWMMXt (and CONFIG_IWMMXT=y), we
* disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
* switch code handle iWMMXt context switching. If on the other
* hand the CPU has a DSP coprocessor, we keep access to CP0 enabled
* all the time, and save/restore acc0 on context switch in non-lazy
* fashion.
*/
/*
 * Probe which CP0 coprocessor variant this CPU has and register the
 * matching thread notifier.  CP0 access is temporarily enabled for the
 * probe; it is left disabled for iWMMXt (so call_fpe()/lazy switching
 * take over) and enabled permanently for the DSP accumulator.
 */
static int __init xscale_cp0_init(void)
{
	u32 cp_access;

	/* Temporarily enable CP0 so cpu_has_iwmmxt() can poke it. */
	cp_access = xscale_cp_access_read() & ~3;
	xscale_cp_access_write(cp_access | 1);

	if (cpu_has_iwmmxt()) {
#ifndef CONFIG_IWMMXT
		printk(KERN_WARNING "CAUTION: XScale iWMMXt coprocessor "
			"detected, but kernel support is missing.\n");
#else
		printk(KERN_INFO "XScale iWMMXt coprocessor detected.\n");
		elf_hwcap |= HWCAP_IWMMXT;
		thread_register_notifier(&iwmmxt_notifier_block);
#endif
	} else {
		printk(KERN_INFO "XScale DSP coprocessor detected.\n");
		thread_register_notifier(&dsp_notifier_block);
		/* Keep CP0 enabled: acc0 is switched eagerly, not lazily. */
		cp_access |= 1;
	}

	xscale_cp_access_write(cp_access);

	return 0;
}

late_initcall(xscale_cp0_init);
| gpl-2.0 |
MassStash/htc_pme_kernel_sense_6.0 | arch/alpha/kernel/core_cia.c | 9021 | 33386 | /*
* linux/arch/alpha/kernel/core_cia.c
*
* Written by David A Rusling (david.rusling@reo.mts.dec.com).
* December 1995.
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1997, 1998 Jay Estabrook
* Copyright (C) 1998, 1999, 2000 Richard Henderson
*
* Code common to all CIA core logic chips.
*/
#define __EXTERN_INLINE inline
#include <asm/io.h>
#include <asm/core_cia.h>
#undef __EXTERN_INLINE
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <asm/ptrace.h>
#include <asm/mce.h>
#include "proto.h"
#include "pci_impl.h"
/*
* NOTE: Herein lie back-to-back mb instructions. They are magic.
* One plausible explanation is that the i/o controller does not properly
* handle the system transaction. Another involves timing. Ho hum.
*/
#define DEBUG_CONFIG 0
#if DEBUG_CONFIG
# define DBGC(args) printk args
#else
# define DBGC(args)
#endif
#define vip volatile int *
/*
* Given a bus, device, and function number, compute resulting
* configuration space address. It is therefore not safe to have
* concurrent invocations to configuration space access routines, but
* there really shouldn't be any need for this.
*
* Type 0:
*
* 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* 31:11 Device select bit.
* 10:8 Function number
* 7:2 Register number
*
* Type 1:
*
* 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
* 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* 31:24 reserved
* 23:16 bus number (8 bits = 128 possible buses)
* 15:11 Device number (5 bits)
* 10:8 function number
* 7:2 register number
*
* Notes:
* The function number selects which function of a multi-function device
* (e.g., SCSI and Ethernet).
*
* The register selects a DWORD (32 bit) register offset. Hence it
* doesn't get shifted by 2 bits as we want to "drop" the bottom two
* bits.
*/
/*
 * Build the intermediate PCI config-space address for (bus, devfn,
 * where) and decide whether a type 1 cycle is needed (any bus other
 * than bus 0, i.e. behind a PCI-PCI bridge).
 *
 * Fix: the DBGC() invocation was missing the comma between the format
 * string and its first argument, which is a compile error whenever
 * DEBUG_CONFIG is set to 1.
 *
 * Always returns 0 (the address computation cannot fail).
 */
static int
mk_conf_addr(struct pci_bus *bus_dev, unsigned int device_fn, int where,
	     unsigned long *pci_addr, unsigned char *type1)
{
	u8 bus = bus_dev->number;

	*type1 = (bus != 0);
	*pci_addr = (bus << 16) | (device_fn << 8) | where;

	DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
	      " returning address 0x%p\n",
	      bus, device_fn, where, *pci_addr));

	return 0;
}
/*
 * Read one 32-bit word from PCI configuration space at ADDR.
 *
 * Runs with interrupts disabled and machine checks "expected": probing
 * a nonexistent device produces a master abort that arrives as a
 * machine check, which is swallowed and turned into an all-ones
 * result.  When type1 is set, the low bits of the CIA CFG register
 * are switched to generate a type 1 configuration cycle and restored
 * afterwards.  The back-to-back mb()s are deliberate; see the note at
 * the top of this file.
 */
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
	unsigned long flags;
	int stat0, value;
	int cia_cfg = 0;

	DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr, type1));
	local_irq_save(flags);

	/* Reset status register to avoid losing errors. */
	stat0 = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = stat0;
	mb();
	*(vip)CIA_IOC_CIA_ERR; /* re-read to force write */

	/* If Type1 access, must set CIA CFG. */
	if (type1) {
		cia_cfg = *(vip)CIA_IOC_CFG;
		*(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	mb();
	draina();
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();

	/* Access configuration space. */
	value = *(vip)addr;
	mb();
	mb(); /* magic */
	if (mcheck_taken(0)) {
		/* Machine check taken => no device; return all ones. */
		mcheck_taken(0) = 0;
		value = 0xffffffff;
		mb();
	}
	mcheck_expected(0) = 0;
	mb();

	/* If Type1 access, must reset IOC CFG so normal IO space ops work. */
	if (type1) {
		*(vip)CIA_IOC_CFG = cia_cfg;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	local_irq_restore(flags);
	DBGC(("done\n"));

	return value;
}
/*
 * Write one 32-bit word VALUE to PCI configuration space at ADDR.
 *
 * Mirror image of conf_read(): interrupts off, machine checks
 * expected (a write to a nonexistent device is silently discarded),
 * CIA CFG temporarily switched for type 1 cycles, and the magic
 * mb()/readback sequence to force the posted write out.
 */
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
	unsigned long flags;
	int stat0, cia_cfg = 0;

	DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr, type1));
	local_irq_save(flags);

	/* Reset status register to avoid losing errors. */
	stat0 = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = stat0;
	mb();
	*(vip)CIA_IOC_CIA_ERR; /* re-read to force write */

	/* If Type1 access, must set CIA CFG. */
	if (type1) {
		cia_cfg = *(vip)CIA_IOC_CFG;
		*(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	mb();
	draina();
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();

	/* Access configuration space. */
	*(vip)addr = value;
	mb();
	*(vip)addr; /* read back to force the write */

	mcheck_expected(0) = 0;
	mb();

	/* If Type1 access, must reset IOC CFG so normal IO space ops work. */
	if (type1) {
		*(vip)CIA_IOC_CFG = cia_cfg;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	local_irq_restore(flags);
	DBGC(("done\n"));
}
/*
 * pci_ops read hook: read SIZE bytes (1, 2 or 4) of config space for
 * (bus, devfn) at offset WHERE into *value.  The byte-enable encoding
 * in the low config-address bits is (size - 1) * 8; the result is
 * shifted down so the requested bytes land in the low bits of *value.
 */
static int
cia_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
		u32 *value)
{
	unsigned long cfg_addr, pci_addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	cfg_addr = (pci_addr << 5) + (size - 1) * 8 + CIA_CONF;
	*value = conf_read(cfg_addr, type1) >> ((where & 3) * 8);
	return PCIBIOS_SUCCESSFUL;
}
/*
 * pci_ops write hook: write SIZE bytes (1, 2 or 4) of VALUE to config
 * space for (bus, devfn) at offset WHERE.  VALUE is shifted up to the
 * byte lane selected by the low address bits before the write.
 */
static int
cia_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
		 u32 value)
{
	unsigned long cfg_addr, pci_addr;
	unsigned char type1;

	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
		return PCIBIOS_DEVICE_NOT_FOUND;

	cfg_addr = (pci_addr << 5) + (size - 1) * 8 + CIA_CONF;
	conf_write(cfg_addr, value << ((where & 3) * 8), type1);
	return PCIBIOS_SUCCESSFUL;
}
/* Config-space accessors handed to the generic PCI layer. */
struct pci_ops cia_pci_ops =
{
	.read =		cia_read_config,
	.write =	cia_write_config,
};
/*
* CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb.
* It cannot be invalidated. Rather than hard code the pass numbers,
* actually try the tbia to see if it works.
*/
/*
 * Invalidate the CIA scatter-gather TLB with the PCI_TBIA register
 * (flushes locked and unlocked entries).  Only usable on chips whose
 * tbia actually works -- see the comment above and verify_tb_operation().
 * The trailing read forces out the posted write.
 */
void
cia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	wmb();
	*(vip)CIA_IOC_PCI_TBIA = 3;	/* Flush all locked and unlocked. */
	mb();
	*(vip)CIA_IOC_PCI_TBIA;
}
/*
* On PYXIS, even if the tbia works, we cannot use it. It effectively locks
* the chip (as well as direct write to the tag registers) if there is a
* SG DMA operation in progress. This is true at least for PYXIS rev. 1,
* so always use the method below.
*/
/*
* This is the method NT and NetBSD use.
*
* Allocate mappings, and put the chip into DMA loopback mode to read a
* garbage page. This works by causing TLB misses, causing old entries to
* be purged to make room for the new entries coming in for the garbage page.
*/
#define CIA_BROKEN_TBIA_BASE 0x30000000
#define CIA_BROKEN_TBIA_SIZE 1024
/* Always called with interrupts disabled */
/*
 * Broken-tbia workaround: flush the SG TLB by forcing misses instead
 * of using PCI_TBIA.  The chip is put into PCI loopback mode and we
 * read from the dedicated garbage window (CIA_BROKEN_TBIA_BASE) at
 * 32K strides, evicting the old entries.  Always called with
 * interrupts disabled.
 */
void
cia_pci_tbi_try2(struct pci_controller *hose,
		 dma_addr_t start, dma_addr_t end)
{
	void __iomem *bus_addr;
	int ctrl;

	/* Put the chip into PCI loopback mode. */
	mb();
	ctrl = *(vip)CIA_IOC_CIA_CTRL;
	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	/* Read from PCI dense memory space at TBI_ADDR, skipping 32k on
	   each read. This forces SG TLB misses. NetBSD claims that the
	   TLB entries are not quite LRU, meaning that we need to read more
	   times than there are actual tags. The 2117x docs claim strict
	   round-robin. Oh well, we've come this far... */
	/* Even better - as seen on the PYXIS rev 1 the TLB tags 0-3 can
	   be filled by the TLB misses *only once* after being invalidated
	   (by tbia or direct write). Next misses won't update them even
	   though the lock bits are cleared. Tags 4-7 are "quite LRU" though,
	   so use them and read at window 3 base exactly 4 times. Reading
	   more sometimes makes the chip crazy.  -ink */
	bus_addr = cia_ioremap(CIA_BROKEN_TBIA_BASE, 32768 * 4);

	cia_readl(bus_addr + 0x00000);
	cia_readl(bus_addr + 0x08000);
	cia_readl(bus_addr + 0x10000);
	cia_readl(bus_addr + 0x18000);

	cia_iounmap(bus_addr);

	/* Restore normal PCI operation. */
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();
}
/*
 * Set up the DMA window used by the tbia workaround: a minimal 1K
 * scatter-gather map at CIA_BROKEN_TBIA_BASE whose PTEs all point at
 * one 32K-aligned garbage page (allocated from bootmem), so loopback
 * reads through it always hit valid translations.
 */
static inline void
cia_prepare_tbia_workaround(int window)
{
	unsigned long *ppte, pte;
	long i;

	/* Use minimal 1K map. */
	ppte = __alloc_bootmem(CIA_BROKEN_TBIA_SIZE, 32768, 0);
	pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;

	for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
		ppte[i] = pte;

	*(vip)CIA_IOC_PCI_Wn_BASE(window) = CIA_BROKEN_TBIA_BASE | 3;
	*(vip)CIA_IOC_PCI_Wn_MASK(window)
		= (CIA_BROKEN_TBIA_SIZE*1024 - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_Tn_BASE(window) = virt_to_phys(ppte) >> 2;
}
/*
 * Boot-time self-test of the scatter-gather TLB, run in PCI loopback
 * mode: verify the TLB registers can be updated, that SG DMA reads
 * work, whether tbia works (else install the cia_pci_tbi_try2
 * workaround), that PTE writes are snooped from the EV5 cache, that
 * invalid PTEs are reloaded from the page table, and that machine
 * checks fire.  On fatal failure the SG windows are disabled entirely.
 */
static void __init
verify_tb_operation(void)
{
	static int page[PAGE_SIZE/4]
		__attribute__((aligned(PAGE_SIZE)))
		__initdata = { 0 };

	struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
	int ctrl, addr0, tag0, pte0, data0;
	int temp, use_tbia_try2 = 0;
	void __iomem *bus_addr;

	/* pyxis -- tbia is broken */
	if (pci_isa_hose->dense_io_base)
		use_tbia_try2 = 1;

	/* Put the chip into PCI loopback mode. */
	mb();
	ctrl = *(vip)CIA_IOC_CIA_CTRL;
	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	/* Write a valid entry directly into the TLB registers. */

	addr0 = arena->dma_base;
	tag0 = addr0 | 1;
	pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;

	*(vip)CIA_IOC_TB_TAGn(0) = tag0;
	*(vip)CIA_IOC_TB_TAGn(1) = 0;
	*(vip)CIA_IOC_TB_TAGn(2) = 0;
	*(vip)CIA_IOC_TB_TAGn(3) = 0;
	*(vip)CIA_IOC_TB_TAGn(4) = 0;
	*(vip)CIA_IOC_TB_TAGn(5) = 0;
	*(vip)CIA_IOC_TB_TAGn(6) = 0;
	*(vip)CIA_IOC_TB_TAGn(7) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0;
	*(vip)CIA_IOC_TBn_PAGEm(0,1) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,2) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,3) = 0;
	mb();

	/* Get a usable bus address */
	bus_addr = cia_ioremap(addr0, 8*PAGE_SIZE);

	/* First, verify we can read back what we've written.  If
	   this fails, we can't be sure of any of the other testing
	   we're going to do, so bail. */
	/* ??? Actually, we could do the work with machine checks.
	   By passing this register update test, we pretty much
	   guarantee that cia_pci_tbi_try1 works.  If this test
	   fails, cia_pci_tbi_try2 might still work. */

	temp = *(vip)CIA_IOC_TB_TAGn(0);
	if (temp != tag0) {
		printk("pci: failed tb register update test "
		       "(tag0 %#x != %#x)\n", temp, tag0);
		goto failed;
	}
	temp = *(vip)CIA_IOC_TB_TAGn(1);
	if (temp != 0) {
		printk("pci: failed tb register update test "
		       "(tag1 %#x != 0)\n", temp);
		goto failed;
	}
	temp = *(vip)CIA_IOC_TBn_PAGEm(0,0);
	if (temp != pte0) {
		printk("pci: failed tb register update test "
		       "(pte0 %#x != %#x)\n", temp, pte0);
		goto failed;
	}
	printk("pci: passed tb register update test\n");

	/* Second, verify we can actually do I/O through this entry. */

	data0 = 0xdeadbeef;
	page[0] = data0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed sg loopback i/o read test (mcheck)\n");
		goto failed;
	}
	if (temp != data0) {
		printk("pci: failed sg loopback i/o read test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	}
	printk("pci: passed sg loopback i/o read test\n");

	/* Third, try to invalidate the TLB. */

	if (! use_tbia_try2) {
		cia_pci_tbi(arena->hose, 0, -1);
		temp = *(vip)CIA_IOC_TB_TAGn(0);
		if (temp & 1) {
			use_tbia_try2 = 1;
			printk("pci: failed tbia test; workaround available\n");
		} else {
			printk("pci: passed tbia test\n");
		}
	}

	/* Fourth, verify the TLB snoops the EV5's caches when
	   doing a tlb fill. */

	data0 = 0x5adda15e;
	page[0] = data0;
	arena->ptes[4] = pte0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 4*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed pte write cache snoop test (mcheck)\n");
		goto failed;
	}
	if (temp != data0) {
		printk("pci: failed pte write cache snoop test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	}
	printk("pci: passed pte write cache snoop test\n");

	/* Fifth, verify that a previously invalid PTE entry gets
	   filled from the page table. */

	data0 = 0xabcdef12;
	page[0] = data0;
	arena->ptes[5] = pte0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 5*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed valid tag invalid pte reload test "
		       "(mcheck; workaround available)\n");
		/* Work around this bug by aligning new allocations
		   on 4 page boundaries. */
		arena->align_entry = 4;
	} else if (temp != data0) {
		printk("pci: failed valid tag invalid pte reload test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	} else {
		printk("pci: passed valid tag invalid pte reload test\n");
	}

	/* Sixth, verify machine checks are working.  Test invalid
	   pte under the same valid tag as we used above. */

	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 6*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	printk("pci: %s pci machine check test\n",
	       mcheck_taken(0) ? "passed" : "failed");

	/* Clean up after the tests. */
	arena->ptes[4] = 0;
	arena->ptes[5] = 0;

	if (use_tbia_try2) {
		alpha_mv.mv_pci_tbi = cia_pci_tbi_try2;

		/* Tags 0-3 must be disabled if we use this workaraund. */
		wmb();
		*(vip)CIA_IOC_TB_TAGn(0) = 2;
		*(vip)CIA_IOC_TB_TAGn(1) = 2;
		*(vip)CIA_IOC_TB_TAGn(2) = 2;
		*(vip)CIA_IOC_TB_TAGn(3) = 2;

		printk("pci: tbia workaround enabled\n");
	}
	alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

exit:
	/* unmap the bus addr */
	cia_iounmap(bus_addr);

	/* Restore normal PCI operation. */
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();
	return;

failed:
	printk("pci: disabling sg translation window\n");
	*(vip)CIA_IOC_PCI_W0_BASE = 0;
	*(vip)CIA_IOC_PCI_W1_BASE = 0;
	pci_isa_hose->sg_isa = NULL;
	alpha_mv.mv_pci_tbi = NULL;
	goto exit;
}
#if defined(ALPHA_RESTORE_SRM_SETUP)
/* Save CIA configuration data as the console had it set up. */
/* Snapshot of the CIA/PYXIS configuration as the SRM console left it,
   captured at boot and restored before returning to the console. */
struct
{
	unsigned int hae_mem;		/* sparse memory HAE */
	unsigned int hae_io;		/* sparse I/O HAE */
	unsigned int pci_dac_offset;	/* PCI_W_DAC register */
	unsigned int err_mask;		/* error-reporting mask */
	unsigned int cia_ctrl;		/* main control register */
	unsigned int cia_cnfg;		/* CIA_CNFG (pyxis only, else 0) */
	struct {
		unsigned int w_base;	/* DMA window base */
		unsigned int w_mask;	/* DMA window size mask */
		unsigned int t_base;	/* translation (SG PTE) base */
	} window[4];
} saved_config __attribute((common));
/*
 * Record the chipset registers the SRM console set up (error mask,
 * control, HAEs, DAC offset, all four DMA windows) so they can be put
 * back by cia_restore_srm_settings() before returning to the console.
 */
void
cia_save_srm_settings(int is_pyxis)
{
	int i;

	/* Save some important registers. */
	saved_config.err_mask = *(vip)CIA_IOC_ERR_MASK;
	saved_config.cia_ctrl = *(vip)CIA_IOC_CIA_CTRL;
	saved_config.hae_mem = *(vip)CIA_IOC_HAE_MEM;
	saved_config.hae_io = *(vip)CIA_IOC_HAE_IO;
	saved_config.pci_dac_offset = *(vip)CIA_IOC_PCI_W_DAC;

	/* CIA_CNFG only exists on pyxis; use 0 as the "not pyxis" marker. */
	if (is_pyxis)
		saved_config.cia_cnfg = *(vip)CIA_IOC_CIA_CNFG;
	else
		saved_config.cia_cnfg = 0;

	/* Save DMA windows configuration. */
	for (i = 0; i < 4; i++) {
		saved_config.window[i].w_base = *(vip)CIA_IOC_PCI_Wn_BASE(i);
		saved_config.window[i].w_mask = *(vip)CIA_IOC_PCI_Wn_MASK(i);
		saved_config.window[i].t_base = *(vip)CIA_IOC_PCI_Tn_BASE(i);
	}
	mb();
}
/*
 * Write back the register snapshot taken by cia_save_srm_settings(),
 * returning the chipset to the state the SRM console expects.
 */
void
cia_restore_srm_settings(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		*(vip)CIA_IOC_PCI_Wn_BASE(i) = saved_config.window[i].w_base;
		*(vip)CIA_IOC_PCI_Wn_MASK(i) = saved_config.window[i].w_mask;
		*(vip)CIA_IOC_PCI_Tn_BASE(i) = saved_config.window[i].t_base;
	}

	*(vip)CIA_IOC_HAE_MEM = saved_config.hae_mem;
	*(vip)CIA_IOC_HAE_IO = saved_config.hae_io;
	*(vip)CIA_IOC_PCI_W_DAC = saved_config.pci_dac_offset;
	*(vip)CIA_IOC_ERR_MASK = saved_config.err_mask;
	*(vip)CIA_IOC_CIA_CTRL = saved_config.cia_ctrl;

	if (saved_config.cia_cnfg) /* Must be pyxis. */
		*(vip)CIA_IOC_CIA_CNFG = saved_config.cia_cnfg;

	mb();
}
#else /* ALPHA_RESTORE_SRM_SETUP */
#define cia_save_srm_settings(p) do {} while (0)
#define cia_restore_srm_settings() do {} while (0)
#endif /* ALPHA_RESTORE_SRM_SETUP */
/*
 * Common CIA/PYXIS chipset initialisation: enable error reporting and
 * machine checks, reset the CFG/HAE registers, create the single PCI
 * hose with the address spaces appropriate for the chip variant, and
 * program the PCI-to-memory DMA windows.
 */
static void __init
do_init_arch(int is_pyxis)
{
	struct pci_controller *hose;
	int temp, cia_rev, tbia_window;

	cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK;
	printk("pci: cia revision %d%s\n",
	       cia_rev, is_pyxis ? " (pyxis)" : "");

	if (alpha_using_srm)
		cia_save_srm_settings(is_pyxis);

	/* Set up error reporting. */
	temp = *(vip)CIA_IOC_ERR_MASK;
	temp &= ~(CIA_ERR_CPU_PE | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV
		  | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT);
	*(vip)CIA_IOC_ERR_MASK = temp;

	/* Clear all currently pending errors. */
	temp = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = temp;

	/* Turn on mchecks. */
	temp = *(vip)CIA_IOC_CIA_CTRL;
	temp |= CIA_CTRL_FILL_ERR_EN | CIA_CTRL_MCHK_ERR_EN;
	*(vip)CIA_IOC_CIA_CTRL = temp;

	/* Clear the CFG register, which gets used for PCI config space
	   accesses.  That is the way we want to use it, and we do not
	   want to depend on what ARC or SRM might have left behind. */
	*(vip)CIA_IOC_CFG = 0;

	/* Zero the HAEs. */
	*(vip)CIA_IOC_HAE_MEM = 0;
	*(vip)CIA_IOC_HAE_IO = 0;

	/* For PYXIS, we always use BWX bus and i/o accesses.  To that end,
	   make sure they're enabled on the controller.  At the same time,
	   enable the monster window. */
	if (is_pyxis) {
		temp = *(vip)CIA_IOC_CIA_CNFG;
		temp |= CIA_CNFG_IOA_BWEN | CIA_CNFG_PCI_MWEN;
		*(vip)CIA_IOC_CIA_CNFG = temp;
	}

	/* Synchronize with all previous changes. */
	mb();
	*(vip)CIA_IOC_CIA_REV;

	/*
	 * Create our single hose.
	 */
	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	if (! is_pyxis) {
		struct resource *hae_mem = alloc_resource();
		hose->mem_space = hae_mem;

		hae_mem->start = 0;
		hae_mem->end = CIA_MEM_R1_MASK;
		hae_mem->name = pci_hae0_name;
		hae_mem->flags = IORESOURCE_MEM;

		if (request_resource(&iomem_resource, hae_mem) < 0)
			printk(KERN_ERR "Failed to request HAE_MEM\n");

		hose->sparse_mem_base = CIA_SPARSE_MEM - IDENT_ADDR;
		hose->dense_mem_base = CIA_DENSE_MEM - IDENT_ADDR;
		hose->sparse_io_base = CIA_IO - IDENT_ADDR;
		hose->dense_io_base = 0;
	} else {
		/* PYXIS: BWX (dense) access only, no sparse spaces. */
		hose->sparse_mem_base = 0;
		hose->dense_mem_base = CIA_BW_MEM - IDENT_ADDR;
		hose->sparse_io_base = 0;
		hose->dense_io_base = CIA_BW_IO - IDENT_ADDR;
	}

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Window 0 is S/G 8MB at 8MB (for isa)
	 * Window 1 is S/G 1MB at 768MB (for tbia) (unused for CIA rev 1)
	 * Window 2 is direct access 2GB at 2GB
	 * Window 3 is DAC access 4GB at 8GB (or S/G for tbia if CIA rev 1)
	 *
	 * ??? NetBSD hints that page tables must be aligned to 32K,
	 * possibly due to a hardware bug.  This is over-aligned
	 * from the 8K alignment one would expect for an 8MB window.
	 * No description of what revisions affected.
	 */
	hose->sg_pci = NULL;
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 32768);

	__direct_map_base = 0x80000000;
	__direct_map_size = 0x80000000;

	*(vip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3;
	*(vip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;

	*(vip)CIA_IOC_PCI_W2_BASE = __direct_map_base | 1;
	*(vip)CIA_IOC_PCI_W2_MASK = (__direct_map_size - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T2_BASE = 0 >> 2;

	/* On PYXIS we have the monster window, selected by bit 40, so
	   there is no need for window3 to be enabled.

	   On CIA, we don't have true arbitrary addressing -- bits <39:32>
	   are compared against W_DAC.  We can, however, directly map 4GB,
	   which is better than before.  However, due to assumptions made
	   elsewhere, we should not claim that we support DAC unless that
	   4GB covers all of physical memory.

	   On CIA rev 1, apparently W1 and W2 can't be used for SG.
	   At least, there are reports that it doesn't work for Alcor.
	   In that case, we have no choice but to use W3 for the TBIA
	   workaround, which means we can't use DAC at all. */

	tbia_window = 1;
	if (is_pyxis) {
		*(vip)CIA_IOC_PCI_W3_BASE = 0;
	} else if (cia_rev == 1) {
		*(vip)CIA_IOC_PCI_W1_BASE = 0;
		tbia_window = 3;
	} else if (max_low_pfn > (0x100000000UL >> PAGE_SHIFT)) {
		*(vip)CIA_IOC_PCI_W3_BASE = 0;
	} else {
		*(vip)CIA_IOC_PCI_W3_BASE = 0x00000000 | 1 | 8;
		*(vip)CIA_IOC_PCI_W3_MASK = 0xfff00000;
		*(vip)CIA_IOC_PCI_T3_BASE = 0 >> 2;

		alpha_mv.pci_dac_offset = 0x200000000UL;
		*(vip)CIA_IOC_PCI_W_DAC = alpha_mv.pci_dac_offset >> 32;
	}

	/* Prepare workaround for apparently broken tbia. */
	cia_prepare_tbia_workaround(tbia_window);
}
/* Machine-vector entry point for plain CIA systems. */
void __init
cia_init_arch(void)
{
	do_init_arch(0);
}
/*
 * Machine-vector entry point for PYXIS systems.  Before the common
 * init, calibrate the CPU clock frequency against the PYXIS real time
 * counter and patch the result into the HWRPB.
 */
void __init
pyxis_init_arch(void)
{
	/* On pyxis machines we can precisely calculate the
	   CPU clock frequency using pyxis real time counter.
	   It's especially useful for SX164 with broken RTC.

	   Both CPU and chipset are driven by the single 16.666M
	   or 16.667M crystal oscillator. PYXIS_RT_COUNT clock is
	   66.66 MHz. -ink */

	unsigned int cc0, cc1;
	unsigned long pyxis_cc;

	__asm__ __volatile__ ("rpcc %0" : "=r"(cc0));
	pyxis_cc = *(vulp)PYXIS_RT_COUNT;
	/* Busy-wait 4096 RT counter ticks, bracketed by cycle counts. */
	do { } while(*(vulp)PYXIS_RT_COUNT - pyxis_cc < 4096);
	__asm__ __volatile__ ("rpcc %0" : "=r"(cc1));
	cc1 -= cc0;
	/* 4096 ticks at 66.66 MHz: freq = (cc1 / 4096) * 66666666 Hz,
	   computed as (cc1 >> 11) * 100000000 / 3. */
	hwrpb->cycle_freq = ((cc1 >> 11) * 100000000UL) / 3;
	hwrpb_update_checksum(hwrpb);

	do_init_arch(1);
}
/* Shutdown hook: hand the chipset back to the SRM console by
   restoring its saved register settings. */
void
cia_kill_arch(int mode)
{
	if (alpha_using_srm)
		cia_restore_srm_settings();
}
/* PCI-phase init: run the SG TLB self-test, then the generic scan. */
void __init
cia_init_pci(void)
{
	/* Must delay this from init_arch, as we need machine checks. */
	verify_tb_operation();
	common_init_pci();
}
/* Acknowledge any pending CIA error bits (write-one-to-clear),
   forcing the posted write out with a re-read. */
static inline void
cia_pci_clr_err(void)
{
	int jd;

	jd = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = jd;
	mb();
	*(vip)CIA_IOC_CIA_ERR; /* re-read to force write. */
}
#ifdef CONFIG_VERBOSE_MCHECK
static void
cia_decode_pci_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
{
static const char * const pci_cmd_desc[16] = {
"Interrupt Acknowledge", "Special Cycle", "I/O Read",
"I/O Write", "Reserved 0x4", "Reserved 0x5", "Memory Read",
"Memory Write", "Reserved 0x8", "Reserved 0x9",
"Configuration Read", "Configuration Write",
"Memory Read Multiple", "Dual Address Cycle",
"Memory Read Line", "Memory Write and Invalidate"
};
if (cia->cia_err & (CIA_ERR_COR_ERR
| CIA_ERR_UN_COR_ERR
| CIA_ERR_MEM_NEM
| CIA_ERR_PA_PTE_INV)) {
static const char * const window_desc[6] = {
"No window active", "Window 0 hit", "Window 1 hit",
"Window 2 hit", "Window 3 hit", "Monster window hit"
};
const char *window;
const char *cmd;
unsigned long addr, tmp;
int lock, dac;
cmd = pci_cmd_desc[cia->pci_err0 & 0x7];
lock = (cia->pci_err0 >> 4) & 1;
dac = (cia->pci_err0 >> 5) & 1;
tmp = (cia->pci_err0 >> 8) & 0x1F;
tmp = ffs(tmp);
window = window_desc[tmp];
addr = cia->pci_err1;
if (dac) {
tmp = *(vip)CIA_IOC_PCI_W_DAC & 0xFFUL;
addr |= tmp << 32;
}
printk(KERN_CRIT "CIA machine check: %s\n", msg);
printk(KERN_CRIT " DMA command: %s\n", cmd);
printk(KERN_CRIT " PCI address: %#010lx\n", addr);
printk(KERN_CRIT " %s, Lock: %d, DAC: %d\n",
window, lock, dac);
} else if (cia->cia_err & (CIA_ERR_PERR
| CIA_ERR_PCI_ADDR_PE
| CIA_ERR_RCVD_MAS_ABT
| CIA_ERR_RCVD_TAR_ABT
| CIA_ERR_IOA_TIMEOUT)) {
static const char * const master_st_desc[16] = {
"Idle", "Drive bus", "Address step cycle",
"Address cycle", "Data cycle", "Last read data cycle",
"Last write data cycle", "Read stop cycle",
"Write stop cycle", "Read turnaround cycle",
"Write turnaround cycle", "Reserved 0xB",
"Reserved 0xC", "Reserved 0xD", "Reserved 0xE",
"Unknown state"
};
static const char * const target_st_desc[16] = {
"Idle", "Busy", "Read data cycle", "Write data cycle",
"Read stop cycle", "Write stop cycle",
"Read turnaround cycle", "Write turnaround cycle",
"Read wait cycle", "Write wait cycle",
"Reserved 0xA", "Reserved 0xB", "Reserved 0xC",
"Reserved 0xD", "Reserved 0xE", "Unknown state"
};
const char *cmd;
const char *master, *target;
unsigned long addr, tmp;
int dac;
master = master_st_desc[(cia->pci_err0 >> 16) & 0xF];
target = target_st_desc[(cia->pci_err0 >> 20) & 0xF];
cmd = pci_cmd_desc[(cia->pci_err0 >> 24) & 0xF];
dac = (cia->pci_err0 >> 28) & 1;
addr = cia->pci_err2;
if (dac) {
tmp = *(volatile int *)CIA_IOC_PCI_W_DAC & 0xFFUL;
addr |= tmp << 32;
}
printk(KERN_CRIT "CIA machine check: %s\n", msg);
printk(KERN_CRIT " PCI command: %s\n", cmd);
printk(KERN_CRIT " Master state: %s, Target state: %s\n",
master, target);
printk(KERN_CRIT " PCI address: %#010lx, DAC: %d\n",
addr, dac);
} else {
printk(KERN_CRIT "CIA machine check: %s\n", msg);
printk(KERN_CRIT " Unknown PCI error\n");
printk(KERN_CRIT " PCI_ERR0 = %#08lx", cia->pci_err0);
printk(KERN_CRIT " PCI_ERR1 = %#08lx", cia->pci_err1);
printk(KERN_CRIT " PCI_ERR2 = %#08lx", cia->pci_err2);
}
}
static void
cia_decode_mem_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
{
	/*
	 * Decode the memory-port section of a CIA machine-check logout
	 * frame: the failing command, the 36-bit memory address and byte
	 * mask, the memory sequencer state and the selected memory set.
	 * Output goes to the console at KERN_CRIT.
	 */
	unsigned long mem_port_addr;
	unsigned long mem_port_mask;
	const char *mem_port_cmd;
	const char *seq_state;
	const char *set_select;
	unsigned long tmp;

	/* If this is a DMA command, also decode the PCI bits. */
	if ((cia->mem_err1 >> 20) & 1)
		cia_decode_pci_error(cia, msg);
	else
		printk(KERN_CRIT "CIA machine check: %s\n", msg);

	/* Low address bits come from MEM_ERR0, the top two from MEM_ERR1. */
	mem_port_addr = cia->mem_err0 & 0xfffffff0;
	mem_port_addr |= (cia->mem_err1 & 0x83UL) << 32;
	mem_port_mask = (cia->mem_err1 >> 12) & 0xF;

	/*
	 * Command code: MEM_ERR1<11:8>, with the DMA indicator bit <20>
	 * folded in as bit 4 so DMA and CPU commands can share one decode.
	 */
	tmp = (cia->mem_err1 >> 8) & 0xF;
	tmp |= ((cia->mem_err1 >> 20) & 1) << 4;
	if ((tmp & 0x1E) == 0x06)
		mem_port_cmd = "WRITE BLOCK or WRITE BLOCK LOCK";
	else if ((tmp & 0x1C) == 0x08)
		mem_port_cmd = "READ MISS or READ MISS MODIFY";
	else if (tmp == 0x1C)
		mem_port_cmd = "BC VICTIM";
	else if ((tmp & 0x1E) == 0x0E)
		mem_port_cmd = "READ MISS MODIFY";
	else if ((tmp & 0x1C) == 0x18)
		mem_port_cmd = "DMA READ or DMA READ MODIFY";
	else if ((tmp & 0x1E) == 0x12)
		mem_port_cmd = "DMA WRITE";
	else
		mem_port_cmd = "Unknown";

	/* Memory sequencer state, MEM_ERR1<19:16>. */
	tmp = (cia->mem_err1 >> 16) & 0xF;
	switch (tmp) {
	case 0x0:
		seq_state = "Idle";
		break;
	case 0x1:
		seq_state = "DMA READ or DMA WRITE";
		break;
	case 0x2: case 0x3:
		seq_state = "READ MISS (or READ MISS MODIFY) with victim";
		break;
	case 0x4: case 0x5: case 0x6:
		seq_state = "READ MISS (or READ MISS MODIFY) with no victim";
		break;
	case 0x8: case 0x9: case 0xB:
		seq_state = "Refresh";
		break;
	case 0xC:
		seq_state = "Idle, waiting for DMA pending read";
		break;
	case 0xE: case 0xF:
		seq_state = "Idle, ras precharge";
		break;
	default:
		seq_state = "Unknown";
		break;
	}

	/* Memory set selection, MEM_ERR1<28:24>. */
	tmp = (cia->mem_err1 >> 24) & 0x1F;
	switch (tmp) {
	case 0x00: set_select = "Set 0 selected"; break;
	case 0x01: set_select = "Set 1 selected"; break;
	case 0x02: set_select = "Set 2 selected"; break;
	case 0x03: set_select = "Set 3 selected"; break;
	case 0x04: set_select = "Set 4 selected"; break;
	case 0x05: set_select = "Set 5 selected"; break;
	case 0x06: set_select = "Set 6 selected"; break;
	case 0x07: set_select = "Set 7 selected"; break;
	case 0x08: set_select = "Set 8 selected"; break;
	case 0x09: set_select = "Set 9 selected"; break;
	case 0x0A: set_select = "Set A selected"; break;
	case 0x0B: set_select = "Set B selected"; break;
	case 0x0C: set_select = "Set C selected"; break;
	case 0x0D: set_select = "Set D selected"; break;
	case 0x0E: set_select = "Set E selected"; break;
	case 0x0F: set_select = "Set F selected"; break;
	case 0x10: set_select = "No set selected"; break;
	case 0x1F: set_select = "Refresh cycle"; break;
	default:   set_select = "Unknown"; break;
	}

	printk(KERN_CRIT " Memory port command: %s\n", mem_port_cmd);
	printk(KERN_CRIT " Memory port address: %#010lx, mask: %#lx\n",
	       mem_port_addr, mem_port_mask);
	printk(KERN_CRIT " Memory sequencer state: %s\n", seq_state);
	printk(KERN_CRIT " Memory set: %s\n", set_select);
}
static void
cia_decode_ecc_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
{
	/*
	 * Decode an ECC machine check: print the memory-port information,
	 * then map the ECC syndrome to the failing check bit or data bit.
	 */
	long syn;
	long i;
	const char *fmt;

	cia_decode_mem_error(cia, msg);

	syn = cia->cia_syn & 0xff;
	/*
	 * A power-of-two syndrome means exactly one check bit flipped.
	 * NOTE(review): syn == 0 also satisfies this test and would print
	 * "check bit -1" (ffs(0) == 0) -- presumably a zero syndrome never
	 * accompanies a reported ECC error; confirm against the CIA spec.
	 */
	if (syn == (syn & -syn)) {
		fmt = KERN_CRIT " ECC syndrome %#x -- check bit %d\n";
		i = ffs(syn) - 1;
	} else {
		/* Syndrome -> data-bit lookup table for multi-bit syndromes. */
		static unsigned char const data_bit[64] = {
			0xCE, 0xCB, 0xD3, 0xD5,
			0xD6, 0xD9, 0xDA, 0xDC,
			0x23, 0x25, 0x26, 0x29,
			0x2A, 0x2C, 0x31, 0x34,
			0x0E, 0x0B, 0x13, 0x15,
			0x16, 0x19, 0x1A, 0x1C,
			0xE3, 0xE5, 0xE6, 0xE9,
			0xEA, 0xEC, 0xF1, 0xF4,
			0x4F, 0x4A, 0x52, 0x54,
			0x57, 0x58, 0x5B, 0x5D,
			0xA2, 0xA4, 0xA7, 0xA8,
			0xAB, 0xAD, 0xB0, 0xB5,
			0x8F, 0x8A, 0x92, 0x94,
			0x97, 0x98, 0x9B, 0x9D,
			0x62, 0x64, 0x67, 0x68,
			0x6B, 0x6D, 0x70, 0x75
		};

		/* Linear search; i == 64 afterwards means "not found". */
		for (i = 0; i < 64; ++i)
			if (data_bit[i] == syn)
				break;

		if (i < 64)
			fmt = KERN_CRIT " ECC syndrome %#x -- data bit %d\n";
		else
			fmt = KERN_CRIT " ECC syndrome %#x -- unknown bit\n";
	}

	printk (fmt, syn, i);
}
/*
 * Report a system-bus parity error: pull the failing address, command,
 * byte mask and parity bit out of the CPU error registers and log them.
 */
static void
cia_decode_parity_error(struct el_CIA_sysdata_mcheck *cia)
{
	/* System-bus command encodings, indexed by CPU_ERR1<11:8>. */
	static const char * const cmd_desc[16] = {
		"NOP", "LOCK", "FETCH", "FETCH_M", "MEMORY BARRIER",
		"SET DIRTY", "WRITE BLOCK", "WRITE BLOCK LOCK",
		"READ MISS0", "READ MISS1", "READ MISS MOD0",
		"READ MISS MOD1", "BCACHE VICTIM", "Spare",
		"READ MISS MOD STC0", "READ MISS MOD STC1"
	};

	/* 36-bit address: low bits from CPU_ERR0, top two from CPU_ERR1. */
	unsigned long bus_addr = (cia->cpu_err0 & 0xfffffff0)
				 | ((cia->cpu_err1 & 0x83UL) << 32);
	const char *cmd_name = cmd_desc[(cia->cpu_err1 >> 8) & 0xF];
	unsigned long byte_mask = (cia->cpu_err1 >> 12) & 0xF;
	int parity = (cia->cpu_err1 >> 21) & 1;

	printk(KERN_CRIT "CIA machine check: System bus parity error\n");
	printk(KERN_CRIT " Command: %s, Parity bit: %d\n", cmd_name, parity);
	printk(KERN_CRIT " Address: %#010lx, Mask: %#lx\n", bus_addr, byte_mask);
}
#endif /* CONFIG_VERBOSE_MCHECK */
static int
cia_decode_mchk(unsigned long la_ptr)
{
	/*
	 * Decode a CIA machine-check logout frame at @la_ptr.
	 * Returns 0 if the frame carries no valid CIA error (caller should
	 * treat the check as unexpected), 1 otherwise.  Verbose decoding of
	 * the individual error bits only happens under
	 * CONFIG_VERBOSE_MCHECK and when alpha_verbose_mcheck is set.
	 */
	struct el_common *com;
	struct el_CIA_sysdata_mcheck *cia;

	com = (void *)la_ptr;
	cia = (void *)(la_ptr + com->sys_offset);

	if ((cia->cia_err & CIA_ERR_VALID) == 0)
		return 0;

#ifdef CONFIG_VERBOSE_MCHECK
	if (!alpha_verbose_mcheck)
		return 1;

	/*
	 * Several error bits may be set at once; decode only the
	 * lowest-numbered one (ffs), which is the primary error.
	 */
	switch (ffs(cia->cia_err & 0xfff) - 1) {
	case 0: /* CIA_ERR_COR_ERR */
		cia_decode_ecc_error(cia, "Corrected ECC error");
		break;
	case 1: /* CIA_ERR_UN_COR_ERR */
		cia_decode_ecc_error(cia, "Uncorrected ECC error");
		break;
	case 2: /* CIA_ERR_CPU_PE */
		cia_decode_parity_error(cia);
		break;
	case 3: /* CIA_ERR_MEM_NEM */
		cia_decode_mem_error(cia, "Access to nonexistent memory");
		break;
	case 4: /* CIA_ERR_PCI_SERR */
		cia_decode_pci_error(cia, "PCI bus system error");
		break;
	case 5: /* CIA_ERR_PERR */
		cia_decode_pci_error(cia, "PCI data parity error");
		break;
	case 6: /* CIA_ERR_PCI_ADDR_PE */
		cia_decode_pci_error(cia, "PCI address parity error");
		break;
	case 7: /* CIA_ERR_RCVD_MAS_ABT */
		cia_decode_pci_error(cia, "PCI master abort");
		break;
	case 8: /* CIA_ERR_RCVD_TAR_ABT */
		cia_decode_pci_error(cia, "PCI target abort");
		break;
	case 9: /* CIA_ERR_PA_PTE_INV */
		cia_decode_pci_error(cia, "PCI invalid PTE");
		break;
	case 10: /* CIA_ERR_FROM_WRT_ERR */
		cia_decode_mem_error(cia, "Write to flash ROM attempted");
		break;
	case 11: /* CIA_ERR_IOA_TIMEOUT */
		cia_decode_pci_error(cia, "I/O timeout");
		break;
	}

	/*
	 * "Lost" bits record errors that arrived while an earlier error
	 * was still latched; report each one without detailed decoding.
	 */
	if (cia->cia_err & CIA_ERR_LOST_CORR_ERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Correctable ECC error\n");
	if (cia->cia_err & CIA_ERR_LOST_UN_CORR_ERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Uncorrectable ECC error\n");
	if (cia->cia_err & CIA_ERR_LOST_CPU_PE)
		printk(KERN_CRIT "CIA lost machine check: "
		       "System bus parity error\n");
	if (cia->cia_err & CIA_ERR_LOST_MEM_NEM)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Access to nonexistent memory\n");
	if (cia->cia_err & CIA_ERR_LOST_PERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI data parity error\n");
	if (cia->cia_err & CIA_ERR_LOST_PCI_ADDR_PE)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI address parity error\n");
	if (cia->cia_err & CIA_ERR_LOST_RCVD_MAS_ABT)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI master abort\n");
	if (cia->cia_err & CIA_ERR_LOST_RCVD_TAR_ABT)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI target abort\n");
	if (cia->cia_err & CIA_ERR_LOST_PA_PTE_INV)
		printk(KERN_CRIT "CIA lost machine check: "
		       "PCI invalid PTE\n");
	if (cia->cia_err & CIA_ERR_LOST_FROM_WRT_ERR)
		printk(KERN_CRIT "CIA lost machine check: "
		       "Write to flash ROM attempted\n");
	if (cia->cia_err & CIA_ERR_LOST_IOA_TIMEOUT)
		printk(KERN_CRIT "CIA lost machine check: "
		       "I/O timeout\n");
#endif /* CONFIG_VERBOSE_MCHECK */

	return 1;
}
/*
 * Top-level CIA machine-check handler.  The hardware error state must be
 * cleared *before* any decoding/reporting, and the barrier/drain sequence
 * below is order-sensitive -- do not reorder these calls.
 */
void
cia_machine_check(unsigned long vector, unsigned long la_ptr)
{
	int expected;

	/* Clear the error before any reporting. */
	mb();
	mb(); /* magic */
	draina();
	cia_pci_clr_err();
	wrmces(rdmces()); /* reset machine check pending flag. */
	mb();

	expected = mcheck_expected(0);
	/*
	 * Unexpected checks on the system machine-check vector may still
	 * carry a valid CIA logout frame; let the decoder decide.
	 * NOTE(review): 0x660 is presumably the Alpha system machine-check
	 * SCB vector -- confirm against the platform SRM documentation.
	 */
	if (!expected && vector == 0x660)
		expected = cia_decode_mchk(la_ptr);

	process_mcheck_info(vector, la_ptr, "CIA", expected);
}
| gpl-2.0 |
friedrich420/Note-4-TMO-AEL-Kernel-Lollipop-Source | drivers/net/wireless/b43legacy/rfkill.c | 9277 | 2661 | /*
Broadcom B43 wireless driver
RFKILL support
Copyright (c) 2007 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include "radio.h"
#include "b43legacy.h"
/* Returns TRUE, if the radio is enabled in hardware. */
bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
{
if (dev->dev->id.revision >= 3) {
if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI)
& B43legacy_MMIO_RADIO_HWENABLED_HI_MASK))
return 1;
} else {
/* To prevent CPU fault on PPC, do not read a register
* unless the interface is started; however, on resume
* for hibernation, this routine is entered early. When
* that happens, unconditionally return TRUE.
*/
if (b43legacy_status(dev) < B43legacy_STAT_STARTED)
return 1;
if (b43legacy_read16(dev, B43legacy_MMIO_RADIO_HWENABLED_LO)
& B43legacy_MMIO_RADIO_HWENABLED_LO_MASK)
return 1;
}
return 0;
}
/* The poll callback for the hardware button. */
/* The poll callback for the hardware button. */
void b43legacy_rfkill_poll(struct ieee80211_hw *hw)
{
	struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
	struct b43legacy_wldev *dev = wl->current_dev;
	struct ssb_bus *bus = dev->dev->bus;
	bool enabled;
	bool brought_up = false;

	mutex_lock(&wl->mutex);
	/*
	 * If the device is not initialized, temporarily power up the bus
	 * and the core so the rfkill register can be read; torn down again
	 * at the end (brought_up tracks this).
	 */
	if (unlikely(b43legacy_status(dev) < B43legacy_STAT_INITIALIZED)) {
		if (ssb_bus_powerup(bus, 0)) {
			/* Power-up failed; nothing to read, bail out. */
			mutex_unlock(&wl->mutex);
			return;
		}
		ssb_device_enable(dev->dev, 0);
		brought_up = true;
	}

	enabled = b43legacy_is_hw_radio_enabled(dev);
	/* Only act (log, report to rfkill, toggle the radio) on a change. */
	if (unlikely(enabled != dev->radio_hw_enable)) {
		dev->radio_hw_enable = enabled;
		b43legacyinfo(wl, "Radio hardware status changed to %s\n",
			      enabled ? "ENABLED" : "DISABLED");
		wiphy_rfkill_set_hw_state(hw->wiphy, !enabled);
		/* Bring the software radio state in line with the switch. */
		if (enabled != dev->phy.radio_on) {
			if (enabled)
				b43legacy_radio_turn_on(dev);
			else
				b43legacy_radio_turn_off(dev, 0);
		}
	}

	/* Undo the temporary power-up from above. */
	if (brought_up) {
		ssb_device_disable(dev->dev, 0);
		ssb_bus_may_powerdown(bus);
	}

	mutex_unlock(&wl->mutex);
}
| gpl-2.0 |
Vachounette/Acer_S500_Kernel | net/ipv6/netfilter/ip6table_security.c | 9277 | 2853 | /*
* "security" table for IPv6
*
* This is for use by Mandatory Access Control (MAC) security models,
* which need to be able to manage security policy in separate context
* to DAC.
*
* Based on iptable_mangle.c
*
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
* Copyright (C) 2000-2004 Netfilter Core Team <coreteam <at> netfilter.org>
* Copyright (C) 2008 Red Hat, Inc., James Morris <jmorris <at> redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/slab.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Morris <jmorris <at> redhat.com>");
MODULE_DESCRIPTION("ip6tables security table, for MAC rules");
/*
 * Hooks at which the security table is consulted: LOCAL_IN, FORWARD and
 * LOCAL_OUT.  The expansion is fully parenthesized so the macro remains a
 * single expression in any context; the previous definition expanded to an
 * unparenthesized "a | b | c", which would bind incorrectly next to
 * higher-precedence operators (e.g. comparison or shift).
 */
#define SECURITY_VALID_HOOKS	((1 << NF_INET_LOCAL_IN) | \
				 (1 << NF_INET_FORWARD) | \
				 (1 << NF_INET_LOCAL_OUT))
/*
 * Table descriptor for the per-netns "security" table; registered for
 * IPv6 at the NF_IP6_PRI_SECURITY priority (after mangle/filter).
 */
static const struct xt_table security_table = {
	.name		= "security",
	.valid_hooks	= SECURITY_VALID_HOOKS,
	.me		= THIS_MODULE,
	.af		= NFPROTO_IPV6,
	.priority	= NF_IP6_PRI_SECURITY,
};
/*
 * Netfilter hook entry point: resolve the owning network namespace from
 * whichever device is available and run the packet through that
 * namespace's security table.
 */
static unsigned int
ip6table_security_hook(unsigned int hook, struct sk_buff *skb,
		       const struct net_device *in,
		       const struct net_device *out,
		       int (*okfn)(struct sk_buff *))
{
	const struct net_device *dev = in ? in : out;
	const struct net *net = dev_net(dev);

	return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_security);
}
static struct nf_hook_ops *sectbl_ops __read_mostly;
/*
 * Per-namespace setup: build the initial (empty) ruleset and register
 * the security table in this namespace.  The template ruleset is only
 * needed during registration and is freed immediately afterwards.
 */
static int __net_init ip6table_security_net_init(struct net *net)
{
	struct ip6t_replace *repl;

	repl = ip6t_alloc_initial_table(&security_table);
	if (repl == NULL)
		return -ENOMEM;
	net->ipv6.ip6table_security =
		ip6t_register_table(net, &security_table, repl);
	/* repl was copied by ip6t_register_table; safe to free either way. */
	kfree(repl);
	if (IS_ERR(net->ipv6.ip6table_security))
		return PTR_ERR(net->ipv6.ip6table_security);

	return 0;
}
/* Per-namespace teardown: unregister this namespace's security table. */
static void __net_exit ip6table_security_net_exit(struct net *net)
{
	ip6t_unregister_table(net, net->ipv6.ip6table_security);
}
/* Hooks the table setup/teardown into network-namespace lifecycle. */
static struct pernet_operations ip6table_security_net_ops = {
	.init = ip6table_security_net_init,
	.exit = ip6table_security_net_exit,
};
/*
 * Module init: register the pernet operations first (creates the table
 * in every existing namespace), then link the netfilter hooks.  On hook
 * failure the pernet registration is rolled back.
 */
static int __init ip6table_security_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6table_security_net_ops);
	if (ret < 0)
		return ret;

	sectbl_ops = xt_hook_link(&security_table, ip6table_security_hook);
	if (IS_ERR(sectbl_ops)) {
		ret = PTR_ERR(sectbl_ops);
		goto cleanup_table;
	}

	return ret;

cleanup_table:
	unregister_pernet_subsys(&ip6table_security_net_ops);
	return ret;
}
/* Module exit: unlink hooks first, then drop the pernet registration. */
static void __exit ip6table_security_fini(void)
{
	xt_hook_unlink(&security_table, sectbl_ops);
	unregister_pernet_subsys(&ip6table_security_net_ops);
}

module_init(ip6table_security_init);
module_exit(ip6table_security_fini);
| gpl-2.0 |
wimpknocker/lge-kernel-lproj | arch/mips/txx9/rbtx4927/prom.c | 9533 | 1725 | /*
* rbtx4927 specific prom routines
*
* Author: MontaVista Software, Inc.
* source@mvista.com
*
* Copyright 2001-2002 MontaVista Software Inc.
*
* Copyright (C) 2004 MontaVista Software Inc.
* Author: Manish Lachwani, mlachwani@mvista.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <asm/bootinfo.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/rbtx4927.h>
/*
 * Early boot setup for the RBTX4927 board: register all detected RAM
 * and bring up the early SIO console on serial port 0.
 * NOTE(review): the 0xfffffffffULL mask truncates the SIO register
 * address to 36 bits -- presumably the TX4927 physical address width;
 * confirm against the SoC datasheet.
 */
void __init rbtx4927_prom_init(void)
{
	add_memory_region(0, tx4927_get_mem_size(), BOOT_MEM_RAM);
	txx9_sio_putchar_init(TX4927_SIO_REG(0) & 0xfffffffffULL);
}
| gpl-2.0 |
pbeeler/Linux-stable | drivers/pcmcia/pxa2xx_palmtx.c | 9789 | 2823 | /*
* linux/drivers/pcmcia/pxa2xx_palmtx.c
*
* Driver for Palm T|X PCMCIA
*
* Copyright (C) 2007-2011 Marek Vasut <marek.vasut@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
#include <mach/palmtx.h>
#include "soc_common.h"
/*
 * Power and reset GPIOs claimed for the Palm T|X PCMCIA slot; requested
 * as a batch in palmtx_pcmcia_hw_init() and released in hw_shutdown().
 */
static struct gpio palmtx_pcmcia_gpios[] = {
	{ GPIO_NR_PALMTX_PCMCIA_POWER1, GPIOF_INIT_LOW,	"PCMCIA Power 1" },
	{ GPIO_NR_PALMTX_PCMCIA_POWER2, GPIOF_INIT_LOW,	"PCMCIA Power 2" },
	{ GPIO_NR_PALMTX_PCMCIA_RESET,	GPIOF_INIT_HIGH,"PCMCIA Reset" },
};
/*
 * Socket hardware init: describe the ready line for the soc_common core
 * and claim the power/reset GPIOs in one batch.  Returns the result of
 * the GPIO request (0 on success).
 */
static int palmtx_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
	skt->stat[SOC_STAT_RDY].gpio = GPIO_NR_PALMTX_PCMCIA_READY;
	skt->stat[SOC_STAT_RDY].name = "PCMCIA Ready";

	return gpio_request_array(palmtx_pcmcia_gpios,
				  ARRAY_SIZE(palmtx_pcmcia_gpios));
}
/* Release the GPIOs claimed in palmtx_pcmcia_hw_init(). */
static void palmtx_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
	gpio_free_array(palmtx_pcmcia_gpios, ARRAY_SIZE(palmtx_pcmcia_gpios));
}
/*
 * Report socket status: the slot has no card-detect line, so the card
 * is reported as always inserted, powered at 3.3V.
 */
static void palmtx_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
					struct pcmcia_state *state)
{
	state->detect = 1; /* always inserted */
	state->vs_3v  = 1;
	state->vs_Xv  = 0;
}
/*
 * Apply socket configuration: both power rails are driven on
 * unconditionally and only the reset line follows the requested state.
 * NOTE(review): state->Vcc is ignored (power is never switched off
 * here) -- presumably intentional for this board; confirm.
 */
static int
palmtx_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
				const socket_state_t *state)
{
	gpio_set_value(GPIO_NR_PALMTX_PCMCIA_POWER1, 1);
	gpio_set_value(GPIO_NR_PALMTX_PCMCIA_POWER2, 1);
	gpio_set_value(GPIO_NR_PALMTX_PCMCIA_RESET,
			!!(state->flags & SS_RESET));

	return 0;
}
/* Board callbacks handed to the pxa2xx-pcmcia driver (one socket). */
static struct pcmcia_low_level palmtx_pcmcia_ops = {
	.owner			= THIS_MODULE,

	.first			= 0,
	.nr			= 1,

	.hw_init		= palmtx_pcmcia_hw_init,
	.hw_shutdown		= palmtx_pcmcia_hw_shutdown,

	.socket_state		= palmtx_pcmcia_socket_state,
	.configure_socket	= palmtx_pcmcia_configure_socket,
};
static struct platform_device *palmtx_pcmcia_device;
/*
 * Register a "pxa2xx-pcmcia" platform device carrying the board ops as
 * platform data.  The device reference is dropped on any failure.
 */
static int __init palmtx_pcmcia_init(void)
{
	int ret;

	if (!machine_is_palmtx())
		return -ENODEV;

	palmtx_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
	if (!palmtx_pcmcia_device)
		return -ENOMEM;

	ret = platform_device_add_data(palmtx_pcmcia_device, &palmtx_pcmcia_ops,
					sizeof(palmtx_pcmcia_ops));

	if (!ret)
		ret = platform_device_add(palmtx_pcmcia_device);

	if (ret)
		platform_device_put(palmtx_pcmcia_device);

	return ret;
}
/* Unregister the platform device (also drops the final reference). */
static void __exit palmtx_pcmcia_exit(void)
{
	platform_device_unregister(palmtx_pcmcia_device);
}

module_init(palmtx_pcmcia_init);
module_exit(palmtx_pcmcia_exit);

MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("PCMCIA support for Palm T|X");
MODULE_ALIAS("platform:pxa2xx-pcmcia");
MODULE_LICENSE("GPL");
| gpl-2.0 |
houst0nn/android_kernel_lge_galbi | drivers/pcmcia/pxa2xx_e740.c | 9789 | 3133 | /*
* Toshiba e740 PCMCIA specific routines.
*
* (c) 2004 Ian Molton <spyro@f2s.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <mach/eseries-gpio.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include "soc_common.h"
/*
 * Describe the card-detect and ready GPIOs for the soc_common core.
 * Socket 0 is the CF slot; socket 1 carries the built-in wifi.
 */
static int e740_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
	if (skt->nr != 0) {
		skt->stat[SOC_STAT_CD].gpio = GPIO_E740_PCMCIA_CD1;
		skt->stat[SOC_STAT_CD].name = "Wifi switch";
		skt->stat[SOC_STAT_RDY].gpio = GPIO_E740_PCMCIA_RDY1;
		skt->stat[SOC_STAT_RDY].name = "Wifi ready";
		return 0;
	}

	skt->stat[SOC_STAT_CD].gpio = GPIO_E740_PCMCIA_CD0;
	skt->stat[SOC_STAT_CD].name = "CF card detect";
	skt->stat[SOC_STAT_RDY].gpio = GPIO_E740_PCMCIA_RDY0;
	skt->stat[SOC_STAT_RDY].name = "CF ready";
	return 0;
}
/* Report voltage sense: both sockets are 3.3V only. */
static void e740_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
					struct pcmcia_state *state)
{
	state->vs_3v = 1;
	state->vs_Xv = 0;
}
/*
 * Drive the per-socket reset and power GPIOs from the requested state.
 * NOTE(review): socket 1's power GPIO is written with the opposite
 * polarity to socket 0's (1 = off, 0 = on) -- presumably active-low
 * wiring on this board; confirm against the schematic.
 */
static int e740_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
					const socket_state_t *state)
{
	if (state->flags & SS_RESET) {
		if (skt->nr == 0)
			gpio_set_value(GPIO_E740_PCMCIA_RST0, 1);
		else
			gpio_set_value(GPIO_E740_PCMCIA_RST1, 1);
	} else {
		if (skt->nr == 0)
			gpio_set_value(GPIO_E740_PCMCIA_RST0, 0);
		else
			gpio_set_value(GPIO_E740_PCMCIA_RST1, 0);
	}

	switch (state->Vcc) {
	case 0:	/* Socket off */
		if (skt->nr == 0)
			gpio_set_value(GPIO_E740_PCMCIA_PWR0, 0);
		else
			gpio_set_value(GPIO_E740_PCMCIA_PWR1, 1);
		break;
	case 50: /* fall through: 5.0V handled the same as 3.3V */
	case 33: /* socket on */
		if (skt->nr == 0)
			gpio_set_value(GPIO_E740_PCMCIA_PWR0, 1);
		else
			gpio_set_value(GPIO_E740_PCMCIA_PWR1, 0);
		break;
	default:
		printk(KERN_ERR "e740_cs: Unsupported Vcc: %d\n", state->Vcc);
	}

	return 0;
}
/* Board callbacks handed to the pxa2xx-pcmcia driver (two sockets). */
static struct pcmcia_low_level e740_pcmcia_ops = {
	.owner            = THIS_MODULE,
	.hw_init          = e740_pcmcia_hw_init,
	.socket_state     = e740_pcmcia_socket_state,
	.configure_socket = e740_pcmcia_configure_socket,
	.nr               = 2,
};
static struct platform_device *e740_pcmcia_device;
/*
 * Register a "pxa2xx-pcmcia" platform device carrying the board ops as
 * platform data.  On any failure after allocation the device reference
 * is dropped via the shared error path.
 */
static int __init e740_pcmcia_init(void)
{
	int err;

	if (!machine_is_e740())
		return -ENODEV;

	e740_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
	if (!e740_pcmcia_device)
		return -ENOMEM;

	err = platform_device_add_data(e740_pcmcia_device, &e740_pcmcia_ops,
				       sizeof(e740_pcmcia_ops));
	if (err)
		goto put_dev;

	err = platform_device_add(e740_pcmcia_device);
	if (err)
		goto put_dev;

	return 0;

put_dev:
	platform_device_put(e740_pcmcia_device);
	return err;
}
/* Unregister the platform device (also drops the final reference). */
static void __exit e740_pcmcia_exit(void)
{
	platform_device_unregister(e740_pcmcia_device);
}

module_init(e740_pcmcia_init);
module_exit(e740_pcmcia_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
MODULE_ALIAS("platform:pxa2xx-pcmcia");
MODULE_DESCRIPTION("e740 PCMCIA platform support");
| gpl-2.0 |
SerenityS/kernel_msm | net/ipv4/tcp_lp.c | 10557 | 8970 | /*
* TCP Low Priority (TCP-LP)
*
* TCP Low Priority is a distributed algorithm whose goal is to utilize only
* the excess network bandwidth as compared to the ``fair share`` of
* bandwidth as targeted by TCP.
*
* As of 2.6.13, Linux supports pluggable congestion control algorithms.
* Due to the limitation of the API, we take the following changes from
* the original TCP-LP implementation:
* o We use newReno in most core CA handling. Only add some checking
* within cong_avoid.
* o Error correcting in remote HZ, therefore remote HZ will be keeped
* on checking and updating.
* o Handling calculation of One-Way-Delay (OWD) within rtt_sample, since
* OWD have a similar meaning as RTT. Also correct the buggy formular.
* o Handle reaction for Early Congestion Indication (ECI) within
* pkts_acked, as mentioned within pseudo code.
* o OWD is handled in relative format, where local time stamp will in
* tcp_time_stamp format.
*
* Original Author:
* Aleksandar Kuzmanovic <akuzma@northwestern.edu>
* Available from:
* http://www.ece.rice.edu/~akuzma/Doc/akuzma/TCP-LP.pdf
* Original implementation for 2.4.19:
* http://www-ece.rice.edu/networks/TCP-LP/
*
* 2.6.x module Authors:
* Wong Hoi Sing, Edison <hswong3i@gmail.com>
* Hung Hing Lun, Mike <hlhung3i@gmail.com>
* SourceForge project page:
* http://tcp-lp-mod.sourceforge.net/
*/
#include <linux/module.h>
#include <net/tcp.h>
/* resolution of owd */
#define LP_RESOL 1000
/**
* enum tcp_lp_state
* @LP_VALID_RHZ: is remote HZ valid?
* @LP_VALID_OWD: is OWD valid?
* @LP_WITHIN_THR: are we within threshold?
* @LP_WITHIN_INF: are we within inference?
*
* TCP-LP's state flags.
* We create this set of state flag mainly for debugging.
*/
enum tcp_lp_state {
LP_VALID_RHZ = (1 << 0),
LP_VALID_OWD = (1 << 1),
LP_WITHIN_THR = (1 << 3),
LP_WITHIN_INF = (1 << 4),
};
/**
* struct lp
* @flag: TCP-LP state flag
* @sowd: smoothed OWD << 3
* @owd_min: min OWD
* @owd_max: max OWD
* @owd_max_rsv: resrved max owd
* @remote_hz: estimated remote HZ
* @remote_ref_time: remote reference time
* @local_ref_time: local reference time
* @last_drop: time for last active drop
* @inference: current inference
*
* TCP-LP's private struct.
* We get the idea from original TCP-LP implementation where only left those we
* found are really useful.
*/
struct lp {
u32 flag;
u32 sowd;
u32 owd_min;
u32 owd_max;
u32 owd_max_rsv;
u32 remote_hz;
u32 remote_ref_time;
u32 local_ref_time;
u32 last_drop;
u32 inference;
};
/**
* tcp_lp_init
*
* Init all required variables.
* Clone the handling from Vegas module implementation.
*/
static void tcp_lp_init(struct sock *sk)
{
struct lp *lp = inet_csk_ca(sk);
lp->flag = 0;
lp->sowd = 0;
lp->owd_min = 0xffffffff;
lp->owd_max = 0;
lp->owd_max_rsv = 0;
lp->remote_hz = 0;
lp->remote_ref_time = 0;
lp->local_ref_time = 0;
lp->last_drop = 0;
lp->inference = 0;
}
/**
* tcp_lp_cong_avoid
*
* Implementation of cong_avoid.
* Will only call newReno CA when away from inference.
* From TCP-LP's paper, this will be handled in additive increasement.
*/
/*
 * Congestion avoidance: defer entirely to plain newReno, except while an
 * inference phase is active, during which the window is left alone.
 */
static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct lp *lp = inet_csk_ca(sk);

	if (lp->flag & LP_WITHIN_INF)
		return;

	tcp_reno_cong_avoid(sk, ack, in_flight);
}
/**
* tcp_lp_remote_hz_estimator
*
* Estimate remote HZ.
* We keep on updating the estimated value, where original TCP-LP
* implementation only guest it for once and use forever.
*/
/*
 * Estimate the peer's HZ from successive timestamp pairs, maintained as a
 * 63/64-old + 1/64-new fixed-point filter ("rhz" keeps 6 fractional
 * bits).  Sets or clears LP_VALID_RHZ and always records the current
 * timestamps as the next reference pair.  Returns the integer estimate.
 */
static u32 tcp_lp_remote_hz_estimator(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct lp *lp = inet_csk_ca(sk);
	s64 rhz = lp->remote_hz << 6;	/* remote HZ << 6 */
	s64 m = 0;

	/* not yet record reference time
	 * go away!! record it before come back!! */
	if (lp->remote_ref_time == 0 || lp->local_ref_time == 0)
		goto out;

	/* we can't calc remote HZ with no different!! */
	if (tp->rx_opt.rcv_tsval == lp->remote_ref_time ||
	    tp->rx_opt.rcv_tsecr == lp->local_ref_time)
		goto out;

	/* Instantaneous estimate: remote-tick delta scaled by local HZ
	 * over the local-tick delta. */
	m = HZ * (tp->rx_opt.rcv_tsval -
		  lp->remote_ref_time) / (tp->rx_opt.rcv_tsecr -
					  lp->local_ref_time);
	if (m < 0)
		m = -m;

	if (rhz > 0) {
		m -= rhz >> 6;	/* m is now error in remote HZ est */
		rhz += m;	/* 63/64 old + 1/64 new */
	} else
		rhz = m << 6;

out:
	/* record time for successful remote HZ calc */
	if ((rhz >> 6) > 0)
		lp->flag |= LP_VALID_RHZ;
	else
		lp->flag &= ~LP_VALID_RHZ;

	/* record reference time stamp */
	lp->remote_ref_time = tp->rx_opt.rcv_tsval;
	lp->local_ref_time = tp->rx_opt.rcv_tsecr;

	return rhz >> 6;
}
/**
* tcp_lp_owd_calculator
*
* Calculate one way delay (in relative format).
* Original implement OWD as minus of remote time difference to local time
* difference directly. As this time difference just simply equal to RTT, when
* the network status is stable, remote RTT will equal to local RTT, and result
* OWD into zero.
* It seems to be a bug and so we fixed it.
*/
/*
 * Compute the relative one-way delay from the timestamp pair, converting
 * both sides to LP_RESOL units.  LP_VALID_RHZ guarantees remote_hz > 0,
 * so the division below cannot be by zero.  Sets/clears LP_VALID_OWD and
 * returns the (absolute-valued) OWD, or 0 when no valid estimate exists.
 */
static u32 tcp_lp_owd_calculator(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct lp *lp = inet_csk_ca(sk);
	s64 owd = 0;

	lp->remote_hz = tcp_lp_remote_hz_estimator(sk);

	if (lp->flag & LP_VALID_RHZ) {
		owd =
		    tp->rx_opt.rcv_tsval * (LP_RESOL / lp->remote_hz) -
		    tp->rx_opt.rcv_tsecr * (LP_RESOL / HZ);
		if (owd < 0)
			owd = -owd;
	}

	if (owd > 0)
		lp->flag |= LP_VALID_OWD;
	else
		lp->flag &= ~LP_VALID_OWD;

	return owd;
}
/**
* tcp_lp_rtt_sample
*
* Implementation or rtt_sample.
* Will take the following action,
* 1. calc OWD,
* 2. record the min/max OWD,
* 3. calc smoothed OWD (SOWD).
* Most ideas come from the original TCP-LP implementation.
*/
/*
 * Fold a new OWD sample into the min/max trackers and the smoothed OWD
 * (sowd, kept << 3 as a 7/8 + 1/8 filter).  The @rtt argument is unused
 * here: the delay is derived from timestamps via the OWD calculator.
 */
static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt)
{
	struct lp *lp = inet_csk_ca(sk);
	s64 mowd = tcp_lp_owd_calculator(sk);

	/* sorry that we don't have valid data */
	if (!(lp->flag & LP_VALID_RHZ) || !(lp->flag & LP_VALID_OWD))
		return;

	/* record the next min owd */
	if (mowd < lp->owd_min)
		lp->owd_min = mowd;

	/* always forget the max of the max
	 * we just set owd_max as one below it */
	if (mowd > lp->owd_max) {
		if (mowd > lp->owd_max_rsv) {
			/* owd_max tracks the second-largest sample seen;
			 * owd_max_rsv holds the all-time largest. */
			if (lp->owd_max_rsv == 0)
				lp->owd_max = mowd;
			else
				lp->owd_max = lp->owd_max_rsv;
			lp->owd_max_rsv = mowd;
		} else
			lp->owd_max = mowd;
	}

	/* calc for smoothed owd */
	if (lp->sowd != 0) {
		mowd -= lp->sowd >> 3;	/* m is now error in owd est */
		lp->sowd += mowd;	/* owd = 7/8 owd + 1/8 new */
	} else
		lp->sowd = mowd << 3;	/* take the measured time be owd */
}
/**
* tcp_lp_pkts_acked
*
* Implementation of pkts_acked.
* Deal with active drop under Early Congestion Indication.
* Only drop to half and 1 will be handle, because we hope to use back
* newReno in increase case.
* We work it out by following the idea from TCP-LP's paper directly
*/
/*
 * ACK processing: update the delay sample, recompute the inference
 * window, and when smoothed OWD rises above the 15% min/max threshold,
 * treat it as early congestion -- reset the trackers and cut the window
 * (to 1 inside an inference phase, to half otherwise).
 */
static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct lp *lp = inet_csk_ca(sk);

	if (rtt_us > 0)
		tcp_lp_rtt_sample(sk, rtt_us);

	/* calc inference */
	if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
		lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr);

	/* test if within inference */
	if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference))
		lp->flag |= LP_WITHIN_INF;
	else
		lp->flag &= ~LP_WITHIN_INF;

	/* test if within threshold */
	if (lp->sowd >> 3 <
	    lp->owd_min + 15 * (lp->owd_max - lp->owd_min) / 100)
		lp->flag |= LP_WITHIN_THR;
	else
		lp->flag &= ~LP_WITHIN_THR;

	pr_debug("TCP-LP: %05o|%5u|%5u|%15u|%15u|%15u\n", lp->flag,
		 tp->snd_cwnd, lp->remote_hz, lp->owd_min, lp->owd_max,
		 lp->sowd >> 3);

	/* Within threshold: no congestion indication, nothing to do. */
	if (lp->flag & LP_WITHIN_THR)
		return;

	/* FIXME: try to reset owd_min and owd_max here
	 * so decrease the chance the min/max is no longer suitable
	 * and will usually within threshold when whithin inference */
	lp->owd_min = lp->sowd >> 3;
	lp->owd_max = lp->sowd >> 2;
	lp->owd_max_rsv = lp->sowd >> 2;

	/* happened within inference
	 * drop snd_cwnd into 1 */
	if (lp->flag & LP_WITHIN_INF)
		tp->snd_cwnd = 1U;

	/* happened after inference
	 * cut snd_cwnd into half */
	else
		tp->snd_cwnd = max(tp->snd_cwnd >> 1U, 1U);

	/* record this drop time */
	lp->last_drop = tcp_time_stamp;
}
/*
 * Congestion-control ops for TCP-LP.  Slow start and ssthresh reuse the
 * stock newReno handlers; only cong_avoid and pkts_acked carry LP logic.
 */
static struct tcp_congestion_ops tcp_lp __read_mostly = {
	.flags = TCP_CONG_RTT_STAMP,
	.init = tcp_lp_init,
	.ssthresh = tcp_reno_ssthresh,
	.cong_avoid = tcp_lp_cong_avoid,
	.min_cwnd = tcp_reno_min_cwnd,
	.pkts_acked = tcp_lp_pkts_acked,

	.owner = THIS_MODULE,
	.name = "lp"
};
/*
 * Module init: struct lp must fit in the socket's CA-private scratch
 * area (compile-time checked), then register the algorithm.
 */
static int __init tcp_lp_register(void)
{
	BUILD_BUG_ON(sizeof(struct lp) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_lp);
}

/* Module exit: remove the algorithm from the CA registry. */
static void __exit tcp_lp_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_lp);
}

module_init(tcp_lp_register);
module_exit(tcp_lp_unregister);
MODULE_AUTHOR("Wong Hoi Sing Edison, Hung Hing Lun Mike");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Low Priority");
| gpl-2.0 |
sohkis/leanKernel-shamu | net/ipv4/tcp_lp.c | 10557 | 8970 | /*
* TCP Low Priority (TCP-LP)
*
* TCP Low Priority is a distributed algorithm whose goal is to utilize only
* the excess network bandwidth as compared to the ``fair share`` of
* bandwidth as targeted by TCP.
*
* As of 2.6.13, Linux supports pluggable congestion control algorithms.
* Due to the limitation of the API, we take the following changes from
* the original TCP-LP implementation:
* o We use newReno in most core CA handling. Only add some checking
* within cong_avoid.
* o Error correcting in remote HZ, therefore remote HZ will be keeped
* on checking and updating.
* o Handling calculation of One-Way-Delay (OWD) within rtt_sample, since
* OWD have a similar meaning as RTT. Also correct the buggy formular.
* o Handle reaction for Early Congestion Indication (ECI) within
* pkts_acked, as mentioned within pseudo code.
* o OWD is handled in relative format, where local time stamp will in
* tcp_time_stamp format.
*
* Original Author:
* Aleksandar Kuzmanovic <akuzma@northwestern.edu>
* Available from:
* http://www.ece.rice.edu/~akuzma/Doc/akuzma/TCP-LP.pdf
* Original implementation for 2.4.19:
* http://www-ece.rice.edu/networks/TCP-LP/
*
* 2.6.x module Authors:
* Wong Hoi Sing, Edison <hswong3i@gmail.com>
* Hung Hing Lun, Mike <hlhung3i@gmail.com>
* SourceForge project page:
* http://tcp-lp-mod.sourceforge.net/
*/
#include <linux/module.h>
#include <net/tcp.h>
/* resolution of owd */
#define LP_RESOL 1000
/**
* enum tcp_lp_state
* @LP_VALID_RHZ: is remote HZ valid?
* @LP_VALID_OWD: is OWD valid?
* @LP_WITHIN_THR: are we within threshold?
* @LP_WITHIN_INF: are we within inference?
*
* TCP-LP's state flags.
* We create this set of state flag mainly for debugging.
*/
enum tcp_lp_state {
LP_VALID_RHZ = (1 << 0),
LP_VALID_OWD = (1 << 1),
LP_WITHIN_THR = (1 << 3),
LP_WITHIN_INF = (1 << 4),
};
/**
* struct lp
* @flag: TCP-LP state flag
* @sowd: smoothed OWD << 3
* @owd_min: min OWD
* @owd_max: max OWD
* @owd_max_rsv: resrved max owd
* @remote_hz: estimated remote HZ
* @remote_ref_time: remote reference time
* @local_ref_time: local reference time
* @last_drop: time for last active drop
* @inference: current inference
*
* TCP-LP's private struct.
* We get the idea from original TCP-LP implementation where only left those we
* found are really useful.
*/
struct lp {
u32 flag;
u32 sowd;
u32 owd_min;
u32 owd_max;
u32 owd_max_rsv;
u32 remote_hz;
u32 remote_ref_time;
u32 local_ref_time;
u32 last_drop;
u32 inference;
};
/**
* tcp_lp_init
*
* Init all required variables.
* Clone the handling from Vegas module implementation.
*/
static void tcp_lp_init(struct sock *sk)
{
struct lp *lp = inet_csk_ca(sk);
lp->flag = 0;
lp->sowd = 0;
lp->owd_min = 0xffffffff;
lp->owd_max = 0;
lp->owd_max_rsv = 0;
lp->remote_hz = 0;
lp->remote_ref_time = 0;
lp->local_ref_time = 0;
lp->last_drop = 0;
lp->inference = 0;
}
/**
* tcp_lp_cong_avoid
*
* Implementation of cong_avoid.
* Will only call newReno CA when away from inference.
* From TCP-LP's paper, this will be handled in additive increasement.
*/
static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct lp *lp = inet_csk_ca(sk);

	/* While inside an inference phase TCP-LP freezes cwnd growth;
	 * otherwise fall back to plain newReno congestion avoidance. */
	if (lp->flag & LP_WITHIN_INF)
		return;

	tcp_reno_cong_avoid(sk, ack, in_flight);
}
/**
* tcp_lp_remote_hz_estimator
*
* Estimate remote HZ.
* We keep on updating the estimated value, where original TCP-LP
* implementation only guest it for once and use forever.
*/
static u32 tcp_lp_remote_hz_estimator(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct lp *lp = inet_csk_ca(sk);
	s64 rhz = lp->remote_hz << 6;	/* remote HZ in 6-bit fixed point */
	s64 m = 0;

	/* not yet record reference time
	 * go away!! record it before come back!! */
	if (lp->remote_ref_time == 0 || lp->local_ref_time == 0)
		goto out;

	/* we can't calc remote HZ with no different!!
	 * (either timestamp unchanged would give a zero/zero-divisor delta) */
	if (tp->rx_opt.rcv_tsval == lp->remote_ref_time ||
	    tp->rx_opt.rcv_tsecr == lp->local_ref_time)
		goto out;

	/* raw estimate: remote tick delta scaled by local HZ over the
	 * local tick delta observed across the same interval */
	m = HZ * (tp->rx_opt.rcv_tsval -
		  lp->remote_ref_time) / (tp->rx_opt.rcv_tsecr -
					  lp->local_ref_time);
	if (m < 0)
		m = -m;

	if (rhz > 0) {
		/* exponential smoothing against the previous estimate */
		m -= rhz >> 6;	/* m is now error in remote HZ est */
		rhz += m;	/* 63/64 old + 1/64 new */
	} else
		rhz = m << 6;	/* first sample: take it verbatim */

 out:
	/* record time for successful remote HZ calc */
	if ((rhz >> 6) > 0)
		lp->flag |= LP_VALID_RHZ;
	else
		lp->flag &= ~LP_VALID_RHZ;

	/* record reference time stamp for the next estimation round */
	lp->remote_ref_time = tp->rx_opt.rcv_tsval;
	lp->local_ref_time = tp->rx_opt.rcv_tsecr;

	return rhz >> 6;
}
/**
* tcp_lp_owd_calculator
*
* Calculate one way delay (in relative format).
* Original implement OWD as minus of remote time difference to local time
* difference directly. As this time difference just simply equal to RTT, when
* the network status is stable, remote RTT will equal to local RTT, and result
* OWD into zero.
* It seems to be a bug and so we fixed it.
*/
static u32 tcp_lp_owd_calculator(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct lp *lp = inet_csk_ca(sk);
	s64 owd = 0;

	lp->remote_hz = tcp_lp_remote_hz_estimator(sk);

	/* LP_VALID_RHZ implies remote_hz > 0 (see the estimator), so the
	 * division below cannot be by zero.  Both timestamps are converted
	 * to a common LP_RESOL time base before subtracting. */
	if (lp->flag & LP_VALID_RHZ) {
		owd =
		    tp->rx_opt.rcv_tsval * (LP_RESOL / lp->remote_hz) -
		    tp->rx_opt.rcv_tsecr * (LP_RESOL / HZ);
		if (owd < 0)
			owd = -owd;
	}

	if (owd > 0)
		lp->flag |= LP_VALID_OWD;
	else
		lp->flag &= ~LP_VALID_OWD;

	return owd;
}
/**
* tcp_lp_rtt_sample
*
* Implementation or rtt_sample.
* Will take the following action,
* 1. calc OWD,
* 2. record the min/max OWD,
* 3. calc smoothed OWD (SOWD).
* Most ideas come from the original TCP-LP implementation.
*/
static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt)
{
	struct lp *lp = inet_csk_ca(sk);
	s64 mowd = tcp_lp_owd_calculator(sk);

	/* sorry that we don't have valid data */
	if (!(lp->flag & LP_VALID_RHZ) || !(lp->flag & LP_VALID_OWD))
		return;

	/* record the next min owd */
	if (mowd < lp->owd_min)
		lp->owd_min = mowd;

	/* always forget the max of the max
	 * we just set owd_max as one below it
	 * (owd_max_rsv holds the largest sample ever seen; owd_max is
	 *  kept at the second-largest to damp a single outlier) */
	if (mowd > lp->owd_max) {
		if (mowd > lp->owd_max_rsv) {
			if (lp->owd_max_rsv == 0)
				lp->owd_max = mowd;
			else
				lp->owd_max = lp->owd_max_rsv;
			lp->owd_max_rsv = mowd;
		} else
			lp->owd_max = mowd;
	}

	/* calc for smoothed owd (sowd stores OWD << 3, i.e. 8x) */
	if (lp->sowd != 0) {
		mowd -= lp->sowd >> 3;	/* m is now error in owd est */
		lp->sowd += mowd;	/* owd = 7/8 owd + 1/8 new */
	} else
		lp->sowd = mowd << 3;	/* take the measured time be owd */
}
/**
* tcp_lp_pkts_acked
*
* Implementation of pkts_acked.
* Deal with active drop under Early Congestion Indication.
* Only drop to half and 1 will be handle, because we hope to use back
* newReno in increase case.
* We work it out by following the idea from TCP-LP's paper directly
*/
static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct lp *lp = inet_csk_ca(sk);

	/* feed the OWD machinery whenever we got a usable RTT sample */
	if (rtt_us > 0)
		tcp_lp_rtt_sample(sk, rtt_us);

	/* calc inference: three times the echoed-timestamp RTT estimate */
	if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
		lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr);

	/* test if within inference (i.e. still inside the back-off window
	 * started by the last active drop) */
	if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference))
		lp->flag |= LP_WITHIN_INF;
	else
		lp->flag &= ~LP_WITHIN_INF;

	/* test if within threshold: smoothed OWD below
	 * owd_min + 15% of the observed (max - min) spread */
	if (lp->sowd >> 3 <
	    lp->owd_min + 15 * (lp->owd_max - lp->owd_min) / 100)
		lp->flag |= LP_WITHIN_THR;
	else
		lp->flag &= ~LP_WITHIN_THR;

	pr_debug("TCP-LP: %05o|%5u|%5u|%15u|%15u|%15u\n", lp->flag,
		 tp->snd_cwnd, lp->remote_hz, lp->owd_min, lp->owd_max,
		 lp->sowd >> 3);

	/* below threshold means no early congestion indication: no action */
	if (lp->flag & LP_WITHIN_THR)
		return;

	/* FIXME: try to reset owd_min and owd_max here
	 * so decrease the chance the min/max is no longer suitable
	 * and will usually within threshold when whithin inference */
	lp->owd_min = lp->sowd >> 3;
	lp->owd_max = lp->sowd >> 2;
	lp->owd_max_rsv = lp->sowd >> 2;

	/* happened within inference
	 * drop snd_cwnd into 1 */
	if (lp->flag & LP_WITHIN_INF)
		tp->snd_cwnd = 1U;

	/* happened after inference
	 * cut snd_cwnd into half */
	else
		tp->snd_cwnd = max(tp->snd_cwnd >> 1U, 1U);

	/* record this drop time */
	lp->last_drop = tcp_time_stamp;
}
/* TCP-LP's hook table: only init, cong_avoid and pkts_acked are
 * overridden; ssthresh and min_cwnd reuse the stock newReno handlers.
 * TCP_CONG_RTT_STAMP requests RTT timing so pkts_acked() receives a
 * usable rtt_us value. */
static struct tcp_congestion_ops tcp_lp __read_mostly = {
	.flags = TCP_CONG_RTT_STAMP,
	.init = tcp_lp_init,
	.ssthresh = tcp_reno_ssthresh,
	.cong_avoid = tcp_lp_cong_avoid,
	.min_cwnd = tcp_reno_min_cwnd,
	.pkts_acked = tcp_lp_pkts_acked,

	.owner = THIS_MODULE,
	.name = "lp"
};
/* Module init: make "lp" selectable as a congestion control algorithm. */
static int __init tcp_lp_register(void)
{
	/* struct lp lives in the per-socket CA private area; fail the
	 * build if it ever outgrows that space. */
	BUILD_BUG_ON(sizeof(struct lp) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&tcp_lp);
}
/* Module teardown: remove the "lp" algorithm from the list of available
 * congestion control modules. */
static void __exit tcp_lp_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_lp);
}
/* Register/unregister TCP-LP on module load/unload. */
module_init(tcp_lp_register);
module_exit(tcp_lp_unregister);

MODULE_AUTHOR("Wong Hoi Sing Edison, Hung Hing Lun Mike");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Low Priority");
| gpl-2.0 |
Senthil360/android_kernel_samsung_trlte | net/sctp/command.c | 12349 | 2384 | /* SCTP kernel implementation Copyright (C) 1999-2001
* Cisco, Motorola, and IBM
* Copyright 2001 La Monte H.P. Yarroll
*
* This file is part of the SCTP kernel implementation
*
* These functions manipulate sctp command sequences.
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
#include <linux/types.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Initialize a block of memory as a command sequence. */
int sctp_init_cmd_seq(sctp_cmd_seq_t *seq)
{
	/* A zeroed sequence has both cursors at slot 0, i.e. it is empty. */
	memset(seq, 0, sizeof(*seq));
	return 1;		/* We always succeed. */
}
/* Add a command to a sctp_cmd_seq_t.
 * BUG()s out if the command sequence is already full.
 */
void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj)
{
	sctp_cmd_t *cmd;

	/* Overrunning the fixed command array is a programming error. */
	BUG_ON(seq->next_free_slot >= SCTP_MAX_NUM_COMMANDS);

	cmd = &seq->cmds[seq->next_free_slot++];
	cmd->verb = verb;
	cmd->obj = obj;
}
/* Return the next command structure in a sctp_cmd_seq.
* Returns NULL at the end of the sequence.
*/
sctp_cmd_t *sctp_next_cmd(sctp_cmd_seq_t *seq)
{
	/* Read cursor caught up with the write cursor: sequence exhausted. */
	if (seq->next_cmd >= seq->next_free_slot)
		return NULL;

	return &seq->cmds[seq->next_cmd++];
}
| gpl-2.0 |
sameerkhan07/furnace_kernel_motorola_falcon | drivers/media/dvb/frontends/drxd_firm.c | 12605 | 36425 | /*
* drxd_firm.c : DRXD firmware tables
*
* Copyright (C) 2006-2007 Micronas
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 only, as published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*/
/* TODO: generate this file with a script from a settings file */
/* Contains A2 firmware version: 1.4.2
* Contains B1 firmware version: 3.3.33
* Contains settings from driver 1.4.23
*/
#include "drxd_firm.h"
#define ADDRESS(x) ((x) & 0xFF), (((x)>>8) & 0xFF), (((x)>>16) & 0xFF), (((x)>>24) & 0xFF)
#define LENGTH(x) ((x) & 0xFF), (((x)>>8) & 0xFF)
/* Is written via block write, must be little endian */
#define DATA16(x) ((x) & 0xFF), (((x)>>8) & 0xFF)
#define WRBLOCK(a, l) ADDRESS(a), LENGTH(l)
#define WR16(a, d) ADDRESS(a), LENGTH(1), DATA16(d)
#define END_OF_TABLE 0xFF, 0xFF, 0xFF, 0xFF
/* HI firmware patches */
#define HI_TR_FUNC_ADDR HI_IF_RAM_USR_BEGIN__A
#define HI_TR_FUNC_SIZE 9 /* size of this function in instruction words */
/* HI CPU patch enabling atomic block reads: a 9-word instruction sequence
 * uploaded into HI user RAM (format per the WRBLOCK/END_OF_TABLE macros
 * defined above; payload words are little endian). */
u8 DRXD_InitAtomicRead[] = {
	WRBLOCK(HI_TR_FUNC_ADDR, HI_TR_FUNC_SIZE),
	0x26, 0x00,		/* 0 -> ring.rdy; */
	0x60, 0x04,		/* r0rami.dt -> ring.xba; */
	0x61, 0x04,		/* r0rami.dt -> ring.xad; */
	0xE3, 0x07,		/* HI_RA_RAM_USR_BEGIN -> ring.iad; */
	0x40, 0x00,		/* (long immediate) */
	0x64, 0x04,		/* r0rami.dt -> ring.len; */
	0x65, 0x04,		/* r0rami.dt -> ring.ctl; */
	0x26, 0x00,		/* 0 -> ring.rdy; */
	0x38, 0x00,		/* 0 -> jumps.ad; */
	END_OF_TABLE
};
/* Pins D0 and D1 of the parallel MPEG output can be used
to set the I2C address of a device. */
#define HI_RST_FUNC_ADDR (HI_IF_RAM_USR_BEGIN__A + HI_TR_FUNC_SIZE)
#define HI_RST_FUNC_SIZE 54 /* size of this function in instruction words */
/* D0 Version */
/* D0 Version: HI reset function masking only MPEG pin D0 into the I2C
 * device address.  Uploaded to HI_RST_FUNC_ADDR, hooked on all four trap
 * breakpoints, then a quick-and-dirty HI state reset is forced. */
u8 DRXD_HiI2cPatch_1[] = {
	WRBLOCK(HI_RST_FUNC_ADDR, HI_RST_FUNC_SIZE),
	0xC8, 0x07, 0x01, 0x00,	/* MASK -> reg0.dt; */
	0xE0, 0x07, 0x15, 0x02,	/* (EC__BLK << 6) + EC_OC_REG__BNK -> ring.xba; */
	0xE1, 0x07, 0x12, 0x00,	/* EC_OC_REG_OC_MPG_SIO__A -> ring.xad; */
	0xA2, 0x00,		/* M_BNK_ID_DAT -> ring.iba; */
	0x23, 0x00,		/* &data -> ring.iad; */
	0x24, 0x00,		/* 0 -> ring.len; */
	0xA5, 0x02,		/* M_RC_CTR_SWAP | M_RC_CTR_READ -> ring.ctl; */
	0x26, 0x00,		/* 0 -> ring.rdy; */
	0x42, 0x00,		/* &data+1 -> w0ram.ad; */
	0xC0, 0x07, 0xFF, 0x0F,	/* -1 -> w0ram.dt; */
	0x63, 0x00,		/* &data+1 -> ring.iad; */
	0x65, 0x02,		/* M_RC_CTR_SWAP | M_RC_CTR_WRITE -> ring.ctl; */
	0x26, 0x00,		/* 0 -> ring.rdy; */
	0xE1, 0x07, 0x38, 0x00,	/* EC_OC_REG_OCR_MPG_USR_DAT__A -> ring.xad; */
	0xA5, 0x02,		/* M_RC_CTR_SWAP | M_RC_CTR_READ -> ring.ctl; */
	0x26, 0x00,		/* 0 -> ring.rdy; */
	0xE1, 0x07, 0x12, 0x00,	/* EC_OC_REG_OC_MPG_SIO__A -> ring.xad; */
	0x23, 0x00,		/* &data -> ring.iad; */
	0x65, 0x02,		/* M_RC_CTR_SWAP | M_RC_CTR_WRITE -> ring.ctl; */
	0x26, 0x00,		/* 0 -> ring.rdy; */
	0x42, 0x00,		/* &data+1 -> w0ram.ad; */
	0x0F, 0x04,		/* r0ram.dt -> and.op; */
	0x1C, 0x06,		/* reg0.dt -> and.tr; */
	0xCF, 0x04,		/* and.rs -> add.op; */
	0xD0, 0x07, 0x70, 0x00,	/* DEF_DEV_ID -> add.tr; */
	0xD0, 0x04,		/* add.rs -> add.tr; */
	0xC8, 0x04,		/* add.rs -> reg0.dt; */
	0x60, 0x00,		/* reg0.dt -> w0ram.dt; */
	0xC2, 0x07, 0x10, 0x00,	/* SLV0_BASE -> w0rami.ad; */
	0x01, 0x00,		/* 0 -> w0rami.dt; */
	0x01, 0x06,		/* reg0.dt -> w0rami.dt; */
	0xC2, 0x07, 0x20, 0x00,	/* SLV1_BASE -> w0rami.ad; */
	0x01, 0x00,		/* 0 -> w0rami.dt; */
	0x01, 0x06,		/* reg0.dt -> w0rami.dt; */
	0xC2, 0x07, 0x30, 0x00,	/* CMD_BASE -> w0rami.ad; */
	0x01, 0x00,		/* 0 -> w0rami.dt; */
	0x01, 0x00,		/* 0 -> w0rami.dt; */
	0x01, 0x00,		/* 0 -> w0rami.dt; */
	0x68, 0x00,		/* M_IC_SEL_PT1 -> i2c.sel; */
	0x29, 0x00,		/* M_IC_CMD_RESET -> i2c.cmd; */
	0x28, 0x00,		/* M_IC_SEL_PT0 -> i2c.sel; */
	0x29, 0x00,		/* M_IC_CMD_RESET -> i2c.cmd; */
	0xF8, 0x07, 0x2F, 0x00,	/* 0x2F -> jumps.ad; */

	WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 0) + 1)),
	     (u16) (HI_RST_FUNC_ADDR & 0x3FF)),
	WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 1) + 1)),
	     (u16) (HI_RST_FUNC_ADDR & 0x3FF)),
	WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 2) + 1)),
	     (u16) (HI_RST_FUNC_ADDR & 0x3FF)),
	WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 3) + 1)),
	     (u16) (HI_RST_FUNC_ADDR & 0x3FF)),

	/* Force quick and dirty reset */
	WR16(B_HI_CT_REG_COMM_STATE__A, 0),
	END_OF_TABLE
};
/* D0,D1 Version */
/* D0,D1 Version: identical to DRXD_HiI2cPatch_1 except the first MASK
 * word is 0x03, so both MPEG pins D0 and D1 select the I2C address. */
u8 DRXD_HiI2cPatch_3[] = {
	WRBLOCK(HI_RST_FUNC_ADDR, HI_RST_FUNC_SIZE),
	0xC8, 0x07, 0x03, 0x00,	/* MASK -> reg0.dt; */
	0xE0, 0x07, 0x15, 0x02,	/* (EC__BLK << 6) + EC_OC_REG__BNK -> ring.xba; */
	0xE1, 0x07, 0x12, 0x00,	/* EC_OC_REG_OC_MPG_SIO__A -> ring.xad; */
	0xA2, 0x00,		/* M_BNK_ID_DAT -> ring.iba; */
	0x23, 0x00,		/* &data -> ring.iad; */
	0x24, 0x00,		/* 0 -> ring.len; */
	0xA5, 0x02,		/* M_RC_CTR_SWAP | M_RC_CTR_READ -> ring.ctl; */
	0x26, 0x00,		/* 0 -> ring.rdy; */
	0x42, 0x00,		/* &data+1 -> w0ram.ad; */
	0xC0, 0x07, 0xFF, 0x0F,	/* -1 -> w0ram.dt; */
	0x63, 0x00,		/* &data+1 -> ring.iad; */
	0x65, 0x02,		/* M_RC_CTR_SWAP | M_RC_CTR_WRITE -> ring.ctl; */
	0x26, 0x00,		/* 0 -> ring.rdy; */
	0xE1, 0x07, 0x38, 0x00,	/* EC_OC_REG_OCR_MPG_USR_DAT__A -> ring.xad; */
	0xA5, 0x02,		/* M_RC_CTR_SWAP | M_RC_CTR_READ -> ring.ctl; */
	0x26, 0x00,		/* 0 -> ring.rdy; */
	0xE1, 0x07, 0x12, 0x00,	/* EC_OC_REG_OC_MPG_SIO__A -> ring.xad; */
	0x23, 0x00,		/* &data -> ring.iad; */
	0x65, 0x02,		/* M_RC_CTR_SWAP | M_RC_CTR_WRITE -> ring.ctl; */
	0x26, 0x00,		/* 0 -> ring.rdy; */
	0x42, 0x00,		/* &data+1 -> w0ram.ad; */
	0x0F, 0x04,		/* r0ram.dt -> and.op; */
	0x1C, 0x06,		/* reg0.dt -> and.tr; */
	0xCF, 0x04,		/* and.rs -> add.op; */
	0xD0, 0x07, 0x70, 0x00,	/* DEF_DEV_ID -> add.tr; */
	0xD0, 0x04,		/* add.rs -> add.tr; */
	0xC8, 0x04,		/* add.rs -> reg0.dt; */
	0x60, 0x00,		/* reg0.dt -> w0ram.dt; */
	0xC2, 0x07, 0x10, 0x00,	/* SLV0_BASE -> w0rami.ad; */
	0x01, 0x00,		/* 0 -> w0rami.dt; */
	0x01, 0x06,		/* reg0.dt -> w0rami.dt; */
	0xC2, 0x07, 0x20, 0x00,	/* SLV1_BASE -> w0rami.ad; */
	0x01, 0x00,		/* 0 -> w0rami.dt; */
	0x01, 0x06,		/* reg0.dt -> w0rami.dt; */
	0xC2, 0x07, 0x30, 0x00,	/* CMD_BASE -> w0rami.ad; */
	0x01, 0x00,		/* 0 -> w0rami.dt; */
	0x01, 0x00,		/* 0 -> w0rami.dt; */
	0x01, 0x00,		/* 0 -> w0rami.dt; */
	0x68, 0x00,		/* M_IC_SEL_PT1 -> i2c.sel; */
	0x29, 0x00,		/* M_IC_CMD_RESET -> i2c.cmd; */
	0x28, 0x00,		/* M_IC_SEL_PT0 -> i2c.sel; */
	0x29, 0x00,		/* M_IC_CMD_RESET -> i2c.cmd; */
	0xF8, 0x07, 0x2F, 0x00,	/* 0x2F -> jumps.ad; */

	WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 0) + 1)),
	     (u16) (HI_RST_FUNC_ADDR & 0x3FF)),
	WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 1) + 1)),
	     (u16) (HI_RST_FUNC_ADDR & 0x3FF)),
	WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 2) + 1)),
	     (u16) (HI_RST_FUNC_ADDR & 0x3FF)),
	WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 3) + 1)),
	     (u16) (HI_RST_FUNC_ADDR & 0x3FF)),

	/* Force quick and dirty reset */
	WR16(B_HI_CT_REG_COMM_STATE__A, 0),
	END_OF_TABLE
};
/* Reset script for the channel-equalizer FR register bank: a single
 * 57-word block write starting at CE_REG_FR_TREAL00__A (values are
 * little-endian 16-bit words, one per register). */
u8 DRXD_ResetCEFR[] = {
	WRBLOCK(CE_REG_FR_TREAL00__A, 57),
	0x52, 0x00,		/* CE_REG_FR_TREAL00__A */
	0x00, 0x00,		/* CE_REG_FR_TIMAG00__A */
	0x52, 0x00,		/* CE_REG_FR_TREAL01__A */
	0x00, 0x00,		/* CE_REG_FR_TIMAG01__A */
	0x52, 0x00,		/* CE_REG_FR_TREAL02__A */
	0x00, 0x00,		/* CE_REG_FR_TIMAG02__A */
	0x52, 0x00,		/* CE_REG_FR_TREAL03__A */
	0x00, 0x00,		/* CE_REG_FR_TIMAG03__A */
	0x52, 0x00,		/* CE_REG_FR_TREAL04__A */
	0x00, 0x00,		/* CE_REG_FR_TIMAG04__A */
	0x52, 0x00,		/* CE_REG_FR_TREAL05__A */
	0x00, 0x00,		/* CE_REG_FR_TIMAG05__A */
	0x52, 0x00,		/* CE_REG_FR_TREAL06__A */
	0x00, 0x00,		/* CE_REG_FR_TIMAG06__A */
	0x52, 0x00,		/* CE_REG_FR_TREAL07__A */
	0x00, 0x00,		/* CE_REG_FR_TIMAG07__A */
	0x52, 0x00,		/* CE_REG_FR_TREAL08__A */
	0x00, 0x00,		/* CE_REG_FR_TIMAG08__A */
	0x52, 0x00,		/* CE_REG_FR_TREAL09__A */
	0x00, 0x00,		/* CE_REG_FR_TIMAG09__A */
	0x52, 0x00,		/* CE_REG_FR_TREAL10__A */
	0x00, 0x00,		/* CE_REG_FR_TIMAG10__A */
	0x52, 0x00,		/* CE_REG_FR_TREAL11__A */
	0x00, 0x00,		/* CE_REG_FR_TIMAG11__A */
	0x52, 0x00,		/* CE_REG_FR_MID_TAP__A */
	0x0B, 0x00,		/* CE_REG_FR_SQS_G00__A */
	0x0B, 0x00,		/* CE_REG_FR_SQS_G01__A */
	0x0B, 0x00,		/* CE_REG_FR_SQS_G02__A */
	0x0B, 0x00,		/* CE_REG_FR_SQS_G03__A */
	0x0B, 0x00,		/* CE_REG_FR_SQS_G04__A */
	0x0B, 0x00,		/* CE_REG_FR_SQS_G05__A */
	0x0B, 0x00,		/* CE_REG_FR_SQS_G06__A */
	0x0B, 0x00,		/* CE_REG_FR_SQS_G07__A */
	0x0B, 0x00,		/* CE_REG_FR_SQS_G08__A */
	0x0B, 0x00,		/* CE_REG_FR_SQS_G09__A */
	0x0B, 0x00,		/* CE_REG_FR_SQS_G10__A */
	0x0B, 0x00,		/* CE_REG_FR_SQS_G11__A */
	0x0B, 0x00,		/* CE_REG_FR_SQS_G12__A */
	0xFF, 0x01,		/* CE_REG_FR_RIO_G00__A */
	0x90, 0x01,		/* CE_REG_FR_RIO_G01__A */
	0x0B, 0x01,		/* CE_REG_FR_RIO_G02__A */
	0xC8, 0x00,		/* CE_REG_FR_RIO_G03__A */
	0xA0, 0x00,		/* CE_REG_FR_RIO_G04__A */
	0x85, 0x00,		/* CE_REG_FR_RIO_G05__A */
	0x72, 0x00,		/* CE_REG_FR_RIO_G06__A */
	0x64, 0x00,		/* CE_REG_FR_RIO_G07__A */
	0x59, 0x00,		/* CE_REG_FR_RIO_G08__A */
	0x50, 0x00,		/* CE_REG_FR_RIO_G09__A */
	0x49, 0x00,		/* CE_REG_FR_RIO_G10__A */
	0x10, 0x00,		/* CE_REG_FR_MODE__A */
	0x78, 0x00,		/* CE_REG_FR_SQS_TRH__A */
	0x00, 0x00,		/* CE_REG_FR_RIO_GAIN__A */
	0x00, 0x02,		/* CE_REG_FR_BYPASS__A */
	0x0D, 0x00,		/* CE_REG_FR_PM_SET__A */
	0x07, 0x00,		/* CE_REG_FR_ERR_SH__A */
	0x04, 0x00,		/* CE_REG_FR_MAN_SH__A */
	0x06, 0x00,		/* CE_REG_FR_TAP_SH__A */
	END_OF_TABLE
};
/* Front-end init, part 1, for the A2 silicon revision: block writes to
 * the FE AD/AG/FD/CF/CU register groups (ADC, AGC, frequency detector,
 * channel filter, control unit). */
u8 DRXD_InitFEA2_1[] = {
	WRBLOCK(FE_AD_REG_PD__A, 3),
	0x00, 0x00,		/* FE_AD_REG_PD__A */
	0x01, 0x00,		/* FE_AD_REG_INVEXT__A */
	0x00, 0x00,		/* FE_AD_REG_CLKNEG__A */

	WRBLOCK(FE_AG_REG_DCE_AUR_CNT__A, 2),
	0x10, 0x00,		/* FE_AG_REG_DCE_AUR_CNT__A */
	0x10, 0x00,		/* FE_AG_REG_DCE_RUR_CNT__A */

	WRBLOCK(FE_AG_REG_ACE_AUR_CNT__A, 2),
	0x0E, 0x00,		/* FE_AG_REG_ACE_AUR_CNT__A */
	0x00, 0x00,		/* FE_AG_REG_ACE_RUR_CNT__A */

	WRBLOCK(FE_AG_REG_EGC_FLA_RGN__A, 5),
	0x04, 0x00,		/* FE_AG_REG_EGC_FLA_RGN__A */
	0x1F, 0x00,		/* FE_AG_REG_EGC_SLO_RGN__A */
	0x00, 0x00,		/* FE_AG_REG_EGC_JMP_PSN__A */
	0x00, 0x00,		/* FE_AG_REG_EGC_FLA_INC__A */
	0x00, 0x00,		/* FE_AG_REG_EGC_FLA_DEC__A */

	WRBLOCK(FE_AG_REG_GC1_AGC_MAX__A, 2),
	0xFF, 0x01,		/* FE_AG_REG_GC1_AGC_MAX__A */
	0x00, 0xFE,		/* FE_AG_REG_GC1_AGC_MIN__A */

	WRBLOCK(FE_AG_REG_IND_WIN__A, 29),
	0x00, 0x00,		/* FE_AG_REG_IND_WIN__A */
	0x05, 0x00,		/* FE_AG_REG_IND_THD_LOL__A */
	0x0F, 0x00,		/* FE_AG_REG_IND_THD_HIL__A */
	0x00, 0x00,		/* FE_AG_REG_IND_DEL__A don't care */
	0x1E, 0x00,		/* FE_AG_REG_IND_PD1_WRI__A */
	0x0C, 0x00,		/* FE_AG_REG_PDA_AUR_CNT__A */
	0x00, 0x00,		/* FE_AG_REG_PDA_RUR_CNT__A */
	0x00, 0x00,		/* FE_AG_REG_PDA_AVE_DAT__A don't care */
	0x00, 0x00,		/* FE_AG_REG_PDC_RUR_CNT__A */
	0x01, 0x00,		/* FE_AG_REG_PDC_SET_LVL__A */
	0x02, 0x00,		/* FE_AG_REG_PDC_FLA_RGN__A */
	0x00, 0x00,		/* FE_AG_REG_PDC_JMP_PSN__A don't care */
	0xFF, 0xFF,		/* FE_AG_REG_PDC_FLA_STP__A */
	0xFF, 0xFF,		/* FE_AG_REG_PDC_SLO_STP__A */
	0x00, 0x1F,		/* FE_AG_REG_PDC_PD2_WRI__A don't care */
	0x00, 0x00,		/* FE_AG_REG_PDC_MAP_DAT__A don't care */
	0x02, 0x00,		/* FE_AG_REG_PDC_MAX__A */
	0x0C, 0x00,		/* FE_AG_REG_TGA_AUR_CNT__A */
	0x00, 0x00,		/* FE_AG_REG_TGA_RUR_CNT__A */
	0x00, 0x00,		/* FE_AG_REG_TGA_AVE_DAT__A don't care */
	0x00, 0x00,		/* FE_AG_REG_TGC_RUR_CNT__A */
	0x22, 0x00,		/* FE_AG_REG_TGC_SET_LVL__A */
	0x15, 0x00,		/* FE_AG_REG_TGC_FLA_RGN__A */
	0x00, 0x00,		/* FE_AG_REG_TGC_JMP_PSN__A don't care */
	0x01, 0x00,		/* FE_AG_REG_TGC_FLA_STP__A */
	0x0A, 0x00,		/* FE_AG_REG_TGC_SLO_STP__A */
	0x00, 0x00,		/* FE_AG_REG_TGC_MAP_DAT__A don't care */
	0x10, 0x00,		/* FE_AG_REG_FGA_AUR_CNT__A */
	0x10, 0x00,		/* FE_AG_REG_FGA_RUR_CNT__A */

	WRBLOCK(FE_AG_REG_BGC_FGC_WRI__A, 2),
	0x00, 0x00,		/* FE_AG_REG_BGC_FGC_WRI__A */
	0x00, 0x00,		/* FE_AG_REG_BGC_CGC_WRI__A */

	WRBLOCK(FE_FD_REG_SCL__A, 3),
	0x05, 0x00,		/* FE_FD_REG_SCL__A */
	0x03, 0x00,		/* FE_FD_REG_MAX_LEV__A */
	0x05, 0x00,		/* FE_FD_REG_NR__A */

	WRBLOCK(FE_CF_REG_SCL__A, 5),
	0x16, 0x00,		/* FE_CF_REG_SCL__A */
	0x04, 0x00,		/* FE_CF_REG_MAX_LEV__A */
	0x06, 0x00,		/* FE_CF_REG_NR__A */
	0x00, 0x00,		/* FE_CF_REG_IMP_VAL__A */
	0x01, 0x00,		/* FE_CF_REG_MEAS_VAL__A */

	WRBLOCK(FE_CU_REG_FRM_CNT_RST__A, 2),
	0x00, 0x08,		/* FE_CU_REG_FRM_CNT_RST__A */
	0x00, 0x00,		/* FE_CU_REG_FRM_CNT_STR__A */

	END_OF_TABLE
};
/* with PGA */
/* WR16COND( DRXD_WITH_PGA, FE_AG_REG_AG_PGA_MODE__A , 0x0004), */
/* without PGA */
/* WR16COND( DRXD_WITHOUT_PGA, FE_AG_REG_AG_PGA_MODE__A , 0x0001), */
/* WR16(FE_AG_REG_AG_AGC_SIO__A, (extAttr -> FeAgRegAgAgcSio), 0x0000 );*/
/* WR16(FE_AG_REG_AG_PWD__A ,(extAttr -> FeAgRegAgPwd), 0x0000 );*/
/* Front-end init, part 2, for A2: enables measurement/scaling and starts
 * all front-end sub-blocks by setting their COMM_EXEC registers. */
u8 DRXD_InitFEA2_2[] = {
	WR16(FE_AG_REG_CDR_RUR_CNT__A, 0x0010),
	WR16(FE_AG_REG_FGM_WRI__A, 48),

	/* Activate measurement, activate scale */
	WR16(FE_FD_REG_MEAS_VAL__A, 0x0001),

	WR16(FE_CU_REG_COMM_EXEC__A, 0x0001),
	WR16(FE_CF_REG_COMM_EXEC__A, 0x0001),
	WR16(FE_IF_REG_COMM_EXEC__A, 0x0001),
	WR16(FE_FD_REG_COMM_EXEC__A, 0x0001),
	WR16(FE_FS_REG_COMM_EXEC__A, 0x0001),
	WR16(FE_AD_REG_COMM_EXEC__A, 0x0001),
	WR16(FE_AG_REG_COMM_EXEC__A, 0x0001),
	WR16(FE_AG_REG_AG_MODE_LOP__A, 0x895E),

	END_OF_TABLE
};
/* Front-end init, part 1, for the B1 silicon revision (B_ register map). */
u8 DRXD_InitFEB1_1[] = {
	WR16(B_FE_AD_REG_PD__A, 0x0000),
	WR16(B_FE_AD_REG_CLKNEG__A, 0x0000),
	WR16(B_FE_AG_REG_BGC_FGC_WRI__A, 0x0000),
	WR16(B_FE_AG_REG_BGC_CGC_WRI__A, 0x0000),
	WR16(B_FE_AG_REG_AG_MODE_LOP__A, 0x000a),
	WR16(B_FE_AG_REG_IND_PD1_WRI__A, 35),
	WR16(B_FE_AG_REG_IND_WIN__A, 0),
	WR16(B_FE_AG_REG_IND_THD_LOL__A, 8),
	WR16(B_FE_AG_REG_IND_THD_HIL__A, 8),
	WR16(B_FE_CF_REG_IMP_VAL__A, 1),
	WR16(B_FE_AG_REG_EGC_FLA_RGN__A, 7),
	END_OF_TABLE
};
/* with PGA */
/* WR16(B_FE_AG_REG_AG_PGA_MODE__A , 0x0000, 0x0000); */
/* without PGA */
/* WR16(B_FE_AG_REG_AG_PGA_MODE__A ,
B_FE_AG_REG_AG_PGA_MODE_PFN_PCN_AFY_REN, 0x0000);*/
/* WR16(B_FE_AG_REG_AG_AGC_SIO__A,(extAttr -> FeAgRegAgAgcSio), 0x0000 );*//*added HS 23-05-2005 */
/* WR16(B_FE_AG_REG_AG_PWD__A ,(extAttr -> FeAgRegAgPwd), 0x0000 );*/
/* Front-end init, part 2, for B1: starts the front end and programs the
 * RF-AGC (power/tuner gain detector) parameters. */
u8 DRXD_InitFEB1_2[] = {
	WR16(B_FE_COMM_EXEC__A, 0x0001),

	/* RF-AGC setup */
	WR16(B_FE_AG_REG_PDA_AUR_CNT__A, 0x0C),
	WR16(B_FE_AG_REG_PDC_SET_LVL__A, 0x01),
	WR16(B_FE_AG_REG_PDC_FLA_RGN__A, 0x02),
	WR16(B_FE_AG_REG_PDC_FLA_STP__A, 0xFFFF),
	WR16(B_FE_AG_REG_PDC_SLO_STP__A, 0xFFFF),
	WR16(B_FE_AG_REG_PDC_MAX__A, 0x02),
	WR16(B_FE_AG_REG_TGA_AUR_CNT__A, 0x0C),
	WR16(B_FE_AG_REG_TGC_SET_LVL__A, 0x22),
	WR16(B_FE_AG_REG_TGC_FLA_RGN__A, 0x15),
	WR16(B_FE_AG_REG_TGC_FLA_STP__A, 0x01),
	WR16(B_FE_AG_REG_TGC_SLO_STP__A, 0x0A),

	WR16(B_FE_CU_REG_DIV_NFC_CLP__A, 0),
	WR16(B_FE_CU_REG_CTR_NFC_OCR__A, 25000),
	WR16(B_FE_CU_REG_CTR_NFC_ICR__A, 1),
	END_OF_TABLE
};
/* Carrier-processing (CP) block init for the A2 revision; final WR16
 * starts the block via its COMM_EXEC register. */
u8 DRXD_InitCPA2[] = {
	WRBLOCK(CP_REG_BR_SPL_OFFSET__A, 2),
	0x07, 0x00,		/* CP_REG_BR_SPL_OFFSET__A */
	0x0A, 0x00,		/* CP_REG_BR_STR_DEL__A */

	WRBLOCK(CP_REG_RT_ANG_INC0__A, 4),
	0x00, 0x00,		/* CP_REG_RT_ANG_INC0__A */
	0x00, 0x00,		/* CP_REG_RT_ANG_INC1__A */
	0x03, 0x00,		/* CP_REG_RT_DETECT_ENA__A */
	0x03, 0x00,		/* CP_REG_RT_DETECT_TRH__A */

	WRBLOCK(CP_REG_AC_NEXP_OFFS__A, 5),
	0x32, 0x00,		/* CP_REG_AC_NEXP_OFFS__A */
	0x62, 0x00,		/* CP_REG_AC_AVER_POW__A */
	0x82, 0x00,		/* CP_REG_AC_MAX_POW__A */
	0x26, 0x00,		/* CP_REG_AC_WEIGHT_MAN__A */
	0x0F, 0x00,		/* CP_REG_AC_WEIGHT_EXP__A */

	WRBLOCK(CP_REG_AC_AMP_MODE__A, 2),
	0x02, 0x00,		/* CP_REG_AC_AMP_MODE__A */
	0x01, 0x00,		/* CP_REG_AC_AMP_FIX__A */

	WR16(CP_REG_INTERVAL__A, 0x0005),
	WR16(CP_REG_RT_EXP_MARG__A, 0x0004),
	WR16(CP_REG_AC_ANG_MODE__A, 0x0003),

	WR16(CP_REG_COMM_EXEC__A, 0x0001),
	END_OF_TABLE
};
/* Carrier-processing (CP) block init for the B1 revision. */
u8 DRXD_InitCPB1[] = {
	WR16(B_CP_REG_BR_SPL_OFFSET__A, 0x0008),
	WR16(B_CP_COMM_EXEC__A, 0x0001),
	END_OF_TABLE
};
/* Channel-equalizer (CE) init for the A2 revision: noise estimation,
 * phase estimation, tap processing and impulse-response settings. */
u8 DRXD_InitCEA2[] = {
	WRBLOCK(CE_REG_AVG_POW__A, 4),
	0x62, 0x00,		/* CE_REG_AVG_POW__A */
	0x78, 0x00,		/* CE_REG_MAX_POW__A */
	0x62, 0x00,		/* CE_REG_ATT__A */
	0x17, 0x00,		/* CE_REG_NRED__A */

	WRBLOCK(CE_REG_NE_ERR_SELECT__A, 2),
	0x07, 0x00,		/* CE_REG_NE_ERR_SELECT__A */
	0xEB, 0xFF,		/* CE_REG_NE_TD_CAL__A */

	WRBLOCK(CE_REG_NE_MIXAVG__A, 2),
	0x06, 0x00,		/* CE_REG_NE_MIXAVG__A */
	0x00, 0x00,		/* CE_REG_NE_NUPD_OFS__A */

	WRBLOCK(CE_REG_PE_NEXP_OFFS__A, 2),
	0x00, 0x00,		/* CE_REG_PE_NEXP_OFFS__A */
	0x00, 0x00,		/* CE_REG_PE_TIMESHIFT__A */

	WRBLOCK(CE_REG_TP_A0_TAP_NEW__A, 3),
	0x00, 0x01,		/* CE_REG_TP_A0_TAP_NEW__A */
	0x01, 0x00,		/* CE_REG_TP_A0_TAP_NEW_VALID__A */
	0x0E, 0x00,		/* CE_REG_TP_A0_MU_LMS_STEP__A */

	WRBLOCK(CE_REG_TP_A1_TAP_NEW__A, 3),
	0x00, 0x00,		/* CE_REG_TP_A1_TAP_NEW__A */
	0x01, 0x00,		/* CE_REG_TP_A1_TAP_NEW_VALID__A */
	0x0A, 0x00,		/* CE_REG_TP_A1_MU_LMS_STEP__A */

	WRBLOCK(CE_REG_FI_SHT_INCR__A, 2),
	0x12, 0x00,		/* CE_REG_FI_SHT_INCR__A */
	0x0C, 0x00,		/* CE_REG_FI_EXP_NORM__A */

	WRBLOCK(CE_REG_IR_INPUTSEL__A, 3),
	0x00, 0x00,		/* CE_REG_IR_INPUTSEL__A */
	0x00, 0x00,		/* CE_REG_IR_STARTPOS__A */
	0xFF, 0x00,		/* CE_REG_IR_NEXP_THRES__A */

	WR16(CE_REG_TI_NEXP_OFFS__A, 0x0000),

	END_OF_TABLE
};
/* Channel-equalizer (CE) init for the B1 revision. */
u8 DRXD_InitCEB1[] = {
	WR16(B_CE_REG_TI_PHN_ENABLE__A, 0x0001),
	WR16(B_CE_REG_FR_PM_SET__A, 0x000D),

	END_OF_TABLE
};
/* Equalizer (EQ) block init for the A2 revision; started via COMM_EXEC. */
u8 DRXD_InitEQA2[] = {
	WRBLOCK(EQ_REG_OT_QNT_THRES0__A, 4),
	0x1E, 0x00,		/* EQ_REG_OT_QNT_THRES0__A */
	0x1F, 0x00,		/* EQ_REG_OT_QNT_THRES1__A */
	0x06, 0x00,		/* EQ_REG_OT_CSI_STEP__A */
	0x02, 0x00,		/* EQ_REG_OT_CSI_OFFSET__A */

	WR16(EQ_REG_TD_REQ_SMB_CNT__A, 0x0200),
	WR16(EQ_REG_IS_CLIP_EXP__A, 0x001F),
	WR16(EQ_REG_SN_OFFSET__A, (u16) (-7)),	/* signed offset stored as u16 */
	WR16(EQ_REG_RC_SEL_CAR__A, 0x0002),

	WR16(EQ_REG_COMM_EXEC__A, 0x0001),
	END_OF_TABLE
};
/* Equalizer (EQ) block init for the B1 revision: just start it. */
u8 DRXD_InitEQB1[] = {
	WR16(B_EQ_REG_COMM_EXEC__A, 0x0001),
	END_OF_TABLE
};
/* Clears the packet-sync byte slots in the EC deinterleaver and
 * Reed-Solomon RAMs (11 interleaved slots of 17 words each, plus the
 * two RS slots 204 words apart). */
u8 DRXD_ResetECRAM[] = {
	/* Reset packet sync bytes in EC_VD ram */
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (0 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (1 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (2 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (3 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (4 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (5 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (6 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (7 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (8 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (9 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (10 * 17), 0x0000),

	/* Reset packet sync bytes in EC_RS ram */
	WR16(EC_RS_EC_RAM__A, 0x0000),
	WR16(EC_RS_EC_RAM__A + 204, 0x0000),
	END_OF_TABLE
};
/* Error-correction (EC) block init for the A2 revision: symbol buffer,
 * Reed-Solomon, output clock/rate control; also clears the EC RAM sync
 * bytes inline (same writes as DRXD_ResetECRAM) and starts the EC
 * sub-blocks. */
u8 DRXD_InitECA2[] = {
	WRBLOCK(EC_SB_REG_CSI_HI__A, 6),
	0x1F, 0x00,		/* EC_SB_REG_CSI_HI__A */
	0x1E, 0x00,		/* EC_SB_REG_CSI_LO__A */
	0x01, 0x00,		/* EC_SB_REG_SMB_TGL__A */
	0x7F, 0x00,		/* EC_SB_REG_SNR_HI__A */
	0x7F, 0x00,		/* EC_SB_REG_SNR_MID__A */
	0x7F, 0x00,		/* EC_SB_REG_SNR_LO__A */

	WRBLOCK(EC_RS_REG_REQ_PCK_CNT__A, 2),
	0x00, 0x10,		/* EC_RS_REG_REQ_PCK_CNT__A */
	DATA16(EC_RS_REG_VAL_PCK),	/* EC_RS_REG_VAL__A */

	WRBLOCK(EC_OC_REG_TMD_TOP_MODE__A, 5),
	0x03, 0x00,		/* EC_OC_REG_TMD_TOP_MODE__A */
	0xF4, 0x01,		/* EC_OC_REG_TMD_TOP_CNT__A */
	0xC0, 0x03,		/* EC_OC_REG_TMD_HIL_MAR__A */
	0x40, 0x00,		/* EC_OC_REG_TMD_LOL_MAR__A */
	0x03, 0x00,		/* EC_OC_REG_TMD_CUR_CNT__A */

	WRBLOCK(EC_OC_REG_AVR_ASH_CNT__A, 2),
	0x06, 0x00,		/* EC_OC_REG_AVR_ASH_CNT__A */
	0x02, 0x00,		/* EC_OC_REG_AVR_BSH_CNT__A */

	WRBLOCK(EC_OC_REG_RCN_MODE__A, 7),
	0x07, 0x00,		/* EC_OC_REG_RCN_MODE__A */
	0x00, 0x00,		/* EC_OC_REG_RCN_CRA_LOP__A */
	0xc0, 0x00,		/* EC_OC_REG_RCN_CRA_HIP__A */
	0x00, 0x10,		/* EC_OC_REG_RCN_CST_LOP__A */
	0x00, 0x00,		/* EC_OC_REG_RCN_CST_HIP__A */
	0xFF, 0x01,		/* EC_OC_REG_RCN_SET_LVL__A */
	0x0D, 0x00,		/* EC_OC_REG_RCN_GAI_LVL__A */

	WRBLOCK(EC_OC_REG_RCN_CLP_LOP__A, 2),
	0x00, 0x00,		/* EC_OC_REG_RCN_CLP_LOP__A */
	0xC0, 0x00,		/* EC_OC_REG_RCN_CLP_HIP__A */

	WR16(EC_SB_REG_CSI_OFS__A, 0x0001),
	WR16(EC_VD_REG_FORCE__A, 0x0002),
	WR16(EC_VD_REG_REQ_SMB_CNT__A, 0x0001),
	WR16(EC_VD_REG_RLK_ENA__A, 0x0001),
	WR16(EC_OD_REG_SYNC__A, 0x0664),
	WR16(EC_OC_REG_OC_MON_SIO__A, 0x0000),
	WR16(EC_OC_REG_SNC_ISC_LVL__A, 0x0D0C),
	/* Output zero on monitorbus pads, power saving */
	WR16(EC_OC_REG_OCR_MON_UOS__A,
	     (EC_OC_REG_OCR_MON_UOS_DAT_0_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_1_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_2_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_3_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_4_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_5_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_6_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_7_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_8_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_9_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_VAL_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_CLK_ENABLE)),
	WR16(EC_OC_REG_OCR_MON_WRI__A,
	     EC_OC_REG_OCR_MON_WRI_INIT),

	/* CHK_ERROR(ResetECRAM(demod)); */
	/* Reset packet sync bytes in EC_VD ram */
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (0 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (1 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (2 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (3 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (4 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (5 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (6 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (7 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (8 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (9 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (10 * 17), 0x0000),

	/* Reset packet sync bytes in EC_RS ram */
	WR16(EC_RS_EC_RAM__A, 0x0000),
	WR16(EC_RS_EC_RAM__A + 204, 0x0000),

	WR16(EC_SB_REG_COMM_EXEC__A, 0x0001),
	WR16(EC_VD_REG_COMM_EXEC__A, 0x0001),
	WR16(EC_OD_REG_COMM_EXEC__A, 0x0001),
	WR16(EC_RS_REG_COMM_EXEC__A, 0x0001),
	END_OF_TABLE
};
/* Error-correction (EC) block init for the B1 revision; also clears the
 * EC RAM sync bytes inline and starts the EC sub-blocks. */
u8 DRXD_InitECB1[] = {
	WR16(B_EC_SB_REG_CSI_OFS0__A, 0x0001),
	WR16(B_EC_SB_REG_CSI_OFS1__A, 0x0001),
	WR16(B_EC_SB_REG_CSI_OFS2__A, 0x0001),
	WR16(B_EC_SB_REG_CSI_LO__A, 0x000c),
	WR16(B_EC_SB_REG_CSI_HI__A, 0x0018),
	WR16(B_EC_SB_REG_SNR_HI__A, 0x007f),
	WR16(B_EC_SB_REG_SNR_MID__A, 0x007f),
	WR16(B_EC_SB_REG_SNR_LO__A, 0x007f),

	WR16(B_EC_OC_REG_DTO_CLKMODE__A, 0x0002),
	WR16(B_EC_OC_REG_DTO_PER__A, 0x0006),
	WR16(B_EC_OC_REG_DTO_BUR__A, 0x0001),
	WR16(B_EC_OC_REG_RCR_CLKMODE__A, 0x0000),
	WR16(B_EC_OC_REG_RCN_GAI_LVL__A, 0x000D),
	WR16(B_EC_OC_REG_OC_MPG_SIO__A, 0x0000),

	/* Needed because shadow registers do not have correct default value */
	WR16(B_EC_OC_REG_RCN_CST_LOP__A, 0x1000),
	WR16(B_EC_OC_REG_RCN_CST_HIP__A, 0x0000),
	WR16(B_EC_OC_REG_RCN_CRA_LOP__A, 0x0000),
	WR16(B_EC_OC_REG_RCN_CRA_HIP__A, 0x00C0),
	WR16(B_EC_OC_REG_RCN_CLP_LOP__A, 0x0000),
	WR16(B_EC_OC_REG_RCN_CLP_HIP__A, 0x00C0),
	WR16(B_EC_OC_REG_DTO_INC_LOP__A, 0x0000),
	WR16(B_EC_OC_REG_DTO_INC_HIP__A, 0x00C0),

	WR16(B_EC_OD_REG_SYNC__A, 0x0664),
	WR16(B_EC_RS_REG_REQ_PCK_CNT__A, 0x1000),

	/* CHK_ERROR(ResetECRAM(demod)); */
	/* Reset packet sync bytes in EC_VD ram */
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (0 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (1 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (2 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (3 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (4 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (5 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (6 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (7 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (8 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (9 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (10 * 17), 0x0000),

	/* Reset packet sync bytes in EC_RS ram */
	WR16(EC_RS_EC_RAM__A, 0x0000),
	WR16(EC_RS_EC_RAM__A + 204, 0x0000),

	WR16(B_EC_SB_REG_COMM_EXEC__A, 0x0001),
	WR16(B_EC_VD_REG_COMM_EXEC__A, 0x0001),
	WR16(B_EC_OD_REG_COMM_EXEC__A, 0x0001),
	WR16(B_EC_RS_REG_COMM_EXEC__A, 0x0001),
	END_OF_TABLE
};
/* Full EC reset for A2: stops the OC/OD sub-blocks, rewrites the OC
 * defaults (same values as in DRXD_InitECA2), clears the EC RAM sync
 * bytes and restarts only the OD sub-block. */
u8 DRXD_ResetECA2[] = {

	WR16(EC_OC_REG_COMM_EXEC__A, 0x0000),
	WR16(EC_OD_REG_COMM_EXEC__A, 0x0000),

	WRBLOCK(EC_OC_REG_TMD_TOP_MODE__A, 5),
	0x03, 0x00,		/* EC_OC_REG_TMD_TOP_MODE__A */
	0xF4, 0x01,		/* EC_OC_REG_TMD_TOP_CNT__A */
	0xC0, 0x03,		/* EC_OC_REG_TMD_HIL_MAR__A */
	0x40, 0x00,		/* EC_OC_REG_TMD_LOL_MAR__A */
	0x03, 0x00,		/* EC_OC_REG_TMD_CUR_CNT__A */

	WRBLOCK(EC_OC_REG_AVR_ASH_CNT__A, 2),
	0x06, 0x00,		/* EC_OC_REG_AVR_ASH_CNT__A */
	0x02, 0x00,		/* EC_OC_REG_AVR_BSH_CNT__A */

	WRBLOCK(EC_OC_REG_RCN_MODE__A, 7),
	0x07, 0x00,		/* EC_OC_REG_RCN_MODE__A */
	0x00, 0x00,		/* EC_OC_REG_RCN_CRA_LOP__A */
	0xc0, 0x00,		/* EC_OC_REG_RCN_CRA_HIP__A */
	0x00, 0x10,		/* EC_OC_REG_RCN_CST_LOP__A */
	0x00, 0x00,		/* EC_OC_REG_RCN_CST_HIP__A */
	0xFF, 0x01,		/* EC_OC_REG_RCN_SET_LVL__A */
	0x0D, 0x00,		/* EC_OC_REG_RCN_GAI_LVL__A */

	WRBLOCK(EC_OC_REG_RCN_CLP_LOP__A, 2),
	0x00, 0x00,		/* EC_OC_REG_RCN_CLP_LOP__A */
	0xC0, 0x00,		/* EC_OC_REG_RCN_CLP_HIP__A */

	WR16(EC_OD_REG_SYNC__A, 0x0664),
	WR16(EC_OC_REG_OC_MON_SIO__A, 0x0000),
	WR16(EC_OC_REG_SNC_ISC_LVL__A, 0x0D0C),
	/* Output zero on monitorbus pads, power saving */
	WR16(EC_OC_REG_OCR_MON_UOS__A,
	     (EC_OC_REG_OCR_MON_UOS_DAT_0_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_1_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_2_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_3_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_4_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_5_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_6_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_7_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_8_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_DAT_9_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_VAL_ENABLE |
	      EC_OC_REG_OCR_MON_UOS_CLK_ENABLE)),
	WR16(EC_OC_REG_OCR_MON_WRI__A,
	     EC_OC_REG_OCR_MON_WRI_INIT),

	/* CHK_ERROR(ResetECRAM(demod)); */
	/* Reset packet sync bytes in EC_VD ram */
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (0 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (1 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (2 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (3 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (4 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (5 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (6 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (7 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (8 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (9 * 17), 0x0000),
	WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (10 * 17), 0x0000),

	/* Reset packet sync bytes in EC_RS ram */
	WR16(EC_RS_EC_RAM__A, 0x0000),
	WR16(EC_RS_EC_RAM__A + 204, 0x0000),

	WR16(EC_OD_REG_COMM_EXEC__A, 0x0001),
	END_OF_TABLE
};
/* System-controller (SC) init: halt the SC and clear its state; the SC
 * itself is started later in SetChannels(). */
u8 DRXD_InitSC[] = {
	WR16(SC_COMM_EXEC__A, 0),
	WR16(SC_COMM_STATE__A, 0),

#ifdef COMPILE_FOR_QT
	/* Longer block-error option delay only for the QT build */
	WR16(SC_RA_RAM_BE_OPT_DELAY__A, 0x100),
#endif

	/* SC is not started, this is done in SetChannels() */
	END_OF_TABLE
};
/* Diversity settings */
/* Table for the front demodulator in a diversity chain: RF in, diversity
 * link out (B_CC_REG_DIVERSITY__A enabled, CE-to-PASS carrier mux). */
u8 DRXD_InitDiversityFront[] = {
/* Start demod ********* RF in , diversity out **************************** */
WR16(B_SC_RA_RAM_CONFIG__A, B_SC_RA_RAM_CONFIG_FR_ENABLE__M |
B_SC_RA_RAM_CONFIG_FREQSCAN__M),
WR16(B_SC_RA_RAM_LC_ABS_2K__A, 0x7),
WR16(B_SC_RA_RAM_LC_ABS_8K__A, 0x7),
/* Impulse-response window lengths and derived frequency/Kaiser increments
 * for coarse/fine estimation in 8K and 2K modes (see IRLEN_* constants). */
WR16(B_SC_RA_RAM_IR_COARSE_8K_LENGTH__A, IRLEN_COARSE_8K),
WR16(B_SC_RA_RAM_IR_COARSE_8K_FREQINC__A, 1 << (11 - IRLEN_COARSE_8K)),
WR16(B_SC_RA_RAM_IR_COARSE_8K_KAISINC__A, 1 << (17 - IRLEN_COARSE_8K)),
WR16(B_SC_RA_RAM_IR_FINE_8K_LENGTH__A, IRLEN_FINE_8K),
WR16(B_SC_RA_RAM_IR_FINE_8K_FREQINC__A, 1 << (11 - IRLEN_FINE_8K)),
WR16(B_SC_RA_RAM_IR_FINE_8K_KAISINC__A, 1 << (17 - IRLEN_FINE_8K)),
WR16(B_SC_RA_RAM_IR_COARSE_2K_LENGTH__A, IRLEN_COARSE_2K),
WR16(B_SC_RA_RAM_IR_COARSE_2K_FREQINC__A, 1 << (11 - IRLEN_COARSE_2K)),
WR16(B_SC_RA_RAM_IR_COARSE_2K_KAISINC__A, 1 << (17 - IRLEN_COARSE_2K)),
WR16(B_SC_RA_RAM_IR_FINE_2K_LENGTH__A, IRLEN_FINE_2K),
WR16(B_SC_RA_RAM_IR_FINE_2K_FREQINC__A, 1 << (11 - IRLEN_FINE_2K)),
WR16(B_SC_RA_RAM_IR_FINE_2K_KAISINC__A, 1 << (17 - IRLEN_FINE_2K)),
WR16(B_LC_RA_RAM_FILTER_CRMM_A__A, 7),
WR16(B_LC_RA_RAM_FILTER_CRMM_B__A, 4),
WR16(B_LC_RA_RAM_FILTER_SRMM_A__A, 7),
WR16(B_LC_RA_RAM_FILTER_SRMM_B__A, 4),
WR16(B_LC_RA_RAM_FILTER_SYM_SET__A, 500),
WR16(B_CC_REG_DIVERSITY__A, 0x0001),
WR16(B_EC_OC_REG_OC_MODE_HIP__A, 0x0010),
WR16(B_EQ_REG_RC_SEL_CAR__A, B_EQ_REG_RC_SEL_CAR_PASS_B_CE |
B_EQ_REG_RC_SEL_CAR_LOCAL_B_CE | B_EQ_REG_RC_SEL_CAR_MEAS_B_CE),
/* 0x2a ), *//* CE to PASS mux */
END_OF_TABLE
};
/* Table for the end demodulator in a diversity chain: combines its own RF
 * input with the diversity link input and outputs the MPEG TS.
 * Timing-slave and diversity-echo/blanking modes are switched on. */
u8 DRXD_InitDiversityEnd[] = {
/* End demod *********** combining RF in and diversity in, MPEG TS out **** */
/* disable near/far; switch on timing slave mode */
WR16(B_SC_RA_RAM_CONFIG__A, B_SC_RA_RAM_CONFIG_FR_ENABLE__M |
B_SC_RA_RAM_CONFIG_FREQSCAN__M |
B_SC_RA_RAM_CONFIG_DIV_ECHO_ENABLE__M |
B_SC_RA_RAM_CONFIG_SLAVE__M |
B_SC_RA_RAM_CONFIG_DIV_BLANK_ENABLE__M
/* MV from CtrlDiversity */
),
#ifdef DRXDDIV_SRMM_SLAVING
WR16(SC_RA_RAM_LC_ABS_2K__A, 0x3c7),
WR16(SC_RA_RAM_LC_ABS_8K__A, 0x3c7),
#else
WR16(SC_RA_RAM_LC_ABS_2K__A, 0x7),
WR16(SC_RA_RAM_LC_ABS_8K__A, 0x7),
#endif
/* Same IR window setup as in DRXD_InitDiversityFront. */
WR16(B_SC_RA_RAM_IR_COARSE_8K_LENGTH__A, IRLEN_COARSE_8K),
WR16(B_SC_RA_RAM_IR_COARSE_8K_FREQINC__A, 1 << (11 - IRLEN_COARSE_8K)),
WR16(B_SC_RA_RAM_IR_COARSE_8K_KAISINC__A, 1 << (17 - IRLEN_COARSE_8K)),
WR16(B_SC_RA_RAM_IR_FINE_8K_LENGTH__A, IRLEN_FINE_8K),
WR16(B_SC_RA_RAM_IR_FINE_8K_FREQINC__A, 1 << (11 - IRLEN_FINE_8K)),
WR16(B_SC_RA_RAM_IR_FINE_8K_KAISINC__A, 1 << (17 - IRLEN_FINE_8K)),
WR16(B_SC_RA_RAM_IR_COARSE_2K_LENGTH__A, IRLEN_COARSE_2K),
WR16(B_SC_RA_RAM_IR_COARSE_2K_FREQINC__A, 1 << (11 - IRLEN_COARSE_2K)),
WR16(B_SC_RA_RAM_IR_COARSE_2K_KAISINC__A, 1 << (17 - IRLEN_COARSE_2K)),
WR16(B_SC_RA_RAM_IR_FINE_2K_LENGTH__A, IRLEN_FINE_2K),
WR16(B_SC_RA_RAM_IR_FINE_2K_FREQINC__A, 1 << (11 - IRLEN_FINE_2K)),
WR16(B_SC_RA_RAM_IR_FINE_2K_KAISINC__A, 1 << (17 - IRLEN_FINE_2K)),
WR16(B_LC_RA_RAM_FILTER_CRMM_A__A, 7),
WR16(B_LC_RA_RAM_FILTER_CRMM_B__A, 4),
WR16(B_LC_RA_RAM_FILTER_SRMM_A__A, 7),
WR16(B_LC_RA_RAM_FILTER_SRMM_B__A, 4),
WR16(B_LC_RA_RAM_FILTER_SYM_SET__A, 500),
WR16(B_CC_REG_DIVERSITY__A, 0x0001),
END_OF_TABLE
};
/* Table to switch diversity off again: restores every register touched by
 * the diversity tables to its documented power-on default (__PRE value)
 * and clears B_CC_REG_DIVERSITY__A. */
u8 DRXD_DisableDiversity[] = {
WR16(B_SC_RA_RAM_LC_ABS_2K__A, B_SC_RA_RAM_LC_ABS_2K__PRE),
WR16(B_SC_RA_RAM_LC_ABS_8K__A, B_SC_RA_RAM_LC_ABS_8K__PRE),
WR16(B_SC_RA_RAM_IR_COARSE_8K_LENGTH__A,
B_SC_RA_RAM_IR_COARSE_8K_LENGTH__PRE),
WR16(B_SC_RA_RAM_IR_COARSE_8K_FREQINC__A,
B_SC_RA_RAM_IR_COARSE_8K_FREQINC__PRE),
WR16(B_SC_RA_RAM_IR_COARSE_8K_KAISINC__A,
B_SC_RA_RAM_IR_COARSE_8K_KAISINC__PRE),
WR16(B_SC_RA_RAM_IR_FINE_8K_LENGTH__A,
B_SC_RA_RAM_IR_FINE_8K_LENGTH__PRE),
WR16(B_SC_RA_RAM_IR_FINE_8K_FREQINC__A,
B_SC_RA_RAM_IR_FINE_8K_FREQINC__PRE),
WR16(B_SC_RA_RAM_IR_FINE_8K_KAISINC__A,
B_SC_RA_RAM_IR_FINE_8K_KAISINC__PRE),
WR16(B_SC_RA_RAM_IR_COARSE_2K_LENGTH__A,
B_SC_RA_RAM_IR_COARSE_2K_LENGTH__PRE),
WR16(B_SC_RA_RAM_IR_COARSE_2K_FREQINC__A,
B_SC_RA_RAM_IR_COARSE_2K_FREQINC__PRE),
WR16(B_SC_RA_RAM_IR_COARSE_2K_KAISINC__A,
B_SC_RA_RAM_IR_COARSE_2K_KAISINC__PRE),
WR16(B_SC_RA_RAM_IR_FINE_2K_LENGTH__A,
B_SC_RA_RAM_IR_FINE_2K_LENGTH__PRE),
WR16(B_SC_RA_RAM_IR_FINE_2K_FREQINC__A,
B_SC_RA_RAM_IR_FINE_2K_FREQINC__PRE),
WR16(B_SC_RA_RAM_IR_FINE_2K_KAISINC__A,
B_SC_RA_RAM_IR_FINE_2K_KAISINC__PRE),
WR16(B_LC_RA_RAM_FILTER_CRMM_A__A, B_LC_RA_RAM_FILTER_CRMM_A__PRE),
WR16(B_LC_RA_RAM_FILTER_CRMM_B__A, B_LC_RA_RAM_FILTER_CRMM_B__PRE),
WR16(B_LC_RA_RAM_FILTER_SRMM_A__A, B_LC_RA_RAM_FILTER_SRMM_A__PRE),
WR16(B_LC_RA_RAM_FILTER_SRMM_B__A, B_LC_RA_RAM_FILTER_SRMM_B__PRE),
WR16(B_LC_RA_RAM_FILTER_SYM_SET__A, B_LC_RA_RAM_FILTER_SYM_SET__PRE),
WR16(B_CC_REG_DIVERSITY__A, 0x0000),
WR16(B_EQ_REG_RC_SEL_CAR__A, B_EQ_REG_RC_SEL_CAR_INIT), /* combining disabled */
END_OF_TABLE
};
/* Start table for the front demod in a diversity chain (RF in, diversity
 * out, no combining): disables the impulse-noise value, routes EQ to the
 * monitor bus and selects the CE-to-PASS carrier mux. */
u8 DRXD_StartDiversityFront[] = {
/* Start demod, RF in and diversity out, no combining */
WR16(B_FE_CF_REG_IMP_VAL__A, 0x0),
WR16(B_FE_AD_REG_FDB_IN__A, 0x0),
WR16(B_FE_AD_REG_INVEXT__A, 0x0),
WR16(B_EQ_REG_COMM_MB__A, 0x12), /* EQ to MB out */
WR16(B_EQ_REG_RC_SEL_CAR__A, B_EQ_REG_RC_SEL_CAR_PASS_B_CE | /* CE to PASS mux */
B_EQ_REG_RC_SEL_CAR_LOCAL_B_CE | B_EQ_REG_RC_SEL_CAR_MEAS_B_CE),
WR16(SC_RA_RAM_ECHO_SHIFT_LIM__A, 2),
END_OF_TABLE
};
/* Start table for the end demod in a diversity chain: enables combining of
 * the local RF path with the diversity input and produces the MPEG TS. */
u8 DRXD_StartDiversityEnd[] = {
/* End demod, combining RF in and diversity in, MPEG TS out */
WR16(B_FE_CF_REG_IMP_VAL__A, 0x0), /* disable impulse noise cruncher */
WR16(B_FE_AD_REG_INVEXT__A, 0x0), /* clock inversion (for sohard board) */
WR16(B_CP_REG_BR_STR_DEL__A, 10), /* apparently no mb delay matching is best */
WR16(B_EQ_REG_RC_SEL_CAR__A, B_EQ_REG_RC_SEL_CAR_DIV_ON | /* org = 0x81 combining enabled */
B_EQ_REG_RC_SEL_CAR_MEAS_A_CC |
B_EQ_REG_RC_SEL_CAR_PASS_A_CC | B_EQ_REG_RC_SEL_CAR_LOCAL_A_CC),
END_OF_TABLE
};
/* Diversity delay values for 8 MHz channels, one entry per FFT-mode /
 * guard-interval combination (2K/8K x 1/32..1/4); each raw value is
 * reduced by 50. */
u8 DRXD_DiversityDelay8MHZ[] = {
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_32__A, 1150 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_16__A, 1100 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_8__A, 1000 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_4__A, 800 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_32__A, 5420 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_16__A, 5200 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_8__A, 4800 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_4__A, 4000 - 50),
END_OF_TABLE
};
/* Diversity delay values for 6 MHz channels; same register layout as
 * DRXD_DiversityDelay8MHZ. */
u8 DRXD_DiversityDelay6MHZ[] = /* also used ok for 7 MHz */
{
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_32__A, 1100 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_16__A, 1000 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_8__A, 900 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_4__A, 600 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_32__A, 5300 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_16__A, 5000 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_8__A, 4500 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_4__A, 3500 - 50),
END_OF_TABLE
};
| gpl-2.0 |
marcofreda527/caf_kernel_msm_ics_strawberry | drivers/media/dvb/frontends/drxd_firm.c | 12605 | 36425 | /*
* drxd_firm.c : DRXD firmware tables
*
* Copyright (C) 2006-2007 Micronas
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 only, as published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*/
/* TODO: generate this file with a script from a settings file */
/* Contains A2 firmware version: 1.4.2
* Contains B1 firmware version: 3.3.33
* Contains settings from driver 1.4.23
*/
#include "drxd_firm.h"
/* Helpers that serialize the register-write tables below into a flat byte
 * stream: ADDRESS() emits a 32-bit register address as four little-endian
 * bytes, LENGTH() a 16-bit word count, DATA16() one 16-bit data word.
 * WRBLOCK(a, l) opens a block write of l words starting at address a
 * (the data words follow inline); WR16(a, d) is a single-word write.
 * END_OF_TABLE (0xFF 0xFF 0xFF 0xFF) terminates a table. */
#define ADDRESS(x) ((x) & 0xFF), (((x)>>8) & 0xFF), (((x)>>16) & 0xFF), (((x)>>24) & 0xFF)
#define LENGTH(x) ((x) & 0xFF), (((x)>>8) & 0xFF)
/* Is written via block write, must be little endian */
#define DATA16(x) ((x) & 0xFF), (((x)>>8) & 0xFF)
#define WRBLOCK(a, l) ADDRESS(a), LENGTH(l)
#define WR16(a, d) ADDRESS(a), LENGTH(1), DATA16(d)
#define END_OF_TABLE 0xFF, 0xFF, 0xFF, 0xFF
/* HI firmware patches */
#define HI_TR_FUNC_ADDR HI_IF_RAM_USR_BEGIN__A
#define HI_TR_FUNC_SIZE 9 /* size of this function in instruction words */
/* HI firmware patch: a small routine (HI_TR_FUNC_SIZE instruction words)
 * written into HI user RAM at HI_TR_FUNC_ADDR that performs an atomic
 * block read via the ring interface. Each pair of bytes is one
 * little-endian instruction word; the per-line comments give the
 * disassembly. */
u8 DRXD_InitAtomicRead[] = {
WRBLOCK(HI_TR_FUNC_ADDR, HI_TR_FUNC_SIZE),
0x26, 0x00, /* 0 -> ring.rdy; */
0x60, 0x04, /* r0rami.dt -> ring.xba; */
0x61, 0x04, /* r0rami.dt -> ring.xad; */
0xE3, 0x07, /* HI_RA_RAM_USR_BEGIN -> ring.iad; */
0x40, 0x00, /* (long immediate) */
0x64, 0x04, /* r0rami.dt -> ring.len; */
0x65, 0x04, /* r0rami.dt -> ring.ctl; */
0x26, 0x00, /* 0 -> ring.rdy; */
0x38, 0x00, /* 0 -> jumps.ad; */
END_OF_TABLE
};
/* Pins D0 and D1 of the parallel MPEG output can be used
 to set the I2C address of a device. */
#define HI_RST_FUNC_ADDR (HI_IF_RAM_USR_BEGIN__A + HI_TR_FUNC_SIZE)
#define HI_RST_FUNC_SIZE 54 /* size of this function in instruction words */
/* D0 Version */
/* HI firmware patch, D0-only variant (mask 0x01): reset routine loaded at
 * HI_RST_FUNC_ADDR that samples MPEG pin D0 to derive the device I2C
 * address (DEF_DEV_ID + pin value) and reinitializes both I2C ports.
 * The four trap breakpoint entries are pointed at this routine, and the
 * HI state machine is force-reset at the end. */
u8 DRXD_HiI2cPatch_1[] = {
WRBLOCK(HI_RST_FUNC_ADDR, HI_RST_FUNC_SIZE),
0xC8, 0x07, 0x01, 0x00, /* MASK -> reg0.dt; */
0xE0, 0x07, 0x15, 0x02, /* (EC__BLK << 6) + EC_OC_REG__BNK -> ring.xba; */
0xE1, 0x07, 0x12, 0x00, /* EC_OC_REG_OC_MPG_SIO__A -> ring.xad; */
0xA2, 0x00, /* M_BNK_ID_DAT -> ring.iba; */
0x23, 0x00, /* &data -> ring.iad; */
0x24, 0x00, /* 0 -> ring.len; */
0xA5, 0x02, /* M_RC_CTR_SWAP | M_RC_CTR_READ -> ring.ctl; */
0x26, 0x00, /* 0 -> ring.rdy; */
0x42, 0x00, /* &data+1 -> w0ram.ad; */
0xC0, 0x07, 0xFF, 0x0F, /* -1 -> w0ram.dt; */
0x63, 0x00, /* &data+1 -> ring.iad; */
0x65, 0x02, /* M_RC_CTR_SWAP | M_RC_CTR_WRITE -> ring.ctl; */
0x26, 0x00, /* 0 -> ring.rdy; */
0xE1, 0x07, 0x38, 0x00, /* EC_OC_REG_OCR_MPG_USR_DAT__A -> ring.xad; */
0xA5, 0x02, /* M_RC_CTR_SWAP | M_RC_CTR_READ -> ring.ctl; */
0x26, 0x00, /* 0 -> ring.rdy; */
0xE1, 0x07, 0x12, 0x00, /* EC_OC_REG_OC_MPG_SIO__A -> ring.xad; */
0x23, 0x00, /* &data -> ring.iad; */
0x65, 0x02, /* M_RC_CTR_SWAP | M_RC_CTR_WRITE -> ring.ctl; */
0x26, 0x00, /* 0 -> ring.rdy; */
0x42, 0x00, /* &data+1 -> w0ram.ad; */
0x0F, 0x04, /* r0ram.dt -> and.op; */
0x1C, 0x06, /* reg0.dt -> and.tr; */
0xCF, 0x04, /* and.rs -> add.op; */
0xD0, 0x07, 0x70, 0x00, /* DEF_DEV_ID -> add.tr; */
0xD0, 0x04, /* add.rs -> add.tr; */
0xC8, 0x04, /* add.rs -> reg0.dt; */
0x60, 0x00, /* reg0.dt -> w0ram.dt; */
0xC2, 0x07, 0x10, 0x00, /* SLV0_BASE -> w0rami.ad; */
0x01, 0x00, /* 0 -> w0rami.dt; */
0x01, 0x06, /* reg0.dt -> w0rami.dt; */
0xC2, 0x07, 0x20, 0x00, /* SLV1_BASE -> w0rami.ad; */
0x01, 0x00, /* 0 -> w0rami.dt; */
0x01, 0x06, /* reg0.dt -> w0rami.dt; */
0xC2, 0x07, 0x30, 0x00, /* CMD_BASE -> w0rami.ad; */
0x01, 0x00, /* 0 -> w0rami.dt; */
0x01, 0x00, /* 0 -> w0rami.dt; */
0x01, 0x00, /* 0 -> w0rami.dt; */
0x68, 0x00, /* M_IC_SEL_PT1 -> i2c.sel; */
0x29, 0x00, /* M_IC_CMD_RESET -> i2c.cmd; */
0x28, 0x00, /* M_IC_SEL_PT0 -> i2c.sel; */
0x29, 0x00, /* M_IC_CMD_RESET -> i2c.cmd; */
0xF8, 0x07, 0x2F, 0x00, /* 0x2F -> jumps.ad; */
WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 0) + 1)),
(u16) (HI_RST_FUNC_ADDR & 0x3FF)),
WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 1) + 1)),
(u16) (HI_RST_FUNC_ADDR & 0x3FF)),
WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 2) + 1)),
(u16) (HI_RST_FUNC_ADDR & 0x3FF)),
WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 3) + 1)),
(u16) (HI_RST_FUNC_ADDR & 0x3FF)),
/* Force quick and dirty reset */
WR16(B_HI_CT_REG_COMM_STATE__A, 0),
END_OF_TABLE
};
/* D0,D1 Version */
/* HI firmware patch, D0+D1 variant: identical to DRXD_HiI2cPatch_1 except
 * that the pin mask in the first instruction is 0x03 (both D0 and D1 are
 * used to form the I2C address offset). */
u8 DRXD_HiI2cPatch_3[] = {
WRBLOCK(HI_RST_FUNC_ADDR, HI_RST_FUNC_SIZE),
0xC8, 0x07, 0x03, 0x00, /* MASK -> reg0.dt; */
0xE0, 0x07, 0x15, 0x02, /* (EC__BLK << 6) + EC_OC_REG__BNK -> ring.xba; */
0xE1, 0x07, 0x12, 0x00, /* EC_OC_REG_OC_MPG_SIO__A -> ring.xad; */
0xA2, 0x00, /* M_BNK_ID_DAT -> ring.iba; */
0x23, 0x00, /* &data -> ring.iad; */
0x24, 0x00, /* 0 -> ring.len; */
0xA5, 0x02, /* M_RC_CTR_SWAP | M_RC_CTR_READ -> ring.ctl; */
0x26, 0x00, /* 0 -> ring.rdy; */
0x42, 0x00, /* &data+1 -> w0ram.ad; */
0xC0, 0x07, 0xFF, 0x0F, /* -1 -> w0ram.dt; */
0x63, 0x00, /* &data+1 -> ring.iad; */
0x65, 0x02, /* M_RC_CTR_SWAP | M_RC_CTR_WRITE -> ring.ctl; */
0x26, 0x00, /* 0 -> ring.rdy; */
0xE1, 0x07, 0x38, 0x00, /* EC_OC_REG_OCR_MPG_USR_DAT__A -> ring.xad; */
0xA5, 0x02, /* M_RC_CTR_SWAP | M_RC_CTR_READ -> ring.ctl; */
0x26, 0x00, /* 0 -> ring.rdy; */
0xE1, 0x07, 0x12, 0x00, /* EC_OC_REG_OC_MPG_SIO__A -> ring.xad; */
0x23, 0x00, /* &data -> ring.iad; */
0x65, 0x02, /* M_RC_CTR_SWAP | M_RC_CTR_WRITE -> ring.ctl; */
0x26, 0x00, /* 0 -> ring.rdy; */
0x42, 0x00, /* &data+1 -> w0ram.ad; */
0x0F, 0x04, /* r0ram.dt -> and.op; */
0x1C, 0x06, /* reg0.dt -> and.tr; */
0xCF, 0x04, /* and.rs -> add.op; */
0xD0, 0x07, 0x70, 0x00, /* DEF_DEV_ID -> add.tr; */
0xD0, 0x04, /* add.rs -> add.tr; */
0xC8, 0x04, /* add.rs -> reg0.dt; */
0x60, 0x00, /* reg0.dt -> w0ram.dt; */
0xC2, 0x07, 0x10, 0x00, /* SLV0_BASE -> w0rami.ad; */
0x01, 0x00, /* 0 -> w0rami.dt; */
0x01, 0x06, /* reg0.dt -> w0rami.dt; */
0xC2, 0x07, 0x20, 0x00, /* SLV1_BASE -> w0rami.ad; */
0x01, 0x00, /* 0 -> w0rami.dt; */
0x01, 0x06, /* reg0.dt -> w0rami.dt; */
0xC2, 0x07, 0x30, 0x00, /* CMD_BASE -> w0rami.ad; */
0x01, 0x00, /* 0 -> w0rami.dt; */
0x01, 0x00, /* 0 -> w0rami.dt; */
0x01, 0x00, /* 0 -> w0rami.dt; */
0x68, 0x00, /* M_IC_SEL_PT1 -> i2c.sel; */
0x29, 0x00, /* M_IC_CMD_RESET -> i2c.cmd; */
0x28, 0x00, /* M_IC_SEL_PT0 -> i2c.sel; */
0x29, 0x00, /* M_IC_CMD_RESET -> i2c.cmd; */
0xF8, 0x07, 0x2F, 0x00, /* 0x2F -> jumps.ad; */
WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 0) + 1)),
(u16) (HI_RST_FUNC_ADDR & 0x3FF)),
WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 1) + 1)),
(u16) (HI_RST_FUNC_ADDR & 0x3FF)),
WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 2) + 1)),
(u16) (HI_RST_FUNC_ADDR & 0x3FF)),
WR16((B_HI_IF_RAM_TRP_BPT0__AX + ((2 * 3) + 1)),
(u16) (HI_RST_FUNC_ADDR & 0x3FF)),
/* Force quick and dirty reset */
WR16(B_HI_CT_REG_COMM_STATE__A, 0),
END_OF_TABLE
};
/* Reset table for the CE FR (channel-estimation feedback/filter) register
 * bank: one 57-word block write covering tap values, squelch gains, RIO
 * gains and mode/threshold registers. Each data pair is one little-endian
 * 16-bit word; the comment names the target register. */
u8 DRXD_ResetCEFR[] = {
WRBLOCK(CE_REG_FR_TREAL00__A, 57),
0x52, 0x00, /* CE_REG_FR_TREAL00__A */
0x00, 0x00, /* CE_REG_FR_TIMAG00__A */
0x52, 0x00, /* CE_REG_FR_TREAL01__A */
0x00, 0x00, /* CE_REG_FR_TIMAG01__A */
0x52, 0x00, /* CE_REG_FR_TREAL02__A */
0x00, 0x00, /* CE_REG_FR_TIMAG02__A */
0x52, 0x00, /* CE_REG_FR_TREAL03__A */
0x00, 0x00, /* CE_REG_FR_TIMAG03__A */
0x52, 0x00, /* CE_REG_FR_TREAL04__A */
0x00, 0x00, /* CE_REG_FR_TIMAG04__A */
0x52, 0x00, /* CE_REG_FR_TREAL05__A */
0x00, 0x00, /* CE_REG_FR_TIMAG05__A */
0x52, 0x00, /* CE_REG_FR_TREAL06__A */
0x00, 0x00, /* CE_REG_FR_TIMAG06__A */
0x52, 0x00, /* CE_REG_FR_TREAL07__A */
0x00, 0x00, /* CE_REG_FR_TIMAG07__A */
0x52, 0x00, /* CE_REG_FR_TREAL08__A */
0x00, 0x00, /* CE_REG_FR_TIMAG08__A */
0x52, 0x00, /* CE_REG_FR_TREAL09__A */
0x00, 0x00, /* CE_REG_FR_TIMAG09__A */
0x52, 0x00, /* CE_REG_FR_TREAL10__A */
0x00, 0x00, /* CE_REG_FR_TIMAG10__A */
0x52, 0x00, /* CE_REG_FR_TREAL11__A */
0x00, 0x00, /* CE_REG_FR_TIMAG11__A */
0x52, 0x00, /* CE_REG_FR_MID_TAP__A */
0x0B, 0x00, /* CE_REG_FR_SQS_G00__A */
0x0B, 0x00, /* CE_REG_FR_SQS_G01__A */
0x0B, 0x00, /* CE_REG_FR_SQS_G02__A */
0x0B, 0x00, /* CE_REG_FR_SQS_G03__A */
0x0B, 0x00, /* CE_REG_FR_SQS_G04__A */
0x0B, 0x00, /* CE_REG_FR_SQS_G05__A */
0x0B, 0x00, /* CE_REG_FR_SQS_G06__A */
0x0B, 0x00, /* CE_REG_FR_SQS_G07__A */
0x0B, 0x00, /* CE_REG_FR_SQS_G08__A */
0x0B, 0x00, /* CE_REG_FR_SQS_G09__A */
0x0B, 0x00, /* CE_REG_FR_SQS_G10__A */
0x0B, 0x00, /* CE_REG_FR_SQS_G11__A */
0x0B, 0x00, /* CE_REG_FR_SQS_G12__A */
0xFF, 0x01, /* CE_REG_FR_RIO_G00__A */
0x90, 0x01, /* CE_REG_FR_RIO_G01__A */
0x0B, 0x01, /* CE_REG_FR_RIO_G02__A */
0xC8, 0x00, /* CE_REG_FR_RIO_G03__A */
0xA0, 0x00, /* CE_REG_FR_RIO_G04__A */
0x85, 0x00, /* CE_REG_FR_RIO_G05__A */
0x72, 0x00, /* CE_REG_FR_RIO_G06__A */
0x64, 0x00, /* CE_REG_FR_RIO_G07__A */
0x59, 0x00, /* CE_REG_FR_RIO_G08__A */
0x50, 0x00, /* CE_REG_FR_RIO_G09__A */
0x49, 0x00, /* CE_REG_FR_RIO_G10__A */
0x10, 0x00, /* CE_REG_FR_MODE__A */
0x78, 0x00, /* CE_REG_FR_SQS_TRH__A */
0x00, 0x00, /* CE_REG_FR_RIO_GAIN__A */
0x00, 0x02, /* CE_REG_FR_BYPASS__A */
0x0D, 0x00, /* CE_REG_FR_PM_SET__A */
0x07, 0x00, /* CE_REG_FR_ERR_SH__A */
0x04, 0x00, /* CE_REG_FR_MAN_SH__A */
0x06, 0x00, /* CE_REG_FR_TAP_SH__A */
END_OF_TABLE
};
/* First front-end init table for the A2 silicon revision: configures the
 * AD (A/D), AG (AGC), FD, CF and CU sub-blocks via block writes. */
u8 DRXD_InitFEA2_1[] = {
WRBLOCK(FE_AD_REG_PD__A, 3),
0x00, 0x00, /* FE_AD_REG_PD__A */
0x01, 0x00, /* FE_AD_REG_INVEXT__A */
0x00, 0x00, /* FE_AD_REG_CLKNEG__A */
WRBLOCK(FE_AG_REG_DCE_AUR_CNT__A, 2),
0x10, 0x00, /* FE_AG_REG_DCE_AUR_CNT__A */
0x10, 0x00, /* FE_AG_REG_DCE_RUR_CNT__A */
WRBLOCK(FE_AG_REG_ACE_AUR_CNT__A, 2),
0x0E, 0x00, /* FE_AG_REG_ACE_AUR_CNT__A */
0x00, 0x00, /* FE_AG_REG_ACE_RUR_CNT__A */
WRBLOCK(FE_AG_REG_EGC_FLA_RGN__A, 5),
0x04, 0x00, /* FE_AG_REG_EGC_FLA_RGN__A */
0x1F, 0x00, /* FE_AG_REG_EGC_SLO_RGN__A */
0x00, 0x00, /* FE_AG_REG_EGC_JMP_PSN__A */
0x00, 0x00, /* FE_AG_REG_EGC_FLA_INC__A */
0x00, 0x00, /* FE_AG_REG_EGC_FLA_DEC__A */
WRBLOCK(FE_AG_REG_GC1_AGC_MAX__A, 2),
0xFF, 0x01, /* FE_AG_REG_GC1_AGC_MAX__A */
0x00, 0xFE, /* FE_AG_REG_GC1_AGC_MIN__A */
WRBLOCK(FE_AG_REG_IND_WIN__A, 29),
0x00, 0x00, /* FE_AG_REG_IND_WIN__A */
0x05, 0x00, /* FE_AG_REG_IND_THD_LOL__A */
0x0F, 0x00, /* FE_AG_REG_IND_THD_HIL__A */
0x00, 0x00, /* FE_AG_REG_IND_DEL__A don't care */
0x1E, 0x00, /* FE_AG_REG_IND_PD1_WRI__A */
0x0C, 0x00, /* FE_AG_REG_PDA_AUR_CNT__A */
0x00, 0x00, /* FE_AG_REG_PDA_RUR_CNT__A */
0x00, 0x00, /* FE_AG_REG_PDA_AVE_DAT__A don't care */
0x00, 0x00, /* FE_AG_REG_PDC_RUR_CNT__A */
0x01, 0x00, /* FE_AG_REG_PDC_SET_LVL__A */
0x02, 0x00, /* FE_AG_REG_PDC_FLA_RGN__A */
0x00, 0x00, /* FE_AG_REG_PDC_JMP_PSN__A don't care */
0xFF, 0xFF, /* FE_AG_REG_PDC_FLA_STP__A */
0xFF, 0xFF, /* FE_AG_REG_PDC_SLO_STP__A */
0x00, 0x1F, /* FE_AG_REG_PDC_PD2_WRI__A don't care */
0x00, 0x00, /* FE_AG_REG_PDC_MAP_DAT__A don't care */
0x02, 0x00, /* FE_AG_REG_PDC_MAX__A */
0x0C, 0x00, /* FE_AG_REG_TGA_AUR_CNT__A */
0x00, 0x00, /* FE_AG_REG_TGA_RUR_CNT__A */
0x00, 0x00, /* FE_AG_REG_TGA_AVE_DAT__A don't care */
0x00, 0x00, /* FE_AG_REG_TGC_RUR_CNT__A */
0x22, 0x00, /* FE_AG_REG_TGC_SET_LVL__A */
0x15, 0x00, /* FE_AG_REG_TGC_FLA_RGN__A */
0x00, 0x00, /* FE_AG_REG_TGC_JMP_PSN__A don't care */
0x01, 0x00, /* FE_AG_REG_TGC_FLA_STP__A */
0x0A, 0x00, /* FE_AG_REG_TGC_SLO_STP__A */
0x00, 0x00, /* FE_AG_REG_TGC_MAP_DAT__A don't care */
0x10, 0x00, /* FE_AG_REG_FGA_AUR_CNT__A */
0x10, 0x00, /* FE_AG_REG_FGA_RUR_CNT__A */
WRBLOCK(FE_AG_REG_BGC_FGC_WRI__A, 2),
0x00, 0x00, /* FE_AG_REG_BGC_FGC_WRI__A */
0x00, 0x00, /* FE_AG_REG_BGC_CGC_WRI__A */
WRBLOCK(FE_FD_REG_SCL__A, 3),
0x05, 0x00, /* FE_FD_REG_SCL__A */
0x03, 0x00, /* FE_FD_REG_MAX_LEV__A */
0x05, 0x00, /* FE_FD_REG_NR__A */
WRBLOCK(FE_CF_REG_SCL__A, 5),
0x16, 0x00, /* FE_CF_REG_SCL__A */
0x04, 0x00, /* FE_CF_REG_MAX_LEV__A */
0x06, 0x00, /* FE_CF_REG_NR__A */
0x00, 0x00, /* FE_CF_REG_IMP_VAL__A */
0x01, 0x00, /* FE_CF_REG_MEAS_VAL__A */
WRBLOCK(FE_CU_REG_FRM_CNT_RST__A, 2),
0x00, 0x08, /* FE_CU_REG_FRM_CNT_RST__A */
0x00, 0x00, /* FE_CU_REG_FRM_CNT_STR__A */
END_OF_TABLE
};
/* with PGA */
/* WR16COND( DRXD_WITH_PGA, FE_AG_REG_AG_PGA_MODE__A , 0x0004), */
/* without PGA */
/* WR16COND( DRXD_WITHOUT_PGA, FE_AG_REG_AG_PGA_MODE__A , 0x0001), */
/* WR16(FE_AG_REG_AG_AGC_SIO__A, (extAttr -> FeAgRegAgAgcSio), 0x0000 );*/
/* WR16(FE_AG_REG_AG_PWD__A ,(extAttr -> FeAgRegAgPwd), 0x0000 );*/
/* Second front-end init table for the A2 revision: enables measurement,
 * then starts (COMM_EXEC = 1) every front-end sub-block and programs the
 * AGC mode word. PGA- and board-dependent writes are handled elsewhere
 * (see the commented-out conditional writes above). */
u8 DRXD_InitFEA2_2[] = {
WR16(FE_AG_REG_CDR_RUR_CNT__A, 0x0010),
WR16(FE_AG_REG_FGM_WRI__A, 48),
/* Activate measurement, activate scale */
WR16(FE_FD_REG_MEAS_VAL__A, 0x0001),
WR16(FE_CU_REG_COMM_EXEC__A, 0x0001),
WR16(FE_CF_REG_COMM_EXEC__A, 0x0001),
WR16(FE_IF_REG_COMM_EXEC__A, 0x0001),
WR16(FE_FD_REG_COMM_EXEC__A, 0x0001),
WR16(FE_FS_REG_COMM_EXEC__A, 0x0001),
WR16(FE_AD_REG_COMM_EXEC__A, 0x0001),
WR16(FE_AG_REG_COMM_EXEC__A, 0x0001),
WR16(FE_AG_REG_AG_MODE_LOP__A, 0x895E),
END_OF_TABLE
};
/* First front-end init table for the B1 silicon revision (B_ register
 * prefix): A/D, AGC and CF base configuration. */
u8 DRXD_InitFEB1_1[] = {
WR16(B_FE_AD_REG_PD__A, 0x0000),
WR16(B_FE_AD_REG_CLKNEG__A, 0x0000),
WR16(B_FE_AG_REG_BGC_FGC_WRI__A, 0x0000),
WR16(B_FE_AG_REG_BGC_CGC_WRI__A, 0x0000),
WR16(B_FE_AG_REG_AG_MODE_LOP__A, 0x000a),
WR16(B_FE_AG_REG_IND_PD1_WRI__A, 35),
WR16(B_FE_AG_REG_IND_WIN__A, 0),
WR16(B_FE_AG_REG_IND_THD_LOL__A, 8),
WR16(B_FE_AG_REG_IND_THD_HIL__A, 8),
WR16(B_FE_CF_REG_IMP_VAL__A, 1),
WR16(B_FE_AG_REG_EGC_FLA_RGN__A, 7),
END_OF_TABLE
};
/* with PGA */
/* WR16(B_FE_AG_REG_AG_PGA_MODE__A , 0x0000, 0x0000); */
/* without PGA */
/* WR16(B_FE_AG_REG_AG_PGA_MODE__A ,
 B_FE_AG_REG_AG_PGA_MODE_PFN_PCN_AFY_REN, 0x0000);*/
/* WR16(B_FE_AG_REG_AG_AGC_SIO__A,(extAttr -> FeAgRegAgAgcSio), 0x0000 );*//*added HS 23-05-2005 */
/* WR16(B_FE_AG_REG_AG_PWD__A ,(extAttr -> FeAgRegAgPwd), 0x0000 );*/
/* Second front-end init table for the B1 revision: starts the front end
 * and sets up the RF-AGC (power/tuner gain detector) parameters. */
u8 DRXD_InitFEB1_2[] = {
WR16(B_FE_COMM_EXEC__A, 0x0001),
/* RF-AGC setup */
WR16(B_FE_AG_REG_PDA_AUR_CNT__A, 0x0C),
WR16(B_FE_AG_REG_PDC_SET_LVL__A, 0x01),
WR16(B_FE_AG_REG_PDC_FLA_RGN__A, 0x02),
WR16(B_FE_AG_REG_PDC_FLA_STP__A, 0xFFFF),
WR16(B_FE_AG_REG_PDC_SLO_STP__A, 0xFFFF),
WR16(B_FE_AG_REG_PDC_MAX__A, 0x02),
WR16(B_FE_AG_REG_TGA_AUR_CNT__A, 0x0C),
WR16(B_FE_AG_REG_TGC_SET_LVL__A, 0x22),
WR16(B_FE_AG_REG_TGC_FLA_RGN__A, 0x15),
WR16(B_FE_AG_REG_TGC_FLA_STP__A, 0x01),
WR16(B_FE_AG_REG_TGC_SLO_STP__A, 0x0A),
WR16(B_FE_CU_REG_DIV_NFC_CLP__A, 0),
WR16(B_FE_CU_REG_CTR_NFC_OCR__A, 25000),
WR16(B_FE_CU_REG_CTR_NFC_ICR__A, 1),
END_OF_TABLE
};
/* CP (carrier processing) init table for the A2 revision; starts the
 * block (COMM_EXEC = 1) at the end. */
u8 DRXD_InitCPA2[] = {
WRBLOCK(CP_REG_BR_SPL_OFFSET__A, 2),
0x07, 0x00, /* CP_REG_BR_SPL_OFFSET__A */
0x0A, 0x00, /* CP_REG_BR_STR_DEL__A */
WRBLOCK(CP_REG_RT_ANG_INC0__A, 4),
0x00, 0x00, /* CP_REG_RT_ANG_INC0__A */
0x00, 0x00, /* CP_REG_RT_ANG_INC1__A */
0x03, 0x00, /* CP_REG_RT_DETECT_ENA__A */
0x03, 0x00, /* CP_REG_RT_DETECT_TRH__A */
WRBLOCK(CP_REG_AC_NEXP_OFFS__A, 5),
0x32, 0x00, /* CP_REG_AC_NEXP_OFFS__A */
0x62, 0x00, /* CP_REG_AC_AVER_POW__A */
0x82, 0x00, /* CP_REG_AC_MAX_POW__A */
0x26, 0x00, /* CP_REG_AC_WEIGHT_MAN__A */
0x0F, 0x00, /* CP_REG_AC_WEIGHT_EXP__A */
WRBLOCK(CP_REG_AC_AMP_MODE__A, 2),
0x02, 0x00, /* CP_REG_AC_AMP_MODE__A */
0x01, 0x00, /* CP_REG_AC_AMP_FIX__A */
WR16(CP_REG_INTERVAL__A, 0x0005),
WR16(CP_REG_RT_EXP_MARG__A, 0x0004),
WR16(CP_REG_AC_ANG_MODE__A, 0x0003),
WR16(CP_REG_COMM_EXEC__A, 0x0001),
END_OF_TABLE
};
/* CP init table for the B1 revision: only the sample offset differs from
 * defaults; then the block is started. */
u8 DRXD_InitCPB1[] = {
WR16(B_CP_REG_BR_SPL_OFFSET__A, 0x0008),
WR16(B_CP_COMM_EXEC__A, 0x0001),
END_OF_TABLE
};
/* CE (channel estimation) init table for the A2 revision: power/noise
 * estimation, tap-update (LMS) steps, filter increments and IR input
 * selection. */
u8 DRXD_InitCEA2[] = {
WRBLOCK(CE_REG_AVG_POW__A, 4),
0x62, 0x00, /* CE_REG_AVG_POW__A */
0x78, 0x00, /* CE_REG_MAX_POW__A */
0x62, 0x00, /* CE_REG_ATT__A */
0x17, 0x00, /* CE_REG_NRED__A */
WRBLOCK(CE_REG_NE_ERR_SELECT__A, 2),
0x07, 0x00, /* CE_REG_NE_ERR_SELECT__A */
0xEB, 0xFF, /* CE_REG_NE_TD_CAL__A */
WRBLOCK(CE_REG_NE_MIXAVG__A, 2),
0x06, 0x00, /* CE_REG_NE_MIXAVG__A */
0x00, 0x00, /* CE_REG_NE_NUPD_OFS__A */
WRBLOCK(CE_REG_PE_NEXP_OFFS__A, 2),
0x00, 0x00, /* CE_REG_PE_NEXP_OFFS__A */
0x00, 0x00, /* CE_REG_PE_TIMESHIFT__A */
WRBLOCK(CE_REG_TP_A0_TAP_NEW__A, 3),
0x00, 0x01, /* CE_REG_TP_A0_TAP_NEW__A */
0x01, 0x00, /* CE_REG_TP_A0_TAP_NEW_VALID__A */
0x0E, 0x00, /* CE_REG_TP_A0_MU_LMS_STEP__A */
WRBLOCK(CE_REG_TP_A1_TAP_NEW__A, 3),
0x00, 0x00, /* CE_REG_TP_A1_TAP_NEW__A */
0x01, 0x00, /* CE_REG_TP_A1_TAP_NEW_VALID__A */
0x0A, 0x00, /* CE_REG_TP_A1_MU_LMS_STEP__A */
WRBLOCK(CE_REG_FI_SHT_INCR__A, 2),
0x12, 0x00, /* CE_REG_FI_SHT_INCR__A */
0x0C, 0x00, /* CE_REG_FI_EXP_NORM__A */
WRBLOCK(CE_REG_IR_INPUTSEL__A, 3),
0x00, 0x00, /* CE_REG_IR_INPUTSEL__A */
0x00, 0x00, /* CE_REG_IR_STARTPOS__A */
0xFF, 0x00, /* CE_REG_IR_NEXP_THRES__A */
WR16(CE_REG_TI_NEXP_OFFS__A, 0x0000),
END_OF_TABLE
};
/* CE init table for the B1 revision: enables phase-noise handling and
 * sets the FR PM value. */
u8 DRXD_InitCEB1[] = {
WR16(B_CE_REG_TI_PHN_ENABLE__A, 0x0001),
WR16(B_CE_REG_FR_PM_SET__A, 0x000D),
END_OF_TABLE
};
/* EQ (equalizer) init table for the A2 revision: quantizer/CSI thresholds,
 * SNR offset and carrier selection; starts the block at the end. */
u8 DRXD_InitEQA2[] = {
WRBLOCK(EQ_REG_OT_QNT_THRES0__A, 4),
0x1E, 0x00, /* EQ_REG_OT_QNT_THRES0__A */
0x1F, 0x00, /* EQ_REG_OT_QNT_THRES1__A */
0x06, 0x00, /* EQ_REG_OT_CSI_STEP__A */
0x02, 0x00, /* EQ_REG_OT_CSI_OFFSET__A */
WR16(EQ_REG_TD_REQ_SMB_CNT__A, 0x0200),
WR16(EQ_REG_IS_CLIP_EXP__A, 0x001F),
WR16(EQ_REG_SN_OFFSET__A, (u16) (-7)),
WR16(EQ_REG_RC_SEL_CAR__A, 0x0002),
WR16(EQ_REG_COMM_EXEC__A, 0x0001),
END_OF_TABLE
};
/* EQ init table for the B1 revision: defaults are acceptable, only start
 * the block. */
u8 DRXD_InitEQB1[] = {
WR16(B_EQ_REG_COMM_EXEC__A, 0x0001),
END_OF_TABLE
};
/* Clears the packet sync bytes in the EC_VD deinterleaver RAM (11 entries,
 * stride 17 words from offset 0x3b7) and in the EC_RS RAM. The same
 * sequence is inlined into the other EC init/reset tables below. */
u8 DRXD_ResetECRAM[] = {
/* Reset packet sync bytes in EC_VD ram */
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (0 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (1 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (2 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (3 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (4 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (5 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (6 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (7 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (8 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (9 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (10 * 17), 0x0000),
/* Reset packet sync bytes in EC_RS ram */
WR16(EC_RS_EC_RAM__A, 0x0000),
WR16(EC_RS_EC_RAM__A + 204, 0x0000),
END_OF_TABLE
};
/* EC (error correction / output) init table for the A2 revision: symbol
 * bus CSI/SNR limits, Reed-Solomon packet counters, output-clock and rate
 * control, monitor-bus power saving, an inlined copy of DRXD_ResetECRAM,
 * and finally starts the SB/VD/OD/RS sub-blocks. */
u8 DRXD_InitECA2[] = {
WRBLOCK(EC_SB_REG_CSI_HI__A, 6),
0x1F, 0x00, /* EC_SB_REG_CSI_HI__A */
0x1E, 0x00, /* EC_SB_REG_CSI_LO__A */
0x01, 0x00, /* EC_SB_REG_SMB_TGL__A */
0x7F, 0x00, /* EC_SB_REG_SNR_HI__A */
0x7F, 0x00, /* EC_SB_REG_SNR_MID__A */
0x7F, 0x00, /* EC_SB_REG_SNR_LO__A */
WRBLOCK(EC_RS_REG_REQ_PCK_CNT__A, 2),
0x00, 0x10, /* EC_RS_REG_REQ_PCK_CNT__A */
DATA16(EC_RS_REG_VAL_PCK), /* EC_RS_REG_VAL__A */
WRBLOCK(EC_OC_REG_TMD_TOP_MODE__A, 5),
0x03, 0x00, /* EC_OC_REG_TMD_TOP_MODE__A */
0xF4, 0x01, /* EC_OC_REG_TMD_TOP_CNT__A */
0xC0, 0x03, /* EC_OC_REG_TMD_HIL_MAR__A */
0x40, 0x00, /* EC_OC_REG_TMD_LOL_MAR__A */
0x03, 0x00, /* EC_OC_REG_TMD_CUR_CNT__A */
WRBLOCK(EC_OC_REG_AVR_ASH_CNT__A, 2),
0x06, 0x00, /* EC_OC_REG_AVR_ASH_CNT__A */
0x02, 0x00, /* EC_OC_REG_AVR_BSH_CNT__A */
WRBLOCK(EC_OC_REG_RCN_MODE__A, 7),
0x07, 0x00, /* EC_OC_REG_RCN_MODE__A */
0x00, 0x00, /* EC_OC_REG_RCN_CRA_LOP__A */
0xc0, 0x00, /* EC_OC_REG_RCN_CRA_HIP__A */
0x00, 0x10, /* EC_OC_REG_RCN_CST_LOP__A */
0x00, 0x00, /* EC_OC_REG_RCN_CST_HIP__A */
0xFF, 0x01, /* EC_OC_REG_RCN_SET_LVL__A */
0x0D, 0x00, /* EC_OC_REG_RCN_GAI_LVL__A */
WRBLOCK(EC_OC_REG_RCN_CLP_LOP__A, 2),
0x00, 0x00, /* EC_OC_REG_RCN_CLP_LOP__A */
0xC0, 0x00, /* EC_OC_REG_RCN_CLP_HIP__A */
WR16(EC_SB_REG_CSI_OFS__A, 0x0001),
WR16(EC_VD_REG_FORCE__A, 0x0002),
WR16(EC_VD_REG_REQ_SMB_CNT__A, 0x0001),
WR16(EC_VD_REG_RLK_ENA__A, 0x0001),
WR16(EC_OD_REG_SYNC__A, 0x0664),
WR16(EC_OC_REG_OC_MON_SIO__A, 0x0000),
WR16(EC_OC_REG_SNC_ISC_LVL__A, 0x0D0C),
/* Output zero on monitorbus pads, power saving */
WR16(EC_OC_REG_OCR_MON_UOS__A,
(EC_OC_REG_OCR_MON_UOS_DAT_0_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_1_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_2_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_3_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_4_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_5_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_6_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_7_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_8_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_9_ENABLE |
EC_OC_REG_OCR_MON_UOS_VAL_ENABLE |
EC_OC_REG_OCR_MON_UOS_CLK_ENABLE)),
WR16(EC_OC_REG_OCR_MON_WRI__A,
EC_OC_REG_OCR_MON_WRI_INIT),
/* CHK_ERROR(ResetECRAM(demod)); */
/* Reset packet sync bytes in EC_VD ram */
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (0 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (1 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (2 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (3 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (4 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (5 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (6 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (7 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (8 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (9 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (10 * 17), 0x0000),
/* Reset packet sync bytes in EC_RS ram */
WR16(EC_RS_EC_RAM__A, 0x0000),
WR16(EC_RS_EC_RAM__A + 204, 0x0000),
WR16(EC_SB_REG_COMM_EXEC__A, 0x0001),
WR16(EC_VD_REG_COMM_EXEC__A, 0x0001),
WR16(EC_OD_REG_COMM_EXEC__A, 0x0001),
WR16(EC_RS_REG_COMM_EXEC__A, 0x0001),
END_OF_TABLE
};
/* EC init table for the B1 revision: CSI offsets/limits, output-clock DTO
 * and rate-control shadow-register defaults, an inlined copy of
 * DRXD_ResetECRAM, and finally starts the SB/VD/OD/RS sub-blocks. */
u8 DRXD_InitECB1[] = {
WR16(B_EC_SB_REG_CSI_OFS0__A, 0x0001),
WR16(B_EC_SB_REG_CSI_OFS1__A, 0x0001),
WR16(B_EC_SB_REG_CSI_OFS2__A, 0x0001),
WR16(B_EC_SB_REG_CSI_LO__A, 0x000c),
WR16(B_EC_SB_REG_CSI_HI__A, 0x0018),
WR16(B_EC_SB_REG_SNR_HI__A, 0x007f),
WR16(B_EC_SB_REG_SNR_MID__A, 0x007f),
WR16(B_EC_SB_REG_SNR_LO__A, 0x007f),
WR16(B_EC_OC_REG_DTO_CLKMODE__A, 0x0002),
WR16(B_EC_OC_REG_DTO_PER__A, 0x0006),
WR16(B_EC_OC_REG_DTO_BUR__A, 0x0001),
WR16(B_EC_OC_REG_RCR_CLKMODE__A, 0x0000),
WR16(B_EC_OC_REG_RCN_GAI_LVL__A, 0x000D),
WR16(B_EC_OC_REG_OC_MPG_SIO__A, 0x0000),
/* Needed because shadow registers do not have correct default value */
WR16(B_EC_OC_REG_RCN_CST_LOP__A, 0x1000),
WR16(B_EC_OC_REG_RCN_CST_HIP__A, 0x0000),
WR16(B_EC_OC_REG_RCN_CRA_LOP__A, 0x0000),
WR16(B_EC_OC_REG_RCN_CRA_HIP__A, 0x00C0),
WR16(B_EC_OC_REG_RCN_CLP_LOP__A, 0x0000),
WR16(B_EC_OC_REG_RCN_CLP_HIP__A, 0x00C0),
WR16(B_EC_OC_REG_DTO_INC_LOP__A, 0x0000),
WR16(B_EC_OC_REG_DTO_INC_HIP__A, 0x00C0),
WR16(B_EC_OD_REG_SYNC__A, 0x0664),
WR16(B_EC_RS_REG_REQ_PCK_CNT__A, 0x1000),
/* CHK_ERROR(ResetECRAM(demod)); */
/* Reset packet sync bytes in EC_VD ram */
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (0 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (1 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (2 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (3 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (4 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (5 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (6 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (7 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (8 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (9 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (10 * 17), 0x0000),
/* Reset packet sync bytes in EC_RS ram */
WR16(EC_RS_EC_RAM__A, 0x0000),
WR16(EC_RS_EC_RAM__A + 204, 0x0000),
WR16(B_EC_SB_REG_COMM_EXEC__A, 0x0001),
WR16(B_EC_VD_REG_COMM_EXEC__A, 0x0001),
WR16(B_EC_OD_REG_COMM_EXEC__A, 0x0001),
WR16(B_EC_RS_REG_COMM_EXEC__A, 0x0001),
END_OF_TABLE
};
/* Reset table for the A2 EC block: stops the OC and OD sub-blocks,
 * rewrites the output-clock / rate-control configuration to the same
 * values used by DRXD_InitECA2, clears the packet sync RAM (inlined
 * DRXD_ResetECRAM), and restarts only the OD sub-block. */
u8 DRXD_ResetECA2[] = {
WR16(EC_OC_REG_COMM_EXEC__A, 0x0000),
WR16(EC_OD_REG_COMM_EXEC__A, 0x0000),
WRBLOCK(EC_OC_REG_TMD_TOP_MODE__A, 5),
0x03, 0x00, /* EC_OC_REG_TMD_TOP_MODE__A */
0xF4, 0x01, /* EC_OC_REG_TMD_TOP_CNT__A */
0xC0, 0x03, /* EC_OC_REG_TMD_HIL_MAR__A */
0x40, 0x00, /* EC_OC_REG_TMD_LOL_MAR__A */
0x03, 0x00, /* EC_OC_REG_TMD_CUR_CNT__A */
WRBLOCK(EC_OC_REG_AVR_ASH_CNT__A, 2),
0x06, 0x00, /* EC_OC_REG_AVR_ASH_CNT__A */
0x02, 0x00, /* EC_OC_REG_AVR_BSH_CNT__A */
WRBLOCK(EC_OC_REG_RCN_MODE__A, 7),
0x07, 0x00, /* EC_OC_REG_RCN_MODE__A */
0x00, 0x00, /* EC_OC_REG_RCN_CRA_LOP__A */
0xc0, 0x00, /* EC_OC_REG_RCN_CRA_HIP__A */
0x00, 0x10, /* EC_OC_REG_RCN_CST_LOP__A */
0x00, 0x00, /* EC_OC_REG_RCN_CST_HIP__A */
0xFF, 0x01, /* EC_OC_REG_RCN_SET_LVL__A */
0x0D, 0x00, /* EC_OC_REG_RCN_GAI_LVL__A */
WRBLOCK(EC_OC_REG_RCN_CLP_LOP__A, 2),
0x00, 0x00, /* EC_OC_REG_RCN_CLP_LOP__A */
0xC0, 0x00, /* EC_OC_REG_RCN_CLP_HIP__A */
WR16(EC_OD_REG_SYNC__A, 0x0664),
WR16(EC_OC_REG_OC_MON_SIO__A, 0x0000),
WR16(EC_OC_REG_SNC_ISC_LVL__A, 0x0D0C),
/* Output zero on monitorbus pads, power saving */
WR16(EC_OC_REG_OCR_MON_UOS__A,
(EC_OC_REG_OCR_MON_UOS_DAT_0_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_1_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_2_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_3_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_4_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_5_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_6_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_7_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_8_ENABLE |
EC_OC_REG_OCR_MON_UOS_DAT_9_ENABLE |
EC_OC_REG_OCR_MON_UOS_VAL_ENABLE |
EC_OC_REG_OCR_MON_UOS_CLK_ENABLE)),
WR16(EC_OC_REG_OCR_MON_WRI__A,
EC_OC_REG_OCR_MON_WRI_INIT),
/* CHK_ERROR(ResetECRAM(demod)); */
/* Reset packet sync bytes in EC_VD ram */
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (0 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (1 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (2 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (3 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (4 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (5 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (6 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (7 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (8 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (9 * 17), 0x0000),
WR16(EC_OD_DEINT_RAM__A + 0x3b7 + (10 * 17), 0x0000),
/* Reset packet sync bytes in EC_RS ram */
WR16(EC_RS_EC_RAM__A, 0x0000),
WR16(EC_RS_EC_RAM__A + 204, 0x0000),
WR16(EC_OD_REG_COMM_EXEC__A, 0x0001),
END_OF_TABLE
};
/* Init table for the SC (scan/system controller) block: halt it and clear
 * its state word. The SC is deliberately left stopped here. */
u8 DRXD_InitSC[] = {
WR16(SC_COMM_EXEC__A, 0),
WR16(SC_COMM_STATE__A, 0),
#ifdef COMPILE_FOR_QT
WR16(SC_RA_RAM_BE_OPT_DELAY__A, 0x100),
#endif
/* SC is not started, this is done in SetChannels() */
END_OF_TABLE
};
/* Diversity settings */
/*
 * Diversity init script for the FRONT demodulator in a two-chip
 * diversity chain: RF in, diversity stream out (no combining here).
 * Programs frequency-scan config, channel-estimation impulse-response
 * window lengths/increments for 2K and 8K FFT modes, loop filters, and
 * enables the diversity output (B_CC_REG_DIVERSITY__A = 1).
 */
u8 DRXD_InitDiversityFront[] = {
/* Start demod ********* RF in , diversity out **************************** */
WR16(B_SC_RA_RAM_CONFIG__A, B_SC_RA_RAM_CONFIG_FR_ENABLE__M |
B_SC_RA_RAM_CONFIG_FREQSCAN__M),
WR16(B_SC_RA_RAM_LC_ABS_2K__A, 0x7),
WR16(B_SC_RA_RAM_LC_ABS_8K__A, 0x7),
/* Impulse-response window setup: FREQINC/KAISINC scale as powers of two
 * with the chosen IRLEN_* length -- keep the three writes per mode in sync. */
WR16(B_SC_RA_RAM_IR_COARSE_8K_LENGTH__A, IRLEN_COARSE_8K),
WR16(B_SC_RA_RAM_IR_COARSE_8K_FREQINC__A, 1 << (11 - IRLEN_COARSE_8K)),
WR16(B_SC_RA_RAM_IR_COARSE_8K_KAISINC__A, 1 << (17 - IRLEN_COARSE_8K)),
WR16(B_SC_RA_RAM_IR_FINE_8K_LENGTH__A, IRLEN_FINE_8K),
WR16(B_SC_RA_RAM_IR_FINE_8K_FREQINC__A, 1 << (11 - IRLEN_FINE_8K)),
WR16(B_SC_RA_RAM_IR_FINE_8K_KAISINC__A, 1 << (17 - IRLEN_FINE_8K)),
WR16(B_SC_RA_RAM_IR_COARSE_2K_LENGTH__A, IRLEN_COARSE_2K),
WR16(B_SC_RA_RAM_IR_COARSE_2K_FREQINC__A, 1 << (11 - IRLEN_COARSE_2K)),
WR16(B_SC_RA_RAM_IR_COARSE_2K_KAISINC__A, 1 << (17 - IRLEN_COARSE_2K)),
WR16(B_SC_RA_RAM_IR_FINE_2K_LENGTH__A, IRLEN_FINE_2K),
WR16(B_SC_RA_RAM_IR_FINE_2K_FREQINC__A, 1 << (11 - IRLEN_FINE_2K)),
WR16(B_SC_RA_RAM_IR_FINE_2K_KAISINC__A, 1 << (17 - IRLEN_FINE_2K)),
WR16(B_LC_RA_RAM_FILTER_CRMM_A__A, 7),
WR16(B_LC_RA_RAM_FILTER_CRMM_B__A, 4),
WR16(B_LC_RA_RAM_FILTER_SRMM_A__A, 7),
WR16(B_LC_RA_RAM_FILTER_SRMM_B__A, 4),
WR16(B_LC_RA_RAM_FILTER_SYM_SET__A, 500),
WR16(B_CC_REG_DIVERSITY__A, 0x0001), /* enable diversity path */
WR16(B_EC_OC_REG_OC_MODE_HIP__A, 0x0010),
WR16(B_EQ_REG_RC_SEL_CAR__A, B_EQ_REG_RC_SEL_CAR_PASS_B_CE |
B_EQ_REG_RC_SEL_CAR_LOCAL_B_CE | B_EQ_REG_RC_SEL_CAR_MEAS_B_CE),
/* 0x2a ), *//* CE to PASS mux */
END_OF_TABLE
};
/*
 * Diversity init script for the END demodulator in a two-chip diversity
 * chain: combines its own RF input with the diversity input from the
 * front chip and outputs the MPEG transport stream.  Largely mirrors
 * DRXD_InitDiversityFront, but additionally enables echo handling,
 * timing-slave mode and blanking via B_SC_RA_RAM_CONFIG__A.
 */
u8 DRXD_InitDiversityEnd[] = {
/* End demod *********** combining RF in and diversity in, MPEG TS out **** */
/* disable near/far; switch on timing slave mode */
WR16(B_SC_RA_RAM_CONFIG__A, B_SC_RA_RAM_CONFIG_FR_ENABLE__M |
B_SC_RA_RAM_CONFIG_FREQSCAN__M |
B_SC_RA_RAM_CONFIG_DIV_ECHO_ENABLE__M |
B_SC_RA_RAM_CONFIG_SLAVE__M |
B_SC_RA_RAM_CONFIG_DIV_BLANK_ENABLE__M
/* MV from CtrlDiversity */
),
#ifdef DRXDDIV_SRMM_SLAVING
WR16(SC_RA_RAM_LC_ABS_2K__A, 0x3c7),
WR16(SC_RA_RAM_LC_ABS_8K__A, 0x3c7),
#else
WR16(SC_RA_RAM_LC_ABS_2K__A, 0x7),
WR16(SC_RA_RAM_LC_ABS_8K__A, 0x7),
#endif
/* Same IR window scheme as the front script: FREQINC/KAISINC are
 * power-of-two functions of the IRLEN_* length constants. */
WR16(B_SC_RA_RAM_IR_COARSE_8K_LENGTH__A, IRLEN_COARSE_8K),
WR16(B_SC_RA_RAM_IR_COARSE_8K_FREQINC__A, 1 << (11 - IRLEN_COARSE_8K)),
WR16(B_SC_RA_RAM_IR_COARSE_8K_KAISINC__A, 1 << (17 - IRLEN_COARSE_8K)),
WR16(B_SC_RA_RAM_IR_FINE_8K_LENGTH__A, IRLEN_FINE_8K),
WR16(B_SC_RA_RAM_IR_FINE_8K_FREQINC__A, 1 << (11 - IRLEN_FINE_8K)),
WR16(B_SC_RA_RAM_IR_FINE_8K_KAISINC__A, 1 << (17 - IRLEN_FINE_8K)),
WR16(B_SC_RA_RAM_IR_COARSE_2K_LENGTH__A, IRLEN_COARSE_2K),
WR16(B_SC_RA_RAM_IR_COARSE_2K_FREQINC__A, 1 << (11 - IRLEN_COARSE_2K)),
WR16(B_SC_RA_RAM_IR_COARSE_2K_KAISINC__A, 1 << (17 - IRLEN_COARSE_2K)),
WR16(B_SC_RA_RAM_IR_FINE_2K_LENGTH__A, IRLEN_FINE_2K),
WR16(B_SC_RA_RAM_IR_FINE_2K_FREQINC__A, 1 << (11 - IRLEN_FINE_2K)),
WR16(B_SC_RA_RAM_IR_FINE_2K_KAISINC__A, 1 << (17 - IRLEN_FINE_2K)),
WR16(B_LC_RA_RAM_FILTER_CRMM_A__A, 7),
WR16(B_LC_RA_RAM_FILTER_CRMM_B__A, 4),
WR16(B_LC_RA_RAM_FILTER_SRMM_A__A, 7),
WR16(B_LC_RA_RAM_FILTER_SRMM_B__A, 4),
WR16(B_LC_RA_RAM_FILTER_SYM_SET__A, 500),
WR16(B_CC_REG_DIVERSITY__A, 0x0001), /* enable diversity path */
END_OF_TABLE
};
/*
 * Script that undoes the diversity configuration: every register touched
 * by the diversity init tables is written back to its documented reset
 * default (the matching *__PRE constant), the diversity path is switched
 * off (B_CC_REG_DIVERSITY__A = 0) and the carrier-recovery mux is
 * returned to its init value (combining disabled).
 */
u8 DRXD_DisableDiversity[] = {
WR16(B_SC_RA_RAM_LC_ABS_2K__A, B_SC_RA_RAM_LC_ABS_2K__PRE),
WR16(B_SC_RA_RAM_LC_ABS_8K__A, B_SC_RA_RAM_LC_ABS_8K__PRE),
WR16(B_SC_RA_RAM_IR_COARSE_8K_LENGTH__A,
B_SC_RA_RAM_IR_COARSE_8K_LENGTH__PRE),
WR16(B_SC_RA_RAM_IR_COARSE_8K_FREQINC__A,
B_SC_RA_RAM_IR_COARSE_8K_FREQINC__PRE),
WR16(B_SC_RA_RAM_IR_COARSE_8K_KAISINC__A,
B_SC_RA_RAM_IR_COARSE_8K_KAISINC__PRE),
WR16(B_SC_RA_RAM_IR_FINE_8K_LENGTH__A,
B_SC_RA_RAM_IR_FINE_8K_LENGTH__PRE),
WR16(B_SC_RA_RAM_IR_FINE_8K_FREQINC__A,
B_SC_RA_RAM_IR_FINE_8K_FREQINC__PRE),
WR16(B_SC_RA_RAM_IR_FINE_8K_KAISINC__A,
B_SC_RA_RAM_IR_FINE_8K_KAISINC__PRE),
WR16(B_SC_RA_RAM_IR_COARSE_2K_LENGTH__A,
B_SC_RA_RAM_IR_COARSE_2K_LENGTH__PRE),
WR16(B_SC_RA_RAM_IR_COARSE_2K_FREQINC__A,
B_SC_RA_RAM_IR_COARSE_2K_FREQINC__PRE),
WR16(B_SC_RA_RAM_IR_COARSE_2K_KAISINC__A,
B_SC_RA_RAM_IR_COARSE_2K_KAISINC__PRE),
WR16(B_SC_RA_RAM_IR_FINE_2K_LENGTH__A,
B_SC_RA_RAM_IR_FINE_2K_LENGTH__PRE),
WR16(B_SC_RA_RAM_IR_FINE_2K_FREQINC__A,
B_SC_RA_RAM_IR_FINE_2K_FREQINC__PRE),
WR16(B_SC_RA_RAM_IR_FINE_2K_KAISINC__A,
B_SC_RA_RAM_IR_FINE_2K_KAISINC__PRE),
WR16(B_LC_RA_RAM_FILTER_CRMM_A__A, B_LC_RA_RAM_FILTER_CRMM_A__PRE),
WR16(B_LC_RA_RAM_FILTER_CRMM_B__A, B_LC_RA_RAM_FILTER_CRMM_B__PRE),
WR16(B_LC_RA_RAM_FILTER_SRMM_A__A, B_LC_RA_RAM_FILTER_SRMM_A__PRE),
WR16(B_LC_RA_RAM_FILTER_SRMM_B__A, B_LC_RA_RAM_FILTER_SRMM_B__PRE),
WR16(B_LC_RA_RAM_FILTER_SYM_SET__A, B_LC_RA_RAM_FILTER_SYM_SET__PRE),
WR16(B_CC_REG_DIVERSITY__A, 0x0000), /* diversity path off */
WR16(B_EQ_REG_RC_SEL_CAR__A, B_EQ_REG_RC_SEL_CAR_INIT), /* combining disabled */
END_OF_TABLE
};
/*
 * Start script for the FRONT demodulator in diversity mode: RF in,
 * diversity stream out, no combining on this chip.  Routes the equalizer
 * to the monitor-bus output and selects the "pass" carrier mux.
 */
u8 DRXD_StartDiversityFront[] = {
/* Start demod, RF in and diversity out, no combining */
WR16(B_FE_CF_REG_IMP_VAL__A, 0x0), /* impulse value cleared -- presumably disables impulse handling; verify */
WR16(B_FE_AD_REG_FDB_IN__A, 0x0),
WR16(B_FE_AD_REG_INVEXT__A, 0x0),
WR16(B_EQ_REG_COMM_MB__A, 0x12), /* EQ to MB out */
WR16(B_EQ_REG_RC_SEL_CAR__A, B_EQ_REG_RC_SEL_CAR_PASS_B_CE | /* CE to PASS mux */
B_EQ_REG_RC_SEL_CAR_LOCAL_B_CE | B_EQ_REG_RC_SEL_CAR_MEAS_B_CE),
WR16(SC_RA_RAM_ECHO_SHIFT_LIM__A, 2),
END_OF_TABLE
};
/*
 * Start script for the END demodulator in diversity mode: combines the
 * local RF input with the diversity input and outputs MPEG TS.  Enables
 * combining in the equalizer carrier mux (DIV_ON).
 */
u8 DRXD_StartDiversityEnd[] = {
/* End demod, combining RF in and diversity in, MPEG TS out */
WR16(B_FE_CF_REG_IMP_VAL__A, 0x0), /* disable impulse noise cruncher */
WR16(B_FE_AD_REG_INVEXT__A, 0x0), /* clock inversion (for sohard board) */
WR16(B_CP_REG_BR_STR_DEL__A, 10), /* apparently no mb delay matching is best */
WR16(B_EQ_REG_RC_SEL_CAR__A, B_EQ_REG_RC_SEL_CAR_DIV_ON | /* org = 0x81 combining enabled */
B_EQ_REG_RC_SEL_CAR_MEAS_A_CC |
B_EQ_REG_RC_SEL_CAR_PASS_A_CC | B_EQ_REG_RC_SEL_CAR_LOCAL_A_CC),
END_OF_TABLE
};
/*
 * Diversity delay table for 8 MHz channels.  One entry per
 * (FFT mode, guard-interval divisor) pair -- 2K/8K with 1/32..1/4 --
 * each value reduced by a fixed 50-unit margin.  Units are not visible
 * here; presumably sample or symbol clocks -- confirm against the
 * DRX-D register documentation.
 */
u8 DRXD_DiversityDelay8MHZ[] = {
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_32__A, 1150 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_16__A, 1100 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_8__A, 1000 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_4__A, 800 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_32__A, 5420 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_16__A, 5200 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_8__A, 4800 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_4__A, 4000 - 50),
END_OF_TABLE
};
/*
 * Diversity delay table for 6 MHz channels (same layout as the 8 MHz
 * table: one entry per FFT-mode/guard pair, each value minus a 50-unit
 * margin).  The original author notes it is also adequate for 7 MHz.
 */
u8 DRXD_DiversityDelay6MHZ[] = /* also used ok for 7 MHz */
{
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_32__A, 1100 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_16__A, 1000 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_8__A, 900 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_2K_4__A, 600 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_32__A, 5300 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_16__A, 5000 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_8__A, 4500 - 50),
WR16(B_SC_RA_RAM_DIVERSITY_DELAY_8K_4__A, 3500 - 50),
END_OF_TABLE
};
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.