repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
ashyx/Samsung_Galaxy_Tab_A_kernel | sound/soc/codecs/wsa881x-tables.c | 278 | 5146 | /*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/regmap.h>
#include <linux/device.h>
#include "wsa881x-registers.h"
/*
 * Readable-register map for the WSA881x speaker amplifier.
 *
 * Indexed by register address (constants from wsa881x-registers.h); a
 * value of 1 marks the register as readable.  Every address not listed
 * here stays 0 (not readable) through static initialization.
 */
const u8 wsa881x_reg_readable[WSA881X_CACHE_SIZE] = {
	/* Digital/codec registers */
	[WSA881X_CHIP_ID0] = 1,
	[WSA881X_CHIP_ID1] = 1,
	[WSA881X_CHIP_ID2] = 1,
	[WSA881X_CHIP_ID3] = 1,
	[WSA881X_BUS_ID] = 1,
	[WSA881X_CDC_RST_CTL] = 1,
	[WSA881X_CDC_TOP_CLK_CTL] = 1,
	[WSA881X_CDC_ANA_CLK_CTL] = 1,
	[WSA881X_CDC_DIG_CLK_CTL] = 1,
	[WSA881X_CLOCK_CONFIG] = 1,
	[WSA881X_ANA_CTL] = 1,
	[WSA881X_SWR_RESET_EN] = 1,
	[WSA881X_RESET_CTL] = 1,
	[WSA881X_TADC_VALUE_CTL] = 1,
	[WSA881X_TEMP_DETECT_CTL] = 1,
	[WSA881X_TEMP_MSB] = 1,
	[WSA881X_TEMP_LSB] = 1,
	[WSA881X_TEMP_CONFIG0] = 1,
	[WSA881X_TEMP_CONFIG1] = 1,
	[WSA881X_CDC_CLIP_CTL] = 1,
	[WSA881X_SDM_PDM9_LSB] = 1,
	[WSA881X_SDM_PDM9_MSB] = 1,
	[WSA881X_CDC_RX_CTL] = 1,
	[WSA881X_DEM_BYPASS_DATA0] = 1,
	[WSA881X_DEM_BYPASS_DATA1] = 1,
	[WSA881X_DEM_BYPASS_DATA2] = 1,
	[WSA881X_DEM_BYPASS_DATA3] = 1,
	[WSA881X_OTP_CTRL0] = 1,
	[WSA881X_OTP_CTRL1] = 1,
	[WSA881X_HDRIVE_CTL_GROUP1] = 1,
	[WSA881X_INTR_MODE] = 1,
	[WSA881X_INTR_MASK] = 1,
	[WSA881X_INTR_STATUS] = 1,
	[WSA881X_INTR_CLEAR] = 1,
	[WSA881X_INTR_LEVEL] = 1,
	[WSA881X_INTR_SET] = 1,
	[WSA881X_INTR_TEST] = 1,
	[WSA881X_PDM_TEST_MODE] = 1,
	[WSA881X_ATE_TEST_MODE] = 1,
	[WSA881X_PIN_CTL_MODE] = 1,
	[WSA881X_PIN_CTL_OE] = 1,
	[WSA881X_PIN_WDATA_IOPAD] = 1,
	[WSA881X_PIN_STATUS] = 1,
	[WSA881X_DIG_DEBUG_MODE] = 1,
	[WSA881X_DIG_DEBUG_SEL] = 1,
	[WSA881X_DIG_DEBUG_EN] = 1,
	[WSA881X_SWR_HM_TEST1] = 1,
	[WSA881X_SWR_HM_TEST2] = 1,
	[WSA881X_TEMP_DETECT_DBG_CTL] = 1,
	[WSA881X_TEMP_DEBUG_MSB] = 1,
	[WSA881X_TEMP_DEBUG_LSB] = 1,
	[WSA881X_SAMPLE_EDGE_SEL] = 1,
	[WSA881X_IOPAD_CTL] = 1,
	[WSA881X_SPARE_0] = 1,
	[WSA881X_SPARE_1] = 1,
	[WSA881X_SPARE_2] = 1,
	/* OTP (one-time-programmable) registers */
	[WSA881X_OTP_REG_0] = 1,
	[WSA881X_OTP_REG_1] = 1,
	[WSA881X_OTP_REG_2] = 1,
	[WSA881X_OTP_REG_3] = 1,
	[WSA881X_OTP_REG_4] = 1,
	[WSA881X_OTP_REG_5] = 1,
	[WSA881X_OTP_REG_6] = 1,
	[WSA881X_OTP_REG_7] = 1,
	[WSA881X_OTP_REG_8] = 1,
	[WSA881X_OTP_REG_9] = 1,
	[WSA881X_OTP_REG_10] = 1,
	[WSA881X_OTP_REG_11] = 1,
	[WSA881X_OTP_REG_12] = 1,
	[WSA881X_OTP_REG_13] = 1,
	[WSA881X_OTP_REG_14] = 1,
	[WSA881X_OTP_REG_15] = 1,
	[WSA881X_OTP_REG_16] = 1,
	[WSA881X_OTP_REG_17] = 1,
	[WSA881X_OTP_REG_18] = 1,
	[WSA881X_OTP_REG_19] = 1,
	[WSA881X_OTP_REG_20] = 1,
	[WSA881X_OTP_REG_21] = 1,
	[WSA881X_OTP_REG_22] = 1,
	[WSA881X_OTP_REG_23] = 1,
	[WSA881X_OTP_REG_24] = 1,
	[WSA881X_OTP_REG_25] = 1,
	[WSA881X_OTP_REG_26] = 1,
	[WSA881X_OTP_REG_27] = 1,
	[WSA881X_OTP_REG_28] = 1,
	[WSA881X_OTP_REG_29] = 1,
	[WSA881X_OTP_REG_30] = 1,
	[WSA881X_OTP_REG_31] = 1,
	/* note the gap: OTP_REG_32..62 are not readable */
	[WSA881X_OTP_REG_63] = 1,
	/* Analog Registers */
	[WSA881X_BIAS_REF_CTRL] = 1,
	[WSA881X_BIAS_TEST] = 1,
	[WSA881X_BIAS_BIAS] = 1,
	[WSA881X_TEMP_OP] = 1,
	[WSA881X_TEMP_IREF_CTRL] = 1,
	[WSA881X_TEMP_ISENS_CTRL] = 1,
	[WSA881X_TEMP_CLK_CTRL] = 1,
	[WSA881X_TEMP_TEST] = 1,
	[WSA881X_TEMP_BIAS] = 1,
	[WSA881X_TEMP_ADC_CTRL] = 1,
	[WSA881X_TEMP_DOUT_MSB] = 1,
	[WSA881X_TEMP_DOUT_LSB] = 1,
	[WSA881X_ADC_EN_MODU_V] = 1,
	[WSA881X_ADC_EN_MODU_I] = 1,
	[WSA881X_ADC_EN_DET_TEST_V] = 1,
	[WSA881X_ADC_EN_DET_TEST_I] = 1,
	[WSA881X_ADC_SEL_IBIAS] = 1,
	[WSA881X_ADC_EN_SEL_IBAIS] = 1,
	[WSA881X_SPKR_DRV_EN] = 1,
	[WSA881X_SPKR_DRV_GAIN] = 1,
	[WSA881X_SPKR_DAC_CTL] = 1,
	[WSA881X_SPKR_DRV_DBG] = 1,
	[WSA881X_SPKR_PWRSTG_DBG] = 1,
	[WSA881X_SPKR_OCP_CTL] = 1,
	[WSA881X_SPKR_CLIP_CTL] = 1,
	[WSA881X_SPKR_BBM_CTL] = 1,
	[WSA881X_SPKR_MISC_CTL1] = 1,
	[WSA881X_SPKR_MISC_CTL2] = 1,
	[WSA881X_SPKR_BIAS_INT] = 1,
	[WSA881X_SPKR_PA_INT] = 1,
	[WSA881X_SPKR_BIAS_CAL] = 1,
	[WSA881X_SPKR_BIAS_PSRR] = 1,
	[WSA881X_SPKR_STATUS1] = 1,
	[WSA881X_SPKR_STATUS2] = 1,
	[WSA881X_BOOST_EN_CTL] = 1,
	[WSA881X_BOOST_CURRENT_LIMIT] = 1,
	[WSA881X_BOOST_PS_CTL] = 1,
	[WSA881X_BOOST_PRESET_OUT1] = 1,
	[WSA881X_BOOST_PRESET_OUT2] = 1,
	[WSA881X_BOOST_FORCE_OUT] = 1,
	[WSA881X_BOOST_LDO_PROG] = 1,
	[WSA881X_BOOST_SLOPE_COMP_ISENSE_FB] = 1,
	[WSA881X_BOOST_RON_CTL] = 1,
	[WSA881X_BOOST_LOOP_STABILITY] = 1,
	[WSA881X_BOOST_ZX_CTL] = 1,
	[WSA881X_BOOST_START_CTL] = 1,
	[WSA881X_BOOST_MISC1_CTL] = 1,
	[WSA881X_BOOST_MISC2_CTL] = 1,
	[WSA881X_BOOST_MISC3_CTL] = 1,
	[WSA881X_BOOST_ATEST_CTL] = 1,
	[WSA881X_SPKR_PROT_FE_GAIN] = 1,
	[WSA881X_SPKR_PROT_FE_CM_LDO_SET] = 1,
	[WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET1] = 1,
	[WSA881X_SPKR_PROT_FE_ISENSE_BIAS_SET2] = 1,
	[WSA881X_SPKR_PROT_ATEST1] = 1,
	[WSA881X_SPKR_PROT_ATEST2] = 1,
	[WSA881X_SPKR_PROT_FE_VSENSE_VCM] = 1,
	[WSA881X_SPKR_PROT_FE_VSENSE_BIAS_SET1] = 1,
	[WSA881X_BONGO_RESRV_REG1] = 1,
	[WSA881X_BONGO_RESRV_REG2] = 1,
	[WSA881X_SPKR_PROT_SAR] = 1,
	[WSA881X_SPKR_STATUS3] = 1,
};
| gpl-2.0 |
tudorsirb/lge_kernel_p700 | arch/arm/mach-msm/idle_stats.c | 790 | 13992 | /* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/sched.h>
#include <asm/uaccess.h>
#include "idle_stats.h"
#include <mach/cpuidle.h>
/******************************************************************************
* Debug Definitions
*****************************************************************************/
/* debug_mask bits: select which pr_info tracing is enabled */
enum {
	MSM_IDLE_STATS_DEBUG_API = BIT(0),	/* trace open/release/ioctl */
	MSM_IDLE_STATS_DEBUG_SIGNAL = BIT(1),	/* trace signal interruptions */
	MSM_IDLE_STATS_DEBUG_MIGRATION = BIT(2),	/* trace timer migration */
};
static int msm_idle_stats_debug_mask;
module_param_named(
	debug_mask, msm_idle_stats_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
);
/******************************************************************************
 * Driver Definitions
 *****************************************************************************/
#define MSM_IDLE_STATS_DRIVER_NAME "msm_idle_stats"
static dev_t msm_idle_stats_dev_nr;
static struct cdev msm_idle_stats_cdev;
static struct class *msm_idle_stats_class;
/******************************************************************************
 * Device Definitions
 *****************************************************************************/
/* Per-cpu collection state; one instance per open device node. */
struct msm_idle_stats_device {
	unsigned int cpu;		/* cpu this device monitors */
	struct mutex mutex;		/* serializes the collect ioctl */
	struct notifier_block notifier;	/* cpuidle enter/exit hook */
	int64_t collection_expiration;	/* us timestamp ending the collection */
	struct msm_idle_stats stats;	/* data exchanged with userspace */
	struct hrtimer timer;		/* busy/collection timeout timer */
	wait_queue_head_t wait_q;	/* collect ioctl sleeps here */
	atomic_t collecting;		/* 1 while a collection is active */
};
/* protects the per-cpu device pointers below */
static DEFINE_SPINLOCK(msm_idle_stats_devs_lock);
static DEFINE_PER_CPU(struct msm_idle_stats_device *, msm_idle_stats_devs);
/******************************************************************************
*
*****************************************************************************/
/*
 * Clamp a measured interval (in us) to the range [1, UINT_MAX] so it
 * fits the 32-bit interval slots handed back to userspace.
 */
static inline int64_t msm_idle_stats_bound_interval(int64_t interval)
{
	if (interval > UINT_MAX)
		return UINT_MAX;

	return (interval <= 0) ? 1 : interval;
}
/*
 * Hrtimer callback: ends the current collection when either the busy
 * timer or the collection timer expires.  Runs on whichever cpu the
 * timer actually fires on.
 */
static enum hrtimer_restart msm_idle_stats_timer(struct hrtimer *timer)
{
	struct msm_idle_stats_device *stats_dev;
	unsigned int cpu;
	int64_t now;
	int64_t interval;

	stats_dev = container_of(timer, struct msm_idle_stats_device, timer);
	cpu = get_cpu();

	/*
	 * The timer is started pinned, but it may still fire on another
	 * cpu after a migration; report that to userspace instead of
	 * producing bogus interval data.
	 */
	if (cpu != stats_dev->cpu) {
		if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_MIGRATION)
			pr_info("%s: timer migrated from cpu%u to cpu%u\n",
				__func__, stats_dev->cpu, cpu);

		stats_dev->stats.event = MSM_IDLE_STATS_EVENT_TIMER_MIGRATED;
		goto timer_exit;
	}

	now = ktime_to_us(ktime_get());
	interval = now - stats_dev->stats.last_busy_start;

	/* the "- 1" tolerates the hrtimer firing marginally early */
	if (stats_dev->stats.busy_timer > 0 &&
			interval >= stats_dev->stats.busy_timer - 1)
		stats_dev->stats.event =
			MSM_IDLE_STATS_EVENT_BUSY_TIMER_EXPIRED;
	else
		stats_dev->stats.event =
			MSM_IDLE_STATS_EVENT_COLLECTION_TIMER_EXPIRED;

timer_exit:
	/* end the collection and wake the reader blocked in _collect() */
	atomic_set(&stats_dev->collecting, 0);
	wake_up_interruptible(&stats_dev->wait_q);

	put_cpu();
	return HRTIMER_NORESTART;
}
/*
 * Called on idle entry (via the cpuidle notifier).  Records the busy
 * interval that just ended and stamps the idle start time.  Must run on
 * stats_dev->cpu; silently does nothing when no collection is active.
 */
static void msm_idle_stats_pre_idle(struct msm_idle_stats_device *stats_dev)
{
	int64_t now;
	int64_t interval;

	if (smp_processor_id() != stats_dev->cpu) {
		WARN_ON(1);
		return;
	}

	if (!atomic_read(&stats_dev->collecting))
		return;

	/* no busy timeout can occur while the cpu is idle */
	hrtimer_cancel(&stats_dev->timer);

	now = ktime_to_us(ktime_get());
	interval = now - stats_dev->stats.last_busy_start;
	interval = msm_idle_stats_bound_interval(interval);

	stats_dev->stats.busy_intervals[stats_dev->stats.nr_collected]
		= (__u32) interval;
	stats_dev->stats.last_idle_start = now;
}
/*
 * Called on idle exit (via the cpuidle notifier).  Records the idle
 * interval that just ended, then either re-arms the timer or finishes
 * the collection when the interval arrays are full or the collection
 * window has expired.  Must run on stats_dev->cpu.
 */
static void msm_idle_stats_post_idle(struct msm_idle_stats_device *stats_dev)
{
	int64_t now;
	int64_t interval;
	int64_t timer_interval;
	int rc;

	if (smp_processor_id() != stats_dev->cpu) {
		WARN_ON(1);
		return;
	}

	if (!atomic_read(&stats_dev->collecting))
		return;

	now = ktime_to_us(ktime_get());
	interval = now - stats_dev->stats.last_idle_start;
	interval = msm_idle_stats_bound_interval(interval);

	stats_dev->stats.idle_intervals[stats_dev->stats.nr_collected]
		= (__u32) interval;
	stats_dev->stats.nr_collected++;
	stats_dev->stats.last_busy_start = now;

	if (stats_dev->stats.nr_collected >= MSM_IDLE_STATS_NR_MAX_INTERVALS) {
		stats_dev->stats.event = MSM_IDLE_STATS_EVENT_COLLECTION_FULL;
		goto post_idle_collection_done;
	}

	timer_interval = stats_dev->collection_expiration - now;
	if (timer_interval <= 0) {
		stats_dev->stats.event =
			MSM_IDLE_STATS_EVENT_COLLECTION_TIMER_EXPIRED;
		goto post_idle_collection_done;
	}

	/* fire at the earlier of busy timeout and collection expiration */
	if (stats_dev->stats.busy_timer > 0 &&
			timer_interval > stats_dev->stats.busy_timer)
		timer_interval = stats_dev->stats.busy_timer;

	/* timer_interval is us; ktime_set() takes ns */
	rc = hrtimer_start(&stats_dev->timer,
		ktime_set(0, timer_interval * 1000), HRTIMER_MODE_REL_PINNED);
	WARN_ON(rc);

	return;

post_idle_collection_done:
	atomic_set(&stats_dev->collecting, 0);
	wake_up_interruptible(&stats_dev->wait_q);
}
/*
 * Cpuidle notifier callback: dispatch idle-exit events to the post-idle
 * bookkeeping, everything else to the pre-idle bookkeeping.
 */
static int msm_idle_stats_notified(struct notifier_block *nb,
	unsigned long val, void *v)
{
	struct msm_idle_stats_device *stats_dev =
		container_of(nb, struct msm_idle_stats_device, notifier);

	if (val != MSM_CPUIDLE_STATE_EXIT)
		msm_idle_stats_pre_idle(stats_dev);
	else
		msm_idle_stats_post_idle(stats_dev);

	return 0;
}
/*
 * msm_idle_stats_collect - MSM_IDLE_STATS_IOC_COLLECT handler
 *
 * Copies the collection parameters from userspace, arms the busy timer
 * (if requested), blocks until the collection completes, and copies the
 * gathered busy/idle intervals back to userspace.
 *
 * Returns 0 on success, -EINTR if interrupted by a signal, -EBUSY if a
 * collection is already running, -EINVAL on bad parameters, -EACCES
 * when called from the wrong cpu, or -EFAULT on user copy failure.
 */
static int msm_idle_stats_collect(struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	struct msm_idle_stats_device *stats_dev;
	struct msm_idle_stats *stats;
	int rc;

	stats_dev = (struct msm_idle_stats_device *) filp->private_data;
	stats = &stats_dev->stats;

	rc = mutex_lock_interruptible(&stats_dev->mutex);
	if (rc) {
		if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_SIGNAL)
			pr_info("%s: interrupted while waiting on device "
				"mutex\n", __func__);
		rc = -EINTR;
		goto collect_exit;
	}

	if (atomic_read(&stats_dev->collecting)) {
		pr_err("%s: inconsistent state\n", __func__);
		rc = -EBUSY;
		goto collect_unlock_exit;
	}

	rc = copy_from_user(stats, (void *)arg, sizeof(*stats));
	if (rc) {
		rc = -EFAULT;
		goto collect_unlock_exit;
	}

	/* reject parameters that would overrun the interval arrays */
	if (stats->nr_collected >= MSM_IDLE_STATS_NR_MAX_INTERVALS ||
			stats->busy_timer > MSM_IDLE_STATS_MAX_TIMER ||
			stats->collection_timer > MSM_IDLE_STATS_MAX_TIMER) {
		rc = -EINVAL;
		goto collect_unlock_exit;
	}

	/* collection must be armed from the cpu being measured */
	if (get_cpu() != stats_dev->cpu) {
		put_cpu();
		rc = -EACCES;
		goto collect_unlock_exit;
	}

	/*
	 * When collection_timer == 0, stop collecting at the next
	 * post idle.
	 */
	stats_dev->collection_expiration =
		ktime_to_us(ktime_get()) + stats->collection_timer;

	/*
	 * Enable collection before starting any timer.
	 */
	atomic_set(&stats_dev->collecting, 1);

	/*
	 * When busy_timer == 0, do not set any busy timer.
	 */
	if (stats->busy_timer > 0) {
		/* busy_timer is us; ktime_set() takes ns */
		rc = hrtimer_start(&stats_dev->timer,
			ktime_set(0, stats->busy_timer * 1000),
			HRTIMER_MODE_REL_PINNED);
		WARN_ON(rc);
	}

	put_cpu();
	/* sleep until the timer or the idle notifier ends the collection */
	if (wait_event_interruptible(stats_dev->wait_q,
			!atomic_read(&stats_dev->collecting))) {
		if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_SIGNAL)
			pr_info("%s: interrupted while waiting on "
				"collection\n", __func__);
		hrtimer_cancel(&stats_dev->timer);
		atomic_set(&stats_dev->collecting, 0);

		rc = -EINTR;
		goto collect_unlock_exit;
	}

	stats->return_timestamp = ktime_to_us(ktime_get());

	rc = copy_to_user((void *)arg, stats, sizeof(*stats));
	if (rc) {
		rc = -EFAULT;
		goto collect_unlock_exit;
	}

collect_unlock_exit:
	mutex_unlock(&stats_dev->mutex);

collect_exit:
	return rc;
}
/*
 * msm_idle_stats_open - open a per-cpu stats device
 *
 * The device minor number selects which cpu is monitored.  Allocates
 * the per-cpu device state, claims the cpu's slot (only one open per
 * cpu is allowed) and hooks into the cpuidle notifier chain.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EBUSY when the
 * cpu's device is already open, or the notifier registration error.
 */
static int msm_idle_stats_open(struct inode *inode, struct file *filp)
{
	struct msm_idle_stats_device *stats_dev;
	int rc;

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: enter\n", __func__);

	rc = nonseekable_open(inode, filp);
	if (rc) {
		pr_err("%s: failed to set nonseekable\n", __func__);
		goto open_bail;
	}

	/* no cast needed: kzalloc() returns void * */
	stats_dev = kzalloc(sizeof(*stats_dev), GFP_KERNEL);
	if (!stats_dev) {
		pr_err("%s: failed to allocate device struct\n", __func__);
		rc = -ENOMEM;
		goto open_bail;
	}

	/* minor number == cpu number */
	stats_dev->cpu = MINOR(inode->i_rdev);
	mutex_init(&stats_dev->mutex);
	stats_dev->notifier.notifier_call = msm_idle_stats_notified;
	hrtimer_init(&stats_dev->timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	stats_dev->timer.function = msm_idle_stats_timer;
	init_waitqueue_head(&stats_dev->wait_q);
	atomic_set(&stats_dev->collecting, 0);
	filp->private_data = stats_dev;

	/*
	 * Make sure only one device exists per cpu.
	 */
	spin_lock(&msm_idle_stats_devs_lock);
	if (per_cpu(msm_idle_stats_devs, stats_dev->cpu)) {
		spin_unlock(&msm_idle_stats_devs_lock);
		rc = -EBUSY;
		goto open_free_bail;
	}
	per_cpu(msm_idle_stats_devs, stats_dev->cpu) = stats_dev;
	spin_unlock(&msm_idle_stats_devs_lock);

	rc = msm_cpuidle_register_notifier(stats_dev->cpu,
						&stats_dev->notifier);
	if (rc) {
		pr_err("%s: failed to register idle notification\n", __func__);
		goto open_null_bail;
	}

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: done\n", __func__);
	return 0;

open_null_bail:
	spin_lock(&msm_idle_stats_devs_lock);
	per_cpu(msm_idle_stats_devs, stats_dev->cpu) = NULL;
	spin_unlock(&msm_idle_stats_devs_lock);

open_free_bail:
	kfree(stats_dev);

open_bail:
	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: exit, %d\n", __func__, rc);
	return rc;
}
/*
 * msm_idle_stats_release - close a per-cpu stats device
 *
 * Unhooks the cpuidle notifier, clears the per-cpu slot so the cpu can
 * be opened again, cancels any pending timer and frees the state.
 */
static int msm_idle_stats_release(struct inode *inode, struct file *filp)
{
	struct msm_idle_stats_device *stats_dev;
	int rc;

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: enter\n", __func__);

	stats_dev = (struct msm_idle_stats_device *) filp->private_data;
	rc = msm_cpuidle_unregister_notifier(stats_dev->cpu,
						&stats_dev->notifier);
	WARN_ON(rc);

	spin_lock(&msm_idle_stats_devs_lock);
	per_cpu(msm_idle_stats_devs, stats_dev->cpu) = NULL;
	spin_unlock(&msm_idle_stats_devs_lock);
	filp->private_data = NULL;

	/* no notifier can fire any more; safe to kill the timer and free */
	hrtimer_cancel(&stats_dev->timer);
	kfree(stats_dev);

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: done\n", __func__);
	return 0;
}
/*
 * Ioctl dispatcher: MSM_IDLE_STATS_IOC_COLLECT is the only supported
 * command; everything else yields -ENOTTY.
 */
static long msm_idle_stats_ioctl(struct file *filp, unsigned int cmd,
	unsigned long arg)
{
	int rc = -ENOTTY;

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: enter\n", __func__);

	if (cmd == MSM_IDLE_STATS_IOC_COLLECT)
		rc = msm_idle_stats_collect(filp, cmd, arg);

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: exit, %d\n", __func__, rc);
	return rc;
}
/******************************************************************************
*
*****************************************************************************/
/* character-device entry points; one device node exists per cpu */
static const struct file_operations msm_idle_stats_fops = {
	.owner = THIS_MODULE,
	.open = msm_idle_stats_open,
	.release = msm_idle_stats_release,
	.unlocked_ioctl = msm_idle_stats_ioctl,
};
/*
 * msm_idle_stats_init - driver init
 *
 * Creates one character device node per possible cpu
 * (msm_idle_stats<N>).  cdev_add() is called last so no system call
 * can reach the driver before everything is initialized.
 */
static int __init msm_idle_stats_init(void)
{
	unsigned int nr_cpus = num_possible_cpus();
	struct device *dev;
	int rc;
	int i;

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: enter\n", __func__);

	rc = alloc_chrdev_region(&msm_idle_stats_dev_nr,
			0, nr_cpus, MSM_IDLE_STATS_DRIVER_NAME);
	if (rc) {
		pr_err("%s: failed to allocate device number, rc %d\n",
				__func__, rc);
		goto init_bail;
	}

	msm_idle_stats_class = class_create(THIS_MODULE,
					MSM_IDLE_STATS_DRIVER_NAME);
	if (IS_ERR(msm_idle_stats_class)) {
		pr_err("%s: failed to create device class\n", __func__);
		/* propagate the real error instead of assuming -ENOMEM */
		rc = PTR_ERR(msm_idle_stats_class);
		goto init_unreg_bail;
	}

	for (i = 0; i < nr_cpus; i++) {
		dev = device_create(msm_idle_stats_class, NULL,
				msm_idle_stats_dev_nr + i, NULL,
				MSM_IDLE_STATS_DRIVER_NAME "%d", i);
		/*
		 * device_create() returns ERR_PTR() on failure, never
		 * NULL, so a "if (!dev)" check can never detect errors.
		 */
		if (IS_ERR(dev)) {
			pr_err("%s: failed to create device %d\n",
					__func__, i);
			rc = PTR_ERR(dev);
			goto init_remove_bail;
		}
	}

	cdev_init(&msm_idle_stats_cdev, &msm_idle_stats_fops);
	msm_idle_stats_cdev.owner = THIS_MODULE;

	/*
	 * Call cdev_add() last, after everything else is initialized and
	 * the driver is ready to accept system calls.
	 */
	rc = cdev_add(&msm_idle_stats_cdev, msm_idle_stats_dev_nr, nr_cpus);
	if (rc) {
		pr_err("%s: failed to register char device, rc %d\n",
				__func__, rc);
		goto init_remove_bail;
	}

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: done\n", __func__);
	return 0;

init_remove_bail:
	for (i = i - 1; i >= 0; i--)
		device_destroy(
			msm_idle_stats_class, msm_idle_stats_dev_nr + i);
	class_destroy(msm_idle_stats_class);

init_unreg_bail:
	unregister_chrdev_region(msm_idle_stats_dev_nr, nr_cpus);

init_bail:
	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: exit, %d\n", __func__, rc);
	return rc;
}
/*
 * msm_idle_stats_exit - driver teardown
 *
 * Unregisters the cdev first so no new system calls arrive, then tears
 * down the per-cpu device nodes, the class and the device-number range.
 */
static void __exit msm_idle_stats_exit(void)
{
	unsigned int nr_cpus = num_possible_cpus();
	int i = nr_cpus;

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: enter\n", __func__);

	cdev_del(&msm_idle_stats_cdev);
	while (--i >= 0)
		device_destroy(msm_idle_stats_class,
				msm_idle_stats_dev_nr + i);
	class_destroy(msm_idle_stats_class);
	unregister_chrdev_region(msm_idle_stats_dev_nr, nr_cpus);

	if (msm_idle_stats_debug_mask & MSM_IDLE_STATS_DEBUG_API)
		pr_info("%s: done\n", __func__);
}
module_init(msm_idle_stats_init);
module_exit(msm_idle_stats_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("idle stats driver");
MODULE_VERSION("1.0");
| gpl-2.0 |
CyberGrandChallenge/linux-source-3.13.11-ckt21-cgc | drivers/media/radio/si470x/radio-si470x-i2c.c | 1302 | 13261 | /*
* drivers/media/radio/si470x/radio-si470x-i2c.c
*
* I2C driver for radios with Silicon Labs Si470x FM Radio Receivers
*
* Copyright (c) 2009 Samsung Electronics Co.Ltd
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* driver definitions */
#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>";
#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
#define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers"
#define DRIVER_VERSION "1.0.2"
/* kernel includes */
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include "radio-si470x.h"
/* I2C Device ID List */
static const struct i2c_device_id si470x_i2c_id[] = {
/* Generic Entry */
{ "si470x", 0 },
/* Terminating entry */
{ }
};
MODULE_DEVICE_TABLE(i2c, si470x_i2c_id);
/**************************************************************************
* Module Parameters
**************************************************************************/
/* Radio Nr */
static int radio_nr = -1;
module_param(radio_nr, int, 0444);
MODULE_PARM_DESC(radio_nr, "Radio Nr");
/* RDS buffer blocks */
static unsigned int rds_buf = 100;
module_param(rds_buf, uint, 0444);
MODULE_PARM_DESC(rds_buf, "RDS buffer entries: *100*");
/* RDS maximum block errors */
static unsigned short max_rds_errors = 1;
/* 0 means 0 errors requiring correction */
/* 1 means 1-2 errors requiring correction (used by original USBRadio.exe) */
/* 2 means 3-5 errors requiring correction */
/* 3 means 6+ errors or errors in checkword, correction not possible */
module_param(max_rds_errors, ushort, 0644);
MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
/**************************************************************************
* I2C Definitions
**************************************************************************/
/* Write starts with the upper byte of register 0x02 */
#define WRITE_REG_NUM 8
#define WRITE_INDEX(i) (i + 0x02)
/* Read starts with the upper byte of register 0x0a */
#define READ_REG_NUM RADIO_REGISTER_NUM
#define READ_INDEX(i) ((i + RADIO_REGISTER_NUM - 0x0a) % READ_REG_NUM)
/**************************************************************************
* General Driver Functions - REGISTERs
**************************************************************************/
/*
* si470x_get_register - read register
*/
/*
 * si470x_get_register - read register
 * @radio: si470x device state
 * @regnr: index of the register to refresh in radio->registers
 *
 * The chip has no register addressing on reads: a read always starts at
 * the upper byte of register 0x0a and wraps, so the whole register file
 * is transferred and the requested word is picked out via READ_INDEX().
 * Register data is big-endian on the wire.
 *
 * Returns 0 on success, -EIO on transfer failure.
 */
int si470x_get_register(struct si470x_device *radio, int regnr)
{
	u16 buf[READ_REG_NUM];
	struct i2c_msg msgs[1] = {
		{
			.addr = radio->client->addr,
			.flags = I2C_M_RD,
			.len = sizeof(u16) * READ_REG_NUM,
			.buf = (void *)buf
		},
	};

	if (i2c_transfer(radio->client->adapter, msgs, 1) != 1)
		return -EIO;

	radio->registers[regnr] = __be16_to_cpu(buf[READ_INDEX(regnr)]);
	return 0;
}
/*
* si470x_set_register - write register
*/
/*
 * si470x_set_register - write register
 * @radio: si470x device state
 * @regnr: register that changed (informational; the transfer always
 *         sends all WRITE_REG_NUM registers starting at 0x02)
 *
 * Writes start at the upper byte of register 0x02, so the whole
 * writable window is sent from the shadow copy in radio->registers,
 * big-endian on the wire.
 *
 * Returns 0 on success, -EIO on transfer failure.
 */
int si470x_set_register(struct si470x_device *radio, int regnr)
{
	int i;
	u16 buf[WRITE_REG_NUM];
	struct i2c_msg msgs[1] = {
		{
			.addr = radio->client->addr,
			.len = sizeof(u16) * WRITE_REG_NUM,
			.buf = (void *)buf
		},
	};

	for (i = 0; i < WRITE_REG_NUM; i++)
		buf[i] = __cpu_to_be16(radio->registers[WRITE_INDEX(i)]);

	if (i2c_transfer(radio->client->adapter, msgs, 1) != 1)
		return -EIO;

	return 0;
}
/**************************************************************************
* General Driver Functions - ENTIRE REGISTERS
**************************************************************************/
/*
* si470x_get_all_registers - read entire registers
*/
/*
 * si470x_get_all_registers - read entire registers
 *
 * Bulk-reads the whole register file (the read wraps from 0x0a) and
 * refreshes every entry of the radio->registers shadow copy.
 *
 * Returns 0 on success, -EIO on transfer failure.
 */
static int si470x_get_all_registers(struct si470x_device *radio)
{
	int i;
	u16 buf[READ_REG_NUM];
	struct i2c_msg msgs[1] = {
		{
			.addr = radio->client->addr,
			.flags = I2C_M_RD,
			.len = sizeof(u16) * READ_REG_NUM,
			.buf = (void *)buf
		},
	};

	if (i2c_transfer(radio->client->adapter, msgs, 1) != 1)
		return -EIO;

	for (i = 0; i < READ_REG_NUM; i++)
		radio->registers[i] = __be16_to_cpu(buf[READ_INDEX(i)]);

	return 0;
}
/**************************************************************************
* File Operations Interface
**************************************************************************/
/*
* si470x_fops_open - file open
*/
/*
 * si470x_fops_open - file open
 *
 * On the first (singular) open the radio is started and the RDS and
 * seek/tune-complete interrupts are enabled; the GPIO2 field is set to
 * 0x1 (presumably the interrupt-output mode — confirm against the
 * Si470x datasheet).  On any failure the freshly opened v4l2 file
 * handle is released again.
 */
int si470x_fops_open(struct file *file)
{
	struct si470x_device *radio = video_drvdata(file);
	int retval = v4l2_fh_open(file);

	if (retval)
		return retval;

	if (v4l2_fh_is_singular_file(file)) {
		/* start radio */
		retval = si470x_start(radio);
		if (retval < 0)
			goto done;

		/* enable RDS / STC interrupt */
		radio->registers[SYSCONFIG1] |= SYSCONFIG1_RDSIEN;
		radio->registers[SYSCONFIG1] |= SYSCONFIG1_STCIEN;
		radio->registers[SYSCONFIG1] &= ~SYSCONFIG1_GPIO2;
		radio->registers[SYSCONFIG1] |= 0x1 << 2;
		retval = si470x_set_register(radio, SYSCONFIG1);
	}

done:
	if (retval)
		v4l2_fh_release(file);
	return retval;
}
/*
* si470x_fops_release - file release
*/
/*
 * si470x_fops_release - file release
 *
 * Powers the radio down when the last file handle is closed.
 */
int si470x_fops_release(struct file *file)
{
	struct si470x_device *radio = video_drvdata(file);

	if (v4l2_fh_is_singular_file(file))
		/* stop radio */
		si470x_stop(radio);

	return v4l2_fh_release(file);
}
/**************************************************************************
* Video4Linux Interface
**************************************************************************/
/*
* si470x_vidioc_querycap - query device capabilities
*/
/*
 * si470x_vidioc_querycap - query device capabilities
 *
 * Reports tuner/radio/RDS-capture, read(2) access and hardware
 * frequency seek support to the V4L2 core.
 */
int si470x_vidioc_querycap(struct file *file, void *priv,
		struct v4l2_capability *capability)
{
	strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver));
	strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card));
	capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE |
		V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE;
	capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS;
	return 0;
}
/**************************************************************************
* I2C Interface
**************************************************************************/
/*
* si470x_i2c_interrupt - interrupt handler
*/
/*
 * si470x_i2c_interrupt - interrupt handler
 *
 * Threaded IRQ handler: signals seek/tune completion and, when RDS is
 * enabled and a group is ready, packs the four RDS blocks together with
 * their block-error levels into 3-byte records in the driver's ring
 * buffer, waking any sleeping reader.
 */
static irqreturn_t si470x_i2c_interrupt(int irq, void *dev_id)
{
	struct si470x_device *radio = dev_id;
	unsigned char regnr;
	unsigned char blocknum;
	unsigned short bler; /* rds block errors */
	unsigned short rds;
	unsigned char tmpbuf[3];
	int retval = 0;

	/* check Seek/Tune Complete */
	retval = si470x_get_register(radio, STATUSRSSI);
	if (retval < 0)
		goto end;

	if (radio->registers[STATUSRSSI] & STATUSRSSI_STC)
		complete(&radio->completion);

	/* safety checks */
	if ((radio->registers[SYSCONFIG1] & SYSCONFIG1_RDS) == 0)
		goto end;

	/* Update RDS registers */
	for (regnr = 1; regnr < RDS_REGISTER_NUM; regnr++) {
		retval = si470x_get_register(radio, STATUSRSSI + regnr);
		if (retval < 0)
			goto end;
	}

	/* get rds blocks */
	if ((radio->registers[STATUSRSSI] & STATUSRSSI_RDSR) == 0)
		/* No RDS group ready, better luck next time */
		goto end;

	for (blocknum = 0; blocknum < 4; blocknum++) {
		/* each block carries its own error level in a different reg */
		switch (blocknum) {
		default:
			bler = (radio->registers[STATUSRSSI] &
					STATUSRSSI_BLERA) >> 9;
			rds = radio->registers[RDSA];
			break;
		case 1:
			bler = (radio->registers[READCHAN] &
					READCHAN_BLERB) >> 14;
			rds = radio->registers[RDSB];
			break;
		case 2:
			bler = (radio->registers[READCHAN] &
					READCHAN_BLERC) >> 12;
			rds = radio->registers[RDSC];
			break;
		case 3:
			bler = (radio->registers[READCHAN] &
					READCHAN_BLERD) >> 10;
			rds = radio->registers[RDSD];
			break;
		}

		/* Fill the V4L2 RDS buffer */
		put_unaligned_le16(rds, &tmpbuf);
		tmpbuf[2] = blocknum;		/* offset name */
		tmpbuf[2] |= blocknum << 3;	/* received offset */
		if (bler > max_rds_errors)
			tmpbuf[2] |= 0x80;	/* uncorrectable errors */
		else if (bler > 0)
			tmpbuf[2] |= 0x40;	/* corrected error(s) */

		/* copy RDS block to internal buffer */
		memcpy(&radio->buffer[radio->wr_index], &tmpbuf, 3);
		radio->wr_index += 3;

		/* wrap write pointer */
		if (radio->wr_index >= radio->buf_size)
			radio->wr_index = 0;

		/* check for overflow: drop the oldest record */
		if (radio->wr_index == radio->rd_index) {
			/* increment and wrap read pointer */
			radio->rd_index += 3;
			if (radio->rd_index >= radio->buf_size)
				radio->rd_index = 0;
		}
	}

	if (radio->wr_index != radio->rd_index)
		wake_up_interruptible(&radio->read_queue);

end:
	return IRQ_HANDLED;
}
/*
* si470x_i2c_probe - probe for the device
*/
/*
 * si470x_i2c_probe - probe for the device
 *
 * Powers the tuner up, verifies the firmware revision, allocates the
 * RDS ring buffer (rds_buf entries of 3 bytes each), requests the
 * threaded interrupt and registers the V4L2 radio device.
 */
static int si470x_i2c_probe(struct i2c_client *client,
		const struct i2c_device_id *id)
{
	struct si470x_device *radio;
	int retval = 0;
	unsigned char version_warning = 0;

	/* private data allocation and initialization */
	radio = kzalloc(sizeof(struct si470x_device), GFP_KERNEL);
	if (!radio) {
		retval = -ENOMEM;
		goto err_initial;
	}

	radio->client = client;
	radio->band = 1; /* Default to 76 - 108 MHz */
	mutex_init(&radio->lock);
	init_completion(&radio->completion);

	/* video device initialization */
	radio->videodev = si470x_viddev_template;
	video_set_drvdata(&radio->videodev, radio);

	/* power up : need 110ms */
	radio->registers[POWERCFG] = POWERCFG_ENABLE;
	if (si470x_set_register(radio, POWERCFG) < 0) {
		retval = -EIO;
		goto err_radio;
	}
	msleep(110);

	/* get device and chip versions */
	if (si470x_get_all_registers(radio) < 0) {
		retval = -EIO;
		goto err_radio;
	}
	dev_info(&client->dev, "DeviceID=0x%4.4hx ChipID=0x%4.4hx\n",
			radio->registers[DEVICEID], radio->registers[CHIPID]);
	if ((radio->registers[CHIPID] & CHIPID_FIRMWARE) < RADIO_FW_VERSION) {
		dev_warn(&client->dev,
			"This driver is known to work with "
			"firmware version %hu,\n", RADIO_FW_VERSION);
		dev_warn(&client->dev,
			"but the device has firmware version %hu.\n",
			radio->registers[CHIPID] & CHIPID_FIRMWARE);
		version_warning = 1;
	}

	/* give out version warning */
	if (version_warning == 1) {
		dev_warn(&client->dev,
			"If you have some trouble using this driver,\n");
		dev_warn(&client->dev,
			"please report to V4L ML at "
			"linux-media@vger.kernel.org\n");
	}

	/* set initial frequency */
	si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */

	/* rds buffer allocation */
	radio->buf_size = rds_buf * 3;
	radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL);
	if (!radio->buffer) {
		/* allocation failure is -ENOMEM, not -EIO */
		retval = -ENOMEM;
		goto err_radio;
	}

	/* rds buffer configuration */
	radio->wr_index = 0;
	radio->rd_index = 0;
	init_waitqueue_head(&radio->read_queue);

	retval = request_threaded_irq(client->irq, NULL, si470x_i2c_interrupt,
			IRQF_TRIGGER_FALLING, DRIVER_NAME, radio);
	if (retval) {
		dev_err(&client->dev, "Failed to register interrupt\n");
		goto err_rds;
	}

	/* register video device */
	retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO,
			radio_nr);
	if (retval) {
		dev_warn(&client->dev, "Could not register video device\n");
		goto err_all;
	}
	i2c_set_clientdata(client, radio);

	return 0;
err_all:
	free_irq(client->irq, radio);
err_rds:
	kfree(radio->buffer);
err_radio:
	kfree(radio);
err_initial:
	return retval;
}
/*
* si470x_i2c_remove - remove the device
*/
/*
 * si470x_i2c_remove - remove the device
 *
 * The interrupt is freed before the video device is unregistered and
 * the state is kfree'd, so the RDS handler cannot run during teardown.
 * NOTE(review): radio->buffer does not appear to be freed here —
 * verify against the probe error path, this looks like a leak.
 */
static int si470x_i2c_remove(struct i2c_client *client)
{
	struct si470x_device *radio = i2c_get_clientdata(client);

	free_irq(client->irq, radio);
	video_unregister_device(&radio->videodev);
	kfree(radio);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
* si470x_i2c_suspend - suspend the device
*/
/*
 * si470x_i2c_suspend - suspend the device
 *
 * Sets the power-down bit in the POWERCFG shadow register and flushes
 * it to the chip.
 */
static int si470x_i2c_suspend(struct device *dev)
{
	struct si470x_device *radio =
		i2c_get_clientdata(to_i2c_client(dev));

	/* power down */
	radio->registers[POWERCFG] |= POWERCFG_DISABLE;

	return (si470x_set_register(radio, POWERCFG) < 0) ? -EIO : 0;
}
}
/*
* si470x_i2c_resume - resume the device
*/
/*
 * si470x_i2c_resume - resume the device
 *
 * Sets the power-up bit in the POWERCFG shadow register, flushes it to
 * the chip and waits out the 110 ms power-up settle time.
 */
static int si470x_i2c_resume(struct device *dev)
{
	struct si470x_device *radio =
		i2c_get_clientdata(to_i2c_client(dev));

	radio->registers[POWERCFG] |= POWERCFG_ENABLE;
	if (si470x_set_register(radio, POWERCFG) < 0)
		return -EIO;

	/* power up : need 110ms */
	msleep(110);
	return 0;
}
static SIMPLE_DEV_PM_OPS(si470x_i2c_pm, si470x_i2c_suspend, si470x_i2c_resume);
#endif
/*
* si470x_i2c_driver - i2c driver interface
*/
static struct i2c_driver si470x_i2c_driver = {
.driver = {
.name = "si470x",
.owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
.pm = &si470x_i2c_pm,
#endif
},
.probe = si470x_i2c_probe,
.remove = si470x_i2c_remove,
.id_table = si470x_i2c_id,
};
module_i2c_driver(si470x_i2c_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
| gpl-2.0 |
stefanstrogin/linux | drivers/watchdog/intel_scu_watchdog.c | 2070 | 15396 | /*
* Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
* for Intel part #(s):
* - AF82MP20 PCH
*
* Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General
* Public License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the Free
* Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
* The full GNU General Public License is included in this
* distribution in the file called COPYING.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/sfi.h>
#include <asm/irq.h>
#include <linux/atomic.h>
#include <asm/intel_scu_ipc.h>
#include <asm/apb_timer.h>
#include <asm/intel-mid.h>
#include "intel_scu_watchdog.h"
/* Bounds number of times we will retry loading time count */
/* This retry is a work around for a silicon bug. */
#define MAX_RETRY 16
#define IPC_SET_WATCHDOG_TIMER 0xF8
/*
 * Module parameters.
 *
 * Fix: the original MODULE_PARM_DESC strings were adjacent literals with no
 * separators, so modinfo printed run-on text like "marginTime between".
 * Separators added; parameter names, types and permissions are unchanged.
 */
static int timer_margin = DEFAULT_SOFT_TO_HARD_MARGIN;
module_param(timer_margin, int, 0);
MODULE_PARM_DESC(timer_margin,
	"Watchdog timer margin. "
	"Time between interrupt and resetting the system. "
	"The range is from 1 to 160. "
	"This is the time for all keep alives to arrive.");

static int timer_set = DEFAULT_TIME;
module_param(timer_set, int, 0);
MODULE_PARM_DESC(timer_set,
	"Default Watchdog timer setting. "
	"Complete cycle time. "
	"The range is from 1 to 170. "
	"This is the time for all keep alives to arrive.");

/* After watchdog device is closed, check force_boot. If:
 * force_boot == 0, then force boot on next watchdog interrupt after close,
 * force_boot == 1, then force boot immediately when device is closed.
 */
static int force_boot;
module_param(force_boot, int, 0);
MODULE_PARM_DESC(force_boot,
	"A value of 1 means that the driver will reboot "
	"the system immediately if the /dev/watchdog device is closed. "
	"A value of 0 means that when /dev/watchdog device is closed "
	"the watchdog timer will be refreshed for one more interval "
	"of length: timer_set. At the end of this interval, the "
	"watchdog timer will reset the system.");
/* there is only one device in the system now; this can be made into
* an array in the future if we have more than one device */
static struct intel_scu_watchdog_dev watchdog_device;
/*
 * Called when the watchdog device was closed while armed.  Reboots
 * immediately when force_boot is set; otherwise only logs and lets the
 * hardware watchdog reset the machine when it expires.
 */
static void watchdog_fire(void)
{
	if (!force_boot) {
		pr_crit("Immediate Reboot Disabled\n");
		pr_crit("System will reset when watchdog timer times out!\n");
		return;
	}

	pr_crit("Initiating system reboot\n");
	emergency_restart();
	/* emergency_restart() should not return */
	pr_crit("Reboot didn't ?????\n");
}
/*
 * Validate a requested margin against [MIN_TIME_CYCLE, MAX_TIME - timer_set].
 * Returns 0 if acceptable, -EINVAL otherwise.
 */
static int check_timer_margin(int new_margin)
{
	int max_margin = MAX_TIME - timer_set;

	if (new_margin >= MIN_TIME_CYCLE && new_margin <= max_margin)
		return 0;

	pr_debug("value of new_margin %d is out of the range %d to %d\n",
		 new_margin, MIN_TIME_CYCLE, max_margin);
	return -EINVAL;
}
/*
* IPC operations
*/
/*
 * watchdog_set_ipc() - program the SCU watchdog thresholds over IPC.
 * @soft_threshold: warning-interrupt count sent as payload word 0
 * @threshold:      hard-reset count sent as payload word 1
 *
 * Returns 0 on success or the non-zero IPC error code.
 *
 * Fix: removed the stray ';' that followed the function's closing brace
 * (an empty file-scope declaration, rejected by -pedantic builds).
 */
static int watchdog_set_ipc(int soft_threshold, int threshold)
{
	u32 *ipc_wbuf;
	/* 16-byte, suitably aligned stack buffer viewed as u32 payload words */
	u8 cbuf[16] = { '\0' };
	int ipc_ret = 0;

	ipc_wbuf = (u32 *)&cbuf;
	ipc_wbuf[0] = soft_threshold;
	ipc_wbuf[1] = threshold;

	/* 2 = number of 32-bit input words; no output expected */
	ipc_ret = intel_scu_ipc_command(
		IPC_SET_WATCHDOG_TIMER,
		0,
		ipc_wbuf,
		2,
		NULL,
		0);

	if (ipc_ret != 0)
		pr_err("Error setting SCU watchdog timer: %x\n", ipc_ret);

	return ipc_ret;
}
/*
* Intel_SCU operations
*/
/* timer interrupt handler */
/*
 * Shared-IRQ handler for the watchdog warning interrupt.  Reprograms the
 * timer with the hard-reset threshold so the SCU resets the system unless
 * userspace keeps the device alive.
 */
static irqreturn_t watchdog_timer_interrupt(int irq, void *dev_id)
{
	int int_status;

	int_status = ioread32(watchdog_device.timer_interrupt_status_addr);

	pr_debug("irq, int_status: %x\n", int_status);

	/*
	 * NOTE(review): a non-zero status is treated as "not our interrupt"
	 * -- confirm this polarity against the APB timer documentation.
	 */
	if (int_status != 0)
		return IRQ_NONE;

	/* has the timer been started? If not, then this is spurious */
	if (watchdog_device.timer_started == 0) {
		pr_debug("spurious interrupt received\n");
		return IRQ_HANDLED;
	}

	/* temporarily disable the timer */
	iowrite32(0x00000002, watchdog_device.timer_control_addr);

	/* set the timer to the threshold */
	iowrite32(watchdog_device.threshold,
		  watchdog_device.timer_load_count_addr);

	/* allow the timer to run */
	iowrite32(0x00000003, watchdog_device.timer_control_addr);

	return IRQ_HANDLED;
}
/*
 * Pet the watchdog: acknowledge any pending warning interrupt and restart
 * the countdown from the soft threshold.  Always returns 0.
 */
static int intel_scu_keepalive(void)
{

	/* read eoi register - clears interrupt */
	ioread32(watchdog_device.timer_clear_interrupt_addr);

	/* temporarily disable the timer */
	iowrite32(0x00000002, watchdog_device.timer_control_addr);

	/* set the timer to the soft_threshold */
	iowrite32(watchdog_device.soft_threshold,
		  watchdog_device.timer_load_count_addr);

	/* allow the timer to run */
	iowrite32(0x00000003, watchdog_device.timer_control_addr);

	return 0;
}
/* Halt the watchdog timer by clearing its control register. */
static int intel_scu_stop(void)
{
	iowrite32(0, watchdog_device.timer_control_addr);
	return 0;
}
/*
 * Program a new watchdog period of @t (in the same units as timer_set).
 *
 * Computes the soft (warning) and hard (reset) thresholds in timer ticks,
 * pushes them to the SCU over IPC, then loads and starts the local timer.
 * A retry loop verifies the load-count register, working around silicon
 * that sometimes latched the value incorrectly.
 *
 * Returns 0 on success, an IPC error code, or -ENODEV if the retry loop
 * exhausts MAX_RETRY attempts.
 */
static int intel_scu_set_heartbeat(u32 t)
{
	int ipc_ret;
	int retry_count;
	u32 soft_value;
	u32 hw_value;

	watchdog_device.timer_set = t;
	watchdog_device.threshold =
		timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
	watchdog_device.soft_threshold =
		(watchdog_device.timer_set - timer_margin)
		* watchdog_device.timer_tbl_ptr->freq_hz;

	pr_debug("set_heartbeat: timer freq is %d\n",
		 watchdog_device.timer_tbl_ptr->freq_hz);
	pr_debug("set_heartbeat: timer_set is %x (hex)\n",
		 watchdog_device.timer_set);
	pr_debug("set_hearbeat: timer_margin is %x (hex)\n", timer_margin);
	pr_debug("set_heartbeat: threshold is %x (hex)\n",
		 watchdog_device.threshold);
	pr_debug("set_heartbeat: soft_threshold is %x (hex)\n",
		 watchdog_device.soft_threshold);

	/* Adjust thresholds by FREQ_ADJUSTMENT factor, to make the */
	/* watchdog timing come out right. */
	watchdog_device.threshold =
		watchdog_device.threshold / FREQ_ADJUSTMENT;
	watchdog_device.soft_threshold =
		watchdog_device.soft_threshold / FREQ_ADJUSTMENT;

	/* temporarily disable the timer */
	iowrite32(0x00000002, watchdog_device.timer_control_addr);

	/* send the threshold and soft_threshold via IPC to the processor */
	ipc_ret = watchdog_set_ipc(watchdog_device.soft_threshold,
				   watchdog_device.threshold);

	if (ipc_ret != 0) {
		/* Make sure the watchdog timer is stopped */
		intel_scu_stop();
		return ipc_ret;
	}

	/* Soft Threshold set loop. Early versions of silicon did */
	/* not always set this count correctly.  This loop checks */
	/* the value and retries if it was not set correctly.     */

	retry_count = 0;
	/* only the upper 16 bits are compared; low bits tick while reading */
	soft_value = watchdog_device.soft_threshold & 0xFFFF0000;
	do {

		/* Make sure timer is stopped */
		intel_scu_stop();

		if (MAX_RETRY < retry_count++) {
			/* Unable to set timer value */
			pr_err("Unable to set timer\n");
			return -ENODEV;
		}

		/* set the timer to the soft threshold */
		iowrite32(watchdog_device.soft_threshold,
			  watchdog_device.timer_load_count_addr);

		/* read count value before starting timer */
		ioread32(watchdog_device.timer_load_count_addr);

		/* Start the timer */
		iowrite32(0x00000003, watchdog_device.timer_control_addr);

		/* read the value the time loaded into its count reg */
		hw_value = ioread32(watchdog_device.timer_load_count_addr);
		hw_value = hw_value & 0xFFFF0000;

	} while (soft_value != hw_value);

	watchdog_device.timer_started = 1;

	return 0;
}
/*
* /dev/watchdog handling
*/
/*
 * open() for /dev/watchdog: single-open device; once it has been closed
 * after arming, reopening is permanently refused (driver_closed).
 */
static int intel_scu_open(struct inode *inode, struct file *file)
{

	/* Set flag to indicate that watchdog device is open */
	if (test_and_set_bit(0, &watchdog_device.driver_open))
		return -EBUSY;

	/* Check for reopen of driver. Reopens are not allowed */
	if (watchdog_device.driver_closed)
		return -EPERM;

	return nonseekable_open(inode, file);
}
/*
 * release() for /dev/watchdog.  Closing an armed watchdog either reboots
 * immediately (force_boot) or refreshes the timer one last time and lets
 * the hardware reset the system.
 */
static int intel_scu_release(struct inode *inode, struct file *file)
{
	/*
	 * This watchdog should not be closed, after the timer
	 * is started with the WDIPC_SETTIMEOUT ioctl
	 * If force_boot is set watchdog_fire() will cause an
	 * immediate reset. If force_boot is not set, the watchdog
	 * timer is refreshed for one more interval. At the end
	 * of that interval, the watchdog timer will reset the system.
	 */

	if (!test_and_clear_bit(0, &watchdog_device.driver_open)) {
		pr_debug("intel_scu_release, without open\n");
		return -ENOTTY;
	}

	if (!watchdog_device.timer_started) {
		/* Just close, since timer has not been started */
		pr_debug("closed, without starting timer\n");
		return 0;
	}

	pr_crit("Unexpected close of /dev/watchdog!\n");

	/* Since the timer was started, prevent future reopens */
	watchdog_device.driver_closed = 1;

	/* Refresh the timer for one more interval */
	intel_scu_keepalive();

	/* Reboot system (if force_boot is set) */
	watchdog_fire();

	/* We should only reach this point if force_boot is not set */
	return 0;
}
/*
 * write() for /dev/watchdog: any write pets the watchdog; the first write
 * arms it with the default period.  The data itself is ignored.
 */
static ssize_t intel_scu_write(struct file *file,
			       char const *data,
			       size_t len,
			       loff_t *ppos)
{

	if (watchdog_device.timer_started)
		/* Watchdog already started, keep it alive */
		intel_scu_keepalive();
	else
		/* Start watchdog with timer value set by init */
		intel_scu_set_heartbeat(watchdog_device.timer_set);

	return len;
}
/*
 * ioctl() for /dev/watchdog implementing the standard WDIOC_* commands.
 */
static long intel_scu_ioctl(struct file *file,
			    unsigned int cmd,
			    unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	u32 __user *p = argp;
	u32 new_margin;


	static const struct watchdog_info ident = {
		.options =          WDIOF_SETTIMEOUT
				    | WDIOF_KEEPALIVEPING,
		.firmware_version = 0,  /* @todo Get from SCU via
						 ipc_get_scu_fw_version()? */
		.identity =         "Intel_SCU IOH Watchdog"  /* len < 32 */
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp,
				    &ident,
				    sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		/* no status bits tracked; always report 0 */
		return put_user(0, p);
	case WDIOC_KEEPALIVE:
		intel_scu_keepalive();

		return 0;
	case WDIOC_SETTIMEOUT:
		if (get_user(new_margin, p))
			return -EFAULT;

		if (check_timer_margin(new_margin))
			return -EINVAL;

		if (intel_scu_set_heartbeat(new_margin))
			return -EINVAL;
		return 0;
	case WDIOC_GETTIMEOUT:
		/*
		 * NOTE(review): this returns soft_threshold, which is in
		 * adjusted timer ticks, while WDIOC_GETTIMEOUT normally
		 * reports seconds -- confirm the intended units.
		 */
		return put_user(watchdog_device.soft_threshold, p);

	default:
		return -ENOTTY;
	}
}
/*
* Notifier for system down
*/
/*
 * Reboot notifier: stop the watchdog when the system goes down or halts
 * so it cannot fire during shutdown.
 */
static int intel_scu_notify_sys(struct notifier_block *this,
				unsigned long code,
				void *another_unused)
{
	switch (code) {
	case SYS_DOWN:
	case SYS_HALT:
		/* Turn off the watchdog timer. */
		intel_scu_stop();
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
/*
* Kernel Interfaces
*/
/* File operations for the /dev/watchdog misc device. */
static const struct file_operations intel_scu_fops = {
	.owner          = THIS_MODULE,
	.llseek         = no_llseek,
	.write          = intel_scu_write,
	.unlocked_ioctl = intel_scu_ioctl,
	.open           = intel_scu_open,
	.release        = intel_scu_release,
};
/*
 * Module init: validate the boot parameters, look up the SFI timer,
 * map its registers, register the reboot notifier, misc device and IRQ,
 * and leave the watchdog disarmed.  Error paths unwind in reverse order
 * via the goto labels at the bottom.
 */
static int __init intel_scu_watchdog_init(void)
{
	int ret;
	u32 __iomem *tmp_addr;

	/*
	 * We don't really need to check this as the SFI timer get will fail
	 * but if we do so we can exit with a clearer reason and no noise.
	 *
	 * If it isn't an intel MID device then it doesn't have this watchdog
	 */
	if (!intel_mid_identify_cpu())
		return -ENODEV;

	/* Check boot parameters to verify that their initial values */
	/* are in range. */
	/* Check value of timer_set boot parameter */
	if ((timer_set < MIN_TIME_CYCLE) ||
	    (timer_set > MAX_TIME - MIN_TIME_CYCLE)) {
		pr_err("value of timer_set %x (hex) is out of range from %x to %x (hex)\n",
		       timer_set, MIN_TIME_CYCLE, MAX_TIME - MIN_TIME_CYCLE);
		return -EINVAL;
	}

	/* Check value of timer_margin boot parameter */
	if (check_timer_margin(timer_margin))
		return -EINVAL;

	/* use the last SFI timer for the watchdog */
	watchdog_device.timer_tbl_ptr = sfi_get_mtmr(sfi_mtimer_num-1);

	if (watchdog_device.timer_tbl_ptr == NULL) {
		pr_debug("timer is not available\n");
		return -ENODEV;
	}
	/* make sure the timer exists */
	if (watchdog_device.timer_tbl_ptr->phys_addr == 0) {
		pr_debug("timer %d does not have valid physical memory\n",
			 sfi_mtimer_num);
		return -ENODEV;
	}

	if (watchdog_device.timer_tbl_ptr->irq == 0) {
		pr_debug("timer %d invalid irq\n", sfi_mtimer_num);
		return -ENODEV;
	}

	/* map the 5 consecutive 32-bit timer registers (20 bytes) */
	tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
				   20);

	if (tmp_addr == NULL) {
		pr_debug("timer unable to ioremap\n");
		return -ENOMEM;
	}

	/* carve the mapping into the individual register pointers */
	watchdog_device.timer_load_count_addr = tmp_addr++;
	watchdog_device.timer_current_value_addr = tmp_addr++;
	watchdog_device.timer_control_addr = tmp_addr++;
	watchdog_device.timer_clear_interrupt_addr = tmp_addr++;
	watchdog_device.timer_interrupt_status_addr = tmp_addr++;

	/* Set the default time values in device structure */

	watchdog_device.timer_set = timer_set;
	watchdog_device.threshold =
		timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
	watchdog_device.soft_threshold =
		(watchdog_device.timer_set - timer_margin)
		* watchdog_device.timer_tbl_ptr->freq_hz;


	watchdog_device.intel_scu_notifier.notifier_call =
		intel_scu_notify_sys;

	ret = register_reboot_notifier(&watchdog_device.intel_scu_notifier);
	if (ret) {
		pr_err("cannot register notifier %d)\n", ret);
		goto register_reboot_error;
	}

	watchdog_device.miscdev.minor = WATCHDOG_MINOR;
	watchdog_device.miscdev.name = "watchdog";
	watchdog_device.miscdev.fops = &intel_scu_fops;

	ret = misc_register(&watchdog_device.miscdev);
	if (ret) {
		pr_err("cannot register miscdev %d err =%d\n",
		       WATCHDOG_MINOR, ret);
		goto misc_register_error;
	}

	/* shared IRQ: dev_id must be unique and non-NULL */
	ret = request_irq((unsigned int)watchdog_device.timer_tbl_ptr->irq,
			  watchdog_timer_interrupt,
			  IRQF_SHARED, "watchdog",
			  &watchdog_device.timer_load_count_addr);
	if (ret) {
		pr_err("error requesting irq %d\n", ret);
		goto request_irq_error;
	}
	/* Make sure timer is disabled before returning */
	intel_scu_stop();
	return 0;

/* error cleanup */

request_irq_error:
	misc_deregister(&watchdog_device.miscdev);

misc_register_error:
	unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);

register_reboot_error:
	intel_scu_stop();
	iounmap(watchdog_device.timer_load_count_addr);
	return ret;
}
/*
 * Module exit: tear down in reverse order of init.
 *
 * Fix: init registers a shared IRQ handler, but the original exit never
 * called free_irq(), leaking the IRQ and leaving the handler installed
 * after the registers were unmapped.  Release it before iounmap, using
 * the same dev_id cookie that was passed to request_irq().
 */
static void __exit intel_scu_watchdog_exit(void)
{

	misc_deregister(&watchdog_device.miscdev);
	unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
	/* disable the timer */
	iowrite32(0x00000002, watchdog_device.timer_control_addr);
	/* release the timer IRQ before unmapping the registers it touches */
	free_irq((unsigned int)watchdog_device.timer_tbl_ptr->irq,
		 &watchdog_device.timer_load_count_addr);
	iounmap(watchdog_device.timer_load_count_addr);
}
late_initcall(intel_scu_watchdog_init);
module_exit(intel_scu_watchdog_exit);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(WDT_VER);
| gpl-2.0 |
Droid-Concepts/DEMENTEDConcepts_kernel_n8013 | kernel/rcupdate.c | 2326 | 8805 | /*
* Read-Copy Update mechanism for mutual exclusion
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright IBM Corporation, 2001
*
* Authors: Dipankar Sarma <dipankar@in.ibm.com>
* Manfred Spraul <manfred@colorfullife.com>
*
* Based on the original work by Paul McKenney <paulmck@us.ibm.com>
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
* Papers:
* http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
* http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
*
* For detailed explanation of Read-Copy Update mechanism see -
* http://lse.sourceforge.net/locking/rcupdate.html
*
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Lockdep classes/maps for the three RCU read-side flavors. */
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/*
 * Lockdep-based RCU checking is meaningful only once the scheduler is up,
 * lockdep itself is live, and we are not inside lockdep recursion.
 */
int debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 */
int rcu_read_lock_bh_held(void)
{
	/* before lockdep is usable, conservatively claim we are held */
	if (!debug_lockdep_rcu_enabled())
		return 1;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/*
 * RCU callback that completes the waiter embedded in a struct
 * rcu_synchronize, waking the corresponding synchronize_rcu() caller
 * now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu =
		container_of(head, struct rcu_synchronize, head);

	complete(&rcu->completion);
}
#ifdef CONFIG_PROVE_RCU

/*
 * wrapper function to avoid #include problems.
 *
 * Exposes thread_group_empty(current) to callers that cannot pull in
 * the scheduler headers directly.
 */
int rcu_my_thread_group_empty(void)
{
	return thread_group_empty(current);
}
EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
#endif /* #ifdef CONFIG_PROVE_RCU */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
/* Thin debugobjects wrappers for tracking rcu_head lifetime. */
static inline void debug_init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

static inline void debug_rcu_head_free(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
/*
 * fixup_init is called when:
 * - an active object is initialized
 *
 * Returns 1 when a fixup was performed, 0 otherwise.
 */
static int rcuhead_fixup_init(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in a RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in a RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		/* !PREEMPT: warn and bail; the code below is unreachable */
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		/* flush callbacks of all three RCU flavors before re-init */
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_init(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}
/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 *
 * Returns 1 when a fixup was performed, 0 otherwise.
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. We just make sure that it is
		 * tracked in the object tracker.
		 */
		debug_object_init(head, &rcuhead_debug_descr);
		debug_object_activate(head, &rcuhead_debug_descr);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in a RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in a RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		/* !PREEMPT: warn and bail; the code below is unreachable */
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		/* flush callbacks of all three RCU flavors before re-activate */
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_activate(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}
/*
 * fixup_free is called when:
 * - an active object is freed
 *
 * Returns 1 when a fixup was performed, 0 otherwise.
 */
static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		/*
		 * Ensure that queued callbacks are all executed.
		 * If we detect that we are nested in a RCU read-side critical
		 * section, we should simply fail, otherwise we would deadlock.
		 * In !PREEMPT configurations, there is no way to tell if we are
		 * in a RCU read-side critical section or not, so we never
		 * attempt any fixup and just print a warning.
		 */
#ifndef CONFIG_PREEMPT
		/* !PREEMPT: warn and bail; the code below is unreachable */
		WARN_ON_ONCE(1);
		return 0;
#endif
		if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
		    irqs_disabled()) {
			WARN_ON_ONCE(1);
			return 0;
		}
		/* flush callbacks of all three RCU flavors before freeing */
		rcu_barrier();
		rcu_barrier_sched();
		rcu_barrier_bh();
		debug_object_free(head, &rcuhead_debug_descr);
		return 1;
	default:
		return 0;
	}
}
/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

/* Debugobjects descriptor wiring the fixup handlers defined above. */
struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.fixup_init = rcuhead_fixup_init,
	.fixup_activate = rcuhead_fixup_activate,
	.fixup_free = rcuhead_fixup_free,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
| gpl-2.0 |
maniacx/android_kernel_htcleo-3.0_older | arch/sparc/kernel/time_32.c | 2838 | 5623 | /* linux/arch/sparc/kernel/time.c
*
* Copyright (C) 1995 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
*
* Chris Davis (cdavis@cois.on.ca) 03/27/1998
* Added support for the intersil on the sun4/4200
*
* Gleb Raiko (rajko@mech.math.msu.su) 08/18/1998
* Support for MicroSPARC-IIep, PCI CPU.
*
* This file handles the Sparc specific time handling details.
*
* 1997-09-10 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/rtc.h>
#include <linux/rtc/m48t59.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/profile.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <asm/oplib.h>
#include <asm/timex.h>
#include <asm/timer.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/idprom.h>
#include <asm/machines.h>
#include <asm/page.h>
#include <asm/pcic.h>
#include <asm/irq_regs.h>
#include "irq.h"
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
static int set_rtc_mmss(unsigned long);
/*
 * Return the PC to attribute a profiling hit to.  If the trap happened
 * inside a lock function or one of the hand-written assembly regions
 * (copy_user / atomic / bzero), charge the caller's return address instead.
 */
unsigned long profile_pc(struct pt_regs *regs)
{
	extern char __copy_user_begin[], __copy_user_end[];
	extern char __atomic_begin[], __atomic_end[];
	extern char __bzero_begin[], __bzero_end[];

	unsigned long pc = regs->pc;

	if (in_lock_functions(pc) ||
	    (pc >= (unsigned long) __copy_user_begin &&
	     pc < (unsigned long) __copy_user_end) ||
	    (pc >= (unsigned long) __atomic_begin &&
	     pc < (unsigned long) __atomic_end) ||
	    (pc >= (unsigned long) __bzero_begin &&
	     pc < (unsigned long) __bzero_end))
		pc = regs->u_regs[UREG_RETPC];
	return pc;
}
EXPORT_SYMBOL(profile_pc);
/* Level-10 counter register; set up by the platform timer init code. */
__volatile__ unsigned int *master_l10_counter;

/* Optional hook returning the sub-tick offset; NULL until installed. */
u32 (*do_arch_gettimeoffset)(void);

/* Push the current wall-clock seconds into the battery-backed RTC. */
int update_persistent_clock(struct timespec now)
{
	return set_rtc_mmss(now.tv_sec);
}
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "xtime_update()" routine every clocktick
 */

#define TICK_SIZE (tick_nsec / 1000)

static irqreturn_t timer_interrupt(int dummy, void *dev_id)
{
#ifndef CONFIG_SMP
	/* on UP the tick IRQ also drives the profiler */
	profile_tick(CPU_PROFILING);
#endif
	/* ack the timer interrupt in the hardware */
	clear_clock_irq();

	xtime_update(1);

#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
	return IRQ_HANDLED;
}
/* Read one byte from the Mostek NVRAM/RTC via its mapped register window. */
static unsigned char mostek_read_byte(struct device *dev, u32 ofs)
{
	struct m48t59_plat_data *pd =
		to_platform_device(dev)->dev.platform_data;

	return readb(pd->ioaddr + ofs);
}
/* Write one byte to the Mostek NVRAM/RTC via its mapped register window. */
static void mostek_write_byte(struct device *dev, u32 ofs, u8 val)
{
	struct m48t59_plat_data *pd =
		to_platform_device(dev)->dev.platform_data;

	writeb(val, pd->ioaddr + ofs);
}
/* Platform data handing the byte accessors above to the rtc-m48t59 driver. */
static struct m48t59_plat_data m48t59_data = {
	.read_byte = mostek_read_byte,
	.write_byte = mostek_write_byte,
};

/* resource is set at runtime */
static struct platform_device m48t59_rtc = {
	.name		= "rtc-m48t59",
	.id		= 0,
	.num_resources	= 1,
	.dev = {
		.platform_data = &m48t59_data,
	},
};
/*
 * Probe the OF "eeprom" node for a Mostek mk48t02/mk48t08 clock chip,
 * map its registers and register the rtc-m48t59 platform device.
 */
static int __devinit clock_probe(struct platform_device *op)
{
	struct device_node *dp = op->dev.of_node;
	const char *model = of_get_property(dp, "model", NULL);

	if (!model)
		return -ENODEV;

	/* Only the primary RTC has an address property */
	if (!of_find_property(dp, "address", NULL))
		return -ENODEV;

	m48t59_rtc.resource = &op->resource[0];
	if (!strcmp(model, "mk48t02")) {
		/* Map the clock register io area read-only */
		m48t59_data.ioaddr = of_ioremap(&op->resource[0], 0,
						2048, "rtc-m48t59");
		m48t59_data.type = M48T59RTC_TYPE_M48T02;
	} else if (!strcmp(model, "mk48t08")) {
		/* mk48t08 has a larger (8K) register window */
		m48t59_data.ioaddr = of_ioremap(&op->resource[0], 0,
						8192, "rtc-m48t59");
		m48t59_data.type = M48T59RTC_TYPE_M48T08;
	} else
		return -ENODEV;

	/* registration failure is logged but not fatal to the probe */
	if (platform_device_register(&m48t59_rtc) < 0)
		printk(KERN_ERR "Registering RTC device failed\n");

	return 0;
}
/* Match the OF node named "eeprom" (where the Mostek chip lives). */
static struct of_device_id clock_match[] = {
	{
		.name = "eeprom",
	},
	{},
};

static struct platform_driver clock_driver = {
	.probe		= clock_probe,
	.driver = {
		.name = "rtc",
		.owner = THIS_MODULE,
		.of_match_table = clock_match,
	},
};
/* Probe for the mostek real time clock chip. */
static int __init clock_init(void)
{
	return platform_driver_register(&clock_driver);
}

/* Must be after subsys_initcall() so that busses are probed.  Must
 * be before device_initcall() because things like the RTC driver
 * need to see the clock registers.
 */
fs_initcall(clock_init);
/*
 * Sub-tick time offset in nanoseconds, derived from the level-10 counter.
 * NOTE(review): bit layout (usec in bits 10..30, limit flag in bit 31)
 * is taken from the masks below -- confirm against the L10 counter spec.
 */
u32 sbus_do_gettimeoffset(void)
{
	unsigned long val = *master_l10_counter;
	unsigned long usec = (val >> 10) & 0x1fffff;

	/* Limit hit? */
	if (val & 0x80000000)
		usec += 1000000 / HZ;

	return usec * 1000;
}
/*
 * Report the sub-tick offset via the installed platform hook, or zero
 * when no hook has been set up yet.
 */
u32 arch_gettimeoffset(void)
{
	return do_arch_gettimeoffset ? do_arch_gettimeoffset() : 0;
}
/* SBus flavor of timer setup: install the offset hook and start timers. */
static void __init sbus_time_init(void)
{
	do_arch_gettimeoffset = sbus_do_gettimeoffset;

	btfixup();

	sparc_irq_config.init_timers(timer_interrupt);
}

/* Dispatch to PCI or SBus timer initialization depending on the machine. */
void __init time_init(void)
{
	if (pcic_present())
		pci_time_init();
	else
		sbus_time_init();
}
/*
 * Write the given seconds value into the "rtc0" class device.
 * Returns 0 on success, -1 when no RTC is available, or the error
 * from rtc_set_mmss().
 */
static int set_rtc_mmss(unsigned long secs)
{
	int err = -1;
	struct rtc_device *rtc = rtc_class_open("rtc0");

	if (!rtc)
		return err;

	err = rtc_set_mmss(rtc, secs);
	rtc_class_close(rtc);
	return err;
}
| gpl-2.0 |
MoKee/android_kernel_motorola_msm8960-common | drivers/media/rc/keymaps/rc-total-media-in-hand.c | 3094 | 2883 | /*
* Total Media In Hand remote controller keytable
*
* Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <media/rc-map.h>
/* Uses NEC extended 0x02bd */
/* Scancode -> keycode table for the Total Media In Hand remote. */
static struct rc_map_table total_media_in_hand[] = {
	{ 0x02bd00, KEY_1 },
	{ 0x02bd01, KEY_2 },
	{ 0x02bd02, KEY_3 },
	{ 0x02bd03, KEY_4 },
	{ 0x02bd04, KEY_5 },
	{ 0x02bd05, KEY_6 },
	{ 0x02bd06, KEY_7 },
	{ 0x02bd07, KEY_8 },
	{ 0x02bd08, KEY_9 },
	{ 0x02bd09, KEY_0 },
	{ 0x02bd0a, KEY_MUTE },
	{ 0x02bd0b, KEY_CYCLEWINDOWS },    /* yellow, [min / max] */
	{ 0x02bd0c, KEY_VIDEO },           /* TV / AV */
	{ 0x02bd0e, KEY_VOLUMEDOWN },
	{ 0x02bd0f, KEY_TIME },            /* TimeShift */
	{ 0x02bd10, KEY_RIGHT },           /* right arrow */
	{ 0x02bd11, KEY_LEFT },            /* left arrow */
	{ 0x02bd12, KEY_UP },              /* up arrow */
	{ 0x02bd13, KEY_DOWN },            /* down arrow */
	{ 0x02bd14, KEY_POWER2 },          /* [red] */
	{ 0x02bd15, KEY_OK },              /* OK */
	{ 0x02bd16, KEY_STOP },
	{ 0x02bd17, KEY_CAMERA },          /* Snapshot */
	{ 0x02bd18, KEY_CHANNELUP },
	{ 0x02bd19, KEY_RECORD },
	{ 0x02bd1a, KEY_CHANNELDOWN },
	{ 0x02bd1c, KEY_ESC },             /* Esc */
	{ 0x02bd1e, KEY_PLAY },
	{ 0x02bd1f, KEY_VOLUMEUP },
	{ 0x02bd40, KEY_PAUSE },
	{ 0x02bd41, KEY_FASTFORWARD },     /* FF >> */
	{ 0x02bd42, KEY_REWIND },          /* FR << */
	{ 0x02bd43, KEY_ZOOM },            /* [window + mouse pointer] */
	{ 0x02bd44, KEY_SHUFFLE },         /* Shuffle */
	{ 0x02bd45, KEY_INFO },            /* [red (I)] */
};
/* Wraps the table above for registration with the rc-core map registry. */
static struct rc_map_list total_media_in_hand_map = {
	.map = {
		.scan    = total_media_in_hand,
		.size    = ARRAY_SIZE(total_media_in_hand),
		.rc_type = RC_TYPE_NEC,
		.name    = RC_MAP_TOTAL_MEDIA_IN_HAND,
	}
};
/* Register the keymap on module load. */
static int __init init_rc_map_total_media_in_hand(void)
{
	return rc_map_register(&total_media_in_hand_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_map_total_media_in_hand(void)
{
	rc_map_unregister(&total_media_in_hand_map);
}
module_init(init_rc_map_total_media_in_hand)
module_exit(exit_rc_map_total_media_in_hand)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
| gpl-2.0 |
Sunfong/sunfong-samsung-3.0 | arch/arm/mach-at91/at91rm9200_time.c | 3094 | 5733 | /*
* linux/arch/arm/mach-at91/at91rm9200_time.c
*
* Copyright (C) 2003 SAN People
* Copyright (C) 2003 ATMEL
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <asm/mach/time.h>
#include <mach/at91_st.h>
static unsigned long last_crtr;
static u32 irqmask;
static struct clock_event_device clkevt;
/*
 * The ST_CRTR is updated asynchronously to the master clock ... but
 * the updates as seen by the CPU don't seem to be strictly monotonic.
 * Waiting until we read the same value twice avoids glitching.
 */
static inline unsigned long read_CRTR(void)
{
	unsigned long x1, x2;

	x1 = at91_sys_read(AT91_ST_CRTR);
	do {
		/* re-read until two consecutive reads agree */
		x2 = at91_sys_read(AT91_ST_CRTR);
		if (x1 == x2)
			break;
		x1 = x2;
	} while (1);
	return x1;
}
/*
* IRQ handler for the timer.
*/
/*
 * IRQ handler for the timer.  The system-timer IRQ line is shared, so
 * only status bits we are currently interested in (irqmask) are acted
 * upon; anything else yields IRQ_NONE so other handlers get a chance.
 */
static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id)
{
	u32 sr = at91_sys_read(AT91_ST_SR) & irqmask;

	/*
	 * irqs should be disabled here, but as the irq is shared they are only
	 * guaranteed to be off if the timer irq is registered first.
	 */
	WARN_ON_ONCE(!irqs_disabled());

	/* simulate "oneshot" timer with alarm */
	if (sr & AT91_ST_ALMS) {
		clkevt.event_handler(&clkevt);
		return IRQ_HANDLED;
	}

	/* periodic mode should handle delayed ticks */
	if (sr & AT91_ST_PITS) {
		u32 crtr = read_CRTR();

		/* catch up: fire the handler once per elapsed LATCH period */
		while (((crtr - last_crtr) & AT91_ST_CRTV) >= LATCH) {
			last_crtr += LATCH;
			clkevt.event_handler(&clkevt);
		}
		return IRQ_HANDLED;
	}

	/* this irq is shared ... */
	return IRQ_NONE;
}
/* irqaction for the shared system-timer interrupt line. */
static struct irqaction at91rm9200_timer_irq = {
	.name		= "at91_tick",
	.flags		= IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= at91rm9200_timer_interrupt
};
/* clocksource read hook: return the glitch-filtered CRTR value. */
static cycle_t read_clk32k(struct clocksource *cs)
{
	return read_CRTR();
}

/* 32 KiHz slow-clock counter exposed as a clocksource (20-bit wide). */
static struct clocksource clk32k = {
	.name		= "32k_counter",
	.rating		= 150,
	.read		= read_clk32k,
	.mask		= CLOCKSOURCE_MASK(20),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
 * Switch the clockevent between periodic (PIT) and oneshot (alarm) mode.
 * All pending timer interrupts are disabled and flushed first, and
 * irqmask is updated so the shared IRQ handler only reacts to the
 * status bits relevant for the new mode.
 */
static void
clkevt32k_mode(enum clock_event_mode mode, struct clock_event_device *dev)
{
	/* Disable and flush pending timer interrupts */
	at91_sys_write(AT91_ST_IDR, AT91_ST_PITS | AT91_ST_ALMS);
	(void) at91_sys_read(AT91_ST_SR);

	last_crtr = read_CRTR();
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* PIT for periodic irqs; fixed rate of 1/HZ */
		irqmask = AT91_ST_PITS;
		at91_sys_write(AT91_ST_PIMR, LATCH);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* ALM for oneshot irqs, set by next_event()
		 * before 32 seconds have passed
		 */
		irqmask = AT91_ST_ALMS;
		at91_sys_write(AT91_ST_RTAR, last_crtr);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_RESUME:
		irqmask = 0;
		break;
	}
	at91_sys_write(AT91_ST_IER, irqmask);
}
/*
 * Program the next oneshot event 'delta' slow-clock ticks from now by
 * writing an absolute match value into RTAR.  Always returns 0.
 */
static int
clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
{
	u32		alm;
	int		status = 0;

	/* min_delta_ns was set up to guarantee this (see timer_init) */
	BUG_ON(delta < 2);

	/* The alarm IRQ uses absolute time (now+delta), not the relative
	 * time (delta) in our calling convention.  Like all clockevents
	 * using such "match" hardware, we have a race to defend against.
	 *
	 * Our defense here is to have set up the clockevent device so the
	 * delta is at least two.  That way we never end up writing RTAR
	 * with the value then held in CRTR ... which would mean the match
	 * wouldn't trigger until 32 seconds later, after CRTR wraps.
	 */
	alm = read_CRTR();

	/* Cancel any pending alarm; flush any pending IRQ */
	at91_sys_write(AT91_ST_RTAR, alm);
	(void) at91_sys_read(AT91_ST_SR);

	/* Schedule alarm by writing RTAR. */
	alm += delta;
	at91_sys_write(AT91_ST_RTAR, alm);

	return status;
}
/* Clockevent device backed by the ST alarm/PIT; mult is set at init. */
static struct clock_event_device clkevt = {
	.name		= "at91_tick",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.shift		= 32,
	.rating		= 150,
	.set_next_event	= clkevt32k_next_event,
	.set_mode	= clkevt32k_mode,
};
/*
* ST (system timer) module supports both clockevents and clocksource.
*/
/*
 * Boot-time setup: quiesce the ST block, claim its shared IRQ, run the
 * slow clock at full rate, then register the clockevent and clocksource.
 */
void __init at91rm9200_timer_init(void)
{
	/* Disable all timer interrupts, and clear any pending ones */
	at91_sys_write(AT91_ST_IDR,
		AT91_ST_PITS | AT91_ST_WDOVF | AT91_ST_RTTINC | AT91_ST_ALMS);
	(void) at91_sys_read(AT91_ST_SR);

	/* Make IRQs happen for the system timer */
	setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);

	/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
	 * directly for the clocksource and all clockevents, after adjusting
	 * its prescaler from the 1 Hz default.
	 */
	at91_sys_write(AT91_ST_RTMR, 1);

	/* Setup timer clockevent, with minimum of two ticks (important!!) */
	clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
	clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
	clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
	clkevt.cpumask = cpumask_of(0);
	clockevents_register_device(&clkevt);

	/* register clocksource */
	clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
}

/* Hook into the generic ARM timer infrastructure. */
struct sys_timer at91rm9200_timer = {
	.init		= at91rm9200_timer_init,
};
| gpl-2.0 |
bestmjh47/ActiveKernel_M250S-JB | drivers/telephony/phonedev.c | 3350 | 3548 | /*
* Telephony registration for Linux
*
* (c) Copyright 1999 Red Hat Software Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Author: Alan Cox, <alan@lxorguk.ukuu.org.uk>
*
* Fixes: Mar 01 2000 Thomas Sparr, <thomas.l.sparr@telia.com>
* phone_register_device now works with unit!=PHONE_UNIT_ANY
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/phonedev.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/kmod.h>
#include <linux/sem.h>
#include <linux/mutex.h>
/* Number of telephony minors this driver can manage. */
#define PHONE_NUM_DEVICES	256

/*
 * Active devices
 */
static struct phone_device *phone_device[PHONE_NUM_DEVICES];
/* Serialises all access to the phone_device[] table. */
static DEFINE_MUTEX(phone_lock);
/*
* Open a phone device.
*/
/*
 * Open a phone device.  Looks up the driver registered for this minor
 * (loading its module on demand via request_module) and swaps the
 * file's f_op over to the device driver's fops, carefully keeping the
 * fops refcounts balanced on every path.
 */
static int phone_open(struct inode *inode, struct file *file)
{
	unsigned int minor = iminor(inode);
	int err = 0;
	struct phone_device *p;
	const struct file_operations *old_fops, *new_fops = NULL;

	if (minor >= PHONE_NUM_DEVICES)
		return -ENODEV;

	mutex_lock(&phone_lock);
	p = phone_device[minor];
	if (p)
		new_fops = fops_get(p->f_op);
	if (!new_fops) {
		/* driver not loaded yet: drop the lock, try a modprobe,
		 * then re-check the table */
		mutex_unlock(&phone_lock);
		request_module("char-major-%d-%d", PHONE_MAJOR, minor);
		mutex_lock(&phone_lock);
		p = phone_device[minor];
		if (p == NULL || (new_fops = fops_get(p->f_op)) == NULL)
		{
			err=-ENODEV;
			goto end;
		}
	}
	old_fops = file->f_op;
	file->f_op = new_fops;
	if (p->open)
		err = p->open(p, file);	/* Tell the device it is open */
	if (err) {
		/* device refused the open: restore the original fops */
		fops_put(file->f_op);
		file->f_op = fops_get(old_fops);
	}
	fops_put(old_fops);
end:
	mutex_unlock(&phone_lock);
	return err;
}
/*
* Telephony For Linux device drivers request registration here.
*/
/*
 * Telephony For Linux device drivers request registration here.
 * With unit == PHONE_UNIT_ANY the first free minor is claimed;
 * otherwise exactly the requested slot is tried.  Returns 0 on
 * success or -ENFILE when no suitable slot is free.
 */
int phone_register_device(struct phone_device *p, int unit)
{
	int first, last, i;
	int ret = -ENFILE;

	if (unit == PHONE_UNIT_ANY) {
		first = 0;
		last = PHONE_NUM_DEVICES - 1;
	} else {
		first = unit;
		last = unit + 1;	/* enter the loop at least one time */
	}

	mutex_lock(&phone_lock);
	for (i = first; i < last; i++) {
		if (phone_device[i])
			continue;
		phone_device[i] = p;
		p->minor = i;
		ret = 0;
		break;
	}
	mutex_unlock(&phone_lock);
	return ret;
}
/*
* Unregister an unused Telephony for linux device
*/
/*
 * Unregister an unused Telephony for linux device.  Only clears the
 * slot if it still points at this device.
 */
void phone_unregister_device(struct phone_device *pfd)
{
	int minor = pfd->minor;

	mutex_lock(&phone_lock);
	if (likely(phone_device[minor] == pfd))
		phone_device[minor] = NULL;
	mutex_unlock(&phone_lock);
}
/* Top-level fops; phone_open() re-routes to the per-device fops. */
static const struct file_operations phone_fops =
{
	.owner		= THIS_MODULE,
	.open		= phone_open,
	.llseek		= noop_llseek,
};
/*
* Board init functions
*/
/*
* Initialise Telephony for linux
*/
/*
 * Initialise Telephony for linux: claim the telephony character-device
 * major.  Returns 0 on success, -EIO if the major is unavailable.
 */
static int __init telephony_init(void)
{
	printk(KERN_INFO "Linux telephony interface: v1.00\n");
	if (register_chrdev(PHONE_MAJOR, "telephony", &phone_fops)) {
		/* was missing a log level; failures should print as errors */
		printk(KERN_ERR "phonedev: unable to get major %d\n",
		       PHONE_MAJOR);
		return -EIO;
	}
	return 0;
}
static void __exit telephony_exit(void)
{
	/* Release the character-device major claimed in telephony_init(). */
	unregister_chrdev(PHONE_MAJOR, "telephony");
}

module_init(telephony_init);
module_exit(telephony_exit);

MODULE_LICENSE("GPL");

/* Exported so telephony card drivers can (un)register their minors. */
EXPORT_SYMBOL(phone_register_device);
EXPORT_SYMBOL(phone_unregister_device);
| gpl-2.0 |
MyAOSP/kernel_samsung_manta | arch/x86/kernel/quirks.c | 4118 | 15380 | /*
* This file contains work-arounds for x86 and x86_64 platform bugs.
*/
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/hpet.h>
#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
/*
 * On Intel E7520/E7320/E7525 (revision <= 0x9) the BIOS may enable
 * hardware IRQ balancing; when detected, software irq balancing and
 * affinity are switched off to avoid conflicting with it.
 */
static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
{
	u8 config;
	u16 word;

	/* BIOS may enable hardware IRQ balancing for
	 * E7520/E7320/E7525(revision ID 0x9 and below)
	 * based platforms.
	 * Disable SW irqbalance/affinity on those platforms.
	 */
	if (dev->revision > 0x9)
		return;

	/* enable access to config space*/
	pci_read_config_byte(dev, 0xf4, &config);
	pci_write_config_byte(dev, 0xf4, config|0x2);

	/*
	 * read xTPR register.  We may not have a pci_dev for device 8
	 * because it might be hidden until the above write.
	 */
	pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);

	if (!(word & (1 << 13))) {
		dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
			"disabling irq balancing and affinity\n");
		noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
		no_irq_affinity = 1;
#endif
	}

	/* put back the original value for config space*/
	if (!(config & 0x2))
		pci_write_config_byte(dev, 0xf4, config);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
			quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
			quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
			quirk_intel_irqbalance);
#endif
#if defined(CONFIG_HPET_TIMER)
/* Physical address of a BIOS-hidden HPET we force-enabled, or 0. */
unsigned long force_hpet_address;

/* Which chipset-specific resume hook force_hpet_resume() must call. */
static enum {
	NONE_FORCE_HPET_RESUME,
	OLD_ICH_FORCE_HPET_RESUME,
	ICH_FORCE_HPET_RESUME,
	VT8237_FORCE_HPET_RESUME,
	NVIDIA_FORCE_HPET_RESUME,
	ATI_FORCE_HPET_RESUME,
} force_hpet_resume_type;

/* Mapping of the ICH Root Complex Base Address block (set at probe). */
static void __iomem *rcba_base;
/*
 * Re-enable the HPET after resume on ICH chipsets, using the RCBA
 * mapping established by ich_force_enable_hpet().
 */
static void ich_force_hpet_resume(void)
{
	u32 val;

	if (!force_hpet_address)
		return;

	BUG_ON(rcba_base == NULL);

	/* read the Function Disable register, dword mode only */
	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80)) {
		/* HPET disabled in HPTC. Trying to enable */
		writel(val | 0x80, rcba_base + 0x3404);
	}

	val = readl(rcba_base + 0x3404);
	if (!(val & 0x80))
		BUG();	/* enable bit did not stick */
	else
		printk(KERN_DEBUG "Force enabled HPET at resume\n");

	return;
}
/*
 * Force-enable the HPET on ICH-family chipsets where the BIOS left it
 * disabled: locate the RCBA block, flip the HPET enable bit in the
 * HPTC register, and record the resulting HPET base address.
 */
static void ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 uninitialized_var(rcba);
	int err = 0;

	if (hpet_address || force_hpet_address)
		return;

	pci_read_config_dword(dev, 0xF0, &rcba);
	rcba &= 0xFFFFC000;
	if (rcba == 0) {
		dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
			"cannot force enable HPET\n");
		return;
	}

	/* use bits 31:14, 16 kB aligned */
	rcba_base = ioremap_nocache(rcba, 0x4000);
	if (rcba_base == NULL) {
		dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
			"cannot force enable HPET\n");
		return;
	}

	/* read the Function Disable register, dword mode only */
	val = readl(rcba_base + 0x3404);

	if (val & 0x80) {
		/* HPET is enabled in HPTC. Just not reported by BIOS */
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		iounmap(rcba_base);
		return;
	}

	/* HPET disabled in HPTC. Trying to enable */
	writel(val | 0x80, rcba_base + 0x3404);

	val = readl(rcba_base + 0x3404);

	if (!(val & 0x80)) {
		err = 1;
	} else {
		val = val & 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
	}

	if (err) {
		force_hpet_address = 0;
		iounmap(rcba_base);
		dev_printk(KERN_DEBUG, &dev->dev,
			"Failed to force enable HPET\n");
	} else {
		/* keep rcba_base mapped for the resume hook */
		force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
	}
}

/* ICH variants known to hide the HPET behind HPTC. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
			 ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16,	/* ICH10 */
			 ich_force_enable_hpet);
/* Device whose config space the chipset-specific resume hooks re-poke. */
static struct pci_dev *cached_dev;

/* Tell the user how to opt in to force-enabling a hidden HPET. */
static void hpet_print_force_info(void)
{
	printk(KERN_INFO "HPET not enabled in BIOS. "
	       "You might try hpet=force boot option\n");
}
/*
 * Resume hook for older ICH chipsets: rewrite GEN_CNTL (0xD0) to set
 * the HPET enable bit and verify it stuck.
 */
static void old_ich_force_hpet_resume(void)
{
	u32 val;
	u32 uninitialized_var(gen_cntl);

	if (!force_hpet_address || !cached_dev)
		return;

	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	/* bits 17:15 -- set enable (bit 17), clear base-address select */
	gen_cntl &= (~(0x7 << 15));
	gen_cntl |= (0x4 << 15);

	pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
	pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val == 0x4)
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
	else
		BUG();	/* hardware refused the enable -- unrecoverable */
}
/*
 * Force-enable the HPET on older ICH chipsets via the GEN_CNTL config
 * register (0xD0).  Bit 17 enables the HPET, bits 16:15 select one of
 * four base addresses starting at 0xFED00000.
 */
static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
	u32 val;
	u32 uninitialized_var(gen_cntl);

	if (hpet_address || force_hpet_address)
		return;

	pci_read_config_dword(dev, 0xD0, &gen_cntl);
	/*
	 * Bit 17 is HPET enable bit.
	 * Bit 16:15 control the HPET base address.
	 */
	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Trying enabling at FED00000 and check
	 * whether it sticks
	 */
	gen_cntl &= (~(0x7 << 15));
	gen_cntl |= (0x4 << 15);
	pci_write_config_dword(dev, 0xD0, gen_cntl);

	pci_read_config_dword(dev, 0xD0, &gen_cntl);

	val = gen_cntl >> 15;
	val &= 0x7;
	if (val & 0x4) {
		/* HPET is enabled in HPTC. Just not reported by BIOS */
		val &= 0x3;
		force_hpet_address = 0xFED00000 | (val << 12);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		cached_dev = dev;
		force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}
/*
* Undocumented chipset features. Make sure that the user enforced
* this.
*/
/* Gate the old-ICH quirk on the user's explicit hpet=force request. */
static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
{
	if (hpet_force_user)
		old_ich_force_enable_hpet(dev);
}

/* Chipsets where forcing needs user opt-in... */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
			 old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
			 old_ich_force_enable_hpet_user);
/* ...and ones where it is applied unconditionally. */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
			 old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
			 old_ich_force_enable_hpet);
/*
 * Resume hook for VIA VT8237: rewrite config register 0x68 with the
 * enable bit and the fixed 0xFED00000 base, then verify.
 */
static void vt8237_force_hpet_resume(void)
{
	u32 val;

	if (!force_hpet_address || !cached_dev)
		return;

	val = 0xfed00000 | 0x80;
	pci_write_config_dword(cached_dev, 0x68, val);

	pci_read_config_dword(cached_dev, 0x68, &val);
	if (val & 0x80)
		printk(KERN_DEBUG "Force enabled HPET at resume\n");
	else
		BUG();	/* enable bit did not stick */
}
/*
 * Force-enable the HPET on VIA VT8235/VT8237/CX700 south bridges via
 * config register 0x68 (bit 7 = enable, bits 31:10 = base address).
 * Requires the user's hpet=force opt-in.
 */
static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
	u32 uninitialized_var(val);

	if (hpet_address || force_hpet_address)
		return;

	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	pci_read_config_dword(dev, 0x68, &val);
	/*
	 * Bit 7 is HPET enable bit.
	 * Bit 31:10 is HPET base address (contrary to what datasheet claims)
	 */
	if (val & 0x80) {
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
			force_hpet_address);
		return;
	}

	/*
	 * HPET is disabled. Trying enabling at FED00000 and check
	 * whether it sticks
	 */
	val = 0xfed00000 | 0x80;
	pci_write_config_dword(dev, 0x68, val);

	pci_read_config_dword(dev, 0x68, &val);
	if (val & 0x80) {
		force_hpet_address = (val & ~0x3ff);
		dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
			"0x%lx\n", force_hpet_address);
		cached_dev = dev;
		force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
		return;
	}

	dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
			 vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
			 vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
			 vt8237_force_enable_hpet);
/* Resume hook for ATI IXP: rewrite the BAR at 0x14 with the HPET base. */
static void ati_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

/*
 * Read the SB4X0 revision ID.  Requires unhiding the revision register
 * first by clearing bit 5 at 0xac and setting bit 8 at 0x70.
 */
static u32 ati_ixp4x0_rev(struct pci_dev *dev)
{
	u32 d;
	u8  b;

	pci_read_config_byte(dev, 0xac, &b);
	b &= ~(1<<5);
	pci_write_config_byte(dev, 0xac, b);
	pci_read_config_dword(dev, 0x70, &d);
	d |= 1<<8;
	pci_write_config_dword(dev, 0x70, d);
	pci_read_config_dword(dev, 0x8, &d);
	d &= 0xff;
	dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);
	return d;
}
/*
 * Force-enable the HPET on ATI IXP400 SMBus devices (revision >= 0x82
 * only).  Programs the base address, enables the HPET interrupt via
 * the 0xcd6/0xcd7 index/data ports, then sets the enable bit at 0x64.
 * Requires the user's hpet=force opt-in.
 */
static void ati_force_enable_hpet(struct pci_dev *dev)
{
	u32 d, val;
	u8  b;

	if (hpet_address || force_hpet_address)
		return;

	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	d = ati_ixp4x0_rev(dev);
	if (d  < 0x82)
		return;

	/* base address */
	pci_write_config_dword(dev, 0x14, 0xfed00000);
	pci_read_config_dword(dev, 0x14, &val);

	/* enable interrupt */
	outb(0x72, 0xcd6); b = inb(0xcd7);
	b |= 0x1;
	outb(0x72, 0xcd6); outb(b, 0xcd7);
	outb(0x72, 0xcd6); b = inb(0xcd7);
	if (!(b & 0x1))
		return;
	pci_read_config_dword(dev, 0x64, &d);
	d |= (1<<10);
	pci_write_config_dword(dev, 0x64, d);
	pci_read_config_dword(dev, 0x64, &d);
	if (!(d & (1<<10)))
		return;

	force_hpet_address = val;
	force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		   force_hpet_address);
	cached_dev = dev;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
			 ati_force_enable_hpet);
/*
* Undocumented chipset feature taken from LinuxBIOS.
*/
/* Resume hook for nVidia bridges: rewrite config register 0x44. */
static void nvidia_force_hpet_resume(void)
{
	pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
	printk(KERN_DEBUG "Force enabled HPET at resume\n");
}
/*
 * Force-enable the HPET on nVidia ISA/LPC bridges by writing base and
 * enable bit to config register 0x44 (quirk taken from LinuxBIOS).
 * Requires the user's hpet=force opt-in.
 */
static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
	u32 uninitialized_var(val);

	if (hpet_address || force_hpet_address)
		return;

	if (!hpet_force_user) {
		hpet_print_force_info();
		return;
	}

	pci_write_config_dword(dev, 0x44, 0xfed00001);
	pci_read_config_dword(dev, 0x44, &val);
	force_hpet_address = val & 0xfffffffe;
	force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
	dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
		force_hpet_address);
	cached_dev = dev;
	return;
}

/* ISA Bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
			nvidia_force_enable_hpet);
/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
			nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
			nvidia_force_enable_hpet);
/*
 * Dispatch to the chipset-specific HPET resume hook recorded by the
 * matching force-enable quirk at probe time; a no-op otherwise.
 */
void force_hpet_resume(void)
{
	switch (force_hpet_resume_type) {
	case ICH_FORCE_HPET_RESUME:
		ich_force_hpet_resume();
		break;
	case OLD_ICH_FORCE_HPET_RESUME:
		old_ich_force_hpet_resume();
		break;
	case VT8237_FORCE_HPET_RESUME:
		vt8237_force_hpet_resume();
		break;
	case NVIDIA_FORCE_HPET_RESUME:
		nvidia_force_hpet_resume();
		break;
	case ATI_FORCE_HPET_RESUME:
		ati_force_hpet_resume();
		break;
	default:
		break;
	}
}
/*
* HPET MSI on some boards (ATI SB700/SB800) has side effect on
* floppy DMA. Disable HPET MSI on such platforms.
* See erratum #27 (Misinterpreted MSI Requests May Result in
* Corrupted LPC DMA Data) in AMD Publication #46837,
* "SB700 Family Product Errata", Rev. 1.0, March 2010.
*/
/* Disable HPET MSI (see SB700 erratum #27 referenced above). */
static void force_disable_hpet_msi(struct pci_dev *unused)
{
	hpet_msi_disable = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
			 force_disable_hpet_msi);
#endif
#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
/* Set correct numa_node information for AMD NB functions */
/*
 * Set correct numa_node information for AMD northbridge functions by
 * reading the node ID from function 0 of the same slot (register 0x60).
 */
static void __init quirk_amd_nb_node(struct pci_dev *dev)
{
	struct pci_dev *nb_ht;
	unsigned int devfn;
	u32 node;
	u32 val;

	devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
	nb_ht = pci_get_slot(dev->bus, devfn);
	if (!nb_ht)
		return;

	pci_read_config_dword(nb_ht, 0x60, &val);
	node = val & 7;
	/*
	 * Some hardware may return an invalid node ID,
	 * so check it first:
	 */
	if (node_online(node))
		set_dev_node(&dev->dev, node);
	pci_dev_put(nb_ht);
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
			quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
			quirk_amd_nb_node);
#endif
| gpl-2.0 |
Sublime-Development/kernel_flounder | drivers/input/serio/sa1111ps2.c | 4630 | 8198 | /*
* linux/drivers/input/serio/sa1111ps2.c
*
* Copyright (C) 2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <asm/hardware/sa1111.h>
/* SA1111 PS/2 block register offsets. */
#define PS2CR		0x0000	/* control */
#define PS2STAT		0x0004	/* status */
#define PS2DATA		0x0008	/* data FIFO */
#define PS2CLKDIV	0x000c	/* clock divisor */
#define PS2PRECNT	0x0010	/* clock precount */

/* PS2CR bits. */
#define PS2CR_ENA	0x08	/* enable interface */
#define PS2CR_FKD	0x02	/* force data line low */
#define PS2CR_FKC	0x01	/* force clock line low */

/* PS2STAT bits. */
#define PS2STAT_STP	0x0100	/* framing (stop-bit) error */
#define PS2STAT_TXE	0x0080	/* transmitter empty */
#define PS2STAT_TXB	0x0040	/* transmitter busy */
#define PS2STAT_RXF	0x0020	/* receive FIFO full */
#define PS2STAT_RXB	0x0010	/* receiver busy */
#define PS2STAT_ENA	0x0008	/* interface enabled */
#define PS2STAT_RXP	0x0004	/* receive parity */
#define PS2STAT_KBD	0x0002	/* data line level */
#define PS2STAT_KBC	0x0001	/* clock line level */

/* Per-port driver state. */
struct ps2if {
	struct serio		*io;	/* serio port we registered */
	struct sa1111_dev	*dev;
	void __iomem		*base;	/* mapped register base */
	unsigned int		open;	/* port open flag */
	spinlock_t		lock;	/* guards the tx ring below */
	unsigned int		head;	/* tx ring producer index */
	unsigned int		tail;	/* tx ring consumer index */
	unsigned char		buf[4];	/* tiny tx ring; size must be power of 2 */
};
/*
* Read all bytes waiting in the PS2 port. There should be
* at the most one, but we loop for safety. If there was a
* framing error, we have to manually clear the status.
*/
/*
 * Read all bytes waiting in the PS2 port.  There should be
 * at the most one, but we loop for safety.  If there was a
 * framing error, we have to manually clear the status.
 */
static irqreturn_t ps2_rxint(int irq, void *dev_id)
{
	struct ps2if *ps2if = dev_id;
	unsigned int scancode, flag, status;

	status = sa1111_readl(ps2if->base + PS2STAT);
	while (status & PS2STAT_RXF) {
		if (status & PS2STAT_STP)
			sa1111_writel(PS2STAT_STP, ps2if->base + PS2STAT);

		/* translate hardware status into serio flags */
		flag = (status & PS2STAT_STP ? SERIO_FRAME : 0) |
		       (status & PS2STAT_RXP ? 0 : SERIO_PARITY);

		scancode = sa1111_readl(ps2if->base + PS2DATA) & 0xff;

		/* odd-ones-count bytes flip the expected parity sense */
		if (hweight8(scancode) & 1)
			flag ^= SERIO_PARITY;

		serio_interrupt(ps2if->io, scancode, flag);

		status = sa1111_readl(ps2if->base + PS2STAT);
	}

	return IRQ_HANDLED;
}
/*
* Completion of ps2 write
*/
/*
 * Completion of ps2 write: push the next queued byte out of the tx
 * ring, or mask the tx interrupt when the ring is empty.
 */
static irqreturn_t ps2_txint(int irq, void *dev_id)
{
	struct ps2if *ps2if = dev_id;
	unsigned int status;

	spin_lock(&ps2if->lock);
	status = sa1111_readl(ps2if->base + PS2STAT);
	if (ps2if->head == ps2if->tail) {
		disable_irq_nosync(irq);
		/* done */
	} else if (status & PS2STAT_TXE) {
		sa1111_writel(ps2if->buf[ps2if->tail], ps2if->base + PS2DATA);
		ps2if->tail = (ps2if->tail + 1) & (sizeof(ps2if->buf) - 1);
	}
	spin_unlock(&ps2if->lock);

	return IRQ_HANDLED;
}
/*
* Write a byte to the PS2 port. We have to wait for the
* port to indicate that the transmitter is empty.
*/
/*
 * Write a byte to the PS2 port.  We have to wait for the
 * port to indicate that the transmitter is empty; if it is not,
 * the byte is queued in the small tx ring and drained by ps2_txint().
 * Always returns 0.
 */
static int ps2_write(struct serio *io, unsigned char val)
{
	struct ps2if *ps2if = io->port_data;
	unsigned long flags;
	unsigned int head;

	spin_lock_irqsave(&ps2if->lock, flags);

	/*
	 * If the TX register is empty, we can go straight out.
	 */
	if (sa1111_readl(ps2if->base + PS2STAT) & PS2STAT_TXE) {
		sa1111_writel(val, ps2if->base + PS2DATA);
	} else {
		if (ps2if->head == ps2if->tail)
			enable_irq(ps2if->dev->irq[1]);
		head = (ps2if->head + 1) & (sizeof(ps2if->buf) - 1);
		/* NOTE(review): if the 4-byte ring is full, the byte is
		 * silently dropped -- presumably acceptable for PS/2 here,
		 * but worth confirming. */
		if (head != ps2if->tail) {
			ps2if->buf[ps2if->head] = val;
			ps2if->head = head;
		}
	}

	spin_unlock_irqrestore(&ps2if->lock, flags);
	return 0;
}
/*
 * serio open hook: power up the SA1111 function block, claim both the
 * rx and tx interrupts, and enable the interface.  On any failure the
 * partially-acquired resources are released in reverse order.
 */
static int ps2_open(struct serio *io)
{
	struct ps2if *ps2if = io->port_data;
	int ret;

	ret = sa1111_enable_device(ps2if->dev);
	if (ret)
		return ret;

	ret = request_irq(ps2if->dev->irq[0], ps2_rxint, 0,
			  SA1111_DRIVER_NAME(ps2if->dev), ps2if);
	if (ret) {
		printk(KERN_ERR "sa1111ps2: could not allocate IRQ%d: %d\n",
			ps2if->dev->irq[0], ret);
		sa1111_disable_device(ps2if->dev);
		return ret;
	}

	ret = request_irq(ps2if->dev->irq[1], ps2_txint, 0,
			  SA1111_DRIVER_NAME(ps2if->dev), ps2if);
	if (ret) {
		printk(KERN_ERR "sa1111ps2: could not allocate IRQ%d: %d\n",
			ps2if->dev->irq[1], ret);
		free_irq(ps2if->dev->irq[0], ps2if);
		sa1111_disable_device(ps2if->dev);
		return ret;
	}

	ps2if->open = 1;

	/* let rx activity (e.g. a key press) wake the system */
	enable_irq_wake(ps2if->dev->irq[0]);

	sa1111_writel(PS2CR_ENA, ps2if->base + PS2CR);
	return 0;
}
/* serio close hook: undo ps2_open() in reverse order. */
static void ps2_close(struct serio *io)
{
	struct ps2if *ps2if = io->port_data;

	sa1111_writel(0, ps2if->base + PS2CR);

	disable_irq_wake(ps2if->dev->irq[0]);

	ps2if->open = 0;

	free_irq(ps2if->dev->irq[1], ps2if);
	free_irq(ps2if->dev->irq[0], ps2if);

	sa1111_disable_device(ps2if->dev);
}
/*
* Clear the input buffer.
*/
/*
 * Clear the input buffer: drain up to 100 stale bytes from the data
 * register, stopping early once it reads back as 0xff (empty).
 */
static void ps2_clear_input(struct ps2if *ps2if)
{
	int i;

	for (i = 0; i < 100; i++) {
		unsigned int data;

		data = sa1111_readl(ps2if->base + PS2DATA) & 0xff;
		if (data == 0xff)
			break;
	}
}
/*
 * Drive the clock/data lines per 'mask' (PS2CR_FKC/PS2CR_FKD), wait
 * briefly, and return the observed KBC/KBD line levels from PS2STAT.
 */
static unsigned int ps2_test_one(struct ps2if *ps2if,
					   unsigned int mask)
{
	unsigned int val;

	sa1111_writel(PS2CR_ENA | mask, ps2if->base + PS2CR);

	udelay(2);

	val = sa1111_readl(ps2if->base + PS2STAT);
	return val & (PS2STAT_KBC | PS2STAT_KBD);
}
/*
* Test the keyboard interface. We basically check to make sure that
* we can drive each line to the keyboard independently of each other.
*/
static int ps2_test(struct ps2if *ps2if)
{
unsigned int stat;
int ret = 0;
stat = ps2_test_one(ps2if, PS2CR_FKC);
if (stat != PS2STAT_KBD) {
printk("PS/2 interface test failed[1]: %02x\n", stat);
ret = -ENODEV;
}
stat = ps2_test_one(ps2if, 0);
if (stat != (PS2STAT_KBC | PS2STAT_KBD)) {
printk("PS/2 interface test failed[2]: %02x\n", stat);
ret = -ENODEV;
}
stat = ps2_test_one(ps2if, PS2CR_FKD);
if (stat != PS2STAT_KBC) {
printk("PS/2 interface test failed[3]: %02x\n", stat);
ret = -ENODEV;
}
sa1111_writel(0, ps2if->base + PS2CR);
return ret;
}
/*
* Add one device to this driver.
*/
/*
 * Add one device to this driver: allocate per-port state and a serio
 * port, claim the MMIO region, program the clock prescalers, run the
 * loopback self-test, and finally register the serio port.
 * Returns 0 on success or a negative errno on failure.
 */
static int ps2_probe(struct sa1111_dev *dev)
{
	struct ps2if *ps2if;
	struct serio *serio;
	int ret;

	ps2if = kzalloc(sizeof(struct ps2if), GFP_KERNEL);
	serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
	if (!ps2if || !serio) {
		ret = -ENOMEM;
		goto free;
	}

	serio->id.type		= SERIO_8042;
	serio->write		= ps2_write;
	serio->open		= ps2_open;
	serio->close		= ps2_close;
	strlcpy(serio->name, dev_name(&dev->dev), sizeof(serio->name));
	strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
	serio->port_data	= ps2if;
	serio->dev.parent	= &dev->dev;
	ps2if->io		= serio;
	ps2if->dev		= dev;
	sa1111_set_drvdata(dev, ps2if);

	spin_lock_init(&ps2if->lock);

	/*
	 * Request the physical region for this PS2 port.
	 * Use resource_size() so the size computation matches the
	 * release_mem_region() calls in the error path and ps2_remove().
	 */
	if (!request_mem_region(dev->res.start, resource_size(&dev->res),
				SA1111_DRIVER_NAME(dev))) {
		ret = -EBUSY;
		goto free;
	}

	/*
	 * Our parent device has already mapped the region.
	 */
	ps2if->base = dev->mapbase;

	sa1111_enable_device(ps2if->dev);

	/* Incoming clock is 8MHz */
	sa1111_writel(0, ps2if->base + PS2CLKDIV);
	sa1111_writel(127, ps2if->base + PS2PRECNT);

	/*
	 * Flush any pending input.
	 */
	ps2_clear_input(ps2if);

	/*
	 * Test the keyboard interface.
	 */
	ret = ps2_test(ps2if);
	if (ret)
		goto out;

	/*
	 * Flush any pending input.
	 */
	ps2_clear_input(ps2if);

	sa1111_disable_device(ps2if->dev);
	serio_register_port(ps2if->io);
	return 0;

 out:
	sa1111_disable_device(ps2if->dev);
	release_mem_region(dev->res.start, resource_size(&dev->res));
 free:
	sa1111_set_drvdata(dev, NULL);
	kfree(ps2if);
	kfree(serio);
	return ret;
}
/*
* Remove one device from this driver.
*/
/*
 * Remove one device from this driver: unregister the serio port and
 * release the MMIO region claimed in ps2_probe().
 */
static int ps2_remove(struct sa1111_dev *dev)
{
	struct ps2if *ps2if = sa1111_get_drvdata(dev);

	serio_unregister_port(ps2if->io);
	release_mem_region(dev->res.start, resource_size(&dev->res));
	sa1111_set_drvdata(dev, NULL);

	kfree(ps2if);
	return 0;
}
/*
* Our device driver structure
*/
/*
 * Our device driver structure: binds to the SA1111 PS/2 function block.
 */
static struct sa1111_driver ps2_driver = {
	.drv = {
		.name	= "sa1111-ps2",
		.owner	= THIS_MODULE,
	},
	.devid		= SA1111_DEVID_PS2,
	.probe		= ps2_probe,
	.remove		= ps2_remove,
};
/* Register the driver with the SA1111 bus on module load. */
static int __init ps2_init(void)
{
	return sa1111_driver_register(&ps2_driver);
}

/* Unregister again on module unload. */
static void __exit ps2_exit(void)
{
	sa1111_driver_unregister(&ps2_driver);
}

module_init(ps2_init);
module_exit(ps2_exit);

MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("SA1111 PS2 controller driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Spondoolies-Tech/kernel | arch/arm/kernel/kprobes-common.c | 6678 | 14512 | /*
* arch/arm/kernel/kprobes-common.c
*
* Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
*
* Some contents moved here from arch/arm/include/asm/kprobes-arm.c which is
* Copyright (C) 2006, 2007 Motorola Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <asm/system_info.h>
#include "kprobes.h"
#ifndef find_str_pc_offset
/*
* For STR and STM instructions, an ARM core may choose to use either
* a +8 or a +12 displacement from the current instruction's address.
* Whichever value is chosen for a given core, it must be the same for
* both instructions and may not change. This function measures it.
*/
/* Measured STR-of-PC displacement for this core: +8 or +12. */
int str_pc_offset;

void __init find_str_pc_offset(void)
{
	int addr, scratch, ret;

	/*
	 * Store PC to memory, read it back, and subtract the address of
	 * the first instruction; the difference is the core's offset.
	 */
	__asm__ (
		"sub	%[ret], pc, #4		\n\t"
		"str	pc, %[addr]		\n\t"
		"ldr	%[scr], %[addr]		\n\t"
		"sub	%[ret], %[scr], %[ret]	\n\t"
		: [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr));

	str_pc_offset = ret;
}
#endif /* !find_str_pc_offset */
#ifndef test_load_write_pc_interworking
/* True if a load to PC switches ARM/Thumb state (ARMv5T and later). */
bool load_write_pc_interworks;

void __init test_load_write_pc_interworking(void)
{
	int arch = cpu_architecture();
	BUG_ON(arch == CPU_ARCH_UNKNOWN);
	load_write_pc_interworks = arch >= CPU_ARCH_ARMv5T;
}
#endif /* !test_load_write_pc_interworking */

#ifndef test_alu_write_pc_interworking
/* True if an ALU write to PC switches ARM/Thumb state (ARMv7+). */
bool alu_write_pc_interworks;

void __init test_alu_write_pc_interworking(void)
{
	int arch = cpu_architecture();
	BUG_ON(arch == CPU_ARCH_UNKNOWN);
	alu_write_pc_interworks = arch >= CPU_ARCH_ARMv7;
}
#endif /* !test_alu_write_pc_interworking */

/* Run all boot-time CPU behaviour probes needed by the decoder. */
void __init arm_kprobe_decode_init(void)
{
	find_str_pc_offset();
	test_load_write_pc_interworking();
	test_alu_write_pc_interworking();
}
/*
 * ARM condition-code evaluators.  Each takes the CPSR and returns
 * non-zero iff the corresponding condition holds.  The compound
 * conditions (HI/LS/GE/LT/GT/LE) are computed with shift tricks that
 * align one flag bit onto another instead of testing bits separately.
 */
static unsigned long __kprobes __check_eq(unsigned long cpsr)
{
	return cpsr & PSR_Z_BIT;
}

static unsigned long __kprobes __check_ne(unsigned long cpsr)
{
	return (~cpsr) & PSR_Z_BIT;
}

static unsigned long __kprobes __check_cs(unsigned long cpsr)
{
	return cpsr & PSR_C_BIT;
}

static unsigned long __kprobes __check_cc(unsigned long cpsr)
{
	return (~cpsr) & PSR_C_BIT;
}

static unsigned long __kprobes __check_mi(unsigned long cpsr)
{
	return cpsr & PSR_N_BIT;
}

static unsigned long __kprobes __check_pl(unsigned long cpsr)
{
	return (~cpsr) & PSR_N_BIT;
}

static unsigned long __kprobes __check_vs(unsigned long cpsr)
{
	return cpsr & PSR_V_BIT;
}

static unsigned long __kprobes __check_vc(unsigned long cpsr)
{
	return (~cpsr) & PSR_V_BIT;
}

/* HI: C set and Z clear. */
static unsigned long __kprobes __check_hi(unsigned long cpsr)
{
	cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
	return cpsr & PSR_C_BIT;
}

/* LS: C clear or Z set. */
static unsigned long __kprobes __check_ls(unsigned long cpsr)
{
	cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
	return (~cpsr) & PSR_C_BIT;
}

/* GE: N == V. */
static unsigned long __kprobes __check_ge(unsigned long cpsr)
{
	cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	return (~cpsr) & PSR_N_BIT;
}

/* LT: N != V. */
static unsigned long __kprobes __check_lt(unsigned long cpsr)
{
	cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	return cpsr & PSR_N_BIT;
}

/* GT: Z clear and N == V. */
static unsigned long __kprobes __check_gt(unsigned long cpsr)
{
	unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	temp |= (cpsr << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return (~temp) & PSR_N_BIT;
}

/* LE: Z set or N != V. */
static unsigned long __kprobes __check_le(unsigned long cpsr)
{
	unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	temp |= (cpsr << 1);	/* PSR_N_BIT |= PSR_Z_BIT */
	return temp & PSR_N_BIT;
}

/* AL (and the reserved NV encoding): always true. */
static unsigned long __kprobes __check_al(unsigned long cpsr)
{
	return true;
}

/* Dispatch table indexed by the instruction's 4-bit condition field. */
kprobe_check_cc * const kprobe_condition_checks[16] = {
	&__check_eq, &__check_ne, &__check_cs, &__check_cc,
	&__check_mi, &__check_pl, &__check_vs, &__check_vc,
	&__check_hi, &__check_ls, &__check_ge, &__check_lt,
	&__check_gt, &__check_le, &__check_al, &__check_al
};
/* Handler for probed instructions that require no simulation work at all. */
void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs)
{
}
/*
 * Execute the instruction from its emulation slot directly, with no
 * register marshalling before or after the call.
 */
void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs)
{
	p->ainsn.insn_fn();
}
/*
 * Simulate an ARM LDM/STM (load/store multiple) instruction in C.
 *
 * Addressing-mode bits decoded from the probed opcode:
 *   L (bit 20) - load (LDM) rather than store (STM)
 *   W (bit 21) - write the updated address back to the base register
 *   U (bit 23) - addresses increment rather than decrement
 *   P (bit 24) - pre-indexed rather than post-indexed
 */
static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	int rn = (insn >> 16) & 0xf;	/* base register number */
	int lbit = insn & (1 << 20);
	int wbit = insn & (1 << 21);
	int ubit = insn & (1 << 23);
	int pbit = insn & (1 << 24);
	long *addr = (long *)regs->uregs[rn];
	int reg_bit_vector;
	int reg_count;

	/* Count registers in the transfer list (bits 0-15). */
	reg_count = 0;
	reg_bit_vector = insn & 0xffff;
	while (reg_bit_vector) {
		reg_bit_vector &= (reg_bit_vector - 1); /* clear lowest set bit */
		++reg_count;
	}

	/*
	 * Compute the lowest address used by the transfer: decrementing
	 * modes (U clear) start reg_count words below the base, and the
	 * two modes with P == U (IB and DA) are one word higher again.
	 */
	if (!ubit)
		addr -= reg_count;
	addr += (!pbit == !ubit);

	/* Transfer registers in ascending register-number order. */
	reg_bit_vector = insn & 0xffff;
	while (reg_bit_vector) {
		int reg = __ffs(reg_bit_vector);
		reg_bit_vector &= (reg_bit_vector - 1);
		if (lbit)
			regs->uregs[reg] = *addr++;
		else
			*addr++ = regs->uregs[reg];
	}

	/*
	 * Writeback: undo the start-address adjustments so that addr
	 * (which has advanced reg_count words) becomes the updated base.
	 */
	if (wbit) {
		if (!ubit)
			addr -= reg_count;
		addr -= (!pbit == !ubit);
		regs->uregs[rn] = (long)addr;
	}
}
/*
 * STM with pc in the register list: the value stored for pc must be the
 * probed instruction's address plus the core-specific offset measured by
 * find_str_pc_offset(), so set ARM_pc accordingly for the simulation,
 * then restore it to point at the following instruction.
 */
static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs)
{
	regs->ARM_pc = (long)p->addr + str_pc_offset;
	simulate_ldm1stm1(p, regs);
	regs->ARM_pc = (long)p->addr + 4;
}
/*
 * LDM with pc in the register list: perform the transfer, then apply the
 * loaded pc value via load_write_pc() so that any interworking behaviour
 * of writing pc with a load is honoured.
 */
static void __kprobes simulate_ldm1_pc(struct kprobe *p, struct pt_regs *regs)
{
	simulate_ldm1stm1(p, regs);
	load_write_pc(regs->ARM_pc, regs);
}
/*
 * Run the instruction in the slot (p->ainsn.insn_fn) with CPU r0-r12
 * loaded from *regs, then store r0-r12 back to *regs afterwards.  CPSR
 * flags are not transferred in either direction, hence "noflags".
 *
 * The regs pointer is pinned in r1 and the slot address in lr so that
 * both survive the ldmia that overwrites r0-r12.
 */
static void __kprobes
emulate_generic_r0_12_noflags(struct kprobe *p, struct pt_regs *regs)
{
	register void *rregs asm("r1") = regs;
	register void *rfn asm("lr") = p->ainsn.insn_fn;

	__asm__ __volatile__ (
		"stmdb sp!, {%[regs], r11} \n\t"	/* save regs ptr and r11 */
		"ldmia %[regs], {r0-r12} \n\t"		/* load CPU regs from pt_regs */
#if __LINUX_ARM_ARCH__ >= 6
		"blx %[fn] \n\t"			/* call the slot */
#else
		/* Pre-v6 has no blx: set up lr by hand and branch via pc. */
		"str %[fn], [sp, #-4]! \n\t"
		"adr lr, 1f \n\t"
		"ldr pc, [sp], #4 \n\t"
		"1: \n\t"
#endif
		"ldr lr, [sp], #4 \n\t" /* lr = regs */
		"stmia lr, {r0-r12} \n\t"		/* store results back */
		"ldr r11, [sp], #4 \n\t"
		: [regs] "=r" (rregs), [fn] "=r" (rfn)
		: "0" (rregs), "1" (rfn)
		: "r0", "r2", "r3", "r4", "r5", "r6", "r7",
		  "r8", "r9", "r10", "r12", "memory", "cc"
		);
}
/*
 * Variant of emulate_generic_r0_12_noflags for instructions whose
 * registers were remapped down by two during decoding: offsetting the
 * pt_regs pointer by two words makes CPU r0-r12 correspond to the saved
 * r2-r14 values.
 */
static void __kprobes
emulate_generic_r2_14_noflags(struct kprobe *p, struct pt_regs *regs)
{
	emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+2));
}
/*
 * LDM variant remapped down by three (CPU r0-r12 correspond to saved
 * r3-r15).  As the emulated load may have written a new pc value into
 * the saved state, apply it with load_write_pc() afterwards.
 */
static void __kprobes
emulate_ldm_r3_15(struct kprobe *p, struct pt_regs *regs)
{
	emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+3));
	load_write_pc(regs->ARM_pc, regs);
}
/*
 * Decode an ARM LDM/STM instruction, choosing either an emulation handler
 * (when the register list can be shifted into a range that one of the
 * generic emulation stubs supports) or a slower C simulation handler.
 *
 * Returns INSN_GOOD when asi->insn[0] holds a (possibly modified)
 * instruction to execute from the slot, or INSN_GOOD_NO_SLOT when the
 * instruction will be simulated entirely in C.
 */
enum kprobe_insn __kprobes
kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	kprobe_insn_handler_t *handler = NULL;	/* was 0: use NULL for pointers */
	unsigned reglist = insn & 0xffff;	/* transfer list, bits 0-15 */
	int is_ldm = insn & 0x100000;		/* L bit (20): load vs store */
	int rn = (insn >> 16) & 0xf;		/* base register */

	if (rn <= 12 && (reglist & 0xe000) == 0) {
		/* Instruction only uses registers in the range R0..R12 */
		handler = emulate_generic_r0_12_noflags;
	} else if (rn >= 2 && (reglist & 0x8003) == 0) {
		/* Instruction only uses registers in the range R2..R14 */
		rn -= 2;
		reglist >>= 2;
		handler = emulate_generic_r2_14_noflags;
	} else if (rn >= 3 && (reglist & 0x0007) == 0) {
		/* Instruction only uses registers in the range R3..R15 */
		if (is_ldm && (reglist & 0x8000)) {
			rn -= 3;
			reglist >>= 3;
			handler = emulate_ldm_r3_15;
		}
	}

	if (handler) {
		/* We can emulate the instruction in (possibly) modified form */
		asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist;
		asi->insn_handler = handler;
		return INSN_GOOD;
	}

	/* Fallback to slower simulation... */
	if (reglist & 0x8000)
		/* pc in the list needs special pc handling */
		handler = is_ldm ? simulate_ldm1_pc : simulate_stm1_pc;
	else
		handler = simulate_ldm1stm1;
	asi->insn_handler = handler;
	return INSN_GOOD_NO_SLOT;
}
/*
* Prepare an instruction slot to receive an instruction for emulating.
* This is done by placing a subroutine return after the location where the
* instruction will be placed. We also modify ARM instructions to be
* unconditional as the condition code will already be checked before any
* emulation handler is called.
*/
static kprobe_opcode_t __kprobes
prepare_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
		      bool thumb)
{
#ifdef CONFIG_THUMB2_KERNEL
	if (thumb) {
		u16 *thumb_insn = (u16 *)asi->insn;
		/* Place the return after both possible instruction widths. */
		thumb_insn[1] = 0x4770; /* Thumb bx lr */
		thumb_insn[2] = 0x4770; /* Thumb bx lr */
		return insn;
	}
	asi->insn[1] = 0xe12fff1e; /* ARM bx lr */
#else
	asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */
#endif
	/* Make an ARM instruction unconditional */
	/* insn < 0xe0000000 means condition field 0x0-0xD: force it to 0xE (AL) */
	if (insn < 0xe0000000)
		insn = (insn | 0xe0000000) & ~0x10000000;
	return insn;
}
/*
* Write a (probably modified) instruction into the slot previously prepared by
* prepare_emulated_insn
*/
static void __kprobes
set_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
		  bool thumb)
{
#ifdef CONFIG_THUMB2_KERNEL
	if (thumb) {
		u16 *ip = (u16 *)asi->insn;
		/* 32-bit Thumb instructions: high halfword first. */
		if (is_wide_instruction(insn))
			*ip++ = insn >> 16;
		*ip++ = insn;
		return;
	}
#endif
	/* ARM (or non-Thumb2 kernel): single 32-bit word. */
	asi->insn[0] = insn;
}
/*
* When we modify the register numbers encoded in an instruction to be emulated,
* the new values come from this define. For ARM and 32-bit Thumb instructions
* this gives...
*
* bit position 16 12 8 4 0
* ---------------+---+---+---+---+---+
* register r2 r0 r1 -- r3
*/
#define INSN_NEW_BITS 0x00020103
/* Each nibble has same value as that at INSN_NEW_BITS bit 16 */
#define INSN_SAMEAS16_BITS 0x22222222
/*
* Validate and modify each of the registers encoded in an instruction.
*
* Each nibble in regs contains a value from enum decode_reg_type. For each
* non-zero value, the corresponding nibble in pinsn is validated and modified
* according to the type.
*/
/*
 * Check each register nibble flagged in 'regs' against its
 * decode_reg_type constraint, substituting the replacement register
 * numbers from INSN_NEW_BITS / INSN_SAMEAS16_BITS as it goes.  Returns
 * false when the instruction uses a disallowed register.
 *
 * The constant 0xdddddddd (or 0xffffffff) puts the register number being
 * tested in every nibble, so "(insn ^ const) & mask" is zero exactly when
 * the nibble selected by mask equals that register.
 */
static bool __kprobes decode_regs(kprobe_opcode_t* pinsn, u32 regs)
{
	kprobe_opcode_t insn = *pinsn;
	kprobe_opcode_t mask = 0xf; /* Start at least significant nibble */

	for (; regs != 0; regs >>= 4, mask <<= 4) {
		kprobe_opcode_t new_bits = INSN_NEW_BITS;

		switch (regs & 0xf) {
		case REG_TYPE_NONE:
			/* Nibble not a register, skip to next */
			continue;
		case REG_TYPE_ANY:
			/* Any register is allowed */
			break;
		case REG_TYPE_SAMEAS16:
			/* Replace register with same as at bit position 16 */
			new_bits = INSN_SAMEAS16_BITS;
			break;
		case REG_TYPE_SP:
			/* Only allow SP (R13) */
			if ((insn ^ 0xdddddddd) & mask)
				goto reject;
			break;
		case REG_TYPE_PC:
			/* Only allow PC (R15) */
			if ((insn ^ 0xffffffff) & mask)
				goto reject;
			break;
		case REG_TYPE_NOSP:
			/* Reject SP (R13) */
			if (((insn ^ 0xdddddddd) & mask) == 0)
				goto reject;
			break;
		case REG_TYPE_NOSPPC:
		case REG_TYPE_NOSPPCX:
			/* Reject SP and PC (R13 and R15) */
			/* 0xd and 0xf differ only in bit 1, masked off here */
			if (((insn ^ 0xdddddddd) & 0xdddddddd & mask) == 0)
				goto reject;
			break;
		case REG_TYPE_NOPCWB:
			if (!is_writeback(insn))
				break; /* No writeback, so any register is OK */
			/* fall through... */
		case REG_TYPE_NOPC:
		case REG_TYPE_NOPCX:
			/* Reject PC (R15) */
			if (((insn ^ 0xffffffff) & mask) == 0)
				goto reject;
			break;
		}
		/* Replace value of nibble with new register number... */
		insn &= ~mask;
		insn |= new_bits & mask;
	}

	*pinsn = insn;
	return true;

reject:
	return false;
}
/*
 * Size of each decode-table entry struct, indexed by its decode_type.
 * Used by kprobe_decode_insn() to step to the next table entry.
 */
static const int decode_struct_sizes[NUM_DECODE_TYPES] = {
	[DECODE_TYPE_TABLE]	= sizeof(struct decode_table),
	[DECODE_TYPE_CUSTOM]	= sizeof(struct decode_custom),
	[DECODE_TYPE_SIMULATE]	= sizeof(struct decode_simulate),
	[DECODE_TYPE_EMULATE]	= sizeof(struct decode_emulate),
	[DECODE_TYPE_OR]	= sizeof(struct decode_or),
	[DECODE_TYPE_REJECT]	= sizeof(struct decode_reject)
};
/*
* kprobe_decode_insn operates on data tables in order to decode an ARM
* architecture instruction onto which a kprobe has been placed.
*
* These instruction decoding tables are a concatenation of entries each
* of which consist of one of the following structs:
*
* decode_table
* decode_custom
* decode_simulate
* decode_emulate
* decode_or
* decode_reject
*
* Each of these starts with a struct decode_header which has the following
* fields:
*
* type_regs
* mask
* value
*
* The least significant DECODE_TYPE_BITS of type_regs contains a value
* from enum decode_type, this indicates which of the decode_* structs
* the entry contains. The value DECODE_TYPE_END indicates the end of the
* table.
*
* When the table is parsed, each entry is checked in turn to see if it
* matches the instruction to be decoded using the test:
*
* (insn & mask) == value
*
* If no match is found before the end of the table is reached then decoding
* fails with INSN_REJECTED.
*
* When a match is found, decode_regs() is called to validate and modify each
* of the registers encoded in the instruction; the data it uses to do this
* is (type_regs >> DECODE_TYPE_BITS). A validation failure will cause decoding
* to fail with INSN_REJECTED.
*
* Once the instruction has passed the above tests, further processing
* depends on the type of the table entry's decode struct.
*
*/
int __kprobes
kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
		   const union decode_item *table, bool thumb)
{
	const struct decode_header *h = (struct decode_header *)table;
	const struct decode_header *next;
	bool matched = false;

	insn = prepare_emulated_insn(insn, asi, thumb);

	for (;; h = next) {
		enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
		u32 regs = h->type_regs.bits >> DECODE_TYPE_BITS;

		if (type == DECODE_TYPE_END)
			return INSN_REJECTED;

		/* By default step linearly to the next table entry... */
		next = (struct decode_header *)
				((uintptr_t)h + decode_struct_sizes[type]);

		/* A preceding DECODE_TYPE_OR entry forces this one to match. */
		if (!matched && (insn & h->mask.bits) != h->value.bits)
			continue;

		if (!decode_regs(&insn, regs))
			return INSN_REJECTED;

		switch (type) {

		case DECODE_TYPE_TABLE: {
			struct decode_table *d = (struct decode_table *)h;
			/* ...but a sub-table entry redirects the walk. */
			next = (struct decode_header *)d->table.table;
			break;
		}

		case DECODE_TYPE_CUSTOM: {
			struct decode_custom *d = (struct decode_custom *)h;
			return (*d->decoder.decoder)(insn, asi);
		}

		case DECODE_TYPE_SIMULATE: {
			struct decode_simulate *d = (struct decode_simulate *)h;
			asi->insn_handler = d->handler.handler;
			return INSN_GOOD_NO_SLOT;
		}

		case DECODE_TYPE_EMULATE: {
			struct decode_emulate *d = (struct decode_emulate *)h;
			asi->insn_handler = d->handler.handler;
			set_emulated_insn(insn, asi, thumb);
			return INSN_GOOD;
		}

		case DECODE_TYPE_OR:
			matched = true;
			break;

		case DECODE_TYPE_REJECT:
		default:
			return INSN_REJECTED;
		}
	}
}
| gpl-2.0 |
Bananian/linux-bananapi | arch/m68k/platform/532x/gpio.c | 7446 | 10512 | /*
* Coldfire generic GPIO support
*
* (C) Copyright 2009, Steven King <sfking@fdwdc.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfgpio.h>
static struct mcf_gpio_chip mcf_gpio_chips[] = {
{
.gpio_chip = {
.label = "PIRQ",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value,
.ngpio = 8,
},
.pddr = (void __iomem *) MCFEPORT_EPDDR,
.podr = (void __iomem *) MCFEPORT_EPDR,
.ppdr = (void __iomem *) MCFEPORT_EPPDR,
},
{
.gpio_chip = {
.label = "FECH",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 8,
.ngpio = 8,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_FECH,
.podr = (void __iomem *) MCFGPIO_PODR_FECH,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECH,
.setr = (void __iomem *) MCFGPIO_PPDSDR_FECH,
.clrr = (void __iomem *) MCFGPIO_PCLRR_FECH,
},
{
.gpio_chip = {
.label = "FECL",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 16,
.ngpio = 8,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_FECL,
.podr = (void __iomem *) MCFGPIO_PODR_FECL,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECL,
.setr = (void __iomem *) MCFGPIO_PPDSDR_FECL,
.clrr = (void __iomem *) MCFGPIO_PCLRR_FECL,
},
{
.gpio_chip = {
.label = "SSI",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 24,
.ngpio = 5,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_SSI,
.podr = (void __iomem *) MCFGPIO_PODR_SSI,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_SSI,
.setr = (void __iomem *) MCFGPIO_PPDSDR_SSI,
.clrr = (void __iomem *) MCFGPIO_PCLRR_SSI,
},
{
.gpio_chip = {
.label = "BUSCTL",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 32,
.ngpio = 4,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_BUSCTL,
.podr = (void __iomem *) MCFGPIO_PODR_BUSCTL,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL,
.setr = (void __iomem *) MCFGPIO_PPDSDR_BUSCTL,
.clrr = (void __iomem *) MCFGPIO_PCLRR_BUSCTL,
},
{
.gpio_chip = {
.label = "BE",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 40,
.ngpio = 4,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_BE,
.podr = (void __iomem *) MCFGPIO_PODR_BE,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_BE,
.setr = (void __iomem *) MCFGPIO_PPDSDR_BE,
.clrr = (void __iomem *) MCFGPIO_PCLRR_BE,
},
{
.gpio_chip = {
.label = "CS",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 49,
.ngpio = 5,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_CS,
.podr = (void __iomem *) MCFGPIO_PODR_CS,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_CS,
.setr = (void __iomem *) MCFGPIO_PPDSDR_CS,
.clrr = (void __iomem *) MCFGPIO_PCLRR_CS,
},
{
.gpio_chip = {
.label = "PWM",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 58,
.ngpio = 4,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_PWM,
.podr = (void __iomem *) MCFGPIO_PODR_PWM,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_PWM,
.setr = (void __iomem *) MCFGPIO_PPDSDR_PWM,
.clrr = (void __iomem *) MCFGPIO_PCLRR_PWM,
},
{
.gpio_chip = {
.label = "FECI2C",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 64,
.ngpio = 4,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_FECI2C,
.podr = (void __iomem *) MCFGPIO_PODR_FECI2C,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C,
.setr = (void __iomem *) MCFGPIO_PPDSDR_FECI2C,
.clrr = (void __iomem *) MCFGPIO_PCLRR_FECI2C,
},
{
.gpio_chip = {
.label = "UART",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 72,
.ngpio = 8,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_UART,
.podr = (void __iomem *) MCFGPIO_PODR_UART,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_UART,
.setr = (void __iomem *) MCFGPIO_PPDSDR_UART,
.clrr = (void __iomem *) MCFGPIO_PCLRR_UART,
},
{
.gpio_chip = {
.label = "QSPI",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 80,
.ngpio = 6,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_QSPI,
.podr = (void __iomem *) MCFGPIO_PODR_QSPI,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_QSPI,
.setr = (void __iomem *) MCFGPIO_PPDSDR_QSPI,
.clrr = (void __iomem *) MCFGPIO_PCLRR_QSPI,
},
{
.gpio_chip = {
.label = "TIMER",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 88,
.ngpio = 4,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_TIMER,
.podr = (void __iomem *) MCFGPIO_PODR_TIMER,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_TIMER,
.setr = (void __iomem *) MCFGPIO_PPDSDR_TIMER,
.clrr = (void __iomem *) MCFGPIO_PCLRR_TIMER,
},
{
.gpio_chip = {
.label = "LCDDATAH",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 96,
.ngpio = 2,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_LCDDATAH,
.podr = (void __iomem *) MCFGPIO_PODR_LCDDATAH,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAH,
.setr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAH,
.clrr = (void __iomem *) MCFGPIO_PCLRR_LCDDATAH,
},
{
.gpio_chip = {
.label = "LCDDATAM",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 104,
.ngpio = 8,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_LCDDATAM,
.podr = (void __iomem *) MCFGPIO_PODR_LCDDATAM,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAM,
.setr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAM,
.clrr = (void __iomem *) MCFGPIO_PCLRR_LCDDATAM,
},
{
.gpio_chip = {
.label = "LCDDATAL",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 112,
.ngpio = 8,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_LCDDATAL,
.podr = (void __iomem *) MCFGPIO_PODR_LCDDATAL,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAL,
.setr = (void __iomem *) MCFGPIO_PPDSDR_LCDDATAL,
.clrr = (void __iomem *) MCFGPIO_PCLRR_LCDDATAL,
},
{
.gpio_chip = {
.label = "LCDCTLH",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 120,
.ngpio = 1,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_LCDCTLH,
.podr = (void __iomem *) MCFGPIO_PODR_LCDCTLH,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLH,
.setr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLH,
.clrr = (void __iomem *) MCFGPIO_PCLRR_LCDCTLH,
},
{
.gpio_chip = {
.label = "LCDCTLL",
.request = mcf_gpio_request,
.free = mcf_gpio_free,
.direction_input = mcf_gpio_direction_input,
.direction_output = mcf_gpio_direction_output,
.get = mcf_gpio_get_value,
.set = mcf_gpio_set_value_fast,
.base = 128,
.ngpio = 8,
},
.pddr = (void __iomem *) MCFGPIO_PDDR_LCDCTLL,
.podr = (void __iomem *) MCFGPIO_PODR_LCDCTLL,
.ppdr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLL,
.setr = (void __iomem *) MCFGPIO_PPDSDR_LCDCTLL,
.clrr = (void __iomem *) MCFGPIO_PCLRR_LCDCTLL,
},
};
/* Register every ColdFire GPIO bank defined above with gpiolib. */
static int __init mcf_gpio_init(void)
{
	unsigned bank;

	for (bank = 0; bank < ARRAY_SIZE(mcf_gpio_chips); bank++)
		(void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[bank]);
	return 0;
}
core_initcall(mcf_gpio_init);
| gpl-2.0 |
NSDCars5/kernel_nanhu_ares | arch/powerpc/boot/elf_util.c | 12054 | 2339 | /*
* Copyright (C) Paul Mackerras 1997.
*
* Updates for PPC64 by Todd Inglett, Dave Engebretsen & Peter Bergner.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <stdarg.h>
#include <stddef.h>
#include "elf.h"
#include "page.h"
#include "string.h"
#include "stdio.h"
int parse_elf64(void *hdr, struct elf_info *info)
{
Elf64_Ehdr *elf64 = hdr;
Elf64_Phdr *elf64ph;
unsigned int i;
if (!(elf64->e_ident[EI_MAG0] == ELFMAG0 &&
elf64->e_ident[EI_MAG1] == ELFMAG1 &&
elf64->e_ident[EI_MAG2] == ELFMAG2 &&
elf64->e_ident[EI_MAG3] == ELFMAG3 &&
elf64->e_ident[EI_CLASS] == ELFCLASS64 &&
elf64->e_ident[EI_DATA] == ELFDATA2MSB &&
(elf64->e_type == ET_EXEC ||
elf64->e_type == ET_DYN) &&
elf64->e_machine == EM_PPC64))
return 0;
elf64ph = (Elf64_Phdr *)((unsigned long)elf64 +
(unsigned long)elf64->e_phoff);
for (i = 0; i < (unsigned int)elf64->e_phnum; i++, elf64ph++)
if (elf64ph->p_type == PT_LOAD)
break;
if (i >= (unsigned int)elf64->e_phnum)
return 0;
info->loadsize = (unsigned long)elf64ph->p_filesz;
info->memsize = (unsigned long)elf64ph->p_memsz;
info->elfoffset = (unsigned long)elf64ph->p_offset;
return 1;
}
int parse_elf32(void *hdr, struct elf_info *info)
{
Elf32_Ehdr *elf32 = hdr;
Elf32_Phdr *elf32ph;
unsigned int i;
if (!(elf32->e_ident[EI_MAG0] == ELFMAG0 &&
elf32->e_ident[EI_MAG1] == ELFMAG1 &&
elf32->e_ident[EI_MAG2] == ELFMAG2 &&
elf32->e_ident[EI_MAG3] == ELFMAG3 &&
elf32->e_ident[EI_CLASS] == ELFCLASS32 &&
elf32->e_ident[EI_DATA] == ELFDATA2MSB &&
(elf32->e_type == ET_EXEC ||
elf32->e_type == ET_DYN) &&
elf32->e_machine == EM_PPC))
return 0;
elf32ph = (Elf32_Phdr *) ((unsigned long)elf32 + elf32->e_phoff);
for (i = 0; i < elf32->e_phnum; i++, elf32ph++)
if (elf32ph->p_type == PT_LOAD)
break;
if (i >= elf32->e_phnum)
return 0;
info->loadsize = elf32ph->p_filesz;
info->memsize = elf32ph->p_memsz;
info->elfoffset = elf32ph->p_offset;
return 1;
}
| gpl-2.0 |
CandyDevices/kernel_htc_msm8974 | fs/nls/nls_iso8859-3.c | 12566 | 13181 | /*
* linux/fs/nls/nls_iso8859-3.c
*
* Charset iso8859-3 translation tables.
* Generated automatically from the Unicode and charset
* tables from the Unicode Organization (www.unicode.org).
* The Unicode to charset table has only exact mappings.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>
static const wchar_t charset2uni[256] = {
/* 0x00*/
0x0000, 0x0001, 0x0002, 0x0003,
0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b,
0x000c, 0x000d, 0x000e, 0x000f,
/* 0x10*/
0x0010, 0x0011, 0x0012, 0x0013,
0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b,
0x001c, 0x001d, 0x001e, 0x001f,
/* 0x20*/
0x0020, 0x0021, 0x0022, 0x0023,
0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b,
0x002c, 0x002d, 0x002e, 0x002f,
/* 0x30*/
0x0030, 0x0031, 0x0032, 0x0033,
0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b,
0x003c, 0x003d, 0x003e, 0x003f,
/* 0x40*/
0x0040, 0x0041, 0x0042, 0x0043,
0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b,
0x004c, 0x004d, 0x004e, 0x004f,
/* 0x50*/
0x0050, 0x0051, 0x0052, 0x0053,
0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b,
0x005c, 0x005d, 0x005e, 0x005f,
/* 0x60*/
0x0060, 0x0061, 0x0062, 0x0063,
0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b,
0x006c, 0x006d, 0x006e, 0x006f,
/* 0x70*/
0x0070, 0x0071, 0x0072, 0x0073,
0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b,
0x007c, 0x007d, 0x007e, 0x007f,
/* 0x80*/
0x0080, 0x0081, 0x0082, 0x0083,
0x0084, 0x0085, 0x0086, 0x0087,
0x0088, 0x0089, 0x008a, 0x008b,
0x008c, 0x008d, 0x008e, 0x008f,
/* 0x90*/
0x0090, 0x0091, 0x0092, 0x0093,
0x0094, 0x0095, 0x0096, 0x0097,
0x0098, 0x0099, 0x009a, 0x009b,
0x009c, 0x009d, 0x009e, 0x009f,
/* 0xa0*/
0x00a0, 0x0126, 0x02d8, 0x00a3,
0x00a4, 0x0000, 0x0124, 0x00a7,
0x00a8, 0x0130, 0x015e, 0x011e,
0x0134, 0x00ad, 0x0000, 0x017b,
/* 0xb0*/
0x00b0, 0x0127, 0x00b2, 0x00b3,
0x00b4, 0x00b5, 0x0125, 0x00b7,
0x00b8, 0x0131, 0x015f, 0x011f,
0x0135, 0x00bd, 0x0000, 0x017c,
/* 0xc0*/
0x00c0, 0x00c1, 0x00c2, 0x0000,
0x00c4, 0x010a, 0x0108, 0x00c7,
0x00c8, 0x00c9, 0x00ca, 0x00cb,
0x00cc, 0x00cd, 0x00ce, 0x00cf,
/* 0xd0*/
0x0000, 0x00d1, 0x00d2, 0x00d3,
0x00d4, 0x0120, 0x00d6, 0x00d7,
0x011c, 0x00d9, 0x00da, 0x00db,
0x00dc, 0x016c, 0x015c, 0x00df,
/* 0xe0*/
0x00e0, 0x00e1, 0x00e2, 0x0000,
0x00e4, 0x010b, 0x0109, 0x00e7,
0x00e8, 0x00e9, 0x00ea, 0x00eb,
0x00ec, 0x00ed, 0x00ee, 0x00ef,
/* 0xf0*/
0x0000, 0x00f1, 0x00f2, 0x00f3,
0x00f4, 0x0121, 0x00f6, 0x00f7,
0x011d, 0x00f9, 0x00fa, 0x00fb,
0x00fc, 0x016d, 0x015d, 0x02d9,
};
static const unsigned char page00[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0x00, 0x00, 0xa3, 0xa4, 0x00, 0x00, 0xa7, /* 0xa0-0xa7 */
0xa8, 0x00, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00, /* 0xa8-0xaf */
0xb0, 0x00, 0xb2, 0xb3, 0xb4, 0xb5, 0x00, 0xb7, /* 0xb0-0xb7 */
0xb8, 0x00, 0x00, 0x00, 0x00, 0xbd, 0x00, 0x00, /* 0xb8-0xbf */
0xc0, 0xc1, 0xc2, 0x00, 0xc4, 0x00, 0x00, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0x00, 0xd1, 0xd2, 0xd3, 0xd4, 0x00, 0xd6, 0xd7, /* 0xd0-0xd7 */
0x00, 0xd9, 0xda, 0xdb, 0xdc, 0x00, 0x00, 0xdf, /* 0xd8-0xdf */
0xe0, 0xe1, 0xe2, 0x00, 0xe4, 0x00, 0x00, 0xe7, /* 0xe0-0xe7 */
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
0x00, 0xf1, 0xf2, 0xf3, 0xf4, 0x00, 0xf6, 0xf7, /* 0xf0-0xf7 */
0x00, 0xf9, 0xfa, 0xfb, 0xfc, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};
/*
 * Unicode page 0x01 (Latin Extended-A) -> iso8859-3.  Index is the low
 * byte of the code point; 0x00 means no iso8859-3 encoding exists.
 * Entries 0x80-0xff are implicitly zero.
 */
static const unsigned char page01[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
	0xc6, 0xe6, 0xc5, 0xe5, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0x00, 0x00, 0x00, 0x00, 0xd8, 0xf8, 0xab, 0xbb, /* 0x18-0x1f */
	0xd5, 0xf5, 0x00, 0x00, 0xa6, 0xb6, 0xa1, 0xb1, /* 0x20-0x27 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
	0xa9, 0xb9, 0x00, 0x00, 0xac, 0xbc, 0x00, 0x00, /* 0x30-0x37 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
	0x00, 0x00, 0x00, 0x00, 0xde, 0xfe, 0xaa, 0xba, /* 0x58-0x5f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
	0x00, 0x00, 0x00, 0x00, 0xdd, 0xfd, 0x00, 0x00, /* 0x68-0x6f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
	0x00, 0x00, 0x00, 0xaf, 0xbf, 0x00, 0x00, 0x00, /* 0x78-0x7f */
};
/*
 * Unicode page 0x02 -> iso8859-3.  Only U+02D8 (0xa2) and U+02D9 (0xff)
 * have encodings (see charset2uni); entries 0xe0-0xff are implicitly zero.
 */
static const unsigned char page02[256] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
	0xa2, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
};
/*
 * Per-page lookup: page_uni2charset[u >> 8] maps the low byte of Unicode
 * code point u to iso8859-3; a NULL page means no code point in that
 * 256-entry page is representable.
 */
static const unsigned char *const page_uni2charset[256] = {
	page00, page01, page02, NULL, NULL, NULL, NULL, NULL,
};
/*
 * Lowercase-fold table indexed by ISO 8859-3 byte value.  A zero entry
 * marks a byte with no defined case mapping (or an invalid byte).
 */
static const unsigned char charset2lower[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0xb1, 0xa2, 0xa3, 0xa4, 0x00, 0xb6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0x69, 0xba, 0xbb, 0xbc, 0xad, 0x00, 0xbf, /* 0xa8-0xaf */
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0x00, 0xbf, /* 0xb8-0xbf */
0xe0, 0xe1, 0xe2, 0x00, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
0x00, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xd7, /* 0xd0-0xd7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0xd8-0xdf */
0xe0, 0xe1, 0xe2, 0x00, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
0x00, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
/*
 * Uppercase-fold table indexed by ISO 8859-3 byte value.  A zero entry
 * marks a byte with no defined case mapping (or an invalid byte).
 */
static const unsigned char charset2upper[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0x00, 0xaf, /* 0xa8-0xaf */
0xb0, 0xa1, 0xb2, 0xb3, 0xb4, 0x00, 0xa6, 0xb7, /* 0xb0-0xb7 */
0xb8, 0x49, 0xaa, 0xab, 0xac, 0xbd, 0x00, 0xaf, /* 0xb8-0xbf */
0xc0, 0xc1, 0xc2, 0x00, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0x00, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
0xc0, 0xc1, 0xc2, 0x00, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
0x00, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xf7, /* 0xf0-0xf7 */
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xff, /* 0xf8-0xff */
};
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
const unsigned char *uni2charset;
unsigned char cl = uni & 0x00ff;
unsigned char ch = (uni & 0xff00) >> 8;
if (boundlen <= 0)
return -ENAMETOOLONG;
uni2charset = page_uni2charset[ch];
if (uni2charset && uni2charset[cl])
out[0] = uni2charset[cl];
else
return -EINVAL;
return 1;
}
/*
 * char2uni - translate one ISO 8859-3 byte to its Unicode code point.
 *
 * Stores the looked-up value in *uni.  Returns 1 on success or -EINVAL
 * when the byte has no Unicode mapping (table entry 0x0000).
 */
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
	*uni = charset2uni[rawstring[0]];
	return (*uni == 0x0000) ? -EINVAL : 1;
}
/* NLS descriptor tying the iso8859-3 name to its conversion tables. */
static struct nls_table table = {
.charset = "iso8859-3",
.uni2char = uni2char,
.char2uni = char2uni,
.charset2lower = charset2lower,
.charset2upper = charset2upper,
.owner = THIS_MODULE,
};
/* Register the charset with the kernel NLS core on module load. */
static int __init init_nls_iso8859_3(void)
{
return register_nls(&table);
}
/* Unregister on module unload. */
static void __exit exit_nls_iso8859_3(void)
{
unregister_nls(&table);
}
module_init(init_nls_iso8859_3)
module_exit(exit_nls_iso8859_3)
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
omnirom/android_kernel_samsung_tuna | fs/nls/nls_iso8859-6.c | 12566 | 10468 | /*
* linux/fs/nls/nls_iso8859-6.c
*
* Charset iso8859-6 translation tables.
* Generated automatically from the Unicode and charset
* tables from the Unicode Organization (www.unicode.org).
* The Unicode to charset table has only exact mappings.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>
/*
 * ISO 8859-6 byte -> Unicode code point, indexed by byte value.
 * An entry of 0x0000 (other than index 0) marks an undefined byte.
 */
static const wchar_t charset2uni[256] = {
/* 0x00*/
0x0000, 0x0001, 0x0002, 0x0003,
0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b,
0x000c, 0x000d, 0x000e, 0x000f,
/* 0x10*/
0x0010, 0x0011, 0x0012, 0x0013,
0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b,
0x001c, 0x001d, 0x001e, 0x001f,
/* 0x20*/
0x0020, 0x0021, 0x0022, 0x0023,
0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b,
0x002c, 0x002d, 0x002e, 0x002f,
/* 0x30*/
0x0660, 0x0661, 0x0662, 0x0663,
0x0664, 0x0665, 0x0666, 0x0667,
0x0668, 0x0669, 0x003a, 0x003b,
0x003c, 0x003d, 0x003e, 0x003f,
/* 0x40*/
0x0040, 0x0041, 0x0042, 0x0043,
0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b,
0x004c, 0x004d, 0x004e, 0x004f,
/* 0x50*/
0x0050, 0x0051, 0x0052, 0x0053,
0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b,
0x005c, 0x005d, 0x005e, 0x005f,
/* 0x60*/
0x0060, 0x0061, 0x0062, 0x0063,
0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b,
0x006c, 0x006d, 0x006e, 0x006f,
/* 0x70*/
0x0070, 0x0071, 0x0072, 0x0073,
0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b,
0x007c, 0x007d, 0x007e, 0x007f,
/* 0x80*/
0x0080, 0x0081, 0x0082, 0x0083,
0x0084, 0x0085, 0x0086, 0x0087,
0x0088, 0x0089, 0x008a, 0x008b,
0x008c, 0x008d, 0x008e, 0x008f,
/* 0x90*/
0x0090, 0x0091, 0x0092, 0x0093,
0x0094, 0x0095, 0x0096, 0x0097,
0x0098, 0x0099, 0x009a, 0x009b,
0x009c, 0x009d, 0x009e, 0x009f,
/* 0xa0*/
0x00a0, 0x0000, 0x0000, 0x0000,
0x00a4, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000,
0x060c, 0x00ad, 0x0000, 0x0000,
/* 0xb0*/
0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x061b,
0x0000, 0x0000, 0x0000, 0x061f,
/* 0xc0*/
0x0000, 0x0621, 0x0622, 0x0623,
0x0624, 0x0625, 0x0626, 0x0627,
0x0628, 0x0629, 0x062a, 0x062b,
0x062c, 0x062d, 0x062e, 0x062f,
/* 0xd0*/
0x0630, 0x0631, 0x0632, 0x0633,
0x0634, 0x0635, 0x0636, 0x0637,
0x0638, 0x0639, 0x063a, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000,
/* 0xe0*/
0x0640, 0x0641, 0x0642, 0x0643,
0x0644, 0x0645, 0x0646, 0x0647,
0x0648, 0x0649, 0x064a, 0x064b,
0x064c, 0x064d, 0x064e, 0x064f,
/* 0xf0*/
0x0650, 0x0651, 0x0652, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000,
};
/*
 * Unicode page 0x00 (U+00xx) -> ISO 8859-6 byte.  A zero entry means
 * "no mapping"; only indices 0x00-0xaf are initialized, the remainder
 * default to zero.
 */
static const unsigned char page00[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00, /* 0xa8-0xaf */
};
/*
 * Unicode page 0x06 (U+06xx, Arabic) -> ISO 8859-6 byte.  Zero means
 * "no mapping"; only indices 0x00-0x6f are initialized.
 */
static const unsigned char page06[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0xac, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0xbf, /* 0x18-0x1f */
0x00, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x20-0x27 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x28-0x2f */
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0x30-0x37 */
0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x40-0x47 */
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x48-0x4f */
0xf0, 0xf1, 0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x60-0x67 */
0x38, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
};
/*
 * Top-level dispatch for uni2char(): indexed by the high byte of the
 * Unicode code point; NULL means no code point in that page maps into
 * this charset.  Entries past index 7 default to NULL.
 */
static const unsigned char *const page_uni2charset[256] = {
page00, NULL, NULL, NULL, NULL, NULL, page06, NULL,
};
/*
 * Lowercase-fold table indexed by ISO 8859-6 byte value.  Arabic has
 * no letter case, so non-ASCII entries are either identity or zero
 * (zero = no mapping / invalid byte).
 */
static const unsigned char charset2lower[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
0x00, 0x00, 0x00, 0x00, 0xac, 0xad, 0x00, 0x00, /* 0xa8-0xaf */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0xbf, /* 0xb8-0xbf */
0x00, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
0xf0, 0xf1, 0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};
/*
 * Uppercase-fold table indexed by ISO 8859-6 byte value.  Arabic has
 * no letter case, so non-ASCII entries are either identity or zero
 * (zero = no mapping / invalid byte).
 *
 * Fix: the 0xf8-0xff row was missing from the initializer.  The eight
 * entries implicitly defaulted to zero, so behavior is unchanged, but
 * the row is now written out explicitly so the table is complete and
 * matches charset2lower row-for-row.
 */
static const unsigned char charset2upper[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0x00, 0x00, 0x00, 0xa4, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
0x00, 0x00, 0x00, 0x00, 0xac, 0xad, 0x00, 0x00, /* 0xa8-0xaf */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
0x00, 0x00, 0x00, 0xbb, 0x00, 0x00, 0x00, 0xbf, /* 0xb8-0xbf */
0x00, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
0xd8, 0xd9, 0xda, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
0xf0, 0xf1, 0xf2, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
const unsigned char *uni2charset;
unsigned char cl = uni & 0x00ff;
unsigned char ch = (uni & 0xff00) >> 8;
if (boundlen <= 0)
return -ENAMETOOLONG;
uni2charset = page_uni2charset[ch];
if (uni2charset && uni2charset[cl])
out[0] = uni2charset[cl];
else
return -EINVAL;
return 1;
}
/*
 * char2uni - translate one ISO 8859-6 byte to its Unicode code point.
 *
 * Stores the looked-up value in *uni.  Returns 1 on success or -EINVAL
 * when the byte has no Unicode mapping (table entry 0x0000).
 */
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
	wchar_t code = charset2uni[rawstring[0]];

	*uni = code;
	if (code == 0x0000)
		return -EINVAL;
	return 1;
}
/* NLS descriptor tying the iso8859-6 name to its conversion tables. */
static struct nls_table table = {
.charset = "iso8859-6",
.uni2char = uni2char,
.char2uni = char2uni,
.charset2lower = charset2lower,
.charset2upper = charset2upper,
.owner = THIS_MODULE,
};
/* Register the charset with the kernel NLS core on module load. */
static int __init init_nls_iso8859_6(void)
{
return register_nls(&table);
}
/* Unregister on module unload. */
static void __exit exit_nls_iso8859_6(void)
{
unregister_nls(&table);
}
module_init(init_nls_iso8859_6)
module_exit(exit_nls_iso8859_6)
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
HomerSp/shooter_u-ics | fs/nls/nls_cp865.c | 12566 | 17508 | /*
* linux/fs/nls/nls_cp865.c
*
* Charset cp865 translation tables.
* Generated automatically from the Unicode and charset
* tables from the Unicode Organization (www.unicode.org).
* The Unicode to charset table has only exact mappings.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>
/*
 * CP 865 byte -> Unicode code point, indexed by byte value.  Every
 * byte is defined in this code page, so there are no zero entries
 * other than index 0.
 */
static const wchar_t charset2uni[256] = {
/* 0x00*/
0x0000, 0x0001, 0x0002, 0x0003,
0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b,
0x000c, 0x000d, 0x000e, 0x000f,
/* 0x10*/
0x0010, 0x0011, 0x0012, 0x0013,
0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b,
0x001c, 0x001d, 0x001e, 0x001f,
/* 0x20*/
0x0020, 0x0021, 0x0022, 0x0023,
0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b,
0x002c, 0x002d, 0x002e, 0x002f,
/* 0x30*/
0x0030, 0x0031, 0x0032, 0x0033,
0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b,
0x003c, 0x003d, 0x003e, 0x003f,
/* 0x40*/
0x0040, 0x0041, 0x0042, 0x0043,
0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b,
0x004c, 0x004d, 0x004e, 0x004f,
/* 0x50*/
0x0050, 0x0051, 0x0052, 0x0053,
0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b,
0x005c, 0x005d, 0x005e, 0x005f,
/* 0x60*/
0x0060, 0x0061, 0x0062, 0x0063,
0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b,
0x006c, 0x006d, 0x006e, 0x006f,
/* 0x70*/
0x0070, 0x0071, 0x0072, 0x0073,
0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b,
0x007c, 0x007d, 0x007e, 0x007f,
/* 0x80*/
0x00c7, 0x00fc, 0x00e9, 0x00e2,
0x00e4, 0x00e0, 0x00e5, 0x00e7,
0x00ea, 0x00eb, 0x00e8, 0x00ef,
0x00ee, 0x00ec, 0x00c4, 0x00c5,
/* 0x90*/
0x00c9, 0x00e6, 0x00c6, 0x00f4,
0x00f6, 0x00f2, 0x00fb, 0x00f9,
0x00ff, 0x00d6, 0x00dc, 0x00f8,
0x00a3, 0x00d8, 0x20a7, 0x0192,
/* 0xa0*/
0x00e1, 0x00ed, 0x00f3, 0x00fa,
0x00f1, 0x00d1, 0x00aa, 0x00ba,
0x00bf, 0x2310, 0x00ac, 0x00bd,
0x00bc, 0x00a1, 0x00ab, 0x00a4,
/* 0xb0*/
0x2591, 0x2592, 0x2593, 0x2502,
0x2524, 0x2561, 0x2562, 0x2556,
0x2555, 0x2563, 0x2551, 0x2557,
0x255d, 0x255c, 0x255b, 0x2510,
/* 0xc0*/
0x2514, 0x2534, 0x252c, 0x251c,
0x2500, 0x253c, 0x255e, 0x255f,
0x255a, 0x2554, 0x2569, 0x2566,
0x2560, 0x2550, 0x256c, 0x2567,
/* 0xd0*/
0x2568, 0x2564, 0x2565, 0x2559,
0x2558, 0x2552, 0x2553, 0x256b,
0x256a, 0x2518, 0x250c, 0x2588,
0x2584, 0x258c, 0x2590, 0x2580,
/* 0xe0*/
0x03b1, 0x00df, 0x0393, 0x03c0,
0x03a3, 0x03c3, 0x00b5, 0x03c4,
0x03a6, 0x0398, 0x03a9, 0x03b4,
0x221e, 0x03c6, 0x03b5, 0x2229,
/* 0xf0*/
0x2261, 0x00b1, 0x2265, 0x2264,
0x2320, 0x2321, 0x00f7, 0x2248,
0x00b0, 0x2219, 0x00b7, 0x221a,
0x207f, 0x00b2, 0x25a0, 0x00a0,
};
/*
 * Per-page Unicode -> CP 865 tables.  Each pageNN array maps the low
 * byte of a U+NNxx code point to a CP 865 byte; a zero entry means
 * "no mapping".  Arrays are only initialized up to the last non-zero
 * row; remaining entries default to zero.
 */
static const unsigned char page00[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
0xff, 0xad, 0x00, 0x9c, 0xaf, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
0x00, 0x00, 0xa6, 0xae, 0xaa, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
0xf8, 0xf1, 0xfd, 0x00, 0x00, 0xe6, 0x00, 0xfa, /* 0xb0-0xb7 */
0x00, 0x00, 0xa7, 0x00, 0xac, 0xab, 0x00, 0xa8, /* 0xb8-0xbf */
0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, 0x92, 0x80, /* 0xc0-0xc7 */
0x00, 0x90, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
0x00, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x99, 0x00, /* 0xd0-0xd7 */
0x9d, 0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */
0x85, 0xa0, 0x83, 0x00, 0x84, 0x86, 0x91, 0x87, /* 0xe0-0xe7 */
0x8a, 0x82, 0x88, 0x89, 0x8d, 0xa1, 0x8c, 0x8b, /* 0xe8-0xef */
0x00, 0xa4, 0x95, 0xa2, 0x93, 0x00, 0x94, 0xf6, /* 0xf0-0xf7 */
0x9b, 0x97, 0xa3, 0x96, 0x81, 0x00, 0x00, 0x98, /* 0xf8-0xff */
};
/* U+01xx: only U+0192 (f with hook) maps, to 0x9f. */
static const unsigned char page01[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
};
/* U+03xx: Greek letters used as math/engineering symbols in CP 865. */
static const unsigned char page03[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */
0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */
};
/* U+20xx: superscript n (0xfc) and peseta sign (0x9e). */
static const unsigned char page20[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xa0-0xa7 */
};
/* U+22xx: mathematical operators. */
static const unsigned char page22[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */
};
/* U+23xx: miscellaneous technical symbols. */
static const unsigned char page23[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
};
/* U+25xx: box-drawing and block elements. */
static const unsigned char page25[256] = {
0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */
0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */
0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */
0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
};
/*
 * Top-level dispatch for uni2char(): indexed by the high byte of the
 * Unicode code point; NULL means no code point in that page maps into
 * this charset.  Entries past index 0x27 default to NULL.
 */
static const unsigned char *const page_uni2charset[256] = {
page00, page01, NULL, page03, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
page20, NULL, page22, page23, NULL, page25, NULL, NULL,
};
/*
 * Lowercase-fold table indexed by CP 865 byte value.  A zero entry
 * marks a byte with no defined case mapping.
 */
static const unsigned char charset2lower[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x87, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x84, 0x86, /* 0x88-0x8f */
0x82, 0x91, 0x91, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x94, 0x81, 0x9b, 0x9c, 0x9b, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
/*
 * Uppercase-fold table indexed by CP 865 byte value.  A zero entry
 * marks a byte with no defined case mapping.
 */
static const unsigned char charset2upper[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x9a, 0x90, 0x00, 0x8e, 0x00, 0x8f, 0x80, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x92, 0x92, 0x00, 0x99, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0x00, 0x99, 0x9a, 0x9d, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */
0x00, 0x00, 0x00, 0x00, 0xa5, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */
0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
const unsigned char *uni2charset;
unsigned char cl = uni & 0x00ff;
unsigned char ch = (uni & 0xff00) >> 8;
if (boundlen <= 0)
return -ENAMETOOLONG;
uni2charset = page_uni2charset[ch];
if (uni2charset && uni2charset[cl])
out[0] = uni2charset[cl];
else
return -EINVAL;
return 1;
}
/*
 * char2uni - translate one cp865 byte to its Unicode code point.
 * Always stores the looked-up value in *uni (0x0000 for unmapped
 * bytes, matching the original behavior); returns 1 on success or
 * -EINVAL if the byte has no Unicode mapping. boundlen is unused
 * because cp865 is a single-byte charset.
 */
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
	*uni = charset2uni[rawstring[0]];
	return (*uni == 0x0000) ? -EINVAL : 1;
}
/* NLS method table for codepage 865 (DOS Nordic); filesystems obtain it
 * by name via load_nls("cp865") once this module registers it. */
static struct nls_table table = {
.charset = "cp865",
.uni2char = uni2char,
.char2uni = char2uni,
.charset2lower = charset2lower,
.charset2upper = charset2upper,
.owner = THIS_MODULE,
};
/* Module init: register the cp865 table with the NLS core.
 * Returns 0 on success or the negative errno from register_nls(). */
static int __init init_nls_cp865(void)
{
return register_nls(&table);
}
/* Module exit: remove the cp865 table from the NLS registry. */
static void __exit exit_nls_cp865(void)
{
unregister_nls(&table);
}
/* Hook init/exit into the module loader; tables are dual-licensed. */
module_init(init_nls_cp865)
module_exit(exit_nls_cp865)
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
imoseyon/leanKernel-note3 | fs/nls/nls_cp862.c | 12566 | 19506 | /*
* linux/fs/nls/nls_cp862.c
*
* Charset cp862 translation tables.
* Generated automatically from the Unicode and charset
* tables from the Unicode Organization (www.unicode.org).
* The Unicode to charset table has only exact mappings.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>
static const wchar_t charset2uni[256] = {
/* 0x00*/
0x0000, 0x0001, 0x0002, 0x0003,
0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b,
0x000c, 0x000d, 0x000e, 0x000f,
/* 0x10*/
0x0010, 0x0011, 0x0012, 0x0013,
0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b,
0x001c, 0x001d, 0x001e, 0x001f,
/* 0x20*/
0x0020, 0x0021, 0x0022, 0x0023,
0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b,
0x002c, 0x002d, 0x002e, 0x002f,
/* 0x30*/
0x0030, 0x0031, 0x0032, 0x0033,
0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b,
0x003c, 0x003d, 0x003e, 0x003f,
/* 0x40*/
0x0040, 0x0041, 0x0042, 0x0043,
0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b,
0x004c, 0x004d, 0x004e, 0x004f,
/* 0x50*/
0x0050, 0x0051, 0x0052, 0x0053,
0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b,
0x005c, 0x005d, 0x005e, 0x005f,
/* 0x60*/
0x0060, 0x0061, 0x0062, 0x0063,
0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b,
0x006c, 0x006d, 0x006e, 0x006f,
/* 0x70*/
0x0070, 0x0071, 0x0072, 0x0073,
0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b,
0x007c, 0x007d, 0x007e, 0x007f,
/* 0x80*/
0x05d0, 0x05d1, 0x05d2, 0x05d3,
0x05d4, 0x05d5, 0x05d6, 0x05d7,
0x05d8, 0x05d9, 0x05da, 0x05db,
0x05dc, 0x05dd, 0x05de, 0x05df,
/* 0x90*/
0x05e0, 0x05e1, 0x05e2, 0x05e3,
0x05e4, 0x05e5, 0x05e6, 0x05e7,
0x05e8, 0x05e9, 0x05ea, 0x00a2,
0x00a3, 0x00a5, 0x20a7, 0x0192,
/* 0xa0*/
0x00e1, 0x00ed, 0x00f3, 0x00fa,
0x00f1, 0x00d1, 0x00aa, 0x00ba,
0x00bf, 0x2310, 0x00ac, 0x00bd,
0x00bc, 0x00a1, 0x00ab, 0x00bb,
/* 0xb0*/
0x2591, 0x2592, 0x2593, 0x2502,
0x2524, 0x2561, 0x2562, 0x2556,
0x2555, 0x2563, 0x2551, 0x2557,
0x255d, 0x255c, 0x255b, 0x2510,
/* 0xc0*/
0x2514, 0x2534, 0x252c, 0x251c,
0x2500, 0x253c, 0x255e, 0x255f,
0x255a, 0x2554, 0x2569, 0x2566,
0x2560, 0x2550, 0x256c, 0x2567,
/* 0xd0*/
0x2568, 0x2564, 0x2565, 0x2559,
0x2558, 0x2552, 0x2553, 0x256b,
0x256a, 0x2518, 0x250c, 0x2588,
0x2584, 0x258c, 0x2590, 0x2580,
/* 0xe0*/
0x03b1, 0x00df, 0x0393, 0x03c0,
0x03a3, 0x03c3, 0x00b5, 0x03c4,
0x03a6, 0x0398, 0x03a9, 0x03b4,
0x221e, 0x03c6, 0x03b5, 0x2229,
/* 0xf0*/
0x2261, 0x00b1, 0x2265, 0x2264,
0x2320, 0x2321, 0x00f7, 0x2248,
0x00b0, 0x2219, 0x00b7, 0x221a,
0x207f, 0x00b2, 0x25a0, 0x00a0,
};
static const unsigned char page00[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
0xff, 0xad, 0x9b, 0x9c, 0x00, 0x9d, 0x00, 0x00, /* 0xa0-0xa7 */
0x00, 0x00, 0xa6, 0xae, 0xaa, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
0xf8, 0xf1, 0xfd, 0x00, 0x00, 0xe6, 0x00, 0xfa, /* 0xb0-0xb7 */
0x00, 0x00, 0xa7, 0xaf, 0xac, 0xab, 0x00, 0xa8, /* 0xb8-0xbf */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
0x00, 0xa5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, /* 0xd8-0xdf */
0x00, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0xa1, 0x00, 0x00, /* 0xe8-0xef */
0x00, 0xa4, 0x00, 0xa2, 0x00, 0x00, 0x00, 0xf6, /* 0xf0-0xf7 */
0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
};
static const unsigned char page01[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0x00, 0x00, 0x9f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
};
static const unsigned char page03[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0x00, 0x00, 0x00, 0xe2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
0x00, 0x00, 0x00, 0xe4, 0x00, 0x00, 0xe8, 0x00, /* 0xa0-0xa7 */
0x00, 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
0x00, 0xe0, 0x00, 0x00, 0xeb, 0xee, 0x00, 0x00, /* 0xb0-0xb7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
0xe3, 0x00, 0x00, 0xe5, 0xe7, 0x00, 0xed, 0x00, /* 0xc0-0xc7 */
};
static const unsigned char page05[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0xd0-0xd7 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0xd8-0xdf */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0xe0-0xe7 */
0x98, 0x99, 0x9a, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
};
static const unsigned char page20[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, /* 0xa0-0xa7 */
};
static const unsigned char page22[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0xf9, 0xfb, 0x00, 0x00, 0x00, 0xec, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0xef, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0xf7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0xf0, 0x00, 0x00, 0xf3, 0xf2, 0x00, 0x00, /* 0x60-0x67 */
};
static const unsigned char page23[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0xa9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0xf4, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
};
static const unsigned char page25[256] = {
0xc4, 0x00, 0xb3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0xbf, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0xd9, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0xb4, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0xc2, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0xc1, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0xc5, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0xcd, 0xba, 0xd5, 0xd6, 0xc9, 0xb8, 0xb7, 0xbb, /* 0x50-0x57 */
0xd4, 0xd3, 0xc8, 0xbe, 0xbd, 0xbc, 0xc6, 0xc7, /* 0x58-0x5f */
0xcc, 0xb5, 0xb6, 0xb9, 0xd1, 0xd2, 0xcb, 0xcf, /* 0x60-0x67 */
0xd0, 0xca, 0xd8, 0xd7, 0xce, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
0xdf, 0x00, 0x00, 0x00, 0xdc, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0xdb, 0x00, 0x00, 0x00, 0xdd, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0xde, 0xb0, 0xb1, 0xb2, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
};
/* Unicode->cp862 page dispatch: indexed by the high byte of a code
 * point. NULL means no character in that 256-entry Unicode page maps
 * to cp862; entries past page 0x25 are implicitly NULL. */
static const unsigned char *const page_uni2charset[256] = {
page00, page01, NULL, page03, NULL, page05, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
page20, NULL, page22, page23, NULL, page25, NULL, NULL,
};
static const unsigned char charset2lower[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa4, 0xa6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
0xe0, 0xe1, 0x00, 0xe3, 0xe5, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
0xed, 0x00, 0x00, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
static const unsigned char charset2upper[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x00, /* 0x98-0x9f */
0x00, 0x00, 0x00, 0x00, 0xa5, 0xa5, 0xa6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, /* 0xa8-0xaf */
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
0x00, 0xe1, 0xe2, 0x00, 0xe4, 0xe4, 0x00, 0x00, /* 0xe0-0xe7 */
0xe8, 0xe9, 0xea, 0x00, 0xec, 0xe8, 0x00, 0xef, /* 0xe8-0xef */
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, /* 0xf8-0xff */
};
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
const unsigned char *uni2charset;
unsigned char cl = uni & 0x00ff;
unsigned char ch = (uni & 0xff00) >> 8;
if (boundlen <= 0)
return -ENAMETOOLONG;
uni2charset = page_uni2charset[ch];
if (uni2charset && uni2charset[cl])
out[0] = uni2charset[cl];
else
return -EINVAL;
return 1;
}
/*
 * char2uni - convert one cp862 byte to its Unicode code point.
 * *uni is always written (0x0000 for unmapped bytes, as in the
 * original); returns 1 on success, -EINVAL for unmapped input.
 * boundlen is ignored: cp862 is a single-byte charset.
 */
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
	wchar_t w = charset2uni[*rawstring];

	*uni = w;
	return w ? 1 : -EINVAL;
}
/* NLS method table for codepage 862 (DOS Hebrew); looked up by name
 * via load_nls("cp862") after registration. */
static struct nls_table table = {
.charset = "cp862",
.uni2char = uni2char,
.char2uni = char2uni,
.charset2lower = charset2lower,
.charset2upper = charset2upper,
.owner = THIS_MODULE,
};
/* Module init: register the cp862 table with the NLS core.
 * Returns 0 on success or the negative errno from register_nls(). */
static int __init init_nls_cp862(void)
{
return register_nls(&table);
}
/* Module exit: remove the cp862 table from the NLS registry. */
static void __exit exit_nls_cp862(void)
{
unregister_nls(&table);
}
/* Hook init/exit into the module loader; tables are dual-licensed. */
module_init(init_nls_cp862)
module_exit(exit_nls_cp862)
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
anoane/ville-4.2.2-sense5-evitaul_porting | fs/nls/nls_iso8859-7.c | 12566 | 13558 | /*
* linux/fs/nls/nls_iso8859-7.c
*
* Charset iso8859-7 translation tables.
* Generated automatically from the Unicode and charset
* tables from the Unicode Organization (www.unicode.org).
* The Unicode to charset table has only exact mappings.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/nls.h>
#include <linux/errno.h>
static const wchar_t charset2uni[256] = {
/* 0x00*/
0x0000, 0x0001, 0x0002, 0x0003,
0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b,
0x000c, 0x000d, 0x000e, 0x000f,
/* 0x10*/
0x0010, 0x0011, 0x0012, 0x0013,
0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001a, 0x001b,
0x001c, 0x001d, 0x001e, 0x001f,
/* 0x20*/
0x0020, 0x0021, 0x0022, 0x0023,
0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b,
0x002c, 0x002d, 0x002e, 0x002f,
/* 0x30*/
0x0030, 0x0031, 0x0032, 0x0033,
0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b,
0x003c, 0x003d, 0x003e, 0x003f,
/* 0x40*/
0x0040, 0x0041, 0x0042, 0x0043,
0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b,
0x004c, 0x004d, 0x004e, 0x004f,
/* 0x50*/
0x0050, 0x0051, 0x0052, 0x0053,
0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b,
0x005c, 0x005d, 0x005e, 0x005f,
/* 0x60*/
0x0060, 0x0061, 0x0062, 0x0063,
0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b,
0x006c, 0x006d, 0x006e, 0x006f,
/* 0x70*/
0x0070, 0x0071, 0x0072, 0x0073,
0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b,
0x007c, 0x007d, 0x007e, 0x007f,
/* 0x80*/
0x0080, 0x0081, 0x0082, 0x0083,
0x0084, 0x0085, 0x0086, 0x0087,
0x0088, 0x0089, 0x008a, 0x008b,
0x008c, 0x008d, 0x008e, 0x008f,
/* 0x90*/
0x0090, 0x0091, 0x0092, 0x0093,
0x0094, 0x0095, 0x0096, 0x0097,
0x0098, 0x0099, 0x009a, 0x009b,
0x009c, 0x009d, 0x009e, 0x009f,
/* 0xa0*/
0x00a0, 0x02bd, 0x02bc, 0x00a3,
0x0000, 0x0000, 0x00a6, 0x00a7,
0x00a8, 0x00a9, 0x0000, 0x00ab,
0x00ac, 0x00ad, 0x0000, 0x2015,
/* 0xb0*/
0x00b0, 0x00b1, 0x00b2, 0x00b3,
0x0384, 0x0385, 0x0386, 0x00b7,
0x0388, 0x0389, 0x038a, 0x00bb,
0x038c, 0x00bd, 0x038e, 0x038f,
/* 0xc0*/
0x0390, 0x0391, 0x0392, 0x0393,
0x0394, 0x0395, 0x0396, 0x0397,
0x0398, 0x0399, 0x039a, 0x039b,
0x039c, 0x039d, 0x039e, 0x039f,
/* 0xd0*/
0x03a0, 0x03a1, 0x0000, 0x03a3,
0x03a4, 0x03a5, 0x03a6, 0x03a7,
0x03a8, 0x03a9, 0x03aa, 0x03ab,
0x03ac, 0x03ad, 0x03ae, 0x03af,
/* 0xe0*/
0x03b0, 0x03b1, 0x03b2, 0x03b3,
0x03b4, 0x03b5, 0x03b6, 0x03b7,
0x03b8, 0x03b9, 0x03ba, 0x03bb,
0x03bc, 0x03bd, 0x03be, 0x03bf,
/* 0xf0*/
0x03c0, 0x03c1, 0x03c2, 0x03c3,
0x03c4, 0x03c5, 0x03c6, 0x03c7,
0x03c8, 0x03c9, 0x03ca, 0x03cb,
0x03cc, 0x03cd, 0x03ce, 0x0000,
};
static const unsigned char page00[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0x00, 0x00, 0xa3, 0x00, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0x00, 0x00, /* 0xa8-0xaf */
0xb0, 0xb1, 0xb2, 0xb3, 0x00, 0x00, 0x00, 0xb7, /* 0xb0-0xb7 */
0x00, 0x00, 0x00, 0xbb, 0x00, 0xbd, 0x00, 0x00, /* 0xb8-0xbf */
};
static const unsigned char page02[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
0x00, 0x00, 0x00, 0x00, 0xa2, 0xa1, 0x00, 0x00, /* 0xb8-0xbf */
};
static const unsigned char page03[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
0x00, 0x00, 0x00, 0x00, 0xb4, 0xb5, 0xb6, 0x00, /* 0x80-0x87 */
0xb8, 0xb9, 0xba, 0x00, 0xbc, 0x00, 0xbe, 0xbf, /* 0x88-0x8f */
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0x90-0x97 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0x98-0x9f */
0xd0, 0xd1, 0x00, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xa0-0xa7 */
0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xa8-0xaf */
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xb0-0xb7 */
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xb8-0xbf */
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xc0-0xc7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0x00, /* 0xc8-0xcf */
};
static const unsigned char page20[256] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0x00, 0x00, /* 0x10-0x17 */
};
/* Unicode->iso8859-7 page dispatch: indexed by the high byte of a
 * code point. NULL means no character in that Unicode page maps to
 * iso8859-7; entries past page 0x20 are implicitly NULL. */
static const unsigned char *const page_uni2charset[256] = {
page00, NULL, page02, page03, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
page20, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
static const unsigned char charset2lower[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x40-0x47 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x48-0x4f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x50-0x57 */
0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0xa1, 0xa2, 0xa3, 0x00, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0x00, 0xaf, /* 0xa8-0xaf */
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xdc, 0xb7, /* 0xb0-0xb7 */
0xdd, 0xde, 0xdf, 0xbb, 0xfc, 0xbd, 0xfd, 0xfe, /* 0xb8-0xbf */
0xc0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xc0-0xc7 */
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xc8-0xcf */
0xf0, 0xf1, 0x00, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xd0-0xd7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xdc, 0xdd, 0xde, 0xdf, /* 0xd8-0xdf */
0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0xe0-0xe7 */
0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0xe8-0xef */
0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0xf0-0xf7 */
0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0x00, /* 0xf8-0xff */
};
static const unsigned char charset2upper[256] = {
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
0x60, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x60-0x67 */
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x68-0x6f */
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x70-0x77 */
0x58, 0x59, 0x5a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x80-0x87 */
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x88-0x8f */
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x90-0x97 */
0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x98-0x9f */
0xa0, 0xa1, 0xa2, 0xa3, 0x00, 0x00, 0xa6, 0xa7, /* 0xa0-0xa7 */
0xa8, 0xa9, 0x00, 0xab, 0xac, 0xad, 0x00, 0xaf, /* 0xa8-0xaf */
0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, /* 0xb0-0xb7 */
0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, /* 0xb8-0xbf */
0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xc0-0xc7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xc8-0xcf */
0xd0, 0xd1, 0x00, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xd0-0xd7 */
0xd8, 0xd9, 0xda, 0xdb, 0xb6, 0xb8, 0xb9, 0xba, /* 0xd8-0xdf */
0xe0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, /* 0xe0-0xe7 */
0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, /* 0xe8-0xef */
0xd0, 0xd1, 0xd3, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, /* 0xf0-0xf7 */
0xd8, 0xd9, 0xda, 0xdb, 0xbc, 0xbe, 0xbf, 0x00, /* 0xf8-0xff */
};
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
const unsigned char *uni2charset;
unsigned char cl = uni & 0x00ff;
unsigned char ch = (uni & 0xff00) >> 8;
if (boundlen <= 0)
return -ENAMETOOLONG;
uni2charset = page_uni2charset[ch];
if (uni2charset && uni2charset[cl])
out[0] = uni2charset[cl];
else
return -EINVAL;
return 1;
}
/*
 * Convert one ISO 8859-7 byte to Unicode.
 * Returns 1 (one byte consumed) on success, -EINVAL for an unmapped
 * byte.  Note: *uni is written before the validity check, matching the
 * historical behavior of this module.
 */
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
	*uni = charset2uni[*rawstring];
	if (!*uni)
		return -EINVAL;
	return 1;
}
/*
 * NLS operations table for the ISO 8859-7 (Greek) codepage, wired to
 * the conversion helpers and case tables defined in this file.
 */
static struct nls_table table = {
	.charset = "iso8859-7",
	.uni2char = uni2char,
	.char2uni = char2uni,
	.charset2lower = charset2lower,
	.charset2upper = charset2upper,
	.owner = THIS_MODULE,
};
/* Module entry point: register the ISO 8859-7 table with the NLS core. */
static int __init init_nls_iso8859_7(void)
{
	return register_nls(&table);
}
/* Module exit point: remove the table from the NLS registry. */
static void __exit exit_nls_iso8859_7(void)
{
	unregister_nls(&table);
}
/* Standard module plumbing: register on load, unregister on unload. */
module_init(init_nls_iso8859_7)
module_exit(exit_nls_iso8859_7)
MODULE_LICENSE("Dual BSD/GPL");
| gpl-2.0 |
hajuuk/R7000 | ap/gpl/amule/wxWidgets-2.8.12/src/expat/examples/elements.c | 23 | 1233 | /* This is simple demonstration of how to use expat. This program
reads an XML document from standard input and writes a line with
the name of each element to standard output indenting child
elements by one tab stop more than their parent element.
*/
#include <stdio.h>
#include "expat.h"
/*
 * expat start-tag handler: print the element name indented one tab per
 * nesting level (tracked through userData), then descend one level.
 * The attribute list is ignored.
 */
static void
startElement(void *userData, const char *name, const char **atts)
{
  int *depth = userData;
  int level;

  (void)atts;
  for (level = *depth; level > 0; level--)
    putchar('\t');
  puts(name);
  ++*depth;
}
/*
 * expat end-tag handler: ascend one nesting level.  The element name
 * is unused.
 */
static void
endElement(void *userData, const char *name)
{
  int *depth = userData;

  (void)name;
  --*depth;
}
/*
 * Read an XML document from stdin and print an indented element
 * outline to stdout.  Returns 0 on success, 1 on parser-creation or
 * parse error.
 *
 * Fixes over the previous revision:
 *  - XML_ParserCreate() result is checked (it returns NULL on OOM).
 *  - The parser is freed on the error path too (was leaked).
 *  - The line number is printed via an unsigned long cast, since
 *    XML_GetCurrentLineNumber() returns XML_Size (not int) in current
 *    expat releases.
 *  - fread()'s size_t result is explicitly narrowed for XML_Parse().
 */
int
main(int argc, char *argv[])
{
  char buf[BUFSIZ];
  XML_Parser parser;
  int done;
  int depth = 0;

  (void)argc;
  (void)argv;

  parser = XML_ParserCreate(NULL);
  if (parser == NULL) {
    fputs("Couldn't allocate memory for parser\n", stderr);
    return 1;
  }
  XML_SetUserData(parser, &depth);
  XML_SetElementHandler(parser, startElement, endElement);
  do {
    size_t len = fread(buf, 1, sizeof(buf), stdin);
    done = len < sizeof(buf);
    if (XML_Parse(parser, buf, (int)len, done) == XML_STATUS_ERROR) {
      fprintf(stderr,
              "%s at line %lu\n",
              XML_ErrorString(XML_GetErrorCode(parser)),
              (unsigned long)XML_GetCurrentLineNumber(parser));
      XML_ParserFree(parser);
      return 1;
    }
  } while (!done);
  XML_ParserFree(parser);
  return 0;
}
| gpl-2.0 |
ncopa/uwsgi | core/hash.c | 23 | 2365 | #include <uwsgi.h>
extern struct uwsgi_server uwsgi;
// Bernstein "times 33 xor" hash.  Deliberately non-static: other
// subsystems link against it directly.
uint32_t djb33x_hash(char *key, uint64_t keylen) {
	uint32_t h = 5381;
	uint64_t pos;

	for (pos = 0; pos < keylen; pos++) {
		h = (h * 33) ^ key[pos];
	}
	return h;
}
// Murmur2 hash Copyright (C) Austin Appleby
// adapted from nginx
//
// Fixed relative to the previous revision: the 0-3 byte tail was read
// through the *original*, signed "key" pointer, so for keylen % 4 != 0
// (with keylen > 3) it mixed the leading bytes of the buffer instead of
// the remaining tail bytes, and tail bytes >= 0x80 were sign-extended.
// The tail now goes through the advanced unsigned cursor, matching the
// canonical MurmurHash2 / nginx definition.  Hash outputs change for
// such inputs, so clusters that rely on identical hashing across nodes
// must upgrade all nodes together.
static uint32_t murmur2_hash(char *key, uint64_t keylen) {
	uint32_t h, k;
	uint8_t *ukey = (uint8_t *) key;

	// seed (0) xor length (truncated to 32 bits)
	h = 0 ^ keylen;

	// body: whole little-endian 4-byte chunks
	while (keylen >= 4) {
		k = ukey[0];
		k |= ukey[1] << 8;
		k |= ukey[2] << 16;
		k |= (uint32_t) ukey[3] << 24;	// cast avoids signed overflow

		k *= 0x5bd1e995;
		k ^= k >> 24;
		k *= 0x5bd1e995;

		h *= 0x5bd1e995;
		h ^= k;

		ukey += 4;
		keylen -= 4;
	}

	// tail: the remaining 0-3 bytes, via the advanced unsigned cursor
	switch (keylen) {
	case 3:
		h ^= ukey[2] << 16;
	case 2:
		h ^= ukey[1] << 8;
	case 1:
		h ^= ukey[0];
		h *= 0x5bd1e995;
	}

	// final avalanche
	h ^= h >> 13;
	h *= 0x5bd1e995;
	h ^= h >> 15;
	return h;
}
/*
 * Degenerate "hash": ignores the key entirely and returns a
 * pseudo-random value from rand() (seeded elsewhere, if at all).
 */
static uint32_t random_hash(char *key, uint64_t keylen) {
	(void) key;
	(void) keylen;
	return (uint32_t) rand();
}
/*
not atomic, avoid its use in multithreaded modes
*/
static uint32_t rr_hash(char *key, uint64_t keylen) {
	/* round-robin counter, shared across calls (single-threaded only) */
	static uint32_t next = 0;
	uint32_t limit = uwsgi_str_num(key, keylen);
	uint32_t current = next;

	if (++next > limit) {
		next = 0;
	}
	return current;
}
/* Look up a registered hash algorithm by name; NULL if unknown. */
struct uwsgi_hash_algo *uwsgi_hash_algo_get(char *name) {
	struct uwsgi_hash_algo *algo;

	for (algo = uwsgi.hash_algos; algo; algo = algo->next) {
		if (strcmp(name, algo->name) == 0)
			return algo;
	}
	return NULL;
}
/*
 * Append a named hash algorithm to the global registry.  Duplicate
 * names are silently ignored (first registration wins).  The name
 * pointer is stored, not copied.
 */
void uwsgi_hash_algo_register(char *name, uint32_t (*func)(char *, uint64_t)) {
	struct uwsgi_hash_algo *tail = NULL;
	struct uwsgi_hash_algo *node;

	for (node = uwsgi.hash_algos; node; node = node->next) {
		if (strcmp(node->name, name) == 0)
			return;
		tail = node;
	}

	node = uwsgi_calloc(sizeof(struct uwsgi_hash_algo));
	node->name = name;
	node->func = func;

	if (tail)
		tail->next = node;
	else
		uwsgi.hash_algos = node;
}
/* Register every built-in hash algorithm, in registry order. */
void uwsgi_hash_algo_register_all() {
	static struct {
		char *name;
		uint32_t (*func)(char *, uint64_t);
	} builtin[] = {
		{ "djb33x", djb33x_hash },
		{ "murmur2", murmur2_hash },
		{ "random", random_hash },
		{ "rand", random_hash },
		{ "rr", rr_hash },
	};
	unsigned int i;

	for (i = 0; i < sizeof(builtin) / sizeof(builtin[0]); i++)
		uwsgi_hash_algo_register(builtin[i].name, builtin[i].func);
}
| gpl-2.0 |
Yamatoo/DiamondCore | dep/acelite/ace/Pagefile_Memory_Pool.cpp | 535 | 12502 | // $Id: Pagefile_Memory_Pool.cpp 91286 2010-08-05 09:04:31Z johnnyw $
// Pagefile_Memory_Pool.cpp
#include "ace/Pagefile_Memory_Pool.h"
#if !defined (__ACE_INLINE__)
#include "ace/Pagefile_Memory_Pool.inl"
#endif /* __ACE_INLINE__ */
#include "ace/Log_Msg.h"
#include "ace/Auto_Ptr.h"
#include "ace/RW_Thread_Mutex.h"
#include "ace/OS_NS_sys_mman.h"
#include "ace/OS_NS_string.h"
#include "ace/OS_NS_sys_stat.h"
#include "ace/OS_NS_unistd.h"
#include "ace/Truncate.h"
#if (ACE_HAS_POSITION_INDEPENDENT_POINTERS == 1)
#include "ace/Based_Pointer_T.h"
#include "ace/Based_Pointer_Repository.h"
#endif /* ACE_HAS_POSITION_INDEPENDENT_POINTERS == 1 */
#if defined (ACE_WIN32) && !defined (ACE_HAS_PHARLAP)
#if !defined (ACE_HAS_WINCE)
#define ACE_MAP_FILE(_hnd, _access, _offHigh, _offLow, _nBytes, _baseAdd)\
MapViewOfFileEx (_hnd, _access, _offHigh, _offLow, _nBytes, _baseAdd)
#else //if !defined (ACE_HAS_WINCE)
#define ACE_MAP_FILE(_hnd, _access, _offHigh, _offLow, _nBytes, _baseAdd)\
MapViewOfFile (_hnd, _access, _offHigh, _offLow, _nBytes)
#endif /* !ACE_HAS_WINCE */
ACE_BEGIN_VERSIONED_NAMESPACE_DECL
// Record the caller's requested base address and maximum pool size;
// these options are consumed by the ACE_Pagefile_Memory_Pool ctor.
ACE_Pagefile_Memory_Pool_Options::ACE_Pagefile_Memory_Pool_Options (
  void *base_addr,
  size_t max_size)
  : base_addr_ (base_addr),
    max_size_ (max_size)
{
}
// Release this process's view of the pool.  The int argument (a
// "destroy" hint in other pool classes) is ignored here; the
// pagefile-backed section is reclaimed by the OS when the last handle
// to it is closed.
int
ACE_Pagefile_Memory_Pool::release (int)
{
  return this->unmap ();
}
// Initialize the local control block from the user-supplied options
// (or built-in defaults) and remember the backing store name.  No
// shared memory is touched yet; mapping happens lazily in map() /
// init_acquire().
ACE_Pagefile_Memory_Pool::ACE_Pagefile_Memory_Pool (const ACE_TCHAR *backing_store_name,
                                                    const OPTIONS *options)
  : shared_cb_ (0),
    object_handle_ (0),
    page_size_ (ACE_Pagefile_Memory_Pool::round_to_page_size (1))
{
  // Initialize local copy of pool statistics.
  if (options != 0)
    {
      this->local_cb_.req_base_ = options->base_addr_;
      this->local_cb_.mapped_base_ = 0;
      this->local_cb_.sh_.max_size_ =
        options->max_size_;
      this->local_cb_.sh_.mapped_size_ = 0;
      this->local_cb_.sh_.free_offset_ =
        this->local_cb_.sh_.mapped_size_;
      this->local_cb_.sh_.free_size_ = 0;
    }
  else
    {
      // No options: let the OS pick the base address and default to a
      // single chunk-rounded page as the maximum size.
      this->local_cb_.req_base_ = 0;
      this->local_cb_.mapped_base_ = 0;
      this->local_cb_.sh_.max_size_ =
        this->round_to_chunk_size (page_size_) ;
      this->local_cb_.sh_.mapped_size_ = 0;
      this->local_cb_.sh_.free_offset_ =
        this->local_cb_.sh_.mapped_size_;
      this->local_cb_.sh_.free_size_ = 0;
    }

  int update_backing_store_name = backing_store_name == 0 ? 0 : 1;

  if (backing_store_name == 0)
    // Only create a new unique filename for the backing store file if
    // the user didn't supply one...
    backing_store_name = ACE_DEFAULT_PAGEFILE_POOL_NAME;

  ACE_OS::strsncpy (this->backing_store_name_,
                    backing_store_name,
                    (sizeof this->backing_store_name_ / sizeof (ACE_TCHAR)));

  // NOTE(review): the bound below compares a character count
  // (ACE_OS::strlen) against a byte count (sizeof), and does not
  // explicitly reserve room for the appended "_" plus terminator --
  // presumably only safe for narrow-char builds / short names; confirm
  // against ACE_TCHAR width before relying on it.
  if (update_backing_store_name
      && ACE_OS::strlen (this->backing_store_name_) < sizeof this->backing_store_name_)
    ACE_OS::strcat (this->backing_store_name_,
                    ACE_TEXT ("_"));
}
// No-op destructor: the mapping and section handle are torn down via
// release()/unmap(), not here.
ACE_Pagefile_Memory_Pool::~ACE_Pagefile_Memory_Pool (void)
{
}
// Carve <nbytes> (rounded up to whole pages; actual amount reported
// through <rounded_bytes>) out of the pool's free region, growing the
// committed mapping first when the local view is stale or too small.
// Returns the address of the new block, or 0 on failure.
// NOTE(review): no locking is visible here -- presumably the caller
// (e.g. ACE_Malloc) serializes concurrent acquires; confirm.
void *
ACE_Pagefile_Memory_Pool::acquire (size_t nbytes,
                                   size_t &rounded_bytes)
{
  rounded_bytes = round_to_page_size (nbytes);

  void *result = 0;
  int first_time = 0;

  // Check local_cb_ for consistency.  Remap, if extra space is too
  // small and/or we didn't map the whole shared memory section
  if (this->shared_cb_->sh_.mapped_size_
      > this->local_cb_.sh_.mapped_size_
      || this->shared_cb_->sh_.free_size_ < rounded_bytes)
    {
      size_t append = 0;
      if (rounded_bytes > this->shared_cb_->sh_.free_size_)
        append = rounded_bytes - this->shared_cb_->sh_.free_size_;

      if (this->map (first_time, append) < 0)
        return result;
    }

  // Get the block from extra space and update shared and local
  // control block
  if (this->shared_cb_->sh_.free_size_ < rounded_bytes)
    return result;

  result = (void *)((char *) this->local_cb_.mapped_base_
                    + this->shared_cb_->sh_.free_offset_);
  this->shared_cb_->sh_.free_offset_ += rounded_bytes;
  this->shared_cb_->sh_.free_size_ -= rounded_bytes;
  // Refresh the local snapshot of the shared statistics.
  this->local_cb_.sh_ = this->shared_cb_->sh_;

  return result;
}
// First-touch entry point: map the backing store, then either allocate
// <nbytes> (if this process just created the pool) or return the
// address right past the control block (if the pool already existed).
// <first_time> is an out parameter telling the caller which case hit.
void *
ACE_Pagefile_Memory_Pool::init_acquire (size_t nbytes,
                                        size_t &rounded_bytes,
                                        int &first_time)
{
  // Map the shared memory and get information, if we created the
  // shared memory.
  if (this->map (first_time) < 0)
    return 0;

  if (first_time != 0)
    // We created the shared memory. So we have to allocate the
    // requested memory.
    return this->acquire (nbytes, rounded_bytes);
  else
    // We just mapped the memory and return the base address
    // (first usable byte after the page-rounded control block).
    return (void *)((char *) this->local_cb_.mapped_base_
                    + ACE_Pagefile_Memory_Pool::round_to_page_size
                    ((int) sizeof (Control_Block)));
}
// Win32 structured-exception filter hook: when an access violation
// falls inside the pool's reserved-but-uncommitted range, commit more
// chunks via remap() and report "handled" (1) so execution can resume;
// otherwise report 0 and let the exception propagate.
int
ACE_Pagefile_Memory_Pool::seh_selector (void *ep)
{
  DWORD ecode = ((EXCEPTION_POINTERS *) ep)->ExceptionRecord->ExceptionCode;

  if (ecode == EXCEPTION_ACCESS_VIOLATION)
    {
      // ExceptionInformation[1] holds the faulting address.
      void * fault_addr = (void *)
        ((EXCEPTION_POINTERS *) ep)->ExceptionRecord->ExceptionInformation[1];

      if (this->remap (fault_addr) == 0)
        return 1;
    }

  return 0;
}
// Try to resolve a fault at <addr> by committing additional chunks.
// Fails (-1) when the pool isn't mapped yet or <addr> lies outside the
// range the shared statistics say is mapped.
int
ACE_Pagefile_Memory_Pool::remap (void *addr)
{
  // If the shared memory is not mapped or the address, that caused
  // the memory fault is outside of the commited range of chunks, we
  // return.
  if (this->shared_cb_ == 0
      || addr < this->local_cb_.mapped_base_
      || addr >= (void *)((char *) this->local_cb_.mapped_base_
                          + this->shared_cb_->sh_.mapped_size_))
    return -1;

  // We can solve the problem by committing additional chunks.
  int first_time = 0;
  return this->map (first_time);
}
// Tear down this process's view of the pool: unbind it from the
// based-pointer repository, unmap the view, reset the local statistics
// to compile-time defaults and close the section handle.
// Always returns 0.
int
ACE_Pagefile_Memory_Pool::unmap (void)
{
#if (ACE_HAS_POSITION_INDEPENDENT_POINTERS == 1)
  ACE_BASED_POINTER_REPOSITORY::instance ()->unbind
    (this->local_cb_.mapped_base_);
#endif /* ACE_HAS_POSITION_INDEPENDENT_POINTERS == 1 */

  // Cleanup cached pool pointer.
  this->shared_cb_ = 0;

  if (this->local_cb_.sh_.mapped_size_ > 0)
    ::UnmapViewOfFile (this->local_cb_.mapped_base_);

  // Reset local pool statistics.
  this->local_cb_.req_base_ =
    ACE_DEFAULT_PAGEFILE_POOL_BASE;
  this->local_cb_.mapped_base_ = 0;
  this->local_cb_.sh_.max_size_ =
    ACE_DEFAULT_PAGEFILE_POOL_SIZE;
  this->local_cb_.sh_.mapped_size_ = 0;
  this->local_cb_.sh_.free_offset_ =
    this->local_cb_.sh_.mapped_size_;
  this->local_cb_.sh_.free_size_ = 0;

  // Release the pool
  if (this->object_handle_ != 0)
    {
      ::CloseHandle (this->object_handle_);
      this->object_handle_ = 0;
    }
  return 0;
}
// Map the pagefile-backed section into this process and commit as much
// of it as the shared statistics (plus <append_bytes>) require.
// <first_time> is set to 1 only when this call actually created the
// named section.  Returns 0 on success, -1 on any Win32 failure.
int
ACE_Pagefile_Memory_Pool::map (int &first_time,
                               size_t append_bytes)
{
  size_t map_size;
  void *map_addr;

  // Create file mapping, if not yet done
  if (object_handle_ == 0)
    {
#if !defined (ACE_LACKS_WIN32_SECURITY_DESCRIPTORS)
      // Allow access by all users.
      SECURITY_ATTRIBUTES sa;
      SECURITY_DESCRIPTOR sd;
      ::InitializeSecurityDescriptor (&sd,
                                      SECURITY_DESCRIPTOR_REVISION);
      ::SetSecurityDescriptorDacl (&sd,
                                   TRUE,
                                   0,
                                   FALSE);
      sa.nLength = sizeof (SECURITY_ATTRIBUTES);
      sa.lpSecurityDescriptor = &sd;
      sa.bInheritHandle = FALSE;
#endif /* ACE_LACKS_WIN32_SECURITY_DESCRIPTORS */

      // Get an object handle to the named reserved memory object.
      DWORD size_high;
      DWORD size_low;
#if defined (ACE_WIN64)
      size_high = static_cast<DWORD> (this->local_cb_.sh_.max_size_ >> 32);
      size_low = static_cast<DWORD> (this->local_cb_.sh_.max_size_ & 0xFFFFFFFF);
#else
      size_high = 0;
      size_low = ACE_Utils::truncate_cast<DWORD> (this->local_cb_.sh_.max_size_);
#endif
      // SEC_RESERVE: reserve the whole max size up front; individual
      // chunks are committed later with VirtualAlloc.
      object_handle_ =
        ACE_TEXT_CreateFileMapping (INVALID_HANDLE_VALUE,
#if !defined (ACE_LACKS_WIN32_SECURITY_DESCRIPTORS)
                                    &sa,
#else
                                    0,
#endif /* !ACE_LACKS_WIN32_SECURITY_DESCRIPTORS */
                                    PAGE_READWRITE | SEC_RESERVE,
                                    size_high,
                                    size_low,
                                    this->backing_store_name_);
      if (object_handle_ == 0)
        return -1;
      // ERROR_ALREADY_EXISTS means another process created the section.
      first_time =
        ::GetLastError () == ERROR_ALREADY_EXISTS
        ? 0
        : 1;
    }

  // Do the initial mapping.
  if (this->shared_cb_ == 0)
    {
      // Map a view to the shared memory.  Note: <MapViewOfFile[Ex]>
      // does *not* commit the pages!
      this->shared_cb_ = (ACE_Pagefile_Memory_Pool::Control_Block *)
        ACE_MAP_FILE (this->object_handle_,
                      FILE_MAP_WRITE,
                      0,
                      0,
                      this->local_cb_.sh_.max_size_,
                      this->local_cb_.req_base_);
      if (this->shared_cb_ == 0)
        return -1;

      // There was no previous mapping, so we map the first chunk and
      // initialize the shared pool statistics.
      if (first_time)
        {
          // 1st block is used to keep shared memory statistics.
          map_size =
            ACE_Pagefile_Memory_Pool::round_to_chunk_size
            (ACE_Pagefile_Memory_Pool::round_to_page_size
             ((int) sizeof(Control_Block))
             + append_bytes);

          if (::VirtualAlloc ((void *) this->shared_cb_,
                              map_size,
                              MEM_COMMIT,
                              PAGE_READWRITE) == 0)
            return -1;

          this->shared_cb_->req_base_ = 0;
          this->shared_cb_->mapped_base_ = 0;
          this->local_cb_.mapped_base_ = this->shared_cb_;
          this->local_cb_.sh_.mapped_size_ = map_size;
          this->local_cb_.sh_.free_offset_ =
            round_to_page_size ((int) sizeof (Control_Block));
          this->local_cb_.sh_.free_size_ =
            this->local_cb_.sh_.mapped_size_ -
            this->local_cb_.sh_.free_offset_;
          // Publish the freshly initialized statistics.
          this->shared_cb_->sh_ = this->local_cb_.sh_;
        }

      // The shared memory exists, so we map the first chunk to the
      // base address of the pool to get the shared pool statistics.
      else
        {
          // 1st block is used to keep shared memory statistics.
          map_size =
            ACE_Pagefile_Memory_Pool::round_to_chunk_size
            ((int) sizeof (Control_Block));
          if (::VirtualAlloc ((void *) this->shared_cb_,
                              map_size,
                              MEM_COMMIT,
                              PAGE_READWRITE) == 0)
            return -1;
          this->local_cb_.mapped_base_ = this->shared_cb_;
          this->local_cb_.sh_.mapped_size_ = map_size;
        }
    }

  // If the shared memory is larger than the part we've already
  // committed, we have to remap it.
  if (this->shared_cb_->sh_.mapped_size_ >
      this->local_cb_.sh_.mapped_size_
      || append_bytes > 0)
    {
      map_size =
        (this->shared_cb_->sh_.mapped_size_ -
         this->local_cb_.sh_.mapped_size_)
        + ACE_Pagefile_Memory_Pool::round_to_chunk_size
        (append_bytes);

      map_addr = (void *)((char *) this->shared_cb_ +
                          this->local_cb_.sh_.mapped_size_);

      if (::VirtualAlloc (map_addr,
                          map_size,
                          MEM_COMMIT,
                          PAGE_READWRITE) == 0)
        return -1;
      else if (append_bytes > 0)
        {
          // Only the appender grows the shared size; other processes
          // just catch up to it.
          this->shared_cb_->sh_.mapped_size_ +=
            round_to_chunk_size (append_bytes);
          this->shared_cb_->sh_.free_size_ =
            this->shared_cb_->sh_.mapped_size_ -
            this->shared_cb_->sh_.free_offset_;
        }
    }

  // Update local copy of the shared memory statistics.
  this->local_cb_.sh_ =
    this->shared_cb_->sh_;

#if (ACE_HAS_POSITION_INDEPENDENT_POINTERS == 1)
  ACE_BASED_POINTER_REPOSITORY::instance ()->bind
    (this->local_cb_.mapped_base_,
     this->local_cb_.sh_.mapped_size_);
#endif /* ACE_HAS_POSITION_INDEPENDENT_POINTERS == 1 */

  return 0;
}
ACE_END_VERSIONED_NAMESPACE_DECL
#endif /* ACE_WIN32 && !ACE_HAS_PHARLAP */
| gpl-2.0 |
CyanogenMod/htc-kernel-msm7x30 | net/atm/addr.c | 791 | 3912 | /* net/atm/addr.c - Local ATM address registry */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <asm/uaccess.h>
#include "signaling.h"
#include "addr.h"
/*
 * Validate an ATM SVC address: the family must be AF_ATMSVC, and a
 * public (E.164) address, when present, must be NUL-terminated within
 * ATM_E164_LEN.  With no public part, a private part must be present.
 */
static int check_addr(const struct sockaddr_atmsvc *addr)
{
	int i;

	if (addr->sas_family != AF_ATMSVC)
		return -EAFNOSUPPORT;
	if (*addr->sas_addr.pub == '\0')
		return *addr->sas_addr.prv != '\0' ? 0 : -EINVAL;
	/* make sure the public address is \0-terminated */
	for (i = 1; i <= ATM_E164_LEN; i++)
		if (addr->sas_addr.pub[i] == '\0')
			return 0;
	return -EINVAL;
}
/*
 * Compare two SVC addresses.  The private parts must match whenever
 * <a> has one; the public parts must both be absent or string-equal.
 */
static int identical(const struct sockaddr_atmsvc *a, const struct sockaddr_atmsvc *b)
{
	if (*a->sas_addr.prv &&
	    memcmp(a->sas_addr.prv, b->sas_addr.prv, ATM_ESA_LEN) != 0)
		return 0;
	if (*a->sas_addr.pub == '\0')
		return *b->sas_addr.pub == '\0';
	if (*b->sas_addr.pub == '\0')
		return 0;
	return strcmp(a->sas_addr.pub, b->sas_addr.pub) == 0;
}
/*
 * Tell the signalling demon that the address list of <dev> changed by
 * enqueueing an as_itf_notify message carrying the interface number.
 */
static void notify_sigd(const struct atm_dev *dev)
{
	struct sockaddr_atmpvc pvc;

	pvc.sap_addr.itf = dev->number;
	sigd_enq(NULL, as_itf_notify, NULL, &pvc, NULL);
}
/*
 * Drop every address on the device's local (or LECS) list and notify
 * the signalling demon when the local list was cleared.
 */
void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t atype)
{
	struct atm_dev_addr *walk, *tmp;
	struct list_head *head;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	head = atype == ATM_ADDR_LECS ? &dev->lecs : &dev->local;
	list_for_each_entry_safe(walk, tmp, head, entry) {
		list_del(&walk->entry);
		kfree(walk);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
	if (head == &dev->local)
		notify_sigd(dev);
}
/*
 * Register <addr> on the device's local (or LECS) address list.
 * Returns 0 on success, -EEXIST if already registered, or a negative
 * errno from validation/allocation.  Notifies the signalling demon
 * when the local list changed.
 */
int atm_add_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
		 enum atm_addr_type_t atype)
{
	struct atm_dev_addr *walk;
	struct list_head *head;
	unsigned long flags;
	int err;

	err = check_addr(addr);
	if (err)
		return err;
	spin_lock_irqsave(&dev->lock, flags);
	head = atype == ATM_ADDR_LECS ? &dev->lecs : &dev->local;
	list_for_each_entry(walk, head, entry) {
		if (identical(&walk->addr, addr)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			return -EEXIST;
		}
	}
	/* GFP_ATOMIC: allocating under the device spinlock */
	walk = kmalloc(sizeof(struct atm_dev_addr), GFP_ATOMIC);
	if (!walk) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENOMEM;
	}
	walk->addr = *addr;
	list_add(&walk->entry, head);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (head == &dev->local)
		notify_sigd(dev);
	return 0;
}
/*
 * Remove <addr> from the device's local (or LECS) address list.
 * Returns 0 on success, -ENOENT if not found, or a negative errno
 * from validation.  Notifies the signalling demon when the local
 * list changed.
 */
int atm_del_addr(struct atm_dev *dev, const struct sockaddr_atmsvc *addr,
		 enum atm_addr_type_t atype)
{
	struct atm_dev_addr *walk;
	struct list_head *head;
	unsigned long flags;
	int err;

	err = check_addr(addr);
	if (err)
		return err;
	spin_lock_irqsave(&dev->lock, flags);
	head = atype == ATM_ADDR_LECS ? &dev->lecs : &dev->local;
	list_for_each_entry(walk, head, entry) {
		if (!identical(&walk->addr, addr))
			continue;
		list_del(&walk->entry);
		spin_unlock_irqrestore(&dev->lock, flags);
		kfree(walk);
		if (head == &dev->local)
			notify_sigd(dev);
		return 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);
	return -ENOENT;
}
/*
 * Copy the device's local (or LECS) address list to user space.
 * Returns the total byte size of the list on success, -E2BIG when the
 * list is larger than <size> (the first <size> bytes are still
 * copied), or -ENOMEM/-EFAULT on failure.  The list is snapshotted
 * into a temporary kernel buffer under the device lock so that
 * copy_to_user() runs without holding it.
 */
int atm_get_addr(struct atm_dev *dev, struct sockaddr_atmsvc __user * buf,
		 size_t size, enum atm_addr_type_t atype)
{
	unsigned long flags;
	struct atm_dev_addr *this;
	struct list_head *head;
	int total = 0, error;
	struct sockaddr_atmsvc *tmp_buf, *tmp_bufp;

	spin_lock_irqsave(&dev->lock, flags);
	if (atype == ATM_ADDR_LECS)
		head = &dev->lecs;
	else
		head = &dev->local;
	list_for_each_entry(this, head, entry)
		total += sizeof(struct sockaddr_atmsvc);
	/* GFP_ATOMIC: we are under the spinlock with interrupts off */
	tmp_buf = tmp_bufp = kmalloc(total, GFP_ATOMIC);
	if (!tmp_buf) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return -ENOMEM;
	}
	list_for_each_entry(this, head, entry)
		memcpy(tmp_bufp++, &this->addr, sizeof(struct sockaddr_atmsvc));
	spin_unlock_irqrestore(&dev->lock, flags);
	error = total > size ? -E2BIG : total;
	/* copy whatever fits, even when reporting -E2BIG */
	if (copy_to_user(buf, tmp_buf, total < size ? total : size))
		error = -EFAULT;
	kfree(tmp_buf);
	return error;
}
| gpl-2.0 |
Nick73/King_Kernel | fs/ocfs2/refcounttree.c | 1559 | 116185 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* refcounttree.c
*
* Copyright (C) 2009 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/sort.h>
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "inode.h"
#include "alloc.h"
#include "suballoc.h"
#include "journal.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "blockcheck.h"
#include "refcounttree.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "aops.h"
#include "xattr.h"
#include "namei.h"
#include "ocfs2_trace.h"
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/fsnotify.h>
#include <linux/quotaops.h>
#include <linux/namei.h>
#include <linux/mount.h>
/*
 * State carried through one copy-on-write pass over a refcounted
 * region.  The two function pointers let the same CoW engine serve
 * both regular file data and xattr trees.
 */
struct ocfs2_cow_context {
	struct inode *inode;
	struct file *file;
	/* cluster range (virtual, in the file) being CoWed */
	u32 cow_start;
	u32 cow_len;
	struct ocfs2_extent_tree data_et;
	struct ocfs2_refcount_tree *ref_tree;
	struct buffer_head *ref_root_bh;
	/* allocator reservations used while duplicating clusters */
	struct ocfs2_alloc_context *meta_ac;
	struct ocfs2_alloc_context *data_ac;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	/* opaque per-caller payload passed through to the hooks */
	void *cow_object;
	struct ocfs2_post_refcount *post_refcount;
	int extra_credits;
	/* translate a virtual cluster to physical extent info */
	int (*get_clusters)(struct ocfs2_cow_context *context,
			    u32 v_cluster, u32 *p_cluster,
			    u32 *num_clusters,
			    unsigned int *extent_flags);
	/* copy old clusters into freshly allocated ones */
	int (*cow_duplicate_clusters)(handle_t *handle,
				      struct file *file,
				      u32 cpos, u32 old_cluster,
				      u32 new_cluster, u32 new_len);
};
/* Map a caching_info embedded in an ocfs2_refcount_tree back to the tree. */
static inline struct ocfs2_refcount_tree *
cache_info_to_refcount(struct ocfs2_caching_info *ci)
{
	return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
}
/*
 * Sanity-check a refcount block freshly read from disk: metadata ecc,
 * signature, self-referencing block number and filesystem generation.
 * Used as the validate callback of ocfs2_read_block().
 */
static int ocfs2_validate_refcount_block(struct super_block *sb,
					 struct buffer_head *bh)
{
	int rc;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)bh->b_data;

	trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running. We know any error is
	 * local to this block.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
	if (rc) {
		mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
		     (unsigned long long)bh->b_blocknr);
		return rc;
	}

	if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
		ocfs2_error(sb,
			    "Refcount block #%llu has bad signature %.*s",
			    (unsigned long long)bh->b_blocknr, 7,
			    rb->rf_signature);
		return -EINVAL;
	}

	if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
		ocfs2_error(sb,
			    "Refcount block #%llu has an invalid rf_blkno "
			    "of %llu",
			    (unsigned long long)bh->b_blocknr,
			    (unsigned long long)le64_to_cpu(rb->rf_blkno));
		return -EINVAL;
	}

	if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
		ocfs2_error(sb,
			    "Refcount block #%llu has an invalid "
			    "rf_fs_generation of #%u",
			    (unsigned long long)bh->b_blocknr,
			    le32_to_cpu(rb->rf_fs_generation));
		return -EINVAL;
	}

	return 0;
}
/*
 * Read the refcount block at <rb_blkno> through the metadata cache,
 * validating it with ocfs2_validate_refcount_block().  *bh may be NULL
 * on entry, in which case a new buffer_head is handed back on success.
 */
static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
				     u64 rb_blkno,
				     struct buffer_head **bh)
{
	int rc;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_block(ci, rb_blkno, &tmp,
			      ocfs2_validate_refcount_block);

	/* If ocfs2_read_block() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;

	return rc;
}
/* Metadata-cache callback: a refcount tree is identified (owned) by
 * its root block number. */
static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	return rf->rf_blkno;
}

/* Metadata-cache callback: the super_block the tree lives on. */
static struct super_block *
ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	return rf->rf_sb;
}
/* Metadata-cache lock callbacks: in-memory cache state is protected by
 * the tree's spinlock, while block I/O is serialized with the tree's
 * io mutex. */
static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	spin_lock(&rf->rf_lock);
}

static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	spin_unlock(&rf->rf_lock);
}

static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	mutex_lock(&rf->rf_io_mutex);
}

static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
{
	struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);

	mutex_unlock(&rf->rf_io_mutex);
}
/* Hook refcount trees into the generic ocfs2 metadata caching code. */
static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
	.co_owner		= ocfs2_refcount_cache_owner,
	.co_get_super		= ocfs2_refcount_cache_get_super,
	.co_cache_lock		= ocfs2_refcount_cache_lock,
	.co_cache_unlock	= ocfs2_refcount_cache_unlock,
	.co_io_lock		= ocfs2_refcount_cache_io_lock,
	.co_io_unlock		= ocfs2_refcount_cache_io_unlock,
};
/*
 * Look up the in-memory refcount tree rooted at <blkno> in the
 * per-superblock rb-tree.  Caller must hold osb_lock.  Returns NULL
 * when the tree isn't instantiated.
 */
static struct ocfs2_refcount_tree *
ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
{
	struct rb_node *node = osb->osb_rf_lock_tree.rb_node;

	while (node) {
		struct ocfs2_refcount_tree *tree =
			rb_entry(node, struct ocfs2_refcount_tree, rf_node);

		if (blkno < tree->rf_blkno)
			node = node->rb_left;
		else if (blkno > tree->rf_blkno)
			node = node->rb_right;
		else
			return tree;
	}
	return NULL;
}
/* osb_lock is already locked. */
/*
 * Insert <new> into the per-superblock rb-tree of refcount trees,
 * keyed by root block number.  A duplicate key indicates corrupted
 * bookkeeping and BUG()s.
 */
static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
				       struct ocfs2_refcount_tree *new)
{
	u64 rf_blkno = new->rf_blkno;
	struct rb_node *parent = NULL;
	struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
	struct ocfs2_refcount_tree *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct ocfs2_refcount_tree,
			       rf_node);

		if (rf_blkno < tmp->rf_blkno)
			p = &(*p)->rb_left;
		else if (rf_blkno > tmp->rf_blkno)
			p = &(*p)->rb_right;
		else {
			/* This should never happen! */
			mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
			     (unsigned long long)rf_blkno);
			BUG();
		}
	}

	rb_link_node(&new->rf_node, parent, p);
	rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
}
/*
 * Final teardown of an in-memory refcount tree: drain its metadata
 * cache, drop and free its cluster lock resource, then free it.
 */
static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
{
	ocfs2_metadata_cache_exit(&tree->rf_ci);
	ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
	ocfs2_lock_res_free(&tree->rf_lockres);
	kfree(tree);
}
/*
 * Unlink <tree> from the rb-tree and invalidate the one-entry LRU
 * cache if it points at it.  Caller must hold osb_lock.
 */
static inline void
ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *tree)
{
	rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
	if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
		osb->osb_ref_tree_lru = NULL;
}

/* Locked wrapper around the above. */
static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *tree)
{
	spin_lock(&osb->osb_lock);
	ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
	spin_unlock(&osb->osb_lock);
}
/* kref release callback: frees the tree once the last reference drops. */
static void ocfs2_kref_remove_refcount_tree(struct kref *kref)
{
	struct ocfs2_refcount_tree *tree =
		container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);

	ocfs2_free_refcount_tree(tree);
}

/* Take a reference on <tree>. */
static inline void
ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
{
	kref_get(&tree->rf_getcnt);
}

/* Drop a reference; the last put frees the tree. */
static inline void
ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
{
	kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
}
/* Set up the tree's metadata cache, io mutex and spinlock. */
static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
					       struct super_block *sb)
{
	ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
	mutex_init(&new->rf_io_mutex);
	new->rf_sb = sb;
	spin_lock_init(&new->rf_lock);
}

/* Set up the tree's rw semaphore and cluster lock resource, keyed by
 * root block number and on-disk generation. */
static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
					struct ocfs2_refcount_tree *new,
					u64 rf_blkno, u32 generation)
{
	init_rwsem(&new->rf_sem);
	ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
				     rf_blkno, generation);
}
/*
 * Allocate and partially initialize an in-memory refcount tree for the
 * root block <rf_blkno>.  The lock resource is initialized later, once
 * the on-disk generation is known.  Returns NULL on allocation failure.
 */
static struct ocfs2_refcount_tree*
ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
{
	struct ocfs2_refcount_tree *tree;

	tree = kzalloc(sizeof(*tree), GFP_NOFS);
	if (tree == NULL)
		return NULL;

	tree->rf_blkno = rf_blkno;
	kref_init(&tree->rf_getcnt);
	ocfs2_init_refcount_tree_ci(tree, osb->sb);

	return tree;
}
/*
 * Find or create the in-memory refcount tree for root block
 * <rf_blkno>, consulting a one-entry LRU cache first.  On success the
 * found tree is stored in *ret_tree and becomes the new LRU entry.
 * Note: no extra reference is taken here; callers that need one use
 * ocfs2_refcount_tree_get().
 */
static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
				   struct ocfs2_refcount_tree **ret_tree)
{
	int ret = 0;
	struct ocfs2_refcount_tree *tree, *new = NULL;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *ref_rb;

	spin_lock(&osb->osb_lock);
	if (osb->osb_ref_tree_lru &&
	    osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
		tree = osb->osb_ref_tree_lru;
	else
		tree = ocfs2_find_refcount_tree(osb, rf_blkno);
	if (tree)
		goto out;

	spin_unlock(&osb->osb_lock);

	new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
	if (!new) {
		ret = -ENOMEM;
		mlog_errno(ret);
		return ret;
	}
	/*
	 * We need the generation to create the refcount tree lock and since
	 * it isn't changed during the tree modification, we are safe here to
	 * read without protection.
	 * We also have to purge the cache after we create the lock since the
	 * refcount block may have the stale data. It can only be trusted when
	 * we hold the refcount lock.
	 */
	ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		ocfs2_metadata_cache_exit(&new->rf_ci);
		kfree(new);
		return ret;
	}

	ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
	ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
				      new->rf_generation);
	ocfs2_metadata_cache_purge(&new->rf_ci);

	/* Re-check under the lock: someone may have raced us here. */
	spin_lock(&osb->osb_lock);
	tree = ocfs2_find_refcount_tree(osb, rf_blkno);
	if (tree)
		goto out;

	ocfs2_insert_refcount_tree(osb, new);

	tree = new;
	new = NULL;

out:
	*ret_tree = tree;

	osb->osb_ref_tree_lru = tree;

	spin_unlock(&osb->osb_lock);

	/* We lost the race: discard our freshly built copy. */
	if (new)
		ocfs2_free_refcount_tree(new);

	brelse(ref_root_bh);
	return ret;
}
/*
 * Fetch the refcount-tree root block number recorded in an inode's
 * dinode (i_refcount_loc).  The inode must already carry the
 * OCFS2_HAS_REFCOUNT_FL feature flag.
 */
static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
{
	struct buffer_head *di_bh = NULL;
	struct ocfs2_dinode *di;
	int status;

	status = ocfs2_read_inode_block(inode, &di_bh);
	if (status) {
		mlog_errno(status);
		return status;
	}

	BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));

	di = (struct ocfs2_dinode *)di_bh->b_data;
	*ref_blkno = le64_to_cpu(di->i_refcount_loc);
	brelse(di_bh);
	return 0;
}
/*
 * Take the cluster-wide refcount lock and the matching local rw
 * semaphore (write when <rw> is non-zero, read otherwise).
 */
static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
				      struct ocfs2_refcount_tree *tree, int rw)
{
	int status = ocfs2_refcount_lock(tree, rw);

	if (status) {
		mlog_errno(status);
		return status;
	}

	if (rw)
		down_write(&tree->rf_sem);
	else
		down_read(&tree->rf_sem);

	return 0;
}
/*
 * Lock the refcount tree pointed by ref_blkno and return the tree.
 * In most case, we lock the tree and read the refcount block.
 * So read it here if the caller really needs it.
 *
 * If the tree has been re-created by other node, it will free the
 * old one and re-create it.
 */
int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
			     u64 ref_blkno, int rw,
			     struct ocfs2_refcount_tree **ret_tree,
			     struct buffer_head **ref_bh)
{
	int ret, delete_tree = 0;
	struct ocfs2_refcount_tree *tree = NULL;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *rb;

again:
	ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	/* Pin the tree before locking it; released by the unlock path. */
	ocfs2_refcount_tree_get(tree);

	ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
	if (ret) {
		mlog_errno(ret);
		ocfs2_refcount_tree_put(tree);
		goto out;
	}

	ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
					&ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		ocfs2_unlock_refcount_tree(osb, tree, rw);
		ocfs2_refcount_tree_put(tree);
		goto out;
	}

	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	/*
	 * If the refcount block has been freed and re-created, we may need
	 * to recreate the refcount tree also.
	 *
	 * Here we just remove the tree from the rb-tree, and the last
	 * kref holder will unlock and delete this refcount_tree.
	 * Then we goto "again" and ocfs2_get_refcount_tree will create
	 * the new refcount tree for us.
	 */
	if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
		if (!tree->rf_removed) {
			ocfs2_erase_refcount_tree_from_list(osb, tree);
			tree->rf_removed = 1;
			delete_tree = 1;
		}

		ocfs2_unlock_refcount_tree(osb, tree, rw);
		/*
		 * We get an extra reference when we create the refcount
		 * tree, so another put will destroy it.
		 */
		if (delete_tree)
			ocfs2_refcount_tree_put(tree);
		brelse(ref_root_bh);
		ref_root_bh = NULL;
		goto again;
	}

	*ret_tree = tree;
	if (ref_bh) {
		*ref_bh = ref_root_bh;
		ref_root_bh = NULL;
	}
out:
	brelse(ref_root_bh);
	return ret;
}
/*
 * Undo ocfs2_lock_refcount_tree(): drop the rw semaphore, the cluster
 * lock, and the reference the lock path took on the tree.
 */
void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
				struct ocfs2_refcount_tree *tree, int rw)
{
	if (rw)
		up_write(&tree->rf_sem);
	else
		up_read(&tree->rf_sem);

	ocfs2_refcount_unlock(tree, rw);
	ocfs2_refcount_tree_put(tree);
}
/*
 * Unmount-time cleanup: tear down every refcount tree left in the
 * per-superblock rb-tree.
 */
void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
{
	struct rb_root *root = &osb->osb_rf_lock_tree;
	struct rb_node *node;

	for (node = rb_last(root); node != NULL; node = rb_last(root)) {
		struct ocfs2_refcount_tree *tree =
			rb_entry(node, struct ocfs2_refcount_tree, rf_node);

		trace_ocfs2_purge_refcount_trees(
				(unsigned long long) tree->rf_blkno);

		rb_erase(&tree->rf_node, root);
		ocfs2_free_refcount_tree(tree);
	}
}
/*
* Create a refcount tree for an inode.
* We take for granted that the inode is already locked.
*/
static int ocfs2_create_refcount_tree(struct inode *inode,
struct buffer_head *di_bh)
{
int ret;
handle_t *handle = NULL;
struct ocfs2_alloc_context *meta_ac = NULL;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *new_bh = NULL;
struct ocfs2_refcount_block *rb;
struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
u16 suballoc_bit_start;
u32 num_got;
u64 suballoc_loc, first_blkno;
BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
trace_ocfs2_create_refcount_tree(
(unsigned long long)OCFS2_I(inode)->ip_blkno);
ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
mlog_errno(ret);
goto out;
}
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
&suballoc_bit_start, &num_got,
&first_blkno);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
if (!new_tree) {
ret = -ENOMEM;
mlog_errno(ret);
goto out_commit;
}
new_bh = sb_getblk(inode->i_sb, first_blkno);
ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);
ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
OCFS2_JOURNAL_ACCESS_CREATE);
if (ret) {
mlog_errno(ret);
goto out_commit;
}
/* Initialize ocfs2_refcount_block. */
rb = (struct ocfs2_refcount_block *)new_bh->b_data;
memset(rb, 0, inode->i_sb->s_blocksize);
strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
rb->rf_blkno = cpu_to_le64(first_blkno);
rb->rf_count = cpu_to_le32(1);
rb->rf_records.rl_count =
cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
spin_lock(&osb->osb_lock);
rb->rf_generation = osb->s_next_generation++;
spin_unlock(&osb->osb_lock);
ocfs2_journal_dirty(handle, new_bh);
spin_lock(&oi->ip_lock);
oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
di->i_refcount_loc = cpu_to_le64(first_blkno);
spin_unlock(&oi->ip_lock);
trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno);
ocfs2_journal_dirty(handle, di_bh);
/*
* We have to init the tree lock here since it will use
* the generation number to create it.
*/
new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
new_tree->rf_generation);
spin_lock(&osb->osb_lock);
tree = ocfs2_find_refcount_tree(osb, first_blkno);
/*
* We've just created a new refcount tree in this block. If
* we found a refcount tree on the ocfs2_super, it must be
* one we just deleted. We free the old tree before
* inserting the new tree.
*/
BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
if (tree)
ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
ocfs2_insert_refcount_tree(osb, new_tree);
spin_unlock(&osb->osb_lock);
new_tree = NULL;
if (tree)
ocfs2_refcount_tree_put(tree);
out_commit:
ocfs2_commit_trans(osb, handle);
out:
if (new_tree) {
ocfs2_metadata_cache_exit(&new_tree->rf_ci);
kfree(new_tree);
}
brelse(new_bh);
if (meta_ac)
ocfs2_free_alloc_context(meta_ac);
return ret;
}
/*
 * Point @inode at an already-existing refcount tree rooted at block
 * @refcount_loc: take one more reference on the tree root (rf_count)
 * and record the root's block number in the dinode.
 *
 * The inode must not own a refcount tree yet (BUG_ON below).
 */
static int ocfs2_set_refcount_tree(struct inode *inode,
				   struct buffer_head *di_bh,
				   u64 refcount_loc)
{
	int ret;
	handle_t *handle = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_block *rb;
	struct ocfs2_refcount_tree *ref_tree;

	BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);

	/* Lock the tree write-mode; this also reads the root block. */
	ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
				       &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	/* Both the dinode and the tree root will be modified. */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* One more user of this tree. */
	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	le32_add_cpu(&rb->rf_count, 1);
	ocfs2_journal_dirty(handle, ref_root_bh);

	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	di->i_refcount_loc = cpu_to_le64(refcount_loc);
	spin_unlock(&oi->ip_lock);
	ocfs2_journal_dirty(handle, di_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);

	return ret;
}
/*
 * Detach @inode from its refcount tree: clear i_refcount_loc plus the
 * HAS_REFCOUNT flag, and drop one reference (rf_count) on the tree
 * root.  If we were the last user, the root block is returned to the
 * extent allocator and the cached tree object is torn down.
 *
 * Returns 0 immediately if the inode has no refcount tree.
 */
int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
{
	int ret, delete_tree = 0;
	handle_t *handle = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_refcount_block *rb;
	struct inode *alloc_inode = NULL;
	struct buffer_head *alloc_bh = NULL;
	struct buffer_head *blk_bh = NULL;
	struct ocfs2_refcount_tree *ref_tree;
	int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
	u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
	u16 bit = 0;

	if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
		return 0;

	BUG_ON(!ref_blkno);
	ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	rb = (struct ocfs2_refcount_block *)blk_bh->b_data;

	/*
	 * If we are the last user, we need to free the block.
	 * So lock the allocator ahead.
	 */
	if (le32_to_cpu(rb->rf_count) == 1) {
		blk = le64_to_cpu(rb->rf_blkno);
		bit = le16_to_cpu(rb->rf_suballoc_bit);
		/* rf_suballoc_loc, when set, names the group directly. */
		if (rb->rf_suballoc_loc)
			bg_blkno = le64_to_cpu(rb->rf_suballoc_loc);
		else
			bg_blkno = ocfs2_which_suballoc_group(blk, bit);

		alloc_inode = ocfs2_get_system_file_inode(osb,
					EXTENT_ALLOC_SYSTEM_INODE,
					le16_to_cpu(rb->rf_suballoc_slot));
		if (!alloc_inode) {
			ret = -ENOMEM;
			mlog_errno(ret);
			goto out;
		}
		mutex_lock(&alloc_inode->i_mutex);

		ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
		if (ret) {
			mlog_errno(ret);
			goto out_mutex;
		}

		/* Extra journal credits for the suballocator free below. */
		credits += OCFS2_SUBALLOC_FREE;
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_unlock;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Unhook the tree from the inode. */
	spin_lock(&oi->ip_lock);
	oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
	di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
	di->i_refcount_loc = 0;
	spin_unlock(&oi->ip_lock);
	ocfs2_journal_dirty(handle, di_bh);

	le32_add_cpu(&rb->rf_count , -1);
	ocfs2_journal_dirty(handle, blk_bh);

	if (!rb->rf_count) {
		/* Last user: give the root block back to the allocator. */
		delete_tree = 1;
		ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
		ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
					       alloc_bh, bit, bg_blkno, 1);
		if (ret)
			mlog_errno(ret);
	}

out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	if (alloc_inode) {
		ocfs2_inode_unlock(alloc_inode, 1);
		brelse(alloc_bh);
	}
out_mutex:
	if (alloc_inode) {
		mutex_unlock(&alloc_inode->i_mutex);
		iput(alloc_inode);
	}
out:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	if (delete_tree)
		/* Drop the extra reference held since tree creation. */
		ocfs2_refcount_tree_put(ref_tree);
	brelse(blk_bh);

	return ret;
}
/*
 * Scan the record list of leaf block @ref_leaf_bh for the refcount rec
 * containing @cpos.  On return *index is the slot where the search
 * stopped and, when @ret_rec is non-NULL, it holds either the matching
 * record or a fake zero-refcount record describing the hole at @cpos
 * (its length clamped to @len or to the start of the next record).
 */
static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
					  struct buffer_head *ref_leaf_bh,
					  u64 cpos, unsigned int len,
					  struct ocfs2_refcount_rec *ret_rec,
					  int *index)
{
	int i = 0;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_rec *rec = NULL;

	for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
		rec = &rb->rf_records.rl_recs[i];

		/* Record ends at or before cpos: keep scanning. */
		if (le64_to_cpu(rec->r_cpos) +
		    le32_to_cpu(rec->r_clusters) <= cpos)
			continue;
		else if (le64_to_cpu(rec->r_cpos) > cpos)
			break;

		/* ok, cpos falls in this rec. Just return. */
		if (ret_rec)
			*ret_rec = *rec;
		goto out;
	}

	if (ret_rec) {
		/* We meet with a hole here, so fake the rec. */
		ret_rec->r_cpos = cpu_to_le64(cpos);
		ret_rec->r_refcount = 0;
		if (i < le16_to_cpu(rb->rf_records.rl_used) &&
		    le64_to_cpu(rec->r_cpos) < cpos + len)
			/* The hole ends where the next real record begins. */
			ret_rec->r_clusters =
				cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
		else
			ret_rec->r_clusters = cpu_to_le32(len);
	}

out:
	*index = i;
}
/*
 * Try to remove refcount tree. The mechanism is:
 * 1) Check whether i_clusters == 0, if no, exit.
 * 2) check whether we have i_xattr_loc in dinode. if yes, exit.
 * 3) Check whether we have inline xattr stored outside, if yes, exit.
 * 4) Remove the tree.
 *
 * Best-effort: always returns 0; a failure of the actual removal is
 * only logged via mlog_errno().
 */
int ocfs2_try_remove_refcount_tree(struct inode *inode,
				   struct buffer_head *di_bh)
{
	int ret;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	down_write(&oi->ip_xattr_sem);
	down_write(&oi->ip_alloc_sem);

	/* Case 1 above: data clusters still allocated. */
	if (oi->ip_clusters)
		goto out;

	/* Case 2 above: an external xattr block exists. */
	if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc)
		goto out;

	/* Case 3 above: inline xattrs with values stored outside. */
	if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL &&
	    ocfs2_has_inline_xattr_value_outside(inode, di))
		goto out;

	ret = ocfs2_remove_refcount_tree(inode, di_bh);
	if (ret)
		mlog_errno(ret);
out:
	up_write(&oi->ip_alloc_sem);
	up_write(&oi->ip_xattr_sem);
	return 0;
}
/*
 * Find the end range for a leaf refcount block indicated by
 * el->l_recs[index].e_blkno.
 *
 * *cpos_end receives the first cpos NOT covered by that leaf:
 * the next extent rec's e_cpos when one exists, UINT_MAX when the
 * leaf is the last one, or the boundary found via the subtree root.
 */
static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
				       struct buffer_head *ref_root_bh,
				       struct ocfs2_extent_block *eb,
				       struct ocfs2_extent_list *el,
				       int index, u32 *cpos_end)
{
	int ret, i, subtree_root;
	u32 cpos;
	u64 blkno;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_path *left_path = NULL, *right_path = NULL;
	struct ocfs2_extent_tree et;
	struct ocfs2_extent_list *tmp_el;

	if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
		/*
		 * We have a extent rec after index, so just use the e_cpos
		 * of the next extent rec.
		 */
		*cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
		return 0;
	}

	/* Simplified from "!eb || (eb && !eb->h_next_leaf_blk)". */
	if (!eb || !eb->h_next_leaf_blk) {
		/*
		 * We are the last extent rec, so any high cpos should
		 * be stored in this leaf refcount block.
		 */
		*cpos_end = UINT_MAX;
		return 0;
	}

	/*
	 * If the extent block isn't the last one, we have to find
	 * the subtree root between this extent block and the next
	 * leaf extent block and get the corresponding e_cpos from
	 * the subroot. Otherwise we may corrupt the b-tree.
	 */
	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);

	left_path = ocfs2_new_path_from_et(&et);
	if (!left_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
	ret = ocfs2_find_path(ci, left_path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	right_path = ocfs2_new_path_from_path(left_path);
	if (!right_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(ci, right_path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	subtree_root = ocfs2_find_subtree_root(&et, left_path,
					       right_path);

	tmp_el = left_path->p_node[subtree_root].el;
	blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
	for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
		if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
			/* The next rec's e_cpos bounds our leaf's range. */
			*cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
			break;
		}
	}

	/* The child must be referenced by the subtree root. */
	BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));

out:
	ocfs2_free_path(left_path);
	ocfs2_free_path(right_path);
	return ret;
}
/*
 * Given a cpos and len, try to find the refcount record which contains cpos.
 * 1. If cpos can be found in one refcount record, return the record.
 * 2. If cpos can't be found, return a fake record which start from cpos
 *    and end at a small value between cpos+len and start of the next record.
 *    This fake record has r_refcount = 0.
 *
 * On success *ret_bh holds a referenced buffer for the leaf block that
 * was searched (the root itself for an inline tree); the caller must
 * brelse() it.
 */
static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
				  struct buffer_head *ref_root_bh,
				  u64 cpos, unsigned int len,
				  struct ocfs2_refcount_rec *ret_rec,
				  int *index,
				  struct buffer_head **ret_bh)
{
	int ret = 0, i, found;
	u32 low_cpos, uninitialized_var(cpos_end);
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec = NULL;
	struct ocfs2_extent_block *eb = NULL;
	struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_root_bh->b_data;

	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
		/* Inline root: records live in the root block itself. */
		ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
					      ret_rec, index);
		*ret_bh = ref_root_bh;
		get_bh(ref_root_bh);
		return 0;
	}

	el = &rb->rf_list;
	/* The b-tree is keyed by the low 32 bits of the 64-bit cpos. */
	low_cpos = cpos & OCFS2_32BIT_POS_MASK;

	if (el->l_tree_depth) {
		ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ocfs2_error(sb,
			"refcount tree %llu has non zero tree "
			"depth in leaf btree tree block %llu\n",
			(unsigned long long)ocfs2_metadata_cache_owner(ci),
			(unsigned long long)eb_bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}
	}

	/* Find the last extent rec starting at or before low_cpos. */
	found = 0;
	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
			found = 1;
			break;
		}
	}

	if (found) {
		ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
						  eb, el, i, &cpos_end);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* Don't search past the end of this leaf's range. */
		if (cpos_end < low_cpos + len)
			len = cpos_end - low_cpos;
	}

	ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
					&ref_leaf_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
				      ret_rec, index);
	*ret_bh = ref_leaf_bh;
out:
	brelse(eb_bh);
	return ret;
}
/*
 * How a refcount record relates to its neighbours in a record list:
 * mergeable with the record on its left, on its right, on both sides,
 * or on neither (see ocfs2_refcount_rec_contig()).
 */
enum ocfs2_ref_rec_contig {
	REF_CONTIG_NONE = 0,
	REF_CONTIG_LEFT,
	REF_CONTIG_RIGHT,
	REF_CONTIG_LEFTRIGHT,
};
static enum ocfs2_ref_rec_contig
ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
int index)
{
if ((rb->rf_records.rl_recs[index].r_refcount ==
rb->rf_records.rl_recs[index + 1].r_refcount) &&
(le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
return REF_CONTIG_RIGHT;
return REF_CONTIG_NONE;
}
static enum ocfs2_ref_rec_contig
ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
int index)
{
enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;
if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
ret = ocfs2_refcount_rec_adjacent(rb, index);
if (index > 0) {
enum ocfs2_ref_rec_contig tmp;
tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);
if (tmp == REF_CONTIG_RIGHT) {
if (ret == REF_CONTIG_RIGHT)
ret = REF_CONTIG_LEFTRIGHT;
else
ret = REF_CONTIG_LEFT;
}
}
return ret;
}
/*
 * Absorb rl_recs[index + 1] into rl_recs[index]: both must carry the
 * same refcount (asserted below).  The right record's clusters are
 * added to the left one and the tail of the list is shifted down over
 * the vacated slot.
 */
static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
					   int index)
{
	BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
	       rb->rf_records.rl_recs[index+1].r_refcount);

	le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
		     le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));

	/* Close the gap unless index+1 was already the last used slot. */
	if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
		memmove(&rb->rf_records.rl_recs[index + 1],
			&rb->rf_records.rl_recs[index + 2],
			sizeof(struct ocfs2_refcount_rec) *
			(le16_to_cpu(rb->rf_records.rl_used) - index - 2));

	memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
	       0, sizeof(struct ocfs2_refcount_rec));
	le16_add_cpu(&rb->rf_records.rl_used, -1);
}
/*
* Merge the refcount rec if we are contiguous with the adjacent recs.
*/
static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
int index)
{
enum ocfs2_ref_rec_contig contig =
ocfs2_refcount_rec_contig(rb, index);
if (contig == REF_CONTIG_NONE)
return;
if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
BUG_ON(index == 0);
index--;
}
ocfs2_rotate_refcount_rec_left(rb, index);
if (contig == REF_CONTIG_LEFTRIGHT)
ocfs2_rotate_refcount_rec_left(rb, index);
}
/*
 * Change the refcount indexed by "index" in ref_bh.
 * If refcount reaches 0, remove it.
 *
 * @change is applied as a signed delta to r_refcount; with @merge set,
 * a surviving record is merged with contiguous neighbours afterwards.
 */
static int ocfs2_change_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_leaf_bh,
				     int index, int merge, int change)
{
	int ret;
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rl = &rb->rf_records;
	struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_change_refcount_rec(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		index, le32_to_cpu(rec->r_refcount), change);
	le32_add_cpu(&rec->r_refcount, change);

	if (!rec->r_refcount) {
		/* Dropped to zero: delete the record from the list. */
		if (index != le16_to_cpu(rl->rl_used) - 1) {
			memmove(rec, rec + 1,
				(le16_to_cpu(rl->rl_used) - index - 1) *
				sizeof(struct ocfs2_refcount_rec));
			memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
			       0, sizeof(struct ocfs2_refcount_rec));
		}

		le16_add_cpu(&rl->rl_used, -1);
	} else if (merge)
		ocfs2_refcount_rec_merge(rb, index);

	ocfs2_journal_dirty(handle, ref_leaf_bh);
out:
	return ret;
}
/*
 * Convert an inline refcount root into a one-level b-tree: allocate a
 * new leaf block, copy the whole root (records included) into it, then
 * rewrite the root as an extent list whose single entry points at the
 * new leaf.  On success *ref_leaf_bh holds the new leaf; the caller
 * must brelse() it.
 */
static int ocfs2_expand_inline_ref_root(handle_t *handle,
					struct ocfs2_caching_info *ci,
					struct buffer_head *ref_root_bh,
					struct buffer_head **ref_leaf_bh,
					struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	u16 suballoc_bit_start;
	u32 num_got;
	u64 suballoc_loc, blkno;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct buffer_head *new_bh = NULL;
	struct ocfs2_refcount_block *new_rb;
	struct ocfs2_refcount_block *root_rb =
			(struct ocfs2_refcount_block *)ref_root_bh->b_data;

	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
				   &suballoc_bit_start, &num_got,
				   &blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	new_bh = sb_getblk(sb, blkno);
	if (new_bh == NULL) {
		ret = -EIO;
		mlog_errno(ret);
		goto out;
	}
	ocfs2_set_new_buffer_uptodate(ci, new_bh);

	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Initialize ocfs2_refcount_block.
	 * It should contain the same information as the old root.
	 * so just memcpy it and change the corresponding field.
	 */
	memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);

	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
	new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
	new_rb->rf_blkno = cpu_to_le64(blkno);
	new_rb->rf_cpos = cpu_to_le32(0);
	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
	ocfs2_journal_dirty(handle, new_bh);

	/* Now change the root. */
	memset(&root_rb->rf_list, 0, sb->s_blocksize -
	       offsetof(struct ocfs2_refcount_block, rf_list));
	root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
	root_rb->rf_clusters = cpu_to_le32(1);
	root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
	root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
	root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
	root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);

	ocfs2_journal_dirty(handle, ref_root_bh);

	trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno,
		le16_to_cpu(new_rb->rf_records.rl_used));

	*ref_leaf_bh = new_bh;
	new_bh = NULL;
out:
	brelse(new_bh);
	return ret;
}
static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
struct ocfs2_refcount_rec *next)
{
if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
ocfs2_get_ref_rec_low_cpos(next))
return 1;
return 0;
}
/*
 * sort() comparator: order refcount records by the low 32 bits of
 * their cpos (the key used for e_cpos in the refcount b-tree).
 */
static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
{
	const struct ocfs2_refcount_rec *lhs = a, *rhs = b;
	u32 lc = ocfs2_get_ref_rec_low_cpos(lhs);
	u32 rc = ocfs2_get_ref_rec_low_cpos(rhs);

	if (lc < rc)
		return -1;
	if (lc > rc)
		return 1;
	return 0;
}
/*
 * sort() comparator: order refcount records by their full 64 bit cpos.
 */
static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
{
	const struct ocfs2_refcount_rec *lhs = a, *rhs = b;
	u64 lc = le64_to_cpu(lhs->r_cpos);
	u64 rc = le64_to_cpu(rhs->r_cpos);

	if (lc < rc)
		return -1;
	if (lc > rc)
		return 1;
	return 0;
}
/*
 * sort() swap callback for ocfs2_refcount_rec arrays.
 *
 * @size is required by the sort() callback signature but unused: the
 * element type is fixed, so plain struct assignment suffices.
 */
static void swap_refcount_rec(void *a, void *b, int size)
{
	struct ocfs2_refcount_rec *l = a, *r = b, tmp;

	/* l and r are already correctly typed; the old casts were noise. */
	tmp = *l;
	*l = *r;
	*r = tmp;
}
/*
 * The refcount cpos are ordered by their 64bit cpos,
 * But we will use the low 32 bit to be the e_cpos in the b-tree.
 * So we need to make sure that this pos isn't intersected with others.
 *
 * Note: The refcount block is already sorted by their low 32 bit cpos,
 *       So just try the middle pos first, and we will exit when we find
 *       the good position.
 *
 * Returns -ENOSPC when no non-intersecting boundary exists near the
 * middle; otherwise fills *split_index (first record that moves to the
 * new block) and *split_pos (its low 32 bit cpos).
 */
static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
					 u32 *split_pos, int *split_index)
{
	int num_used = le16_to_cpu(rl->rl_used);
	int delta, middle = num_used / 2;

	/* Walk outwards from the middle looking for a clean boundary. */
	for (delta = 0; delta < middle; delta++) {
		/* Let's check delta earlier than middle */
		if (ocfs2_refcount_rec_no_intersect(
					&rl->rl_recs[middle - delta - 1],
					&rl->rl_recs[middle - delta])) {
			*split_index = middle - delta;
			break;
		}

		/* For even counts, don't walk off the end */
		if ((middle + delta + 1) == num_used)
			continue;

		/* Now try delta past middle */
		if (ocfs2_refcount_rec_no_intersect(
					&rl->rl_recs[middle + delta],
					&rl->rl_recs[middle + delta + 1])) {
			*split_index = middle + delta + 1;
			break;
		}
	}

	if (delta >= middle)
		return -ENOSPC;

	*split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
	return 0;
}
/*
 * Split the records of @ref_leaf_bh between itself and the freshly
 * allocated @new_bh: roughly the upper half (by low 32 bit cpos) moves
 * to the new block.  *split_cpos receives the boundary cpos, which the
 * caller inserts into the refcount b-tree as the new leaf's e_cpos.
 */
static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
					    struct buffer_head *new_bh,
					    u32 *split_cpos)
{
	int split_index = 0, num_moved, ret;
	u32 cpos = 0;
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rl = &rb->rf_records;
	struct ocfs2_refcount_block *new_rb =
			(struct ocfs2_refcount_block *)new_bh->b_data;
	struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;

	trace_ocfs2_divide_leaf_refcount_block(
		(unsigned long long)ref_leaf_bh->b_blocknr,
		le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));

	/*
	 * XXX: Improvement later.
	 * If we know all the high 32 bit cpos is the same, no need to sort.
	 *
	 * In order to make the whole process safe, we do:
	 * 1. sort the entries by their low 32 bit cpos first so that we can
	 *    find the split cpos easily.
	 * 2. call ocfs2_insert_extent to insert the new refcount block.
	 * 3. move the refcount rec to the new block.
	 * 4. sort the entries by their 64 bit cpos.
	 * 5. dirty the new_rb and rb.
	 */
	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
	     sizeof(struct ocfs2_refcount_rec),
	     cmp_refcount_rec_by_low_cpos, swap_refcount_rec);

	ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	new_rb->rf_cpos = cpu_to_le32(cpos);

	/* move refcount records starting from split_index to the new block. */
	num_moved = le16_to_cpu(rl->rl_used) - split_index;
	memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
	       num_moved * sizeof(struct ocfs2_refcount_rec));

	/* ok, remove the entries we just moved over to the other block. */
	memset(&rl->rl_recs[split_index], 0,
	       num_moved * sizeof(struct ocfs2_refcount_rec));

	/* change old and new rl_used accordingly. */
	le16_add_cpu(&rl->rl_used, -num_moved);
	new_rl->rl_used = cpu_to_le16(num_moved);

	/* Restore the canonical 64 bit cpos ordering in both blocks. */
	sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
	     sizeof(struct ocfs2_refcount_rec),
	     cmp_refcount_rec_by_cpos, swap_refcount_rec);

	sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
	     sizeof(struct ocfs2_refcount_rec),
	     cmp_refcount_rec_by_cpos, swap_refcount_rec);

	*split_cpos = cpos;
	return 0;
}
/*
 * Grow the refcount b-tree by one leaf: claim a metadata block,
 * initialize it as a leaf refcount block, move half of @ref_leaf_bh's
 * records into it (see ocfs2_divide_leaf_refcount_block) and insert it
 * into the extent tree rooted at @ref_root_bh.
 */
static int ocfs2_new_leaf_refcount_block(handle_t *handle,
					 struct ocfs2_caching_info *ci,
					 struct buffer_head *ref_root_bh,
					 struct buffer_head *ref_leaf_bh,
					 struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	u16 suballoc_bit_start;
	u32 num_got, new_cpos;
	u64 suballoc_loc, blkno;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_refcount_block *root_rb =
			(struct ocfs2_refcount_block *)ref_root_bh->b_data;
	struct buffer_head *new_bh = NULL;
	struct ocfs2_refcount_block *new_rb;
	struct ocfs2_extent_tree ref_et;

	/* The root must already be in b-tree (non-inline) form. */
	BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));

	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
				   &suballoc_bit_start, &num_got,
				   &blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	new_bh = sb_getblk(sb, blkno);
	if (new_bh == NULL) {
		ret = -EIO;
		mlog_errno(ret);
		goto out;
	}
	ocfs2_set_new_buffer_uptodate(ci, new_bh);

	ret = ocfs2_journal_access_rb(handle, ci, new_bh,
				      OCFS2_JOURNAL_ACCESS_CREATE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Initialize ocfs2_refcount_block. */
	new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
	memset(new_rb, 0, sb->s_blocksize);
	strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
	new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
	new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
	new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
	new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
	new_rb->rf_blkno = cpu_to_le64(blkno);
	new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
	new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
	new_rb->rf_records.rl_count =
				cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
	/* The new leaf belongs to the same tree generation as the root. */
	new_rb->rf_generation = root_rb->rf_generation;

	ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_journal_dirty(handle, ref_leaf_bh);
	ocfs2_journal_dirty(handle, new_bh);

	ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);

	trace_ocfs2_new_leaf_refcount_block(
			(unsigned long long)new_bh->b_blocknr, new_cpos);

	/* Insert the new leaf block with the specific offset cpos. */
	ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
				  1, 0, meta_ac);
	if (ret)
		mlog_errno(ret);

out:
	brelse(new_bh);
	return ret;
}
/*
 * Make room in the refcount tree for more records.  If the root is
 * still inline, its records are first pushed out into a leaf; then a
 * new leaf block is added via ocfs2_new_leaf_refcount_block().
 */
static int ocfs2_expand_refcount_tree(handle_t *handle,
				      struct ocfs2_caching_info *ci,
				      struct buffer_head *ref_root_bh,
				      struct buffer_head *ref_leaf_bh,
				      struct ocfs2_alloc_context *meta_ac)
{
	struct buffer_head *split_bh = NULL;
	int ret;

	if (ref_root_bh != ref_leaf_bh) {
		/* Already a b-tree: split the leaf we were given. */
		split_bh = ref_leaf_bh;
		get_bh(split_bh);
	} else {
		/*
		 * The root still holds the records inline; expand it
		 * into a one-leaf b-tree before adding another leaf.
		 */
		ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
						   &split_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/* Now add a new refcount block into the tree. */
	ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
					    split_bh, meta_ac);
	if (ret)
		mlog_errno(ret);
out:
	brelse(split_bh);
	return ret;
}
/*
 * Adjust the extent rec in b-tree representing ref_leaf_bh.
 *
 * Only called when we have inserted a new refcount rec at index 0
 * which means ocfs2_extent_rec.e_cpos may need some change.
 */
static int ocfs2_adjust_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     struct buffer_head *ref_leaf_bh,
				     struct ocfs2_refcount_rec *rec)
{
	int ret = 0, i;
	u32 new_cpos, old_cpos;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_tree et;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_root_bh->b_data;
	struct ocfs2_extent_list *el;

	/* Inline root: there is no extent rec to fix up. */
	if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
		goto out;

	rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	old_cpos = le32_to_cpu(rb->rf_cpos);
	new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
	/* Only a smaller starting cpos requires an update. */
	if (old_cpos <= new_cpos)
		goto out;

	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);

	path = ocfs2_new_path_from_et(&et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(ci, path, old_cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * 2 more credits, one for the leaf refcount block, one for
	 * the extent block contains the extent rec.
	 */
	ret = ocfs2_extend_trans(handle, 2);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	/* change the leaf extent block first. */
	el = path_leaf_el(path);

	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
		if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
			break;

	/* The old cpos must be present in the leaf extent list. */
	BUG_ON(i == le16_to_cpu(el->l_next_free_rec));

	el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);

	/* change the r_cpos in the leaf block. */
	rb->rf_cpos = cpu_to_le32(new_cpos);

	ocfs2_journal_dirty(handle, path_leaf_bh(path));
	ocfs2_journal_dirty(handle, ref_leaf_bh);

out:
	ocfs2_free_path(path);
	return ret;
}
/*
 * Insert @rec at slot @index of the leaf @ref_leaf_bh.  If the leaf is
 * full, the tree is expanded first (via ocfs2_expand_refcount_tree)
 * and the insertion point looked up again.  When @merge is set the new
 * record is merged with contiguous neighbours; an insert at index 0
 * may also require fixing the leaf's e_cpos key in the b-tree.
 */
static int ocfs2_insert_refcount_rec(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     struct buffer_head *ref_leaf_bh,
				     struct ocfs2_refcount_rec *rec,
				     int index, int merge,
				     struct ocfs2_alloc_context *meta_ac)
{
	int ret;
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_list *rf_list = &rb->rf_records;
	struct buffer_head *new_bh = NULL;

	/* Must be a leaf (or inline root), never an internal tree block. */
	BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);

	if (rf_list->rl_used == rf_list->rl_count) {
		/* Leaf is full: split the tree and re-find our slot. */
		u64 cpos = le64_to_cpu(rec->r_cpos);
		u32 len = le32_to_cpu(rec->r_clusters);

		ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
						 ref_leaf_bh, meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, NULL, &index,
					     &new_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ref_leaf_bh = new_bh;
		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
		rf_list = &rb->rf_records;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Shift the tail up to open the slot at @index. */
	if (index < le16_to_cpu(rf_list->rl_used))
		memmove(&rf_list->rl_recs[index + 1],
			&rf_list->rl_recs[index],
			(le16_to_cpu(rf_list->rl_used) - index) *
			 sizeof(struct ocfs2_refcount_rec));

	trace_ocfs2_insert_refcount_rec(
		(unsigned long long)ref_leaf_bh->b_blocknr, index,
		(unsigned long long)le64_to_cpu(rec->r_cpos),
		le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount));

	rf_list->rl_recs[index] = *rec;

	le16_add_cpu(&rf_list->rl_used, 1);

	if (merge)
		ocfs2_refcount_rec_merge(rb, index);

	ocfs2_journal_dirty(handle, ref_leaf_bh);

	if (index == 0) {
		/* New smallest cpos: the b-tree's e_cpos key may change. */
		ret = ocfs2_adjust_refcount_rec(handle, ci,
						ref_root_bh,
						ref_leaf_bh, rec);
		if (ret)
			mlog_errno(ret);
	}
out:
	brelse(new_bh);
	return ret;
}
/*
* Split the refcount_rec indexed by "index" in ref_leaf_bh.
 * This is much simpler than our b-tree code.
* split_rec is the new refcount rec we want to insert.
* If split_rec->r_refcount > 0, we are changing the refcount(in case we
* increase refcount or decrease a refcount to non-zero).
* If split_rec->r_refcount == 0, we are punching a hole in current refcount
* rec( in case we decrease a refcount to zero).
*/
static int ocfs2_split_refcount_rec(handle_t *handle,
struct ocfs2_caching_info *ci,
struct buffer_head *ref_root_bh,
struct buffer_head *ref_leaf_bh,
struct ocfs2_refcount_rec *split_rec,
int index, int merge,
struct ocfs2_alloc_context *meta_ac,
struct ocfs2_cached_dealloc_ctxt *dealloc)
{
int ret, recs_need;
u32 len;
struct ocfs2_refcount_block *rb =
(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
struct ocfs2_refcount_list *rf_list = &rb->rf_records;
struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
struct ocfs2_refcount_rec *tail_rec = NULL;
struct buffer_head *new_bh = NULL;
BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos),
le32_to_cpu(orig_rec->r_clusters),
le32_to_cpu(orig_rec->r_refcount),
le64_to_cpu(split_rec->r_cpos),
le32_to_cpu(split_rec->r_clusters),
le32_to_cpu(split_rec->r_refcount));
/*
* If we just need to split the header or tail clusters,
* no more recs are needed, just split is OK.
* Otherwise we at least need one new recs.
*/
if (!split_rec->r_refcount &&
(split_rec->r_cpos == orig_rec->r_cpos ||
le64_to_cpu(split_rec->r_cpos) +
le32_to_cpu(split_rec->r_clusters) ==
le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
recs_need = 0;
else
recs_need = 1;
/*
* We need one more rec if we split in the middle and the new rec have
* some refcount in it.
*/
if (split_rec->r_refcount &&
(split_rec->r_cpos != orig_rec->r_cpos &&
le64_to_cpu(split_rec->r_cpos) +
le32_to_cpu(split_rec->r_clusters) !=
le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
recs_need++;
/* If the leaf block don't have enough record, expand it. */
if (le16_to_cpu(rf_list->rl_used) + recs_need >
le16_to_cpu(rf_list->rl_count)) {
struct ocfs2_refcount_rec tmp_rec;
u64 cpos = le64_to_cpu(orig_rec->r_cpos);
len = le32_to_cpu(orig_rec->r_clusters);
ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
ref_leaf_bh, meta_ac);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* We have to re-get it since now cpos may be moved to
* another leaf block.
*/
ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
cpos, len, &tmp_rec, &index,
&new_bh);
if (ret) {
mlog_errno(ret);
goto out;
}
ref_leaf_bh = new_bh;
rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
rf_list = &rb->rf_records;
orig_rec = &rf_list->rl_recs[index];
}
ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
if (ret) {
mlog_errno(ret);
goto out;
}
/*
* We have calculated out how many new records we need and store
* in recs_need, so spare enough space first by moving the records
* after "index" to the end.
*/
if (index != le16_to_cpu(rf_list->rl_used) - 1)
memmove(&rf_list->rl_recs[index + 1 + recs_need],
&rf_list->rl_recs[index + 1],
(le16_to_cpu(rf_list->rl_used) - index - 1) *
sizeof(struct ocfs2_refcount_rec));
len = (le64_to_cpu(orig_rec->r_cpos) +
le32_to_cpu(orig_rec->r_clusters)) -
(le64_to_cpu(split_rec->r_cpos) +
le32_to_cpu(split_rec->r_clusters));
/*
* If we have "len", the we will split in the tail and move it
* to the end of the space we have just spared.
*/
if (len) {
tail_rec = &rf_list->rl_recs[index + recs_need];
memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
le64_add_cpu(&tail_rec->r_cpos,
le32_to_cpu(tail_rec->r_clusters) - len);
tail_rec->r_clusters = cpu_to_le32(len);
}
/*
* If the split pos isn't the same as the original one, we need to
* split in the head.
*
* Note: We have the chance that split_rec.r_refcount = 0,
* recs_need = 0 and len > 0, which means we just cut the head from
* the orig_rec and in that case we have done some modification in
* orig_rec above, so the check for r_cpos is faked.
*/
if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
len = le64_to_cpu(split_rec->r_cpos) -
le64_to_cpu(orig_rec->r_cpos);
orig_rec->r_clusters = cpu_to_le32(len);
index++;
}
le16_add_cpu(&rf_list->rl_used, recs_need);
if (split_rec->r_refcount) {
rf_list->rl_recs[index] = *split_rec;
trace_ocfs2_split_refcount_rec_insert(
(unsigned long long)ref_leaf_bh->b_blocknr, index,
(unsigned long long)le64_to_cpu(split_rec->r_cpos),
le32_to_cpu(split_rec->r_clusters),
le32_to_cpu(split_rec->r_refcount));
if (merge)
ocfs2_refcount_rec_merge(rb, index);
}
ocfs2_journal_dirty(handle, ref_leaf_bh);
out:
brelse(new_bh);
return ret;
}
/*
 * Increase by one the refcount of the clusters in [cpos, cpos + len)
 * in the refcount tree rooted at ref_root_bh.
 *
 * The range is walked one refcount record (or hole) at a time; each
 * iteration consumes set_len clusters and advances.  "merge" is passed
 * down so helpers may coalesce adjacent records.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int __ocfs2_increase_refcount(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     u64 cpos, u32 len, int merge,
				     struct ocfs2_alloc_context *meta_ac,
				     struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret = 0, index;
	struct buffer_head *ref_leaf_bh = NULL;
	struct ocfs2_refcount_rec rec;
	unsigned int set_len = 0;

	trace_ocfs2_increase_refcount_begin(
	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
	     (unsigned long long)cpos, len);

	while (len) {
		/* Look up the record (or hole) containing cpos. */
		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, &rec, &index,
					     &ref_leaf_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		set_len = le32_to_cpu(rec.r_clusters);

		/*
		 * Here we may meet with 3 situations:
		 *
		 * 1. If we find an already existing record, and the length
		 *    is the same, cool, we just need to increase the r_refcount
		 *    and it is OK.
		 * 2. If we find a hole, just insert it with r_refcount = 1.
		 * 3. If we are in the middle of one extent record, split
		 *    it.
		 */
		if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
		    set_len <= len) {
			trace_ocfs2_increase_refcount_change(
				(unsigned long long)cpos, set_len,
				le32_to_cpu(rec.r_refcount));
			ret = ocfs2_change_refcount_rec(handle, ci,
							ref_leaf_bh, index,
							merge, 1);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		} else if (!rec.r_refcount) {
			/* Hole: create a fresh record with refcount 1. */
			rec.r_refcount = cpu_to_le32(1);

			trace_ocfs2_increase_refcount_insert(
			     (unsigned long long)le64_to_cpu(rec.r_cpos),
			     set_len);
			ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
							ref_leaf_bh,
							&rec, index,
							merge, meta_ac);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		} else {
			/* Partial overlap: split out just our sub-range. */
			set_len = min((u64)(cpos + len),
				      le64_to_cpu(rec.r_cpos) + set_len) - cpos;
			rec.r_cpos = cpu_to_le64(cpos);
			rec.r_clusters = cpu_to_le32(set_len);
			le32_add_cpu(&rec.r_refcount, 1);

			trace_ocfs2_increase_refcount_split(
			     (unsigned long long)le64_to_cpu(rec.r_cpos),
			     set_len, le32_to_cpu(rec.r_refcount));
			ret = ocfs2_split_refcount_rec(handle, ci,
						       ref_root_bh, ref_leaf_bh,
						       &rec, index, merge,
						       meta_ac, dealloc);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}

		/* Advance past the clusters handled this iteration. */
		cpos += set_len;
		len -= set_len;
		brelse(ref_leaf_bh);
		ref_leaf_bh = NULL;
	}

out:
	brelse(ref_leaf_bh);
	return ret;
}
/*
 * Remove an empty refcount leaf block from the refcount b-tree and queue
 * the block itself for deallocation.  If the tree then has no leaf
 * extents left, the root block is restored to the "inline records" form.
 *
 * Caller guarantees ref_leaf_bh holds no used records (BUG_ON below).
 *
 * Returns 0 on success, negative errno on failure.
 */
static int ocfs2_remove_refcount_extent(handle_t *handle,
				struct ocfs2_caching_info *ci,
				struct buffer_head *ref_root_bh,
				struct buffer_head *ref_leaf_bh,
				struct ocfs2_alloc_context *meta_ac,
				struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_extent_tree et;

	BUG_ON(rb->rf_records.rl_used);

	trace_ocfs2_remove_refcount_extent(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)ref_leaf_bh->b_blocknr,
		le32_to_cpu(rb->rf_cpos));

	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
	ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
				  1, meta_ac, dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_remove_from_cache(ci, ref_leaf_bh);

	/*
	 * add the freed block to the dealloc so that it will be freed
	 * when we run dealloc.
	 */
	ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
					le16_to_cpu(rb->rf_suballoc_slot),
					le64_to_cpu(rb->rf_suballoc_loc),
					le64_to_cpu(rb->rf_blkno),
					le16_to_cpu(rb->rf_suballoc_bit));
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* From here on, operate on the root block. */
	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;

	le32_add_cpu(&rb->rf_clusters, -1);

	/*
	 * check whether we need to restore the root refcount block if
	 * there is no leaf extent block at all.
	 */
	if (!rb->rf_list.l_next_free_rec) {
		BUG_ON(rb->rf_clusters);

		trace_ocfs2_restore_refcount_block(
		     (unsigned long long)ref_root_bh->b_blocknr);

		rb->rf_flags = 0;
		rb->rf_parent = 0;
		rb->rf_cpos = 0;
		memset(&rb->rf_records, 0, sb->s_blocksize -
		       offsetof(struct ocfs2_refcount_block, rf_records));
		rb->rf_records.rl_count =
				cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
	}

	ocfs2_journal_dirty(handle, ref_root_bh);

out:
	return ret;
}
/*
 * Public entry point for raising the refcount of [cpos, cpos + len).
 * Thin wrapper around __ocfs2_increase_refcount() with merging of
 * adjacent refcount records always enabled (merge == 1).
 */
int ocfs2_increase_refcount(handle_t *handle,
			    struct ocfs2_caching_info *ci,
			    struct buffer_head *ref_root_bh,
			    u64 cpos, u32 len,
			    struct ocfs2_alloc_context *meta_ac,
			    struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int status;

	/* merge == 1: coalesce with neighbouring records when possible. */
	status = __ocfs2_increase_refcount(handle, ci, ref_root_bh, cpos,
					   len, 1, meta_ac, dealloc);
	return status;
}
/*
 * Decrease by one the refcount of [cpos, cpos + len), which must lie
 * entirely within the record at rl_recs[index] of ref_leaf_bh.
 *
 * If the range covers the whole record the count is changed in place;
 * otherwise the record is split first.  A leaf that ends up with no
 * used records is removed from the tree.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int ocfs2_decrease_refcount_rec(handle_t *handle,
				struct ocfs2_caching_info *ci,
				struct buffer_head *ref_root_bh,
				struct buffer_head *ref_leaf_bh,
				int index, u64 cpos, unsigned int len,
				struct ocfs2_alloc_context *meta_ac,
				struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];

	/* The range must be fully contained in the record. */
	BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
	BUG_ON(cpos + len >
	       le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));

	trace_ocfs2_decrease_refcount_rec(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)cpos, len);

	if (cpos == le64_to_cpu(rec->r_cpos) &&
	    len == le32_to_cpu(rec->r_clusters))
		ret = ocfs2_change_refcount_rec(handle, ci,
						ref_leaf_bh, index, 1, -1);
	else {
		/* Partial range: carve out a split record with count - 1. */
		struct ocfs2_refcount_rec split = *rec;
		split.r_cpos = cpu_to_le64(cpos);
		split.r_clusters = cpu_to_le32(len);

		le32_add_cpu(&split.r_refcount, -1);

		ret = ocfs2_split_refcount_rec(handle, ci,
					       ref_root_bh, ref_leaf_bh,
					       &split, index, 1,
					       meta_ac, dealloc);
	}

	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Remove the leaf refcount block if it contains no refcount record. */
	if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
		ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
						   ref_leaf_bh, meta_ac,
						   dealloc);
		if (ret)
			mlog_errno(ret);
	}

out:
	return ret;
}
/*
 * Decrease by one the refcount of the clusters in [cpos, cpos + len),
 * walking the refcount tree record by record.
 *
 * If "delete" is set, clusters whose refcount drops from 1 to 0 are
 * queued on "dealloc" for freeing.  With delete == 0 the caller asserts
 * no record in the range has refcount > 1 (BUG_ON below).
 *
 * Returns 0 on success, negative errno on failure.
 */
static int __ocfs2_decrease_refcount(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     u64 cpos, u32 len,
				     struct ocfs2_alloc_context *meta_ac,
				     struct ocfs2_cached_dealloc_ctxt *dealloc,
				     int delete)
{
	int ret = 0, index = 0;
	struct ocfs2_refcount_rec rec;
	unsigned int r_count = 0, r_len;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct buffer_head *ref_leaf_bh = NULL;

	trace_ocfs2_decrease_refcount(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)cpos, len, delete);

	while (len) {
		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, &rec, &index,
					     &ref_leaf_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		r_count = le32_to_cpu(rec.r_refcount);
		/* Decreasing a zero refcount would corrupt the tree. */
		BUG_ON(r_count == 0);
		if (!delete)
			BUG_ON(r_count > 1);

		/* Clip the work to the part of the record we overlap. */
		r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
			    le32_to_cpu(rec.r_clusters)) - cpos;

		ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
						  ref_leaf_bh, index,
						  cpos, r_len,
						  meta_ac, dealloc);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* Refcount hit zero: schedule the clusters for freeing. */
		if (le32_to_cpu(rec.r_refcount) == 1 && delete) {
			ret = ocfs2_cache_cluster_dealloc(dealloc,
					  ocfs2_clusters_to_blocks(sb, cpos),
					  r_len);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}

		cpos += r_len;
		len -= r_len;
		brelse(ref_leaf_bh);
		ref_leaf_bh = NULL;
	}

out:
	brelse(ref_leaf_bh);
	return ret;
}
/*
 * Decrease by one the refcount of the clusters in [cpos, cpos + len)
 * of "inode", freeing clusters that drop to zero when "delete" is set.
 *
 * Looks up the inode's refcount tree, reads the root block and
 * delegates to __ocfs2_decrease_refcount().
 *
 * Caller must hold refcount tree lock.
 *
 * Returns 0 on success, negative errno on failure.
 */
int ocfs2_decrease_refcount(struct inode *inode,
			    handle_t *handle, u32 cpos, u32 len,
			    struct ocfs2_alloc_context *meta_ac,
			    struct ocfs2_cached_dealloc_ctxt *dealloc,
			    int delete)
{
	int ret;
	u64 ref_blkno;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_tree *tree;

	/* The inode must already have a refcount tree attached. */
	BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));

	ret = ocfs2_get_refcount_block(inode, &ref_blkno);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
					&ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
					cpos, len, meta_ac, dealloc, delete);
	if (ret)
		mlog_errno(ret);
out:
	brelse(ref_root_bh);
	return ret;
}
/*
 * Mark the already-existing extent at cpos as refcounted for len clusters.
 * This adds the refcount extent flag.
 *
 * If the existing extent is larger than the request, initiate a
 * split. An attempt will be made at merging with adjacent extents.
 *
 * The caller is responsible for passing down meta_ac if we'll need it.
 *
 * Returns 0 on success, -EROFS if the superblock lacks the refcount
 * feature, or another negative errno on failure.
 */
static int ocfs2_mark_extent_refcounted(struct inode *inode,
					struct ocfs2_extent_tree *et,
					handle_t *handle, u32 cpos,
					u32 len, u32 phys,
					struct ocfs2_alloc_context *meta_ac,
					struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;

	trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno,
					   cpos, len, phys);

	/* Refusing avoids writing refcount metadata an old fs can't read. */
	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
		ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
			    "tree, but the feature bit is not set in the "
			    "super block.", inode->i_ino);
		ret = -EROFS;
		goto out;
	}

	ret = ocfs2_change_extent_flag(handle, et, cpos,
				       len, phys, meta_ac, dealloc,
				       OCFS2_EXT_REFCOUNTED, 0);
	if (ret)
		mlog_errno(ret);

out:
	return ret;
}
/*
 * Given some contiguous physical clusters, calculate what we need
 * for modifying their refcount.
 *
 * On success, *meta_add is increased by the number of new refcount
 * blocks we may need, and *credits by the journal credits required.
 * Neither output is reset here; callers accumulate into them.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
					    struct ocfs2_caching_info *ci,
					    struct buffer_head *ref_root_bh,
					    u64 start_cpos,
					    u32 clusters,
					    int *meta_add,
					    int *credits)
{
	int ret = 0, index, ref_blocks = 0, recs_add = 0;
	u64 cpos = start_cpos;
	struct ocfs2_refcount_block *rb;
	struct ocfs2_refcount_rec rec;
	struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
	u32 len;

	while (clusters) {
		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, clusters, &rec,
					     &index, &ref_leaf_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		if (ref_leaf_bh != prev_bh) {
			/*
			 * Now we encounter a new leaf block, so calculate
			 * whether we need to extend the old leaf.
			 */
			if (prev_bh) {
				rb = (struct ocfs2_refcount_block *)
							prev_bh->b_data;

				if (le16_to_cpu(rb->rf_records.rl_used) +
				    recs_add >
				    le16_to_cpu(rb->rf_records.rl_count))
					ref_blocks++;
			}

			recs_add = 0;
			*credits += 1;
			brelse(prev_bh);
			prev_bh = ref_leaf_bh;
			get_bh(prev_bh);
		}

		rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;

		trace_ocfs2_calc_refcount_meta_credits_iterate(
				recs_add, (unsigned long long)cpos, clusters,
				(unsigned long long)le64_to_cpu(rec.r_cpos),
				le32_to_cpu(rec.r_clusters),
				le32_to_cpu(rec.r_refcount), index);

		len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
			  le32_to_cpu(rec.r_clusters)) - cpos;
		/*
		 * We record all the records which will be inserted to the
		 * same refcount block, so that we can tell exactly whether
		 * we need a new refcount block or not.
		 *
		 * If we will insert a new one, this is easy and only happens
		 * during adding refcounted flag to the extent, so we don't
		 * have a chance of splitting. We just need one record.
		 *
		 * If the refcount rec already exists, that would be a little
		 * complicated. we may have to:
		 * 1) split at the beginning if the start pos isn't aligned.
		 *    we need 1 more record in this case.
		 * 2) split in the end if the end pos isn't aligned.
		 *    we need 1 more record in this case.
		 * 3) split in the middle because of file system fragmentation.
		 *    we need 2 more records in this case(we can't detect this
		 *    beforehand, so always think of the worst case).
		 */
		if (rec.r_refcount) {
			recs_add += 2;
			/* Check whether we need a split at the beginning. */
			if (cpos == start_cpos &&
			    cpos != le64_to_cpu(rec.r_cpos))
				recs_add++;

			/* Check whether we need a split in the end. */
			if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
			    le32_to_cpu(rec.r_clusters))
				recs_add++;
		} else
			recs_add++;

		brelse(ref_leaf_bh);
		ref_leaf_bh = NULL;
		clusters -= len;
		cpos += len;
	}

	/* Account for the last leaf we were accumulating into. */
	if (prev_bh) {
		rb = (struct ocfs2_refcount_block *)prev_bh->b_data;

		if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
		    le16_to_cpu(rb->rf_records.rl_count))
			ref_blocks++;

		*credits += 1;
	}

	if (!ref_blocks)
		goto out;

	*meta_add += ref_blocks;
	*credits += ref_blocks;

	/*
	 * So we may need ref_blocks to insert into the tree.
	 * That also means we need to change the b-tree and add that number
	 * of records since we never merge them.
	 * We need one more block for expansion since the new created leaf
	 * block is also full and needs split.
	 */
	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
		struct ocfs2_extent_tree et;

		ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
		*meta_add += ocfs2_extend_meta_needed(et.et_root_el);
		*credits += ocfs2_calc_extend_credits(sb,
						      et.et_root_el,
						      ref_blocks);
	} else {
		/* Inline root: converting it to a tree has a fixed cost. */
		*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
		*meta_add += 1;
	}

out:
	trace_ocfs2_calc_refcount_meta_credits(
		(unsigned long long)start_cpos, clusters,
		*meta_add, *credits);
	brelse(ref_leaf_bh);
	brelse(prev_bh);
	return ret;
}
/*
 * For refcount tree, we will decrease some contiguous clusters
 * refcount count, so just go through it to see how many blocks
 * we gonna touch and whether we need to create new blocks.
 *
 * Normally the refcount blocks store these refcount should be
 * contiguous also, so that we can get the number easily.
 * We will at most add split 2 refcount records and 2 more
 * refcount blocks, so just check it in a rough way.
 *
 * Caller must hold refcount tree lock.
 *
 * Returns 0 on success (with *credits and *ref_blocks filled in),
 * -EROFS if the refcount feature bit is missing, or another negative
 * errno on failure.
 */
int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
					  u64 refcount_loc,
					  u64 phys_blkno,
					  u32 clusters,
					  int *credits,
					  int *ref_blocks)
{
	int ret;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_tree *tree;
	u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
		ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
			    "tree, but the feature bit is not set in the "
			    "super block.", inode->i_ino);
		ret = -EROFS;
		goto out;
	}

	BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));

	ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
				      refcount_loc, &tree);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc,
					&ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
					       &tree->rf_ci,
					       ref_root_bh,
					       start_cpos, clusters,
					       ref_blocks, credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits);

out:
	brelse(ref_root_bh);
	return ret;
}
/* Target CoW extent granularity in bytes (1 MB). */
#define MAX_CONTIG_BYTES	1048576

/* MAX_CONTIG_BYTES expressed in clusters of this superblock. */
static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
{
	return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
}
/*
 * Mask for rounding a cluster count down to a multiple of the CoW
 * contig size.  NOTE(review): this assumes ocfs2_cow_contig_clusters()
 * is a power of two — confirm cluster sizes guarantee that.
 */
static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
{
	return ~(ocfs2_cow_contig_clusters(sb) - 1);
}
/*
 * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
 * find an offset (start + (n * contig_clusters)) that is closest to cpos
 * while still being less than or equal to it.
 *
 * The goal is to break the extent at a multiple of contig_clusters.
 */
static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
						 unsigned int start,
						 unsigned int cpos)
{
	BUG_ON(start > cpos);

	/* Round (cpos - start) down to a contig_clusters boundary. */
	return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
}
/*
 * Given a cluster count of len, pad it out so that it is a multiple
 * of contig_clusters.  Saturates at UINT_MAX if the padding would
 * overflow the unsigned range.
 */
static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
						  unsigned int len)
{
	unsigned int padded =
		(len + (ocfs2_cow_contig_clusters(sb) - 1)) &
		ocfs2_cow_contig_mask(sb);

	/* Did we wrap? */
	if (padded < len)
		padded = UINT_MAX;

	return padded;
}
/*
 * Calculate the start and number of virtual clusters we need to CoW.
 *
 * cpos is the virtual start cluster position we want to do CoW in a
 * file and write_len is the cluster length.
 * max_cpos is the place where we want to stop CoW intentionally.
 *
 * Normally we will start CoW from the beginning of the extent record
 * containing cpos.  We try to break up extents on boundaries of
 * MAX_CONTIG_BYTES so that we get good I/O from the resulting extent tree.
 *
 * Outputs go to *cow_start / *cow_len.  Returns 0 on success,
 * negative errno on failure.
 */
static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
					   struct ocfs2_extent_list *el,
					   u32 cpos,
					   u32 write_len,
					   u32 max_cpos,
					   u32 *cow_start,
					   u32 *cow_len)
{
	int ret = 0;
	int tree_height = le16_to_cpu(el->l_tree_depth), i;
	struct buffer_head *eb_bh = NULL;
	struct ocfs2_extent_block *eb = NULL;
	struct ocfs2_extent_rec *rec;
	unsigned int want_clusters, rec_end = 0;
	int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
	int leaf_clusters;

	BUG_ON(cpos + write_len > max_cpos);

	/* Descend to the leaf extent list containing cpos. */
	if (tree_height > 0) {
		ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;

		if (el->l_tree_depth) {
			ocfs2_error(inode->i_sb,
				    "Inode %lu has non zero tree depth in "
				    "leaf block %llu\n", inode->i_ino,
				    (unsigned long long)eb_bh->b_blocknr);
			ret = -EROFS;
			goto out;
		}
	}

	*cow_len = 0;
	for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
		rec = &el->l_recs[i];

		if (ocfs2_is_empty_extent(rec)) {
			mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
					"index %d\n", inode->i_ino, i);
			continue;
		}

		/* Skip records entirely before cpos. */
		if (le32_to_cpu(rec->e_cpos) +
		    le16_to_cpu(rec->e_leaf_clusters) <= cpos)
			continue;

		if (*cow_len == 0) {
			/*
			 * We should find a refcounted record in the
			 * first pass.
			 */
			BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
			*cow_start = le32_to_cpu(rec->e_cpos);
		}

		/*
		 * If we encounter a hole, a non-refcounted record or
		 * pass the max_cpos, stop the search.
		 */
		if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
		    (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) ||
		    (max_cpos <= le32_to_cpu(rec->e_cpos)))
			break;

		leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
		rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
		if (rec_end > max_cpos) {
			rec_end = max_cpos;
			leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos);
		}

		/*
		 * How many clusters do we actually need from
		 * this extent?  First we see how many we actually
		 * need to complete the write.  If that's smaller
		 * than contig_clusters, we try for contig_clusters.
		 */
		if (!*cow_len)
			want_clusters = write_len;
		else
			want_clusters = (cpos + write_len) -
				(*cow_start + *cow_len);
		if (want_clusters < contig_clusters)
			want_clusters = contig_clusters;

		/*
		 * If the write does not cover the whole extent, we
		 * need to calculate how we're going to split the extent.
		 * We try to do it on contig_clusters boundaries.
		 *
		 * Any extent smaller than contig_clusters will be
		 * CoWed in its entirety.
		 */
		if (leaf_clusters <= contig_clusters)
			*cow_len += leaf_clusters;
		else if (*cow_len || (*cow_start == cpos)) {
			/*
			 * This extent needs to be CoW'd from its
			 * beginning, so all we have to do is compute
			 * how many clusters to grab.  We align
			 * want_clusters to the edge of contig_clusters
			 * to get better I/O.
			 */
			want_clusters = ocfs2_cow_align_length(inode->i_sb,
							       want_clusters);

			if (leaf_clusters < want_clusters)
				*cow_len += leaf_clusters;
			else
				*cow_len += want_clusters;
		} else if ((*cow_start + contig_clusters) >=
			   (cpos + write_len)) {
			/*
			 * Breaking off contig_clusters at the front
			 * of the extent will cover our write.  That's
			 * easy.
			 */
			*cow_len = contig_clusters;
		} else if ((rec_end - cpos) <= contig_clusters) {
			/*
			 * Breaking off contig_clusters at the tail of
			 * this extent will cover cpos.
			 */
			*cow_start = rec_end - contig_clusters;
			*cow_len = contig_clusters;
		} else if ((rec_end - cpos) <= want_clusters) {
			/*
			 * While we can't fit the entire write in this
			 * extent, we know that the write goes from cpos
			 * to the end of the extent.  Break that off.
			 * We try to break it at some multiple of
			 * contig_clusters from the front of the extent.
			 * Failing that (ie, cpos is within
			 * contig_clusters of the front), we'll CoW the
			 * entire extent.
			 */
			*cow_start = ocfs2_cow_align_start(inode->i_sb,
							   *cow_start, cpos);
			*cow_len = rec_end - *cow_start;
		} else {
			/*
			 * Ok, the entire write lives in the middle of
			 * this extent.  Let's try to slice the extent up
			 * nicely.  Optimally, our CoW region starts at
			 * m*contig_clusters from the beginning of the
			 * extent and goes for n*contig_clusters,
			 * covering the entire write.
			 */
			*cow_start = ocfs2_cow_align_start(inode->i_sb,
							   *cow_start, cpos);

			want_clusters = (cpos + write_len) - *cow_start;
			want_clusters = ocfs2_cow_align_length(inode->i_sb,
							       want_clusters);
			if (*cow_start + want_clusters <= rec_end)
				*cow_len = want_clusters;
			else
				*cow_len = rec_end - *cow_start;
		}

		/* Have we covered our entire write yet? */
		if ((*cow_start + *cow_len) >= (cpos + write_len))
			break;

		/*
		 * If we reach the end of the extent block and don't get enough
		 * clusters, continue with the next extent block if possible.
		 */
		if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
		    eb && eb->h_next_leaf_blk) {
			brelse(eb_bh);
			eb_bh = NULL;

			ret = ocfs2_read_extent_block(INODE_CACHE(inode),
					       le64_to_cpu(eb->h_next_leaf_blk),
					       &eb_bh);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			eb = (struct ocfs2_extent_block *) eb_bh->b_data;
			el = &eb->h_list;
			/* Loop increment brings i back to 0. */
			i = -1;
		}
	}

out:
	brelse(eb_bh);
	return ret;
}
/*
 * Prepare meta_ac, data_ac and calculate credits when we want to add some
 * num_clusters in data_tree "et" and change the refcount for the old
 * clusters (starting from p_cluster) in the refcount tree.
 *
 * Note:
 * 1. since we may split the old tree, so we at most will need num_clusters + 2
 *    more new leaf records.
 * 2. In some case, we may not need to reserve new clusters (e.g, reflink), so
 *    just give data_ac = NULL.
 *
 * On failure, any meta_ac already reserved is released and *meta_ac
 * reset to NULL.  Returns 0 on success, negative errno on failure.
 */
static int ocfs2_lock_refcount_allocators(struct super_block *sb,
					  u32 p_cluster, u32 num_clusters,
					  struct ocfs2_extent_tree *et,
					  struct ocfs2_caching_info *ref_ci,
					  struct buffer_head *ref_root_bh,
					  struct ocfs2_alloc_context **meta_ac,
					  struct ocfs2_alloc_context **data_ac,
					  int *credits)
{
	int ret = 0, meta_add = 0;
	int num_free_extents = ocfs2_num_free_extents(OCFS2_SB(sb), et);

	if (num_free_extents < 0) {
		ret = num_free_extents;
		mlog_errno(ret);
		goto out;
	}

	/* Worst case: the data tree grows by num_clusters + 2 records. */
	if (num_free_extents < num_clusters + 2)
		meta_add =
			ocfs2_extend_meta_needed(et->et_root_el);

	*credits += ocfs2_calc_extend_credits(sb, et->et_root_el,
					      num_clusters + 2);

	ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
					       p_cluster, num_clusters,
					       &meta_add, credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_lock_refcount_allocators(meta_add, *credits);
	ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
						meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	if (data_ac) {
		ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
					     data_ac);
		if (ret)
			mlog_errno(ret);
	}

out:
	if (ret) {
		/* Release the metadata reservation on any failure path. */
		if (*meta_ac) {
			ocfs2_free_alloc_context(*meta_ac);
			*meta_ac = NULL;
		}
	}

	return ret;
}
/*
 * walk_page_buffers() callback: unmap a buffer so it will be re-mapped
 * to the new (CoW'd) location.  The buffer must not be dirty, or its
 * contents would be lost.  Always returns 0.
 */
static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
{
	BUG_ON(buffer_dirty(bh));

	clear_buffer_mapped(bh);

	return 0;
}
/*
 * Copy old_cluster's data to new_cluster through the page cache:
 * for each page in the range, read it up to date if needed, clear the
 * buffer mappings so they point at the new location, then map and dirty
 * the page against new_block.
 *
 * Pages past i_size are not duplicated ('end' is trimmed below).
 *
 * Returns 0 on success, negative errno on failure.
 */
int ocfs2_duplicate_clusters_by_page(handle_t *handle,
				     struct file *file,
				     u32 cpos, u32 old_cluster,
				     u32 new_cluster, u32 new_len)
{
	int ret = 0, partial;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ocfs2_caching_info *ci = INODE_CACHE(inode);
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
	struct page *page;
	pgoff_t page_index;
	unsigned int from, to, readahead_pages;
	loff_t offset, end, map_end;
	struct address_space *mapping = inode->i_mapping;

	trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
					       new_cluster, new_len);

	readahead_pages =
		(ocfs2_cow_contig_clusters(sb) <<
		 OCFS2_SB(sb)->s_clustersize_bits) >> PAGE_CACHE_SHIFT;
	offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
	end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
	/*
	 * We only duplicate pages until we reach the page contains i_size - 1.
	 * So trim 'end' to i_size.
	 */
	if (end > i_size_read(inode))
		end = i_size_read(inode);

	while (offset < end) {
		page_index = offset >> PAGE_CACHE_SHIFT;
		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
		if (map_end > end)
			map_end = end;

		/* from, to is the offset within the page. */
		from = offset & (PAGE_CACHE_SIZE - 1);
		to = PAGE_CACHE_SIZE;
		if (map_end & (PAGE_CACHE_SIZE - 1))
			to = map_end & (PAGE_CACHE_SIZE - 1);

		page = find_or_create_page(mapping, page_index, GFP_NOFS);
		/*
		 * find_or_create_page() returns NULL on allocation
		 * failure; bail out instead of dereferencing it.
		 */
		if (!page) {
			ret = -ENOMEM;
			mlog_errno(ret);
			break;
		}

		/*
		 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
		 * can't be dirtied before we CoW it out.
		 */
		if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
			BUG_ON(PageDirty(page));

		if (PageReadahead(page)) {
			page_cache_async_readahead(mapping,
						   &file->f_ra, file,
						   page, page_index,
						   readahead_pages);
		}

		if (!PageUptodate(page)) {
			ret = block_read_full_page(page, ocfs2_get_block);
			if (ret) {
				mlog_errno(ret);
				goto unlock;
			}
			/* block_read_full_page() released the page lock. */
			lock_page(page);
		}

		if (page_has_buffers(page)) {
			ret = walk_page_buffers(handle, page_buffers(page),
						from, to, &partial,
						ocfs2_clear_cow_buffer);
			if (ret) {
				mlog_errno(ret);
				goto unlock;
			}
		}

		ocfs2_map_and_dirty_page(inode, handle, from, to,
					 page, 0, &new_block);
		mark_page_accessed(page);
unlock:
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
		offset = map_end;
		if (ret)
			break;
	}

	return ret;
}
/*
 * Copy old_cluster's data to new_cluster block by block through the
 * buffer cache and the journal (used when page-based duplication isn't
 * suitable).  Each new block is journaled with ACCESS_CREATE and filled
 * from the corresponding old block.
 *
 * Returns 0 on success, negative errno on failure.
 */
int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
				    struct file *file,
				    u32 cpos, u32 old_cluster,
				    u32 new_cluster, u32 new_len)
{
	int ret = 0;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	struct ocfs2_caching_info *ci = INODE_CACHE(inode);
	int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
	u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
	u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct buffer_head *old_bh = NULL;
	struct buffer_head *new_bh = NULL;

	/* NOTE(review): shares the by_page tracepoint — no _by_jbd event. */
	trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
					       new_cluster, new_len);

	for (i = 0; i < blocks; i++, old_block++, new_block++) {
		new_bh = sb_getblk(osb->sb, new_block);
		if (new_bh == NULL) {
			/* sb_getblk() fails only on allocation failure. */
			ret = -ENOMEM;
			mlog_errno(ret);
			break;
		}

		ocfs2_set_new_buffer_uptodate(ci, new_bh);

		ret = ocfs2_read_block(ci, old_block, &old_bh, NULL);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		ret = ocfs2_journal_access(handle, ci, new_bh,
					   OCFS2_JOURNAL_ACCESS_CREATE);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
		ocfs2_journal_dirty(handle, new_bh);

		brelse(new_bh);
		brelse(old_bh);
		new_bh = NULL;
		old_bh = NULL;
	}

	brelse(new_bh);
	brelse(old_bh);
	return ret;
}
/*
 * Clear the OCFS2_EXT_REFCOUNTED flag on len clusters at cpos in the
 * extent tree "et", pointing the extent at physical cluster p_cluster.
 * Done by splitting in a replacement record with the flag removed.
 *
 * Returns 0 on success, -EROFS if the extent at cpos has vanished,
 * or another negative errno on failure.
 */
static int ocfs2_clear_ext_refcount(handle_t *handle,
				    struct ocfs2_extent_tree *et,
				    u32 cpos, u32 p_cluster, u32 len,
				    unsigned int ext_flags,
				    struct ocfs2_alloc_context *meta_ac,
				    struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret, index;
	struct ocfs2_extent_rec replace_rec;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el;
	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
	u64 ino = ocfs2_metadata_cache_owner(et->et_ci);

	trace_ocfs2_clear_ext_refcount((unsigned long long)ino,
				       cpos, len, p_cluster, ext_flags);

	/* Build the replacement record: same range, refcount flag off. */
	memset(&replace_rec, 0, sizeof(replace_rec));
	replace_rec.e_cpos = cpu_to_le32(cpos);
	replace_rec.e_leaf_clusters = cpu_to_le16(len);
	replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
								   p_cluster));
	replace_rec.e_flags = ext_flags;
	replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;

	path = ocfs2_new_path_from_et(et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_path(et->et_ci, path, cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	el = path_leaf_el(path);

	index = ocfs2_search_extent_list(el, cpos);
	if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
		ocfs2_error(sb,
			    "Inode %llu has an extent at cpos %u which can no "
			    "longer be found.\n",
			    (unsigned long long)ino, cpos);
		ret = -EROFS;
		goto out;
	}

	ret = ocfs2_split_extent(handle, et, path, index,
				 &replace_rec, meta_ac, dealloc);
	if (ret)
		mlog_errno(ret);

out:
	ocfs2_free_path(path);
	return ret;
}
/*
 * Replace the clusters at cpos: duplicate data from "old" to "new"
 * (unless the old clusters are unwritten and hold no data), then clear
 * the refcounted flag on the extent, pointing it at "new".
 *
 * Returns 0 on success, negative errno on failure.
 */
static int ocfs2_replace_clusters(handle_t *handle,
				  struct ocfs2_cow_context *context,
				  u32 cpos, u32 old,
				  u32 new, u32 len,
				  unsigned int ext_flags)
{
	int ret;
	struct ocfs2_caching_info *ci = context->data_et.et_ci;
	u64 ino = ocfs2_metadata_cache_owner(ci);

	trace_ocfs2_replace_clusters((unsigned long long)ino,
				     cpos, old, new, len, ext_flags);

	/* If the old clusters are unwritten, no need to duplicate. */
	if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
		ret = context->cow_duplicate_clusters(handle, context->file,
						      cpos, old, new, len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	ret = ocfs2_clear_ext_refcount(handle, &context->data_et,
				       cpos, new, len, ext_flags,
				       context->meta_ac, &context->dealloc);
	if (ret)
		mlog_errno(ret);
out:
	return ret;
}
/*
 * Push the CoW'd range [cpos, cpos + num_clusters) to disk: kick off
 * writeback for the range, then wait on each page.  Skipped entirely
 * for ordered-data mode, where the journal guarantees ordering.
 *
 * Returns 0 on success, negative errno on failure.
 */
int ocfs2_cow_sync_writeback(struct super_block *sb,
			     struct inode *inode,
			     u32 cpos, u32 num_clusters)
{
	int ret = 0;
	loff_t offset, end, map_end;
	pgoff_t page_index;
	struct page *page;

	if (ocfs2_should_order_data(inode))
		return 0;

	offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
	end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);

	ret = filemap_fdatawrite_range(inode->i_mapping,
				       offset, end - 1);
	if (ret < 0) {
		mlog_errno(ret);
		return ret;
	}

	while (offset < end) {
		page_index = offset >> PAGE_CACHE_SHIFT;
		map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
		if (map_end > end)
			map_end = end;

		page = find_or_create_page(inode->i_mapping,
					   page_index, GFP_NOFS);
		/*
		 * An allocation failure here is recoverable — return
		 * -ENOMEM rather than crashing with BUG_ON(!page).
		 */
		if (!page) {
			ret = -ENOMEM;
			mlog_errno(ret);
			break;
		}

		wait_on_page_writeback(page);
		if (PageError(page)) {
			ret = -EIO;
			mlog_errno(ret);
		} else
			mark_page_accessed(page);

		unlock_page(page);
		page_cache_release(page);
		page = NULL;
		offset = map_end;
		if (ret)
			break;
	}

	return ret;
}
/*
 * ->get_clusters hook for regular file data: map a virtual cluster of
 * context->inode through the inode's extent tree.
 */
static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context,
				 u32 v_cluster, u32 *p_cluster,
				 u32 *num_clusters,
				 unsigned int *extent_flags)
{
	return ocfs2_get_clusters(context->inode, v_cluster, p_cluster,
				  num_clusters, extent_flags);
}
/*
 * Break the sharing of the clusters [p_cluster, p_cluster + num_clusters)
 * mapped at file offset @cpos: extents whose refcount is 1 simply lose
 * the refcounted flag, while shared extents get fresh clusters allocated,
 * their data duplicated, and their refcount decreased.  Everything runs
 * inside one transaction sized by ocfs2_lock_refcount_allocators().
 */
static int ocfs2_make_clusters_writable(struct super_block *sb,
					struct ocfs2_cow_context *context,
					u32 cpos, u32 p_cluster,
					u32 num_clusters, unsigned int e_flags)
{
	int ret, delete, index, credits = 0;
	u32 new_bit, new_len, orig_num_clusters;
	unsigned int set_len;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	handle_t *handle;
	struct buffer_head *ref_leaf_bh = NULL;
	struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
	struct ocfs2_refcount_rec rec;

	trace_ocfs2_make_clusters_writable(cpos, p_cluster,
					   num_clusters, e_flags);

	/* Reserve metadata/data allocators and compute journal credits. */
	ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
					     &context->data_et,
					     ref_ci,
					     context->ref_root_bh,
					     &context->meta_ac,
					     &context->data_ac, &credits);
	if (ret) {
		mlog_errno(ret);
		return ret;
	}

	if (context->post_refcount)
		credits += context->post_refcount->credits;

	credits += context->extra_credits;
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	orig_num_clusters = num_clusters;

	while (num_clusters) {
		/* Find the refcount record covering p_cluster. */
		ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
					     p_cluster, num_clusters,
					     &rec, &index, &ref_leaf_bh);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}

		BUG_ON(!rec.r_refcount);
		/* Only handle the part of our range covered by this record. */
		set_len = min((u64)p_cluster + num_clusters,
			      le64_to_cpu(rec.r_cpos) +
			      le32_to_cpu(rec.r_clusters)) - p_cluster;

		/*
		 * There are many different situations here.
		 * 1. If refcount == 1, remove the flag and don't COW.
		 * 2. If refcount > 1, allocate clusters.
		 *    Here we may not allocate r_len once at a time, so continue
		 *    until we reach num_clusters.
		 */
		if (le32_to_cpu(rec.r_refcount) == 1) {
			delete = 0;
			ret = ocfs2_clear_ext_refcount(handle,
						       &context->data_et,
						       cpos, p_cluster,
						       set_len, e_flags,
						       context->meta_ac,
						       &context->dealloc);
			if (ret) {
				mlog_errno(ret);
				goto out_commit;
			}
		} else {
			delete = 1;

			ret = __ocfs2_claim_clusters(handle,
						     context->data_ac,
						     1, set_len,
						     &new_bit, &new_len);
			if (ret) {
				mlog_errno(ret);
				goto out_commit;
			}

			/* Copy old data into the new clusters and remap. */
			ret = ocfs2_replace_clusters(handle, context,
						     cpos, p_cluster, new_bit,
						     new_len, e_flags);
			if (ret) {
				mlog_errno(ret);
				goto out_commit;
			}
			/* The allocator may have given us less than set_len. */
			set_len = new_len;
		}

		ret = __ocfs2_decrease_refcount(handle, ref_ci,
						context->ref_root_bh,
						p_cluster, set_len,
						context->meta_ac,
						&context->dealloc, delete);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}

		cpos += set_len;
		p_cluster += set_len;
		num_clusters -= set_len;
		brelse(ref_leaf_bh);
		ref_leaf_bh = NULL;
	}

	/* handle any post_cow action. */
	if (context->post_refcount && context->post_refcount->func) {
		ret = context->post_refcount->func(context->inode, handle,
						context->post_refcount->para);
		if (ret) {
			mlog_errno(ret);
			goto out_commit;
		}
	}

	/*
	 * Here we should write the new page out first if we are
	 * in write-back mode.
	 */
	if (context->get_clusters == ocfs2_di_get_clusters) {
		ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos,
					       orig_num_clusters);
		if (ret)
			mlog_errno(ret);
	}

out_commit:
	ocfs2_commit_trans(osb, handle);

out:
	/* Allocation contexts are per-call; release them for the next pass. */
	if (context->data_ac) {
		ocfs2_free_alloc_context(context->data_ac);
		context->data_ac = NULL;
	}
	if (context->meta_ac) {
		ocfs2_free_alloc_context(context->meta_ac);
		context->meta_ac = NULL;
	}
	brelse(ref_leaf_bh);

	return ret;
}
/*
 * Walk the extents covering [context->cow_start, cow_start + cow_len)
 * and make each refcounted chunk writable.  Deallocated clusters, if
 * any, are flushed at the end.
 *
 * Returns 0 on success or a negative errno from the first failure.
 */
static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
{
	int ret = 0;
	struct inode *inode = context->inode;
	u32 cow_start = context->cow_start, cow_len = context->cow_len;
	u32 p_cluster, num_clusters;
	unsigned int ext_flags;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* Use the cached osb instead of re-deriving it from the inode. */
	if (!ocfs2_refcount_tree(osb)) {
		ocfs2_error(inode->i_sb, "Inode %lu want to use refcount "
			    "tree, but the feature bit is not set in the "
			    "super block.", inode->i_ino);
		return -EROFS;
	}

	ocfs2_init_dealloc_ctxt(&context->dealloc);

	while (cow_len) {
		ret = context->get_clusters(context, cow_start, &p_cluster,
					    &num_clusters, &ext_flags);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		/* The caller guaranteed this whole range is refcounted. */
		BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));

		if (cow_len < num_clusters)
			num_clusters = cow_len;

		ret = ocfs2_make_clusters_writable(inode->i_sb, context,
						   cow_start, p_cluster,
						   num_clusters, ext_flags);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		cow_len -= num_clusters;
		cow_start += num_clusters;
	}

	if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
		ocfs2_schedule_truncate_log_flush(osb, 1);
		ocfs2_run_deallocs(osb, &context->dealloc);
	}

	return ret;
}
/*
 * Kick off readahead over clusters [start, start + len) so the source
 * pages are likely resident before the CoW copy touches them.  A NULL
 * @file means there is no readahead state to drive, so do nothing.
 */
static void ocfs2_readahead_for_cow(struct inode *inode,
				    struct file *file,
				    u32 start, u32 len)
{
	struct address_space *mapping;
	pgoff_t first_index;
	unsigned long nr_pages;
	int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;

	if (!file)
		return;

	mapping = file->f_mapping;

	/* Convert the cluster count to pages; always read at least one. */
	nr_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
	if (!nr_pages)
		nr_pages = 1;

	first_index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
	page_cache_sync_readahead(mapping, &file->f_ra, file,
				  first_index, nr_pages);
}
/*
 * Starting at cpos, try to CoW write_len clusters.  Don't CoW
 * past max_cpos.  This will stop when it runs into a hole or an
 * unrefcounted extent.
 */
static int ocfs2_refcount_cow_hunk(struct inode *inode,
				   struct file *file,
				   struct buffer_head *di_bh,
				   u32 cpos, u32 write_len, u32 max_cpos)
{
	int ret;
	u32 cow_start = 0, cow_len = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_refcount_tree *ref_tree;
	struct ocfs2_cow_context *context = NULL;

	BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));

	/* Widen [cpos, cpos+write_len) to full extents, bounded by max_cpos. */
	ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list,
					      cpos, write_len, max_cpos,
					      &cow_start, &cow_len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno,
				      cpos, write_len, max_cpos,
				      cow_start, cow_len);

	BUG_ON(cow_len == 0);

	/* Warm the page cache before the copy loop starts reading. */
	ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);

	context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
	if (!context) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
				       1, &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	context->inode = inode;
	context->cow_start = cow_start;
	context->cow_len = cow_len;
	context->ref_tree = ref_tree;
	context->ref_root_bh = ref_root_bh;
	/* File data is duplicated through the page cache. */
	context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
	context->get_clusters = ocfs2_di_get_clusters;
	context->file = file;

	ocfs2_init_dinode_extent_tree(&context->data_et,
				      INODE_CACHE(inode), di_bh);

	ret = ocfs2_replace_cow(context);
	if (ret)
		mlog_errno(ret);

	/*
	 * truncate the extent map here since no matter whether we meet with
	 * any error during the action, we shouldn't trust cached extent map
	 * any more.
	 */
	ocfs2_extent_map_trunc(inode, cow_start);

	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);
out:
	/* kfree(NULL) is a no-op, so the early-error paths are fine. */
	kfree(context);
	return ret;
}
/*
 * CoW every refcounted extent in [cpos, cpos + write_len), never going
 * past max_cpos.  On successful return the whole range is safe for the
 * caller to modify in place.
 */
int ocfs2_refcount_cow(struct inode *inode,
		       struct file *file,
		       struct buffer_head *di_bh,
		       u32 cpos, u32 write_len, u32 max_cpos)
{
	int ret = 0;
	u32 phys, nr_clusters;
	unsigned int flags;

	for (; write_len; write_len -= nr_clusters, cpos += nr_clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys,
					 &nr_clusters, &flags);
		if (ret) {
			mlog_errno(ret);
			break;
		}

		/* Clamp the extent to the remaining request. */
		if (nr_clusters > write_len)
			nr_clusters = write_len;

		/* Unshared extents need no CoW; just skip over them. */
		if (!(flags & OCFS2_EXT_REFCOUNTED))
			continue;

		ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos,
					      nr_clusters, max_cpos);
		if (ret) {
			mlog_errno(ret);
			break;
		}
	}

	return ret;
}
/*
 * ->get_clusters hook for xattr values: map a virtual cluster through the
 * xattr value root stashed in context->cow_object.
 */
static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context,
					  u32 v_cluster, u32 *p_cluster,
					  u32 *num_clusters,
					  unsigned int *extent_flags)
{
	struct inode *inode = context->inode;
	struct ocfs2_xattr_value_root *xv = context->cow_object;

	return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster,
					num_clusters, &xv->xr_list,
					extent_flags);
}
/*
 * Given a xattr value root, calculate the most meta/credits we need for
 * refcount tree change if we truncate it to 0.
 */
int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
				       struct ocfs2_caching_info *ref_ci,
				       struct buffer_head *ref_root_bh,
				       struct ocfs2_xattr_value_root *xv,
				       int *meta_add, int *credits)
{
	int ret = 0, index, ref_blocks = 0;
	u32 p_cluster, num_clusters;
	u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters);
	struct ocfs2_refcount_block *rb;
	struct ocfs2_refcount_rec rec;
	struct buffer_head *ref_leaf_bh = NULL;

	/* Walk every extent of the value and tally worst-case growth. */
	while (cpos < clusters) {
		ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
					       &num_clusters, &xv->xr_list,
					       NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		cpos += num_clusters;

		while (num_clusters) {
			ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh,
						     p_cluster, num_clusters,
						     &rec, &index,
						     &ref_leaf_bh);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			BUG_ON(!rec.r_refcount);

			rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;

			/*
			 * We really don't know whether the other clusters is in
			 * this refcount block or not, so just take the worst
			 * case that all the clusters are in this block and each
			 * one will split a refcount rec, so totally we need
			 * clusters * 2 new refcount rec.
			 */
			if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
			    le16_to_cpu(rb->rf_records.rl_count))
				ref_blocks++;

			*credits += 1;
			brelse(ref_leaf_bh);
			ref_leaf_bh = NULL;

			if (num_clusters <= le32_to_cpu(rec.r_clusters))
				break;
			else
				num_clusters -= le32_to_cpu(rec.r_clusters);
			/*
			 * NOTE(review): this advances by the *remaining*
			 * count rather than by rec.r_clusters, so it can
			 * skip past clusters of the current record — verify
			 * against the upstream history whether this is
			 * intentional.
			 */
			p_cluster += num_clusters;
		}
	}

	*meta_add += ref_blocks;
	if (!ref_blocks)
		goto out;

	/* Account credits for growing the refcount tree itself. */
	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
	if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
		*credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
	else {
		struct ocfs2_extent_tree et;

		ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh);
		*credits += ocfs2_calc_extend_credits(inode->i_sb,
						      et.et_root_el,
						      ref_blocks);
	}

out:
	brelse(ref_leaf_bh);
	return ret;
}
/*
 * Do CoW for xattr.  The caller already holds the refcount tree lock
 * (@ref_tree/@ref_root_bh), unlike the file-data path which takes it
 * itself.
 */
int ocfs2_refcount_cow_xattr(struct inode *inode,
			     struct ocfs2_dinode *di,
			     struct ocfs2_xattr_value_buf *vb,
			     struct ocfs2_refcount_tree *ref_tree,
			     struct buffer_head *ref_root_bh,
			     u32 cpos, u32 write_len,
			     struct ocfs2_post_refcount *post)
{
	int ret;
	struct ocfs2_xattr_value_root *xv = vb->vb_xv;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_cow_context *context = NULL;
	u32 cow_start, cow_len;

	BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));

	/* Widen the request to full extents; no upper bound for xattrs. */
	ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list,
					      cpos, write_len, UINT_MAX,
					      &cow_start, &cow_len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	BUG_ON(cow_len == 0);

	context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
	if (!context) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	context->inode = inode;
	context->cow_start = cow_start;
	context->cow_len = cow_len;
	context->ref_tree = ref_tree;
	context->ref_root_bh = ref_root_bh;
	context->cow_object = xv;

	/* xattr data bypasses the page cache, so copy through the journal. */
	context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd;
	/* We need the extra credits for duplicate_clusters by jbd. */
	context->extra_credits =
		ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len;
	context->get_clusters = ocfs2_xattr_value_get_clusters;
	context->post_refcount = post;

	ocfs2_init_xattr_value_extent_tree(&context->data_et,
					   INODE_CACHE(inode), vb);

	ret = ocfs2_replace_cow(context);
	if (ret)
		mlog_errno(ret);

out:
	kfree(context);
	return ret;
}
/*
 * Insert a new extent into the refcount tree and mark an extent rec
 * as refcounted in the dinode tree.  @post, if set, runs inside the
 * same transaction after the refcount has been recorded.
 */
int ocfs2_add_refcount_flag(struct inode *inode,
			    struct ocfs2_extent_tree *data_et,
			    struct ocfs2_caching_info *ref_ci,
			    struct buffer_head *ref_root_bh,
			    u32 cpos, u32 p_cluster, u32 num_clusters,
			    struct ocfs2_cached_dealloc_ctxt *dealloc,
			    struct ocfs2_post_refcount *post)
{
	int ret;
	handle_t *handle;
	int credits = 1, ref_blocks = 0;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_alloc_context *meta_ac = NULL;

	/* Size the transaction and any new refcount blocks we may need. */
	ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
					       ref_ci, ref_root_bh,
					       p_cluster, num_clusters,
					       &ref_blocks, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_add_refcount_flag(ref_blocks, credits);

	if (ref_blocks) {
		ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
							ref_blocks, &meta_ac);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	if (post)
		credits += post->credits;

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	/* Flag the extent as refcounted in the data tree... */
	ret = ocfs2_mark_extent_refcounted(inode, data_et, handle,
					   cpos, num_clusters, p_cluster,
					   meta_ac, dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* ...and record its reference in the refcount tree. */
	ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
					p_cluster, num_clusters, 0,
					meta_ac, dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Let the caller do extra work while the transaction is open. */
	if (post && post->func) {
		ret = post->func(inode, handle, post->para);
		if (ret)
			mlog_errno(ret);
	}

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	return ret;
}
/*
 * Bump @inode's ctime and flush the change into its on-disk dinode
 * inside a small, self-contained transaction.
 */
static int ocfs2_change_ctime(struct inode *inode,
			      struct buffer_head *di_bh)
{
	int err;
	handle_t *handle;
	struct ocfs2_dinode *dinode = (struct ocfs2_dinode *)di_bh->b_data;

	handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
				   OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		mlog_errno(err);
		goto bail;
	}

	err = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (err) {
		mlog_errno(err);
		goto bail_commit;
	}

	/* Mirror the in-core ctime into the little-endian dinode fields. */
	inode->i_ctime = CURRENT_TIME;
	dinode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	dinode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);

	ocfs2_journal_dirty(handle, di_bh);

bail_commit:
	ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
bail:
	return err;
}
/*
 * Attach a refcount tree to @inode (creating one if it has none yet) and
 * mark all of its data extents — and any refcountable xattr clusters —
 * as shared, so a subsequent reflink only has to bump refcounts.
 */
static int ocfs2_attach_refcount_tree(struct inode *inode,
				      struct buffer_head *di_bh)
{
	int ret, data_changed = 0;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_refcount_tree *ref_tree;
	unsigned int ext_flags;
	loff_t size;
	u32 cpos, num_clusters, clusters, p_cluster;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_extent_tree di_et;

	ocfs2_init_dealloc_ctxt(&dealloc);

	if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) {
		ret = ocfs2_create_refcount_tree(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	BUG_ON(!di->i_refcount_loc);
	ret = ocfs2_lock_refcount_tree(osb,
				       le64_to_cpu(di->i_refcount_loc), 1,
				       &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Inline data has no extents to flag. */
	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		goto attach_xattr;

	ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);

	size = i_size_read(inode);
	clusters = ocfs2_clusters_for_bytes(inode->i_sb, size);

	cpos = 0;
	while (cpos < clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
					 &num_clusters, &ext_flags);
		/*
		 * Fix: the return value used to be ignored here, so a
		 * lookup failure would leave p_cluster/num_clusters
		 * uninitialized and advance the loop on garbage.
		 */
		if (ret) {
			mlog_errno(ret);
			goto unlock;
		}
		if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
			ret = ocfs2_add_refcount_flag(inode, &di_et,
						      &ref_tree->rf_ci,
						      ref_root_bh, cpos,
						      p_cluster, num_clusters,
						      &dealloc, NULL);
			if (ret) {
				mlog_errno(ret);
				goto unlock;
			}

			data_changed = 1;
		}
		cpos += num_clusters;
	}

attach_xattr:
	if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
		ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
						       &ref_tree->rf_ci,
						       ref_root_bh,
						       &dealloc);
		if (ret) {
			mlog_errno(ret);
			goto unlock;
		}
	}

	/* Sharing the data changed it "logically"; bump ctime. */
	if (data_changed) {
		ret = ocfs2_change_ctime(inode, di_bh);
		if (ret)
			mlog_errno(ret);
	}

unlock:
	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);

	if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) {
		ocfs2_schedule_truncate_log_flush(osb, 1);
		ocfs2_run_deallocs(osb, &dealloc);
	}

out:
	/*
	 * Empty the extent map so that we may get the right extent
	 * record from the disk.
	 */
	ocfs2_extent_map_trunc(inode, 0);

	return ret;
}
/*
 * Insert an already-refcounted extent into @et (the target inode's data
 * tree) and bump the clusters' refcount, all in one transaction.  Used
 * while duplicating a source inode's extent list during reflink.
 */
static int ocfs2_add_refcounted_extent(struct inode *inode,
				       struct ocfs2_extent_tree *et,
				       struct ocfs2_caching_info *ref_ci,
				       struct buffer_head *ref_root_bh,
				       u32 cpos, u32 p_cluster, u32 num_clusters,
				       unsigned int ext_flags,
				       struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;
	handle_t *handle;
	int credits = 0;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_alloc_context *meta_ac = NULL;

	/* Reserve metadata and compute credits for both tree updates. */
	ret = ocfs2_lock_refcount_allocators(inode->i_sb,
					     p_cluster, num_clusters,
					     et, ref_ci,
					     ref_root_bh, &meta_ac,
					     NULL, &credits);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_insert_extent(handle, et, cpos,
			ocfs2_clusters_to_blocks(inode->i_sb, p_cluster),
			num_clusters, ext_flags, meta_ac);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
				      p_cluster, num_clusters,
				      meta_ac, dealloc);
	if (ret)
		mlog_errno(ret);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	if (meta_ac)
		ocfs2_free_alloc_context(meta_ac);
	return ret;
}
/*
 * Copy the inline data payload of @s_inode's dinode into @t_inode's,
 * and set the target's INLINE_DATA flag both in core and on disk.
 */
static int ocfs2_duplicate_inline_data(struct inode *s_inode,
				       struct buffer_head *s_bh,
				       struct inode *t_inode,
				       struct buffer_head *t_bh)
{
	int ret;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
	struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
	struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;

	BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* id_count stays little-endian; only the copy length is converted. */
	t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
	memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
	       le16_to_cpu(s_di->id2.i_data.id_count));

	/* ip_lock guards the in-core dynamic feature flags. */
	spin_lock(&OCFS2_I(t_inode)->ip_lock);
	OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
	t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
	spin_unlock(&OCFS2_I(t_inode)->ip_lock);

	ocfs2_journal_dirty(handle, t_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}
/*
 * Walk every allocated extent of @s_inode and insert a matching,
 * refcount-bumped extent into @t_inode's dinode tree.  Holes are
 * simply skipped.
 */
static int ocfs2_duplicate_extent_list(struct inode *s_inode,
				struct inode *t_inode,
				struct buffer_head *t_bh,
				struct ocfs2_caching_info *ref_ci,
				struct buffer_head *ref_root_bh,
				struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret = 0;
	u32 p_cluster, num_clusters, clusters, cpos;
	loff_t size;
	unsigned int ext_flags;
	struct ocfs2_extent_tree et;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh);

	size = i_size_read(s_inode);
	clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size);

	cpos = 0;
	while (cpos < clusters) {
		ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
					 &num_clusters, &ext_flags);
		/*
		 * Fix: the return value used to be ignored here, so a
		 * lookup failure would leave p_cluster/num_clusters
		 * uninitialized and advance the loop on garbage.
		 */
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		if (p_cluster) {
			ret = ocfs2_add_refcounted_extent(t_inode, &et,
							  ref_ci, ref_root_bh,
							  cpos, p_cluster,
							  num_clusters,
							  ext_flags,
							  dealloc);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}

		cpos += num_clusters;
	}

out:
	return ret;
}
/*
 * change the new file's attributes to the src.
 *
 * reflink creates a snapshot of a file, that means the attributes
 * must be identical except for three exceptions - nlink, ino, and ctime.
 */
static int ocfs2_complete_reflink(struct inode *s_inode,
				  struct buffer_head *s_bh,
				  struct inode *t_inode,
				  struct buffer_head *t_bh,
				  bool preserve)
{
	int ret;
	handle_t *handle;
	struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data;
	loff_t size = i_size_read(s_inode);

	handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb),
				   OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		return ret;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* Mirror the source's in-core state under the target's ip_lock. */
	spin_lock(&OCFS2_I(t_inode)->ip_lock);
	OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters;
	OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr;
	OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
	spin_unlock(&OCFS2_I(t_inode)->ip_lock);

	i_size_write(t_inode, size);
	t_inode->i_blocks = s_inode->i_blocks;

	/* On-disk fields are copied verbatim (already little-endian). */
	di->i_xattr_inline_size = s_di->i_xattr_inline_size;
	di->i_clusters = s_di->i_clusters;
	di->i_size = s_di->i_size;
	di->i_dyn_features = s_di->i_dyn_features;
	di->i_attr = s_di->i_attr;

	if (preserve) {
		t_inode->i_uid = s_inode->i_uid;
		t_inode->i_gid = s_inode->i_gid;
		t_inode->i_mode = s_inode->i_mode;
		di->i_uid = s_di->i_uid;
		di->i_gid = s_di->i_gid;
		di->i_mode = s_di->i_mode;

		/*
		 * update time.
		 * we want mtime to appear identical to the source and
		 * update ctime.
		 */
		t_inode->i_ctime = CURRENT_TIME;

		di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
		di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);

		t_inode->i_mtime = s_inode->i_mtime;
		di->i_mtime = s_di->i_mtime;
		di->i_mtime_nsec = s_di->i_mtime_nsec;
	}

	ocfs2_journal_dirty(handle, t_bh);

out_commit:
	ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle);
	return ret;
}
/*
 * Make @t_inode share @s_inode's data: point it at the same refcount
 * tree, then either copy the inline payload or duplicate the extent
 * list with bumped refcounts.
 */
static int ocfs2_create_reflink_node(struct inode *s_inode,
				     struct buffer_head *s_bh,
				     struct inode *t_inode,
				     struct buffer_head *t_bh,
				     bool preserve)
{
	int ret;
	struct buffer_head *ref_root_bh = NULL;
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
	struct ocfs2_refcount_tree *ref_tree;

	ocfs2_init_dealloc_ctxt(&dealloc);

	ret = ocfs2_set_refcount_tree(t_inode, t_bh,
				      le64_to_cpu(di->i_refcount_loc));
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Inline data is copied byte-for-byte; no extents to duplicate. */
	if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
						  t_inode, t_bh);
		if (ret)
			mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
				       1, &ref_tree, &ref_root_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/*
	 * Fix: dropped the unused local `rb` — it was assigned from
	 * ref_root_bh->b_data but never read (dead store), along with the
	 * goto that jumped to the immediately following label.
	 */
	ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
					  &ref_tree->rf_ci, ref_root_bh,
					  &dealloc);
	if (ret)
		mlog_errno(ret);

	ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
	brelse(ref_root_bh);
out:
	if (ocfs2_dealloc_has_cluster(&dealloc)) {
		ocfs2_schedule_truncate_log_flush(osb, 1);
		ocfs2_run_deallocs(osb, &dealloc);
	}

	return ret;
}
/*
 * Core reflink: flush the source's dirty data, attach/extend its
 * refcount tree, then build the target node's metadata and attributes
 * under the target inode's cluster lock.  Caller holds the source's
 * inode lock, ip_alloc_sem and ip_xattr_sem.
 */
static int __ocfs2_reflink(struct dentry *old_dentry,
			   struct buffer_head *old_bh,
			   struct inode *new_inode,
			   bool preserve)
{
	int ret;
	struct inode *inode = old_dentry->d_inode;
	struct buffer_head *new_bh = NULL;

	/* System files must never be reflinked. */
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	/* Start writeback now; we wait for it at the bottom on success. */
	ret = filemap_fdatawrite(inode->i_mapping);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_attach_refcount_tree(inode, old_bh);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	mutex_lock_nested(&new_inode->i_mutex, I_MUTEX_CHILD);
	ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
				      OI_LS_REFLINK_TARGET);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock;
	}

	ret = ocfs2_create_reflink_node(inode, old_bh,
					new_inode, new_bh, preserve);
	if (ret) {
		mlog_errno(ret);
		goto inode_unlock;
	}

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
		ret = ocfs2_reflink_xattrs(inode, old_bh,
					   new_inode, new_bh,
					   preserve);
		if (ret) {
			mlog_errno(ret);
			goto inode_unlock;
		}
	}

	ret = ocfs2_complete_reflink(inode, old_bh,
				     new_inode, new_bh, preserve);
	if (ret)
		mlog_errno(ret);

inode_unlock:
	ocfs2_inode_unlock(new_inode, 1);
	brelse(new_bh);
out_unlock:
	mutex_unlock(&new_inode->i_mutex);
out:
	if (!ret) {
		ret = filemap_fdatawait(inode->i_mapping);
		if (ret)
			mlog_errno(ret);
	}
	return ret;
}
/*
 * Top-level reflink: create the target inode in the orphan directory,
 * take the source's cluster lock and semaphores, perform the reflink,
 * and finally move the new inode out of the orphan dir under
 * @new_dentry's name.
 */
static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
			 struct dentry *new_dentry, bool preserve)
{
	int error;
	struct inode *inode = old_dentry->d_inode;
	struct buffer_head *old_bh = NULL;
	struct inode *new_orphan_inode = NULL;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	/* Build the target in the orphan dir so a crash can't leak it. */
	error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
					     &new_orphan_inode);
	if (error) {
		mlog_errno(error);
		goto out;
	}

	error = ocfs2_inode_lock(inode, &old_bh, 1);
	if (error) {
		mlog_errno(error);
		goto out;
	}

	/* Lock order: xattr_sem outside alloc_sem. */
	down_write(&OCFS2_I(inode)->ip_xattr_sem);
	down_write(&OCFS2_I(inode)->ip_alloc_sem);
	error = __ocfs2_reflink(old_dentry, old_bh,
				new_orphan_inode, preserve);
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	up_write(&OCFS2_I(inode)->ip_xattr_sem);
	ocfs2_inode_unlock(inode, 1);
	brelse(old_bh);

	if (error) {
		mlog_errno(error);
		goto out;
	}

	/* If the security isn't preserved, we need to re-initialize them. */
	if (!preserve) {
		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
						    &new_dentry->d_name);
		if (error)
			mlog_errno(error);
	}
out:
	/* Only link the new inode into @dir if everything succeeded. */
	if (!error) {
		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
						       new_dentry);
		if (error)
			mlog_errno(error);
	}

	if (new_orphan_inode) {
		/*
		 * We need to open_unlock the inode no matter whether we
		 * succeed or not, so that other nodes can delete it later.
		 */
		ocfs2_open_unlock(new_orphan_inode);
		if (error)
			iput(new_orphan_inode);
	}

	return error;
}
/*
* Below here are the bits used by OCFS2_IOC_REFLINK() to fake
* sys_reflink(). This will go away when vfs_reflink() exists in
* fs/namei.c.
*/
/* copied from may_create in VFS. */
static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
{
	/* Target must not already exist, and @dir must still be alive. */
	if (child->d_inode)
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	/* Creating a directory entry needs write+exec on the parent. */
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
/*
 * copied from user_path_parent: resolve the parent of a userspace path.
 * On success *name holds the getname() buffer, which the caller must
 * release with putname().
 */
static int ocfs2_user_path_parent(const char __user *path,
				  struct nameidata *nd, char **name)
{
	int err;
	char *tmp = getname(path);

	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	err = kern_path_parent(tmp, nd);
	if (err) {
		putname(tmp);
		return err;
	}

	*name = tmp;
	return 0;
}
/**
 * ocfs2_vfs_reflink - Create a reference-counted link
 *
 * @old_dentry:        source dentry + inode
 * @dir:       directory to create the target
 * @new_dentry: target dentry
 * @preserve:  if true, preserve all file attributes
 *
 * Performs the VFS-level permission and sanity checks, then hands off
 * to ocfs2_reflink() with the source's i_mutex held.
 */
static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
			     struct dentry *new_dentry, bool preserve)
{
	struct inode *inode = old_dentry->d_inode;
	int error;

	if (!inode)
		return -ENOENT;

	error = ocfs2_may_create(dir, new_dentry);
	if (error)
		return error;

	/* Reflinks cannot cross filesystems. */
	if (dir->i_sb != inode->i_sb)
		return -EXDEV;

	/*
	 * A reflink to an append-only or immutable file cannot be created.
	 */
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;

	/* Only regular files can be reflinked. */
	if (!S_ISREG(inode->i_mode))
		return -EPERM;

	/*
	 * If the caller wants to preserve ownership, they require the
	 * rights to do so.
	 */
	if (preserve) {
		if ((current_fsuid() != inode->i_uid) && !capable(CAP_CHOWN))
			return -EPERM;
		if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN))
			return -EPERM;
	}

	/*
	 * If the caller is modifying any aspect of the attributes, they
	 * are not creating a snapshot.  They need read permission on the
	 * file.
	 */
	if (!preserve) {
		error = inode_permission(inode, MAY_READ);
		if (error)
			return error;
	}

	mutex_lock(&inode->i_mutex);
	dquot_initialize(dir);
	error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
	mutex_unlock(&inode->i_mutex);
	if (!error)
		fsnotify_create(dir, new_dentry);
	return error;
}
/*
 * Most codes are copied from sys_linkat: resolve both user-supplied
 * paths, create the new dentry in the destination's parent, and call
 * ocfs2_vfs_reflink().
 */
int ocfs2_reflink_ioctl(struct inode *inode,
			const char __user *oldname,
			const char __user *newname,
			bool preserve)
{
	struct dentry *new_dentry;
	struct nameidata nd;
	struct path old_path;
	int error;
	char *to = NULL;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
		return -EOPNOTSUPP;

	error = user_path_at(AT_FDCWD, oldname, 0, &old_path);
	if (error) {
		mlog_errno(error);
		return error;
	}

	error = ocfs2_user_path_parent(newname, &nd, &to);
	if (error) {
		mlog_errno(error);
		goto out;
	}

	/* Both paths must live on the same mount. */
	error = -EXDEV;
	if (old_path.mnt != nd.path.mnt)
		goto out_release;

	/* lookup_create() returns with the parent's i_mutex held. */
	new_dentry = lookup_create(&nd, 0);
	error = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry)) {
		mlog_errno(error);
		goto out_unlock;
	}

	error = mnt_want_write(nd.path.mnt);
	if (error) {
		mlog_errno(error);
		goto out_dput;
	}

	error = ocfs2_vfs_reflink(old_path.dentry,
				  nd.path.dentry->d_inode,
				  new_dentry, preserve);
	mnt_drop_write(nd.path.mnt);
out_dput:
	dput(new_dentry);
out_unlock:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
out_release:
	path_put(&nd.path);
	putname(to);
out:
	path_put(&old_path);
	return error;
}
| gpl-2.0 |
tchaari/android_kernel_samsung_crespo | arch/arm/mach-ux500/board-mop500-u8500uib.c | 2327 | 2615 | /*
* Copyright (C) ST-Ericsson SA 2010
*
* Board data for the U8500 UIB, also known as the New UIB
* License terms: GNU General Public License (GPL), version 2
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/mfd/tc3589x.h>
#include <linux/input/matrix_keypad.h>
#include <../drivers/staging/ste_rmi4/synaptics_i2c_rmi4.h>
#include <mach/gpio.h>
#include <mach/irqs.h>
#include "board-mop500.h"
/*
 * Synaptics RMI4 touchscreen interface on the U8500 UIB
 */

/*
 * Descriptor structure.
 * Describes the number of i2c devices on the bus that speak RMI.
 */
static struct synaptics_rmi4_platform_data rmi4_i2c_dev_platformdata = {
	/* Touch interrupt is wired to GPIO 84; shared, falling edge. */
	.irq_number = NOMADIK_GPIO_TO_IRQ(84),
	.irq_type = (IRQF_TRIGGER_FALLING | IRQF_SHARED),
	/* Panel is mounted with the Y axis mirrored. */
	.x_flip = false,
	.y_flip = true,
	.regulator_en = false,
};

/* RMI4 controller at address 0x4B on I2C bus 3. */
static struct i2c_board_info __initdata mop500_i2c3_devices_u8500[] = {
	{
		I2C_BOARD_INFO("synaptics_rmi4_i2c", 0x4B),
		.platform_data = &rmi4_i2c_dev_platformdata,
	},
};
/*
 * TC35893 keypad expander
 */

/* Matrix keymap entries: KEY(row, column, keycode). */
static const unsigned int u8500_keymap[] = {
	KEY(3, 1, KEY_END),
	KEY(4, 1, KEY_POWER),
	KEY(6, 4, KEY_VOLUMEDOWN),
	KEY(4, 2, KEY_EMAIL),
	KEY(3, 3, KEY_RIGHT),
	KEY(2, 5, KEY_BACKSPACE),

	KEY(6, 7, KEY_MENU),
	KEY(5, 0, KEY_ENTER),
	KEY(4, 3, KEY_0),
	KEY(3, 4, KEY_DOT),
	KEY(5, 2, KEY_UP),
	KEY(3, 5, KEY_DOWN),

	KEY(4, 5, KEY_SEND),
	KEY(0, 5, KEY_BACK),
	KEY(6, 2, KEY_VOLUMEUP),
	KEY(1, 3, KEY_SPACE),
	KEY(7, 6, KEY_LEFT),
	KEY(5, 5, KEY_SEARCH),
};

static struct matrix_keymap_data u8500_keymap_data = {
	.keymap = u8500_keymap,
	.keymap_size = ARRAY_SIZE(u8500_keymap),
};

/* Keypad block configuration: matrix size, debounce and wakeup. */
static struct tc3589x_keypad_platform_data tc35893_data = {
	.krow = TC_KPD_ROWS,
	.kcol = TC_KPD_COLUMNS,
	.debounce_period = TC_KPD_DEBOUNCE_PERIOD,
	.settle_time = TC_KPD_SETTLE_TIME,
	.irqtype = IRQF_TRIGGER_FALLING,
	.enable_wakeup = true,
	.keymap_data = &u8500_keymap_data,
	.no_autorepeat = true,
};

/* Only the keypad block of the TC3589x is used on this board. */
static struct tc3589x_platform_data tc3589x_keypad_data = {
	.block = TC3589x_BLOCK_KEYPAD,
	.keypad = &tc35893_data,
	.irq_base = MOP500_EGPIO_IRQ_BASE,
};

/* TC3589x expander at address 0x44 on I2C bus 0; IRQ on GPIO 218. */
static struct i2c_board_info __initdata mop500_i2c0_devices_u8500[] = {
	{
		I2C_BOARD_INFO("tc3589x", 0x44),
		.platform_data = &tc3589x_keypad_data,
		.irq = NOMADIK_GPIO_TO_IRQ(218),
		.flags = I2C_CLIENT_WAKE,
	},
};
/*
 * Register the U8500 UIB's I2C devices: the RMI4 touchscreen on bus 3
 * and the TC3589x keypad expander on bus 0.
 */
void __init mop500_u8500uib_init(void)
{
	mop500_uib_i2c_add(3, mop500_i2c3_devices_u8500,
			ARRAY_SIZE(mop500_i2c3_devices_u8500));
	mop500_uib_i2c_add(0, mop500_i2c0_devices_u8500,
			ARRAY_SIZE(mop500_i2c0_devices_u8500));
}
| gpl-2.0 |
nitinkamble/x32-linux | arch/arm/mach-at91/at91cap9_devices.c | 2327 | 32082 | /*
* arch/arm/mach-at91/at91cap9_devices.c
*
* Copyright (C) 2007 Stelian Pop <stelian.pop@leadtechdesign.com>
* Copyright (C) 2007 Lead Tech Design <www.leadtechdesign.com>
* Copyright (C) 2007 Atmel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/i2c-gpio.h>
#include <video/atmel_lcdc.h>
#include <mach/board.h>
#include <mach/cpu.h>
#include <mach/gpio.h>
#include <mach/at91cap9.h>
#include <mach/at91cap9_matrix.h>
#include <mach/at91sam9_smc.h>
#include "generic.h"
/* --------------------------------------------------------------------
* USB Host
* -------------------------------------------------------------------- */
#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
static u64 ohci_dmamask = DMA_BIT_MASK(32);
static struct at91_usbh_data usbh_data;		/* copy of board data for the driver */

/* OHCI register window (1 MiB at the UHP base) and its interrupt. */
static struct resource usbh_resources[] = {
	[0] = {
		.start	= AT91CAP9_UHP_BASE,
		.end	= AT91CAP9_UHP_BASE + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_UHP,
		.end	= AT91CAP9_ID_UHP,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91_usbh_device = {
	.name		= "at91_ohci",
	.id		= -1,
	.dev		= {
		.dma_mask		= &ohci_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &usbh_data,
	},
	.resource	= usbh_resources,
	.num_resources	= ARRAY_SIZE(usbh_resources),
};
/*
 * Register the OHCI USB host controller.
 * @data: board port count and per-port VBUS GPIOs; NULL skips registration.
 * VBUS GPIOs are driven low (power off) until the driver enables them.
 */
void __init at91_add_device_usbh(struct at91_usbh_data *data)
{
	int i;

	if (!data)
		return;

	/* rev B silicon needs the UHP interrupt forced to level-high */
	if (cpu_is_at91cap9_revB())
		irq_set_irq_type(AT91CAP9_ID_UHP, IRQ_TYPE_LEVEL_HIGH);

	/* Enable VBus control for UHP ports */
	for (i = 0; i < data->ports; i++) {
		if (data->vbus_pin[i])
			at91_set_gpio_output(data->vbus_pin[i], 0);
	}

	usbh_data = *data;
	platform_device_register(&at91_usbh_device);
}
#else
void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
#endif
/* --------------------------------------------------------------------
* USB HS Device (Gadget)
* -------------------------------------------------------------------- */
#if defined(CONFIG_USB_GADGET_ATMEL_USBA) || defined(CONFIG_USB_GADGET_ATMEL_USBA_MODULE)
/* UDPHS FIFO window, register window, and interrupt. */
static struct resource usba_udc_resources[] = {
	[0] = {
		.start	= AT91CAP9_UDPHS_FIFO,
		.end	= AT91CAP9_UDPHS_FIFO + SZ_512K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_BASE_UDPHS,
		.end	= AT91CAP9_BASE_UDPHS + SZ_1K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[2] = {
		.start	= AT91CAP9_ID_UDPHS,
		.end	= AT91CAP9_ID_UDPHS,
		.flags	= IORESOURCE_IRQ,
	},
};

/* Shorthand for one usba endpoint descriptor entry. */
#define EP(nam, idx, maxpkt, maxbk, dma, isoc)			\
	[idx] = {						\
		.name		= nam,				\
		.index		= idx,				\
		.fifo_size	= maxpkt,			\
		.nr_banks	= maxbk,			\
		.can_dma	= dma,				\
		.can_isoc	= isoc,				\
	}

/* Hardware endpoint capabilities: name, index, fifo size, banks, dma, isoc. */
static struct usba_ep_data usba_udc_ep[] = {
	EP("ep0", 0, 64, 1, 0, 0),
	EP("ep1", 1, 1024, 3, 1, 1),
	EP("ep2", 2, 1024, 3, 1, 1),
	EP("ep3", 3, 1024, 2, 1, 1),
	EP("ep4", 4, 1024, 2, 1, 1),
	EP("ep5", 5, 1024, 2, 1, 0),
	EP("ep6", 6, 1024, 2, 1, 0),
	EP("ep7", 7, 1024, 2, 0, 0),
};

#undef EP

/*
 * pdata doesn't have room for any endpoints, so we need to
 * append room for the ones we need right after it.
 */
static struct {
	struct usba_platform_data pdata;
	struct usba_ep_data ep[8];	/* must hold ARRAY_SIZE(usba_udc_ep) entries */
} usba_udc_data;

static struct platform_device at91_usba_udc_device = {
	.name		= "atmel_usba_udc",
	.id		= -1,
	.dev		= {
		.platform_data	= &usba_udc_data.pdata,
	},
	.resource	= usba_udc_resources,
	.num_resources	= ARRAY_SIZE(usba_udc_resources),
};
/*
 * Register the high-speed USB device (gadget) controller.
 * @data: optional board data; only vbus_pin is consumed (positive = valid).
 * Copies the static endpoint table into the platform data before
 * registration; the pull-up is managed by the peripheral itself.
 */
void __init at91_add_device_usba(struct usba_platform_data *data)
{
	/* rev B needs a level-high IRQ and the UDPHS bypass lock in the matrix */
	if (cpu_is_at91cap9_revB()) {
		irq_set_irq_type(AT91CAP9_ID_UDPHS, IRQ_TYPE_LEVEL_HIGH);
		at91_sys_write(AT91_MATRIX_UDPHS, AT91_MATRIX_SELECT_UDPHS |
					      AT91_MATRIX_UDPHS_BYPASS_LOCK);
	}
	else
		at91_sys_write(AT91_MATRIX_UDPHS, AT91_MATRIX_SELECT_UDPHS);

	/*
	 * Invalid pins are 0 on AT91, but the usba driver is shared
	 * with AVR32, which use negative values instead. Once/if
	 * gpio_is_valid() is ported to AT91, revisit this code.
	 */
	usba_udc_data.pdata.vbus_pin = -EINVAL;
	usba_udc_data.pdata.num_ep = ARRAY_SIZE(usba_udc_ep);
	memcpy(usba_udc_data.ep, usba_udc_ep, sizeof(usba_udc_ep));

	if (data && data->vbus_pin > 0) {
		at91_set_gpio_input(data->vbus_pin, 0);
		at91_set_deglitch(data->vbus_pin, 1);
		usba_udc_data.pdata.vbus_pin = data->vbus_pin;
	}

	/* Pullup pin is handled internally by USB device peripheral */
	platform_device_register(&at91_usba_udc_device);
}
#else
void __init at91_add_device_usba(struct usba_platform_data *data) {}
#endif
/* --------------------------------------------------------------------
* Ethernet
* -------------------------------------------------------------------- */
#if defined(CONFIG_MACB) || defined(CONFIG_MACB_MODULE)
static u64 eth_dmamask = DMA_BIT_MASK(32);
static struct at91_eth_data eth_data;		/* copy of board data for the macb driver */

/* EMAC register window and interrupt. */
static struct resource eth_resources[] = {
	[0] = {
		.start	= AT91CAP9_BASE_EMAC,
		.end	= AT91CAP9_BASE_EMAC + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_EMAC,
		.end	= AT91CAP9_ID_EMAC,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91cap9_eth_device = {
	.name		= "macb",
	.id		= -1,
	.dev		= {
		/* was corrupted to "ð_dmamask"/"ð_data" ("&eth" decoded
		 * as an HTML entity) — restore the address-of expressions */
		.dma_mask		= &eth_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &eth_data,
	},
	.resource	= eth_resources,
	.num_resources	= ARRAY_SIZE(eth_resources),
};
/*
 * Register the EMAC (macb) Ethernet controller.
 * @data: board data (PHY IRQ pin, MII vs RMII mode); NULL skips registration.
 * Claims the shared MII/RMII pins, plus the extra MII-only pins when
 * is_rmii is false.
 */
void __init at91_add_device_eth(struct at91_eth_data *data)
{
	if (!data)
		return;

	if (data->phy_irq_pin) {
		at91_set_gpio_input(data->phy_irq_pin, 0);
		at91_set_deglitch(data->phy_irq_pin, 1);
	}

	/* Pins used for MII and RMII */
	at91_set_A_periph(AT91_PIN_PB21, 0);	/* ETXCK_EREFCK */
	at91_set_A_periph(AT91_PIN_PB22, 0);	/* ERXDV */
	at91_set_A_periph(AT91_PIN_PB25, 0);	/* ERX0 */
	at91_set_A_periph(AT91_PIN_PB26, 0);	/* ERX1 */
	at91_set_A_periph(AT91_PIN_PB27, 0);	/* ERXER */
	at91_set_A_periph(AT91_PIN_PB28, 0);	/* ETXEN */
	at91_set_A_periph(AT91_PIN_PB23, 0);	/* ETX0 */
	at91_set_A_periph(AT91_PIN_PB24, 0);	/* ETX1 */
	at91_set_A_periph(AT91_PIN_PB30, 0);	/* EMDIO */
	at91_set_A_periph(AT91_PIN_PB29, 0);	/* EMDC */

	if (!data->is_rmii) {
		at91_set_B_periph(AT91_PIN_PC25, 0);	/* ECRS */
		at91_set_B_periph(AT91_PIN_PC26, 0);	/* ECOL */
		at91_set_B_periph(AT91_PIN_PC22, 0);	/* ERX2 */
		at91_set_B_periph(AT91_PIN_PC23, 0);	/* ERX3 */
		at91_set_B_periph(AT91_PIN_PC27, 0);	/* ERXCK */
		at91_set_B_periph(AT91_PIN_PC20, 0);	/* ETX2 */
		at91_set_B_periph(AT91_PIN_PC21, 0);	/* ETX3 */
		at91_set_B_periph(AT91_PIN_PC24, 0);	/* ETXER */
	}

	eth_data = *data;
	platform_device_register(&at91cap9_eth_device);
}
#else
void __init at91_add_device_eth(struct at91_eth_data *data) {}
#endif
/* --------------------------------------------------------------------
* MMC / SD
* -------------------------------------------------------------------- */
#if defined(CONFIG_MMC_AT91) || defined(CONFIG_MMC_AT91_MODULE)
static u64 mmc_dmamask = DMA_BIT_MASK(32);
static struct at91_mmc_data mmc0_data, mmc1_data;	/* per-controller board data copies */

/* MCI0 register window and interrupt. */
static struct resource mmc0_resources[] = {
	[0] = {
		.start	= AT91CAP9_BASE_MCI0,
		.end	= AT91CAP9_BASE_MCI0 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_MCI0,
		.end	= AT91CAP9_ID_MCI0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91cap9_mmc0_device = {
	.name		= "at91_mci",
	.id		= 0,
	.dev		= {
		.dma_mask		= &mmc_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &mmc0_data,
	},
	.resource	= mmc0_resources,
	.num_resources	= ARRAY_SIZE(mmc0_resources),
};

/* MCI1 register window and interrupt. */
static struct resource mmc1_resources[] = {
	[0] = {
		.start	= AT91CAP9_BASE_MCI1,
		.end	= AT91CAP9_BASE_MCI1 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_MCI1,
		.end	= AT91CAP9_ID_MCI1,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91cap9_mmc1_device = {
	.name		= "at91_mci",
	.id		= 1,
	.dev		= {
		.dma_mask		= &mmc_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &mmc1_data,
	},
	.resource	= mmc1_resources,
	.num_resources	= ARRAY_SIZE(mmc1_resources),
};
/*
 * Register one MCI (MMC/SD) controller.
 * @mmc_id: 0 selects MCI0, anything else MCI1.
 * @data: board data (card-detect / write-protect / power GPIOs, bus width);
 *        NULL skips registration.
 */
void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
{
	if (!data)
		return;

	/* input/irq */
	if (data->det_pin) {
		at91_set_gpio_input(data->det_pin, 1);
		at91_set_deglitch(data->det_pin, 1);
	}
	if (data->wp_pin)
		at91_set_gpio_input(data->wp_pin, 1);
	if (data->vcc_pin)
		at91_set_gpio_output(data->vcc_pin, 0);

	if (mmc_id == 0) {		/* MCI0 */
		/* CLK */
		at91_set_A_periph(AT91_PIN_PA2, 0);

		/* CMD */
		at91_set_A_periph(AT91_PIN_PA1, 1);

		/* DAT0, maybe DAT1..DAT3 */
		at91_set_A_periph(AT91_PIN_PA0, 1);
		if (data->wire4) {
			at91_set_A_periph(AT91_PIN_PA3, 1);
			at91_set_A_periph(AT91_PIN_PA4, 1);
			at91_set_A_periph(AT91_PIN_PA5, 1);
		}

		mmc0_data = *data;
		platform_device_register(&at91cap9_mmc0_device);
	} else {			/* MCI1 */
		/* CLK */
		at91_set_A_periph(AT91_PIN_PA16, 0);

		/* CMD */
		at91_set_A_periph(AT91_PIN_PA17, 1);

		/* DAT0, maybe DAT1..DAT3 */
		at91_set_A_periph(AT91_PIN_PA18, 1);
		if (data->wire4) {
			at91_set_A_periph(AT91_PIN_PA19, 1);
			at91_set_A_periph(AT91_PIN_PA20, 1);
			at91_set_A_periph(AT91_PIN_PA21, 1);
		}

		mmc1_data = *data;
		platform_device_register(&at91cap9_mmc1_device);
	}
}
#else
void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {}
#endif
/* --------------------------------------------------------------------
* NAND / SmartMedia
* -------------------------------------------------------------------- */
#if defined(CONFIG_MTD_NAND_ATMEL) || defined(CONFIG_MTD_NAND_ATMEL_MODULE)
static struct atmel_nand_data nand_data;	/* copy of board data for the driver */

#define NAND_BASE	AT91_CHIPSELECT_3	/* NAND sits on EBI chip-select 3 */

/* NAND data window on CS3 plus the hardware ECC controller registers. */
static struct resource nand_resources[] = {
	[0] = {
		.start	= NAND_BASE,
		.end	= NAND_BASE + SZ_256M - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91_BASE_SYS + AT91_ECC,
		.end	= AT91_BASE_SYS + AT91_ECC + SZ_512 - 1,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device at91cap9_nand_device = {
	.name		= "atmel_nand",
	.id		= -1,
	.dev		= {
		.platform_data	= &nand_data,
	},
	.resource	= nand_resources,
	.num_resources	= ARRAY_SIZE(nand_resources),
};
/*
 * Register the NAND/SmartMedia controller.
 * @data: board data (enable / ready-busy / detect GPIOs); NULL skips
 *        registration. Routes EBI CS3 to the SMC SmartMedia logic first.
 */
void __init at91_add_device_nand(struct atmel_nand_data *data)
{
	unsigned long csa;

	if (!data)
		return;

	/* read-modify-write: preserve the other chip-select assignments */
	csa = at91_sys_read(AT91_MATRIX_EBICSA);
	at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_CS3A_SMC_SMARTMEDIA);

	/* enable pin */
	if (data->enable_pin)
		at91_set_gpio_output(data->enable_pin, 1);

	/* ready/busy pin */
	if (data->rdy_pin)
		at91_set_gpio_input(data->rdy_pin, 1);

	/* card detect pin */
	if (data->det_pin)
		at91_set_gpio_input(data->det_pin, 1);

	nand_data = *data;
	platform_device_register(&at91cap9_nand_device);
}
#else
void __init at91_add_device_nand(struct atmel_nand_data *data) {}
#endif
/* --------------------------------------------------------------------
* TWI (i2c)
* -------------------------------------------------------------------- */
/*
* Prefer the GPIO code since the TWI controller isn't robust
* (gets overruns and underruns under load) and can only issue
* repeated STARTs in one scenario (the driver doesn't yet handle them).
*/
#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)
/* Bit-banged I2C over the TWI pins; both lines are open-drain. */
static struct i2c_gpio_platform_data pdata = {
	.sda_pin		= AT91_PIN_PB4,
	.sda_is_open_drain	= 1,
	.scl_pin		= AT91_PIN_PB5,
	.scl_is_open_drain	= 1,
	.udelay			= 2,		/* ~100 kHz */
};

static struct platform_device at91cap9_twi_device = {
	.name			= "i2c-gpio",
	.id			= -1,
	.dev.platform_data	= &pdata,
};
/*
 * Register the GPIO-bit-banged I2C bus (bus 0) and its board devices.
 * The TWI pins are switched to GPIO mode with multi-drive (open-drain).
 */
void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
{
	at91_set_GPIO_periph(AT91_PIN_PB4, 1);		/* TWD (SDA) */
	at91_set_multi_drive(AT91_PIN_PB4, 1);

	at91_set_GPIO_periph(AT91_PIN_PB5, 1);		/* TWCK (SCL) */
	at91_set_multi_drive(AT91_PIN_PB5, 1);

	i2c_register_board_info(0, devices, nr_devices);
	platform_device_register(&at91cap9_twi_device);
}
#elif defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE)
/* Hardware TWI register window and interrupt. */
static struct resource twi_resources[] = {
	[0] = {
		.start	= AT91CAP9_BASE_TWI,
		.end	= AT91CAP9_BASE_TWI + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_TWI,
		.end	= AT91CAP9_ID_TWI,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91cap9_twi_device = {
	.name		= "at91_i2c",
	.id		= -1,
	.resource	= twi_resources,
	.num_resources	= ARRAY_SIZE(twi_resources),
};
/*
 * Register the hardware TWI controller as I2C bus 0 and its board devices.
 * Pins use peripheral B with multi-drive (open-drain) enabled.
 */
void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
{
	/* pins used for TWI interface */
	at91_set_B_periph(AT91_PIN_PB4, 0);		/* TWD */
	at91_set_multi_drive(AT91_PIN_PB4, 1);

	at91_set_B_periph(AT91_PIN_PB5, 0);		/* TWCK */
	at91_set_multi_drive(AT91_PIN_PB5, 1);

	i2c_register_board_info(0, devices, nr_devices);
	platform_device_register(&at91cap9_twi_device);
}
#else
void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) {}
#endif
/* --------------------------------------------------------------------
* SPI
* -------------------------------------------------------------------- */
#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
static u64 spi_dmamask = DMA_BIT_MASK(32);

/* SPI0 register window and interrupt. */
static struct resource spi0_resources[] = {
	[0] = {
		.start	= AT91CAP9_BASE_SPI0,
		.end	= AT91CAP9_BASE_SPI0 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_SPI0,
		.end	= AT91CAP9_ID_SPI0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91cap9_spi0_device = {
	.name		= "atmel_spi",
	.id		= 0,
	.dev		= {
		.dma_mask		= &spi_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= spi0_resources,
	.num_resources	= ARRAY_SIZE(spi0_resources),
};

/* default chip-select GPIOs for SPI0, indexed by chip_select */
static const unsigned spi0_standard_cs[4] = { AT91_PIN_PA5, AT91_PIN_PA3, AT91_PIN_PD0, AT91_PIN_PD1 };

/* SPI1 register window and interrupt. */
static struct resource spi1_resources[] = {
	[0] = {
		.start	= AT91CAP9_BASE_SPI1,
		.end	= AT91CAP9_BASE_SPI1 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_SPI1,
		.end	= AT91CAP9_ID_SPI1,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91cap9_spi1_device = {
	.name		= "atmel_spi",
	.id		= 1,
	.dev		= {
		.dma_mask		= &spi_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= spi1_resources,
	.num_resources	= ARRAY_SIZE(spi1_resources),
};

/* default chip-select GPIOs for SPI1, indexed by chip_select */
static const unsigned spi1_standard_cs[4] = { AT91_PIN_PB15, AT91_PIN_PB16, AT91_PIN_PB17, AT91_PIN_PB18 };
/*
 * Register the SPI controllers and their slave devices.
 * @devices: board slave table; each entry's controller_data may supply a
 *           GPIO chip-select, otherwise one is picked from the per-bus
 *           spiN_standard_cs[] table by chip_select.
 * @nr_devices: number of entries in @devices.
 * Only controllers that actually have slaves attached are registered.
 */
void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
{
	int i;
	unsigned long cs_pin;
	short enable_spi0 = 0;
	short enable_spi1 = 0;

	/* Choose SPI chip-selects */
	for (i = 0; i < nr_devices; i++) {
		if (devices[i].controller_data)
			cs_pin = (unsigned long) devices[i].controller_data;
		else if (devices[i].bus_num == 0)
			cs_pin = spi0_standard_cs[devices[i].chip_select];
		else
			cs_pin = spi1_standard_cs[devices[i].chip_select];

		if (devices[i].bus_num == 0)
			enable_spi0 = 1;
		else
			enable_spi1 = 1;

		/* enable chip-select pin (deasserted high: CS is active-low) */
		at91_set_gpio_output(cs_pin, 1);

		/* pass chip-select pin to driver */
		devices[i].controller_data = (void *) cs_pin;
	}

	spi_register_board_info(devices, nr_devices);

	/* Configure SPI bus(es) */
	if (enable_spi0) {
		at91_set_B_periph(AT91_PIN_PA0, 0);	/* SPI0_MISO */
		at91_set_B_periph(AT91_PIN_PA1, 0);	/* SPI0_MOSI */
		at91_set_B_periph(AT91_PIN_PA2, 0);	/* SPI0_SPCK */

		platform_device_register(&at91cap9_spi0_device);
	}
	if (enable_spi1) {
		at91_set_A_periph(AT91_PIN_PB12, 0);	/* SPI1_MISO */
		at91_set_A_periph(AT91_PIN_PB13, 0);	/* SPI1_MOSI */
		at91_set_A_periph(AT91_PIN_PB14, 0);	/* SPI1_SPCK */

		platform_device_register(&at91cap9_spi1_device);
	}
}
#else
void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) {}
#endif
/* --------------------------------------------------------------------
* Timer/Counter block
* -------------------------------------------------------------------- */
#ifdef CONFIG_ATMEL_TCLIB
/* Timer/Counter block 0 register window and its (shared) interrupt. */
static struct resource tcb_resources[] = {
	[0] = {
		.start	= AT91CAP9_BASE_TCB0,
		.end	= AT91CAP9_BASE_TCB0 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_TCB,
		.end	= AT91CAP9_ID_TCB,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91cap9_tcb_device = {
	.name		= "atmel_tcb",
	.id		= 0,
	.resource	= tcb_resources,
	.num_resources	= ARRAY_SIZE(tcb_resources),
};

/* Register the TC block; called unconditionally from at91_add_standard_devices(). */
static void __init at91_add_device_tc(void)
{
	platform_device_register(&at91cap9_tcb_device);
}
#else
static void __init at91_add_device_tc(void) { }
#endif
/* --------------------------------------------------------------------
* RTT
* -------------------------------------------------------------------- */
/* Real-Time Timer registers live in the System Controller block. */
static struct resource rtt_resources[] = {
	{
		.start	= AT91_BASE_SYS + AT91_RTT,
		.end	= AT91_BASE_SYS + AT91_RTT + SZ_16 - 1,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device at91cap9_rtt_device = {
	.name		= "at91_rtt",
	.id		= 0,
	.resource	= rtt_resources,
	.num_resources	= ARRAY_SIZE(rtt_resources),
};

/* Register the RTT; always present, no board-specific setup needed. */
static void __init at91_add_device_rtt(void)
{
	platform_device_register(&at91cap9_rtt_device);
}
/* --------------------------------------------------------------------
* Watchdog
* -------------------------------------------------------------------- */
#if defined(CONFIG_AT91SAM9X_WATCHDOG) || defined(CONFIG_AT91SAM9X_WATCHDOG_MODULE)
/* Watchdog driver finds its registers itself; no resources needed. */
static struct platform_device at91cap9_wdt_device = {
	.name		= "at91_wdt",
	.id		= -1,
	.num_resources	= 0,
};

/* Register the watchdog; called from at91_add_standard_devices(). */
static void __init at91_add_device_watchdog(void)
{
	platform_device_register(&at91cap9_wdt_device);
}
#else
static void __init at91_add_device_watchdog(void) {}
#endif
/* --------------------------------------------------------------------
* PWM
* --------------------------------------------------------------------*/
#if defined(CONFIG_ATMEL_PWM)
static u32 pwm_mask;	/* bitmask of enabled PWM channels, handed to the driver */

/* PWM controller register window and interrupt. */
static struct resource pwm_resources[] = {
	[0] = {
		.start	= AT91CAP9_BASE_PWMC,
		.end	= AT91CAP9_BASE_PWMC + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_PWMC,
		.end	= AT91CAP9_ID_PWMC,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91cap9_pwm0_device = {
	.name	= "atmel_pwm",
	.id	= -1,
	.dev	= {
		.platform_data		= &pwm_mask,
	},
	.resource	= pwm_resources,
	.num_resources	= ARRAY_SIZE(pwm_resources),
};

/*
 * Register the PWM controller.
 * @mask: bitmask of AT91_PWM0..AT91_PWM3 channels whose output pins
 *        should be claimed.
 */
void __init at91_add_device_pwm(u32 mask)
{
	if (mask & (1 << AT91_PWM0))
		at91_set_A_periph(AT91_PIN_PB19, 1);	/* enable PWM0 */

	if (mask & (1 << AT91_PWM1))
		at91_set_B_periph(AT91_PIN_PB8, 1);	/* enable PWM1 */

	if (mask & (1 << AT91_PWM2))
		at91_set_B_periph(AT91_PIN_PC29, 1);	/* enable PWM2 */

	if (mask & (1 << AT91_PWM3))
		at91_set_B_periph(AT91_PIN_PA11, 1);	/* enable PWM3 */

	pwm_mask = mask;

	platform_device_register(&at91cap9_pwm0_device);
}
#else
void __init at91_add_device_pwm(u32 mask) {}
#endif
/* --------------------------------------------------------------------
* AC97
* -------------------------------------------------------------------- */
#if defined(CONFIG_SND_ATMEL_AC97C) || defined(CONFIG_SND_ATMEL_AC97C_MODULE)
static u64 ac97_dmamask = DMA_BIT_MASK(32);
static struct ac97c_platform_data ac97_data;	/* copy of board data for the driver */

/* AC97 controller register window and interrupt. */
static struct resource ac97_resources[] = {
	[0] = {
		.start	= AT91CAP9_BASE_AC97C,
		.end	= AT91CAP9_BASE_AC97C + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_AC97C,
		.end	= AT91CAP9_ID_AC97C,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91cap9_ac97_device = {
	.name		= "atmel_ac97c",
	.id		= 1,
	.dev		= {
		.dma_mask		= &ac97_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &ac97_data,
	},
	.resource	= ac97_resources,
	.num_resources	= ARRAY_SIZE(ac97_resources),
};

/*
 * Register the AC97 controller.
 * @data: board data (codec reset GPIO); NULL skips registration.
 */
void __init at91_add_device_ac97(struct ac97c_platform_data *data)
{
	if (!data)
		return;

	at91_set_A_periph(AT91_PIN_PA6, 0);	/* AC97FS */
	at91_set_A_periph(AT91_PIN_PA7, 0);	/* AC97CK */
	at91_set_A_periph(AT91_PIN_PA8, 0);	/* AC97TX */
	at91_set_A_periph(AT91_PIN_PA9, 0);	/* AC97RX */

	/* reset */
	if (data->reset_pin)
		at91_set_gpio_output(data->reset_pin, 0);

	ac97_data = *data;
	platform_device_register(&at91cap9_ac97_device);
}
#else
void __init at91_add_device_ac97(struct ac97c_platform_data *data) {}
#endif
/* --------------------------------------------------------------------
* LCD Controller
* -------------------------------------------------------------------- */
#if defined(CONFIG_FB_ATMEL) || defined(CONFIG_FB_ATMEL_MODULE)
static u64 lcdc_dmamask = DMA_BIT_MASK(32);
static struct atmel_lcdfb_info lcdc_data;	/* copy of board data for the driver */

/* LCD controller register window and interrupt. */
static struct resource lcdc_resources[] = {
	[0] = {
		.start	= AT91CAP9_LCDC_BASE,
		.end	= AT91CAP9_LCDC_BASE + SZ_4K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_LCDC,
		.end	= AT91CAP9_ID_LCDC,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91_lcdc_device = {
	.name		= "atmel_lcdfb",
	.id		= 0,
	.dev		= {
		.dma_mask		= &lcdc_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &lcdc_data,
	},
	.resource	= lcdc_resources,
	.num_resources	= ARRAY_SIZE(lcdc_resources),
};
/*
 * Register the LCD controller (atmel_lcdfb).
 * @data: board framebuffer configuration; NULL skips registration.
 * Claims the sync/clock/data pins; only the data lines routed here are
 * configured (LCDD0/1, 8/9, 16/17 are not muxed by this function).
 */
void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
{
	if (!data)
		return;

	/* rev B silicon needs the LCDC interrupt forced to level-high */
	if (cpu_is_at91cap9_revB())
		irq_set_irq_type(AT91CAP9_ID_LCDC, IRQ_TYPE_LEVEL_HIGH);

	at91_set_A_periph(AT91_PIN_PC1, 0);	/* LCDHSYNC */
	at91_set_A_periph(AT91_PIN_PC2, 0);	/* LCDDOTCK */
	at91_set_A_periph(AT91_PIN_PC3, 0);	/* LCDDEN */
	at91_set_B_periph(AT91_PIN_PB9, 0);	/* LCDCC */
	at91_set_A_periph(AT91_PIN_PC6, 0);	/* LCDD2 */
	at91_set_A_periph(AT91_PIN_PC7, 0);	/* LCDD3 */
	at91_set_A_periph(AT91_PIN_PC8, 0);	/* LCDD4 */
	at91_set_A_periph(AT91_PIN_PC9, 0);	/* LCDD5 */
	at91_set_A_periph(AT91_PIN_PC10, 0);	/* LCDD6 */
	at91_set_A_periph(AT91_PIN_PC11, 0);	/* LCDD7 */
	at91_set_A_periph(AT91_PIN_PC14, 0);	/* LCDD10 */
	at91_set_A_periph(AT91_PIN_PC15, 0);	/* LCDD11 */
	at91_set_A_periph(AT91_PIN_PC16, 0);	/* LCDD12 */
	at91_set_A_periph(AT91_PIN_PC17, 0);	/* LCDD13 */
	at91_set_A_periph(AT91_PIN_PC18, 0);	/* LCDD14 */
	at91_set_A_periph(AT91_PIN_PC19, 0);	/* LCDD15 */
	at91_set_A_periph(AT91_PIN_PC22, 0);	/* LCDD18 */
	at91_set_A_periph(AT91_PIN_PC23, 0);	/* LCDD19 */
	at91_set_A_periph(AT91_PIN_PC24, 0);	/* LCDD20 */
	at91_set_A_periph(AT91_PIN_PC25, 0);	/* LCDD21 */
	at91_set_A_periph(AT91_PIN_PC26, 0);	/* LCDD22 */
	at91_set_A_periph(AT91_PIN_PC27, 0);	/* LCDD23 */

	lcdc_data = *data;
	platform_device_register(&at91_lcdc_device);
}
#else
void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data) {}
#endif
/* --------------------------------------------------------------------
* SSC -- Synchronous Serial Controller
* -------------------------------------------------------------------- */
#if defined(CONFIG_ATMEL_SSC) || defined(CONFIG_ATMEL_SSC_MODULE)
static u64 ssc0_dmamask = DMA_BIT_MASK(32);

/* SSC0 register window and interrupt. */
static struct resource ssc0_resources[] = {
	[0] = {
		.start	= AT91CAP9_BASE_SSC0,
		.end	= AT91CAP9_BASE_SSC0 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_SSC0,
		.end	= AT91CAP9_ID_SSC0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91cap9_ssc0_device = {
	.name	= "ssc",
	.id	= 0,
	.dev	= {
		.dma_mask		= &ssc0_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= ssc0_resources,
	.num_resources	= ARRAY_SIZE(ssc0_resources),
};
/*
 * Claim the PIO B pins for the SSC0 signals requested in @pins
 * (a bitmask of ATMEL_SSC_* flags), switching each to peripheral A.
 */
static inline void configure_ssc0_pins(unsigned pins)
{
	static const struct {
		unsigned mask;	/* ATMEL_SSC_* signal flag */
		unsigned pin;	/* corresponding PIO B pin */
	} ssc0_pin_map[] = {
		{ ATMEL_SSC_TF, AT91_PIN_PB0 },
		{ ATMEL_SSC_TK, AT91_PIN_PB1 },
		{ ATMEL_SSC_TD, AT91_PIN_PB2 },
		{ ATMEL_SSC_RD, AT91_PIN_PB3 },
		{ ATMEL_SSC_RK, AT91_PIN_PB4 },
		{ ATMEL_SSC_RF, AT91_PIN_PB5 },
	};
	unsigned i;

	for (i = 0; i < sizeof(ssc0_pin_map) / sizeof(ssc0_pin_map[0]); i++)
		if (pins & ssc0_pin_map[i].mask)
			at91_set_A_periph(ssc0_pin_map[i].pin, 1);
}
static u64 ssc1_dmamask = DMA_BIT_MASK(32);

/* SSC1 register window and interrupt. */
static struct resource ssc1_resources[] = {
	[0] = {
		.start	= AT91CAP9_BASE_SSC1,
		.end	= AT91CAP9_BASE_SSC1 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_SSC1,
		.end	= AT91CAP9_ID_SSC1,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91cap9_ssc1_device = {
	.name	= "ssc",
	.id	= 1,
	.dev	= {
		.dma_mask		= &ssc1_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= ssc1_resources,
	.num_resources	= ARRAY_SIZE(ssc1_resources),
};
/*
 * Claim the PIO B pins for the SSC1 signals requested in @pins
 * (a bitmask of ATMEL_SSC_* flags), switching each to peripheral A.
 */
static inline void configure_ssc1_pins(unsigned pins)
{
	static const struct {
		unsigned mask;	/* ATMEL_SSC_* signal flag */
		unsigned pin;	/* corresponding PIO B pin */
	} ssc1_pin_map[] = {
		{ ATMEL_SSC_TF, AT91_PIN_PB6 },
		{ ATMEL_SSC_TK, AT91_PIN_PB7 },
		{ ATMEL_SSC_TD, AT91_PIN_PB8 },
		{ ATMEL_SSC_RD, AT91_PIN_PB9 },
		{ ATMEL_SSC_RK, AT91_PIN_PB10 },
		{ ATMEL_SSC_RF, AT91_PIN_PB11 },
	};
	unsigned i;

	for (i = 0; i < sizeof(ssc1_pin_map) / sizeof(ssc1_pin_map[0]); i++)
		if (pins & ssc1_pin_map[i].mask)
			at91_set_A_periph(ssc1_pin_map[i].pin, 1);
}
/*
* SSC controllers are accessed through library code, instead of any
* kind of all-singing/all-dancing driver. For example one could be
* used by a particular I2S audio codec's driver, while another one
* on the same system might be used by a custom data capture driver.
*/
/*
 * Register one SSC controller.
 * @id: peripheral ID (AT91CAP9_ID_SSC0/1); anything else is silently ignored.
 * @pins: ATMEL_SSC_* flags selecting which signals to route to pins.
 */
void __init at91_add_device_ssc(unsigned id, unsigned pins)
{
	struct platform_device *pdev;

	/*
	 * NOTE: caller is responsible for passing information matching
	 * "pins" to whatever will be using each particular controller.
	 */
	switch (id) {
	case AT91CAP9_ID_SSC0:
		pdev = &at91cap9_ssc0_device;
		configure_ssc0_pins(pins);
		break;
	case AT91CAP9_ID_SSC1:
		pdev = &at91cap9_ssc1_device;
		configure_ssc1_pins(pins);
		break;
	default:
		return;
	}

	platform_device_register(pdev);
}
#else
void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
#endif
/* --------------------------------------------------------------------
* UART
* -------------------------------------------------------------------- */
#if defined(CONFIG_SERIAL_ATMEL)
/* DBGU registers (already ioremapped in the system area) and shared system IRQ. */
static struct resource dbgu_resources[] = {
	[0] = {
		.start	= AT91_VA_BASE_SYS + AT91_DBGU,
		.end	= AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91_ID_SYS,
		.end	= AT91_ID_SYS,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct atmel_uart_data dbgu_data = {
	.use_dma_tx	= 0,
	.use_dma_rx	= 0,		/* DBGU not capable of receive DMA */
	.regs		= (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
};

static u64 dbgu_dmamask = DMA_BIT_MASK(32);

static struct platform_device at91cap9_dbgu_device = {
	.name		= "atmel_usart",
	.id		= 0,
	.dev		= {
		.dma_mask		= &dbgu_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &dbgu_data,
	},
	.resource	= dbgu_resources,
	.num_resources	= ARRAY_SIZE(dbgu_resources),
};

/* Route the DBGU RX/TX pins (TX with pull-up enabled). */
static inline void configure_dbgu_pins(void)
{
	at91_set_A_periph(AT91_PIN_PC30, 0);		/* DRXD */
	at91_set_A_periph(AT91_PIN_PC31, 1);		/* DTXD */
}
/* USART0 register window and interrupt. */
static struct resource uart0_resources[] = {
	[0] = {
		.start	= AT91CAP9_BASE_US0,
		.end	= AT91CAP9_BASE_US0 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_US0,
		.end	= AT91CAP9_ID_US0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct atmel_uart_data uart0_data = {
	.use_dma_tx	= 1,
	.use_dma_rx	= 1,
};

static u64 uart0_dmamask = DMA_BIT_MASK(32);

static struct platform_device at91cap9_uart0_device = {
	.name		= "atmel_usart",
	.id		= 1,
	.dev		= {
		.dma_mask		= &uart0_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &uart0_data,
	},
	.resource	= uart0_resources,
	.num_resources	= ARRAY_SIZE(uart0_resources),
};

/* Route USART0 pins; RTS/CTS only when requested via ATMEL_UART_* flags. */
static inline void configure_usart0_pins(unsigned pins)
{
	at91_set_A_periph(AT91_PIN_PA22, 1);		/* TXD0 */
	at91_set_A_periph(AT91_PIN_PA23, 0);		/* RXD0 */

	if (pins & ATMEL_UART_RTS)
		at91_set_A_periph(AT91_PIN_PA24, 0);	/* RTS0 */
	if (pins & ATMEL_UART_CTS)
		at91_set_A_periph(AT91_PIN_PA25, 0);	/* CTS0 */
}
/* USART1 register window and interrupt. */
static struct resource uart1_resources[] = {
	[0] = {
		.start	= AT91CAP9_BASE_US1,
		.end	= AT91CAP9_BASE_US1 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_US1,
		.end	= AT91CAP9_ID_US1,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct atmel_uart_data uart1_data = {
	.use_dma_tx	= 1,
	.use_dma_rx	= 1,
};

static u64 uart1_dmamask = DMA_BIT_MASK(32);

static struct platform_device at91cap9_uart1_device = {
	.name		= "atmel_usart",
	.id		= 2,
	.dev		= {
		.dma_mask		= &uart1_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &uart1_data,
	},
	.resource	= uart1_resources,
	.num_resources	= ARRAY_SIZE(uart1_resources),
};

/* Route USART1 pins; RTS/CTS (peripheral B) only when requested. */
static inline void configure_usart1_pins(unsigned pins)
{
	at91_set_A_periph(AT91_PIN_PD0, 1);		/* TXD1 */
	at91_set_A_periph(AT91_PIN_PD1, 0);		/* RXD1 */

	if (pins & ATMEL_UART_RTS)
		at91_set_B_periph(AT91_PIN_PD7, 0);	/* RTS1 */
	if (pins & ATMEL_UART_CTS)
		at91_set_B_periph(AT91_PIN_PD8, 0);	/* CTS1 */
}
/* USART2 register window and interrupt. */
static struct resource uart2_resources[] = {
	[0] = {
		.start	= AT91CAP9_BASE_US2,
		.end	= AT91CAP9_BASE_US2 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91CAP9_ID_US2,
		.end	= AT91CAP9_ID_US2,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct atmel_uart_data uart2_data = {
	.use_dma_tx	= 1,
	.use_dma_rx	= 1,
};

static u64 uart2_dmamask = DMA_BIT_MASK(32);

static struct platform_device at91cap9_uart2_device = {
	.name		= "atmel_usart",
	.id		= 3,
	.dev		= {
		.dma_mask		= &uart2_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &uart2_data,
	},
	.resource	= uart2_resources,
	.num_resources	= ARRAY_SIZE(uart2_resources),
};

/* Route USART2 pins; RTS/CTS (peripheral B) only when requested. */
static inline void configure_usart2_pins(unsigned pins)
{
	at91_set_A_periph(AT91_PIN_PD2, 1);		/* TXD2 */
	at91_set_A_periph(AT91_PIN_PD3, 0);		/* RXD2 */

	if (pins & ATMEL_UART_RTS)
		at91_set_B_periph(AT91_PIN_PD5, 0);	/* RTS2 */
	if (pins & ATMEL_UART_CTS)
		at91_set_B_periph(AT91_PIN_PD6, 0);	/* CTS2 */
}
static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART];	/* the UARTs to use */
struct platform_device *atmel_default_console_device;	/* the serial console device */

/*
 * Map one USART (or the DBGU) onto a serial port number and claim its pins.
 * @id: peripheral ID, or 0 for the DBGU; unknown IDs are silently ignored.
 * @portnr: port number the device should appear as; out-of-range values
 *          still configure the pins but register nothing.
 * @pins: ATMEL_UART_* flags for optional modem-control lines.
 */
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
	struct platform_device *pdev;
	struct atmel_uart_data *pdata;

	switch (id) {
		case 0:		/* DBGU */
			pdev = &at91cap9_dbgu_device;
			configure_dbgu_pins();
			break;
		case AT91CAP9_ID_US0:
			pdev = &at91cap9_uart0_device;
			configure_usart0_pins(pins);
			break;
		case AT91CAP9_ID_US1:
			pdev = &at91cap9_uart1_device;
			configure_usart1_pins(pins);
			break;
		case AT91CAP9_ID_US2:
			pdev = &at91cap9_uart2_device;
			configure_usart2_pins(pins);
			break;
		default:
			return;
	}
	pdata = pdev->dev.platform_data;
	pdata->num = portnr;		/* update to mapped ID */

	if (portnr < ATMEL_MAX_UART)
		at91_uarts[portnr] = pdev;
}
/*
 * Select the registered serial port that should be the kernel console.
 * @portnr: port number previously passed to at91_register_uart().
 * Silently does nothing for an out-of-range or never-registered port —
 * the original dereferenced at91_uarts[portnr]->id unconditionally, which
 * oopses at boot if that slot was never filled in.
 */
void __init at91_set_serial_console(unsigned portnr)
{
	if (portnr < ATMEL_MAX_UART && at91_uarts[portnr]) {
		atmel_default_console_device = at91_uarts[portnr];
		at91cap9_set_console_clock(at91_uarts[portnr]->id);
	}
}
/*
 * Register every UART previously set up via at91_register_uart(),
 * and warn if no console device was selected.
 */
void __init at91_add_device_serial(void)
{
	int i;

	for (i = 0; i < ATMEL_MAX_UART; i++) {
		if (at91_uarts[i])
			platform_device_register(at91_uarts[i]);
	}

	if (!atmel_default_console_device)
		printk(KERN_INFO "AT91: No default serial console defined.\n");
}
#else
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {}
void __init at91_set_serial_console(unsigned portnr) {}
void __init at91_add_device_serial(void) {}
#endif
/* -------------------------------------------------------------------- */
/*
* These devices are always present and don't need any board-specific
* setup.
*/
/* Register the always-present devices (RTT, watchdog, TC); runs at
 * arch_initcall time, before board-specific device setup. */
static int __init at91_add_standard_devices(void)
{
	at91_add_device_rtt();
	at91_add_device_watchdog();
	at91_add_device_tc();
	return 0;
}
arch_initcall(at91_add_standard_devices);
| gpl-2.0 |
omnirom/android_kernel_huawei_angler | drivers/net/wireless/brcm80211/brcmsmac/channel.c | 2327 | 21373 | /*
* Copyright (c) 2010 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/types.h>
#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <net/regulatory.h>
#include <defs.h>
#include "pub.h"
#include "phy/phy_hal.h"
#include "main.h"
#include "stf.h"
#include "channel.h"
#include "mac80211_if.h"
#include "debug.h"
/* QDB() macro takes a dB value and converts to a quarter dB value */
#define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR)
#define LOCALE_MIMO_IDX_bn 0
#define LOCALE_MIMO_IDX_11n 0
/* max of BAND_5G_PWR_LVLS and 14 for 2.4 GHz */
#define BRCMS_MAXPWR_MIMO_TBL_SIZE 14
/* maxpwr mapping to 5GHz band channels:
* maxpwr[0] - channels [34-48]
* maxpwr[1] - channels [52-60]
* maxpwr[2] - channels [62-64]
* maxpwr[3] - channels [100-140]
* maxpwr[4] - channels [149-165]
*/
#define BAND_5G_PWR_LVLS 5 /* 5 power levels for 5G */
#define LC(id) LOCALE_MIMO_IDX_ ## id
#define LOCALES(mimo2, mimo5) \
{LC(mimo2), LC(mimo5)}
/* macro to get 5 GHz channel group index for tx power */
#define CHANNEL_POWER_IDX_5G(c) (((c) < 52) ? 0 : \
(((c) < 62) ? 1 : \
(((c) < 100) ? 2 : \
(((c) < 149) ? 3 : 4))))
#define BRCM_2GHZ_2412_2462 REG_RULE(2412-10, 2462+10, 40, 0, 19, 0)
#define BRCM_2GHZ_2467_2472 REG_RULE(2467-10, 2472+10, 20, 0, 19, \
NL80211_RRF_PASSIVE_SCAN | \
NL80211_RRF_NO_IBSS)
#define BRCM_5GHZ_5180_5240 REG_RULE(5180-10, 5240+10, 40, 0, 21, \
NL80211_RRF_PASSIVE_SCAN | \
NL80211_RRF_NO_IBSS)
#define BRCM_5GHZ_5260_5320 REG_RULE(5260-10, 5320+10, 40, 0, 21, \
NL80211_RRF_PASSIVE_SCAN | \
NL80211_RRF_DFS | \
NL80211_RRF_NO_IBSS)
#define BRCM_5GHZ_5500_5700 REG_RULE(5500-10, 5700+10, 40, 0, 21, \
NL80211_RRF_PASSIVE_SCAN | \
NL80211_RRF_DFS | \
NL80211_RRF_NO_IBSS)
#define BRCM_5GHZ_5745_5825 REG_RULE(5745-10, 5825+10, 40, 0, 21, \
NL80211_RRF_PASSIVE_SCAN | \
NL80211_RRF_NO_IBSS)
/*
 * Custom "worldwide" (X2) regulatory domain handed to cfg80211 via
 * wiphy_apply_custom_regulatory() in brcms_c_regd_init().
 */
static const struct ieee80211_regdomain brcms_regdom_x2 = {
	.n_reg_rules = 6,
	.alpha2 = "X2",
	.reg_rules = {
		BRCM_2GHZ_2412_2462,
		BRCM_2GHZ_2467_2472,
		BRCM_5GHZ_5180_5240,
		BRCM_5GHZ_5260_5320,
		BRCM_5GHZ_5500_5700,
		BRCM_5GHZ_5745_5825,
	}
};
/* locale per-channel tx power limits for MIMO frames
* maxpwr arrays are index by channel for 2.4 GHz limits, and
* by sub-band for 5 GHz limits using CHANNEL_POWER_IDX_5G(channel)
*/
struct locale_mimo_info {
	/* tx 20 MHz power limits, qdBm units (indexed per the comment above) */
	s8 maxpwr20[BRCMS_MAXPWR_MIMO_TBL_SIZE];
	/* tx 40 MHz power limits, qdBm units (same indexing as maxpwr20) */
	s8 maxpwr40[BRCMS_MAXPWR_MIMO_TBL_SIZE];
};
/* Country names and abbreviations with locale defined from ISO 3166 */
/* Country names and abbreviations with locale defined from ISO 3166 */
struct country_info {
	const u8 locale_mimo_2G;	/* index into g_mimo_2g_table */
	const u8 locale_mimo_5G;	/* index into g_mimo_5g_table */
};
/* Pairs a locale (tx power tables) with its cfg80211 regulatory domain. */
struct brcms_regd {
	struct country_info country;
	const struct ieee80211_regdomain *regdomain;
};
/* Channel manager state, created by brcms_c_channel_mgr_attach(). */
struct brcms_cm_info {
	struct brcms_pub *pub;
	struct brcms_c_info *wlc;
	const struct brcms_regd *world_regd;	/* active regulatory data */
};
/*
* MIMO Locale Definitions - 2.4 GHz
*/
/* Locale "bn": 2.4 GHz limits, first array is maxpwr20 (channels 1-13),
 * second is maxpwr40 (zero entries on the outermost channels). */
static const struct locale_mimo_info locale_bn = {
	{QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
	 QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
	 QDB(13), QDB(13), QDB(13)},
	{0, 0, QDB(13), QDB(13), QDB(13),
	 QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
	 QDB(13), 0, 0},
};
/* 2.4 GHz locale table, indexed by country_info.locale_mimo_2G */
static const struct locale_mimo_info *g_mimo_2g_table[] = {
	&locale_bn
};
/*
* MIMO Locale Definitions - 5 GHz
*/
/* Locale "11n": 5 GHz limits per CHANNEL_POWER_IDX_5G() sub-band;
 * first array is maxpwr20, second is maxpwr40. */
static const struct locale_mimo_info locale_11n = {
	{ /* 12.5 dBm */ 50, 50, 50, QDB(15), QDB(15)},
	{QDB(14), QDB(15), QDB(15), QDB(15), QDB(15)},
};
/* 5 GHz locale table, indexed by country_info.locale_mimo_5G */
static const struct locale_mimo_info *g_mimo_5g_table[] = {
	&locale_11n
};
/* Built-in regulatory data searched by brcms_world_regd(). */
static const struct brcms_regd cntry_locales[] = {
	/* Worldwide RoW 2, must always be at index 0 */
	{
		.country = LOCALES(bn, 11n),
		.regdomain = &brcms_regdom_x2,
	},
};
/* Look up a 2.4 GHz MIMO locale; NULL for an out-of-range index. */
static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx)
{
	if (locale_idx < ARRAY_SIZE(g_mimo_2g_table))
		return g_mimo_2g_table[locale_idx];

	return NULL;
}
/* Look up a 5 GHz MIMO locale; NULL for an out-of-range index. */
static const struct locale_mimo_info *brcms_c_get_mimo_5g(u8 locale_idx)
{
	if (locale_idx < ARRAY_SIZE(g_mimo_5g_table))
		return g_mimo_5g_table[locale_idx];

	return NULL;
}
/*
* Indicates whether the country provided is valid to pass
* to cfg80211 or not.
*
* returns true if valid; false if not.
*/
/*
 * Decide whether @ccode may be passed up to cfg80211 as a regulatory
 * hint: the first two characters must be ASCII uppercase letters and
 * must not form an ISO 3166-1 user-assigned code, and "NA" is also
 * rejected.
 */
static bool brcms_c_country_valid(const char *ccode)
{
	/* first two chars must be plain ASCII uppercase */
	if ((0x80 & ccode[0]) != 0 || ccode[0] < 'A' || ccode[0] > 'Z')
		return false;
	if ((0x80 & ccode[1]) != 0 || ccode[1] < 'A' || ccode[1] > 'Z')
		return false;

	/* reject ISO 3166-1 user-assigned ranges: AA, ZZ, XA-XZ, QM-QZ */
	if (strcmp(ccode, "AA") == 0 || strcmp(ccode, "ZZ") == 0)
		return false;
	if (ccode[0] == 'X')
		return false;
	if (ccode[0] == 'Q' && ccode[1] >= 'M' && ccode[1] <= 'Z')
		return false;

	if (strcmp(ccode, "NA") == 0)
		return false;

	return true;
}
static const struct brcms_regd *brcms_world_regd(const char *regdom, int len)
{
const struct brcms_regd *regd = NULL;
int i;
for (i = 0; i < ARRAY_SIZE(cntry_locales); i++) {
if (!strncmp(regdom, cntry_locales[i].regdomain->alpha2, len)) {
regd = &cntry_locales[i];
break;
}
}
return regd;
}
/* Fallback regulatory data: the worldwide entry at index 0 ("X2"). */
static const struct brcms_regd *brcms_default_world_regd(void)
{
	return &cntry_locales[0];
}
/* JP, J1 - J10 are Japan ccodes */
static bool brcms_c_japan_ccode(const char *ccode)
{
	/* Japan uses "JP" plus the special "J1".."J9" codes. */
	if (ccode[0] != 'J')
		return false;

	return ccode[1] == 'P' || (ccode[1] >= '1' && ccode[1] <= '9');
}
/*
 * Clamp every per-rate tx power limit in @txpwr so that none exceeds
 * @local_constraint_qdbm (quarter-dBm units).
 */
static void
brcms_c_channel_min_txpower_limits_with_local_constraint(
		struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr,
		u8 local_constraint_qdbm)
{
	int i;

	/* CCK rates */
	for (i = 0; i < WL_TX_POWER_CCK_NUM; i++)
		txpwr->cck[i] = min(txpwr->cck[i], local_constraint_qdbm);

	/* 20 MHz legacy OFDM, SISO */
	for (i = 0; i < WL_TX_POWER_OFDM_NUM; i++)
		txpwr->ofdm[i] = min(txpwr->ofdm[i], local_constraint_qdbm);

	/* 20 MHz legacy OFDM, CDD */
	for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
		txpwr->ofdm_cdd[i] =
		    min(txpwr->ofdm_cdd[i], local_constraint_qdbm);

	/* 40 MHz legacy OFDM, SISO */
	for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
		txpwr->ofdm_40_siso[i] =
		    min(txpwr->ofdm_40_siso[i], local_constraint_qdbm);

	/* 40 MHz legacy OFDM, CDD */
	for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
		txpwr->ofdm_40_cdd[i] =
		    min(txpwr->ofdm_40_cdd[i], local_constraint_qdbm);

	/* 20 MHz MCS 0-7, SISO */
	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
		txpwr->mcs_20_siso[i] =
		    min(txpwr->mcs_20_siso[i], local_constraint_qdbm);

	/* 20 MHz MCS 0-7, CDD */
	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
		txpwr->mcs_20_cdd[i] =
		    min(txpwr->mcs_20_cdd[i], local_constraint_qdbm);

	/* 20 MHz MCS 0-7, STBC */
	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
		txpwr->mcs_20_stbc[i] =
		    min(txpwr->mcs_20_stbc[i], local_constraint_qdbm);

	/* 20 MHz MCS 8-15, MIMO */
	for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
		txpwr->mcs_20_mimo[i] =
		    min(txpwr->mcs_20_mimo[i], local_constraint_qdbm);

	/* 40 MHz MCS 0-7, SISO */
	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
		txpwr->mcs_40_siso[i] =
		    min(txpwr->mcs_40_siso[i], local_constraint_qdbm);

	/* 40 MHz MCS 0-7, CDD */
	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
		txpwr->mcs_40_cdd[i] =
		    min(txpwr->mcs_40_cdd[i], local_constraint_qdbm);

	/* 40 MHz MCS 0-7, STBC */
	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
		txpwr->mcs_40_stbc[i] =
		    min(txpwr->mcs_40_stbc[i], local_constraint_qdbm);

	/* 40 MHz MCS 8-15, MIMO */
	for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
		txpwr->mcs_40_mimo[i] =
		    min(txpwr->mcs_40_mimo[i], local_constraint_qdbm);

	/* 40 MHz MCS 32 */
	txpwr->mcs32 = min(txpwr->mcs32, local_constraint_qdbm);
}
/*
* set the driver's current country and regulatory information
* using a country code as the source. Look up built in country
* information found with the country code.
*/
static void
brcms_c_set_country(struct brcms_cm_info *wlc_cm,
		    const struct brcms_regd *regd)
{
	struct brcms_c_info *wlc = wlc_cm->wlc;

	/* re-apply nmode if the enabled state disagrees with the user's
	 * nmode preference */
	if ((wlc->pub->_n_enab & SUPPORT_11N) != wlc->protection->nmode_user)
		brcms_c_set_nmode(wlc);

	/* refresh spatial-stream policy on both bands */
	brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
	brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);

	brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
}
/*
 * Allocate and initialize the channel manager. Picks the regulatory
 * domain from the SROM country code, falling back to the built-in "X2"
 * worldwide domain, and applies it. Returns NULL on allocation failure.
 */
struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
{
	struct brcms_cm_info *wlc_cm;
	struct brcms_pub *pub = wlc->pub;
	struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
	const char *ccode = sprom->alpha2;
	int ccode_len = sizeof(sprom->alpha2);
	wlc_cm = kzalloc(sizeof(struct brcms_cm_info), GFP_ATOMIC);
	if (wlc_cm == NULL)
		return NULL;
	wlc_cm->pub = pub;
	wlc_cm->wlc = wlc;
	wlc->cmi = wlc_cm;
	/* store the country code for passing up as a regulatory hint */
	wlc_cm->world_regd = brcms_world_regd(ccode, ccode_len);
	if (brcms_c_country_valid(ccode))
		strncpy(wlc->pub->srom_ccode, ccode, ccode_len);
	/*
	 * If no custom world domain is found in the SROM, use the
	 * default "X2" domain.
	 */
	if (!wlc_cm->world_regd) {
		wlc_cm->world_regd = brcms_default_world_regd();
		ccode = wlc_cm->world_regd->regdomain->alpha2;
		ccode_len = BRCM_CNTRY_BUF_SZ - 1;
	}
	/* save default country for exiting 11d regulatory mode */
	strncpy(wlc->country_default, ccode, ccode_len);
	/* initialize autocountry_default to driver default */
	strncpy(wlc->autocountry_default, ccode, ccode_len);
	brcms_c_set_country(wlc_cm, wlc_cm->world_regd);
	return wlc_cm;
}
/* Free the channel manager created by brcms_c_channel_mgr_attach(). */
void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm)
{
	kfree(wlc_cm);
}
/*
 * Switch hardware to @chanspec: compute the regulatory tx power limits,
 * clamp them to @local_constraint_qdbm, adjust gmode for no-OFDM
 * channels, and program the baseband.
 */
void
brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
			     u8 local_constraint_qdbm)
{
	struct brcms_c_info *wlc = wlc_cm->wlc;
	struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.chandef.chan;
	struct txpwr_limits txpwr;
	brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
	brcms_c_channel_min_txpower_limits_with_local_constraint(
		wlc_cm, &txpwr, local_constraint_qdbm
	);
	/* set or restore gmode as required by regulatory */
	if (ch->flags & IEEE80211_CHAN_NO_OFDM)
		brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
	else
		brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
	/* passive-scan channels are marked "mute" in the baseband */
	brcms_b_set_chanspec(wlc->hw, chanspec,
			     !!(ch->flags & IEEE80211_CHAN_PASSIVE_SCAN),
			     &txpwr);
}
/*
 * brcms_c_channel_reg_limits() - compute regulatory tx power limits for
 * a chanspec.
 *
 * Fills @txpwr from the channel's cfg80211 max power, the band antenna
 * gain and the locale MIMO tables. Zero entries in the 40 MHz OFDM and
 * MCS SISO/STBC arrays act as "not explicitly set" flags and are
 * back-filled from the corresponding CDD/SISO values at the end.
 *
 * Fixes vs. previous revision: the nested duplicate
 * "if (li_mimo == &locale_bn)" test is removed (it repeated the outer
 * condition verbatim), and a NULL locale from brcms_c_get_mimo_*() no
 * longer gets dereferenced.
 */
void
brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
			   struct txpwr_limits *txpwr)
{
	struct brcms_c_info *wlc = wlc_cm->wlc;
	struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.chandef.chan;
	uint i;
	uint chan;
	int maxpwr;
	int delta;
	const struct country_info *country;
	struct brcms_band *band;
	int conducted_max = BRCMS_TXPWR_MAX;
	const struct locale_mimo_info *li_mimo;
	int maxpwr20, maxpwr40;
	int maxpwr_idx;
	uint j;

	memset(txpwr, 0, sizeof(struct txpwr_limits));

	if (WARN_ON(!ch))
		return;

	country = &wlc_cm->world_regd->country;

	chan = CHSPEC_CHANNEL(chanspec);
	band = wlc->bandstate[chspec_bandunit(chanspec)];
	li_mimo = (band->bandtype == BRCM_BAND_5G) ?
	    brcms_c_get_mimo_5g(country->locale_mimo_5G) :
	    brcms_c_get_mimo_2g(country->locale_mimo_2G);

	/*
	 * The locale lookups return NULL for an out-of-range index; bail
	 * out (leaving txpwr zeroed) rather than dereference NULL below.
	 */
	if (WARN_ON(!li_mimo))
		return;

	delta = band->antgain;

	if (band->bandtype == BRCM_BAND_2G)
		conducted_max = QDB(22);

	/* channel max power, reduced by antenna gain, clamped to range */
	maxpwr = QDB(ch->max_power) - delta;
	maxpwr = max(maxpwr, 0);
	maxpwr = min(maxpwr, conducted_max);

	/* CCK txpwr limits for 2.4G band */
	if (band->bandtype == BRCM_BAND_2G) {
		for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
			txpwr->cck[i] = (u8) maxpwr;
	}

	for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
		txpwr->ofdm[i] = (u8) maxpwr;
		/*
		 * OFDM 40 MHz SISO has the same power as the corresponding
		 * MCS0-7 rate unless overridden by the locale specific code.
		 * We set this value to 0 as a flag (presumably 0 dBm isn't
		 * a possibility) and then copy the MCS0-7 value to the 40 MHz
		 * value if it wasn't explicitly set.
		 */
		txpwr->ofdm_40_siso[i] = 0;
		txpwr->ofdm_cdd[i] = (u8) maxpwr;
		txpwr->ofdm_40_cdd[i] = 0;
	}

	/* only antenna gain in excess of 6 dB reduces the MIMO limits */
	delta = 0;
	if (band->antgain > QDB(6))
		delta = band->antgain - QDB(6);	/* Excess over 6 dB */

	if (band->bandtype == BRCM_BAND_2G)
		maxpwr_idx = (chan - 1);
	else
		maxpwr_idx = CHANNEL_POWER_IDX_5G(chan);

	maxpwr20 = li_mimo->maxpwr20[maxpwr_idx];
	maxpwr40 = li_mimo->maxpwr40[maxpwr_idx];

	maxpwr20 = maxpwr20 - delta;
	maxpwr20 = max(maxpwr20, 0);
	maxpwr40 = maxpwr40 - delta;
	maxpwr40 = max(maxpwr40, 0);

	/* Fill in the MCS 0-7 (SISO) rates */
	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
		/*
		 * 20 MHz has the same power as the corresponding OFDM rate
		 * unless overridden by the locale specific code.
		 */
		txpwr->mcs_20_siso[i] = txpwr->ofdm[i];
		txpwr->mcs_40_siso[i] = 0;
	}

	/* Fill in the MCS 0-7 CDD rates */
	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
		txpwr->mcs_20_cdd[i] = (u8) maxpwr20;
		txpwr->mcs_40_cdd[i] = (u8) maxpwr40;
	}

	/*
	 * Locale "bn" has SISO expressed in the table and overrides
	 * the CDD-derived values set above.
	 */
	if (li_mimo == &locale_bn) {
		maxpwr20 = QDB(16);
		maxpwr40 = 0;
		if (chan >= 3 && chan <= 11)
			maxpwr40 = QDB(16);
		for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
			txpwr->mcs_20_siso[i] = (u8) maxpwr20;
			txpwr->mcs_40_siso[i] = (u8) maxpwr40;
		}
	}

	/* Fill in the MCS 0-7 STBC rates (0 = back-fill from CDD below) */
	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
		txpwr->mcs_20_stbc[i] = 0;
		txpwr->mcs_40_stbc[i] = 0;
	}

	/* Fill in the MCS 8-15 SDM rates */
	for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++) {
		txpwr->mcs_20_mimo[i] = (u8) maxpwr20;
		txpwr->mcs_40_mimo[i] = (u8) maxpwr40;
	}

	/* Fill in MCS32 */
	txpwr->mcs32 = (u8) maxpwr40;

	/*
	 * Back-fill unset 40 MHz OFDM CDD limits from the MCS 40 CDD
	 * values; mcs_40_cdd[0] covers ofdm_40_cdd[0] and [1] (i is
	 * bumped inside the loop on the first iteration).
	 */
	for (i = 0, j = 0; i < BRCMS_NUM_RATES_OFDM; i++, j++) {
		if (txpwr->ofdm_40_cdd[i] == 0)
			txpwr->ofdm_40_cdd[i] = txpwr->mcs_40_cdd[j];
		if (i == 0) {
			i = i + 1;
			if (txpwr->ofdm_40_cdd[i] == 0)
				txpwr->ofdm_40_cdd[i] = txpwr->mcs_40_cdd[j];
		}
	}

	/*
	 * Copy the 40 MHZ MCS 0-7 CDD value to the 40 MHZ MCS 0-7 SISO
	 * value if it wasn't provided explicitly.
	 */
	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
		if (txpwr->mcs_40_siso[i] == 0)
			txpwr->mcs_40_siso[i] = txpwr->mcs_40_cdd[i];
	}

	/* Same back-fill for 40 MHz OFDM SISO from MCS 40 SISO. */
	for (i = 0, j = 0; i < BRCMS_NUM_RATES_OFDM; i++, j++) {
		if (txpwr->ofdm_40_siso[i] == 0)
			txpwr->ofdm_40_siso[i] = txpwr->mcs_40_siso[j];
		if (i == 0) {
			i = i + 1;
			if (txpwr->ofdm_40_siso[i] == 0)
				txpwr->ofdm_40_siso[i] = txpwr->mcs_40_siso[j];
		}
	}

	/*
	 * Copy the 20 and 40 MHz MCS0-7 CDD values to the corresponding
	 * STBC values if they weren't provided explicitly.
	 */
	for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++) {
		if (txpwr->mcs_20_stbc[i] == 0)
			txpwr->mcs_20_stbc[i] = txpwr->mcs_20_cdd[i];
		if (txpwr->mcs_40_stbc[i] == 0)
			txpwr->mcs_40_stbc[i] = txpwr->mcs_40_cdd[i];
	}
}
/*
* Verify the chanspec is using a legal set of parameters, i.e. that the
* chanspec specified a band, bw, ctl_sb and channel and that the
* combination could be legal given any set of circumstances.
* RETURNS: true is the chanspec is malformed, false if it looks good.
*/
/*
 * Check that @chanspec carries a legal combination of band, bandwidth
 * and control sideband. Returns true when malformed, false when OK.
 */
static bool brcms_c_chspec_malformed(u16 chanspec)
{
	/* band must be 2G or 5G */
	if (!CHSPEC_IS2G(chanspec) && !CHSPEC_IS5G(chanspec))
		return true;

	/* bandwidth must be 20 or 40 MHz */
	if (!CHSPEC_IS20(chanspec) && !CHSPEC_IS40(chanspec))
		return true;

	/* 20 MHz may not carry a control sideband ... */
	if (CHSPEC_IS20(chanspec))
		return !CHSPEC_SB_NONE(chanspec);

	/* ... and 40 MHz must select either the upper or lower one */
	return !CHSPEC_SB_UPPER(chanspec) && !CHSPEC_SB_LOWER(chanspec);
}
/*
* Validate the chanspec for this locale, for 40MHZ we need to also
* check that the sidebands are valid 20MZH channels in this locale
* and they are also a legal HT combination
*/
static bool
brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec)
{
	struct brcms_c_info *wlc = wlc_cm->wlc;
	u8 channel = CHSPEC_CHANNEL(chspec);

	/* reject structurally invalid chanspecs up front */
	if (brcms_c_chspec_malformed(chspec)) {
		brcms_err(wlc->hw->d11core, "wl%d: malformed chanspec 0x%x\n",
			  wlc->pub->unit, chspec);
		return false;
	}

	/* the channel number must belong to the band the chanspec claims */
	return CHANNEL_BANDUNIT(wlc_cm->wlc, channel) ==
	       chspec_bandunit(chspec);
}
/* Public wrapper around brcms_c_valid_chanspec_ext(). */
bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec)
{
	return brcms_c_valid_chanspec_ext(wlc_cm, chspec);
}
/* Frequencies 5260-5700 MHz match the DFS rules in brcms_regdom_x2. */
static bool brcms_is_radar_freq(u16 center_freq)
{
	if (center_freq < 5260)
		return false;

	return center_freq <= 5700;
}
/*
 * Mark every enabled 5 GHz channel in the radar frequency range as
 * RADAR / NO_IBSS / PASSIVE_SCAN.
 */
static void brcms_reg_apply_radar_flags(struct wiphy *wiphy)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *ch;
	int i;
	sband = wiphy->bands[IEEE80211_BAND_5GHZ];
	if (!sband)
		return;
	for (i = 0; i < sband->n_channels; i++) {
		ch = &sband->channels[i];
		if (!brcms_is_radar_freq(ch->center_freq))
			continue;
		/*
		 * All channels in this range should be passive and have
		 * DFS enabled.
		 */
		if (!(ch->flags & IEEE80211_CHAN_DISABLED))
			ch->flags |= IEEE80211_CHAN_RADAR |
				     IEEE80211_CHAN_NO_IBSS |
				     IEEE80211_CHAN_PASSIVE_SCAN;
	}
}
/*
 * Relax NO_IBSS/PASSIVE_SCAN on channels where beaconing is allowed.
 * For a country-IE initiated change the regulatory rule is consulted;
 * otherwise a channel qualifies once a beacon has been seen on it.
 * Disabled and radar channels are never touched.
 */
static void
brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
				enum nl80211_reg_initiator initiator)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *ch;
	const struct ieee80211_reg_rule *rule;
	int band, i;
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		sband = wiphy->bands[band];
		if (!sband)
			continue;
		for (i = 0; i < sband->n_channels; i++) {
			ch = &sband->channels[i];
			if (ch->flags &
			    (IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_RADAR))
				continue;
			if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
				rule = freq_reg_info(wiphy, ch->center_freq);
				if (IS_ERR(rule))
					continue;
				if (!(rule->flags & NL80211_RRF_NO_IBSS))
					ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
				if (!(rule->flags & NL80211_RRF_PASSIVE_SCAN))
					ch->flags &=
					    ~IEEE80211_CHAN_PASSIVE_SCAN;
			} else if (ch->beacon_found) {
				ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
					       IEEE80211_CHAN_PASSIVE_SCAN);
			}
		}
	}
}
/*
 * cfg80211 regulatory-change notifier: reapply radar/beaconing flags,
 * disable the radio when no channel remains usable, and configure the
 * channel-14 wide filter when a Japan country code is in effect.
 */
static void brcms_reg_notifier(struct wiphy *wiphy,
			       struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct brcms_info *wl = hw->priv;
	struct brcms_c_info *wlc = wl->wlc;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *ch;
	int band, i;
	bool ch_found = false;
	brcms_reg_apply_radar_flags(wiphy);
	if (request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)
		brcms_reg_apply_beaconing_flags(wiphy, request->initiator);
	/* Disable radio if all channels disallowed by regulatory */
	for (band = 0; !ch_found && band < IEEE80211_NUM_BANDS; band++) {
		sband = wiphy->bands[band];
		if (!sband)
			continue;
		for (i = 0; !ch_found && i < sband->n_channels; i++) {
			ch = &sband->channels[i];
			if (!(ch->flags & IEEE80211_CHAN_DISABLED))
				ch_found = true;
		}
	}
	if (ch_found) {
		mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
	} else {
		mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
		brcms_err(wlc->hw->d11core,
			  "wl%d: %s: no valid channel for \"%s\"\n",
			  wlc->pub->unit, __func__, request->alpha2);
	}
	/* ch14 wide filter applies to 2.4 GHz capable configurations */
	if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G)
		wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
					brcms_c_japan_ccode(request->alpha2));
}
/*
 * Register this driver's custom regulatory domain with cfg80211:
 * disable channels the PHY cannot tune, install the notifier, and
 * apply the custom regdomain plus initial beaconing flags.
 */
void brcms_c_regd_init(struct brcms_c_info *wlc)
{
	struct wiphy *wiphy = wlc->wiphy;
	const struct brcms_regd *regd = wlc->cmi->world_regd;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *ch;
	struct brcms_chanvec sup_chan;
	struct brcms_band *band;
	int band_idx, i;
	/* Disable any channels not supported by the phy */
	for (band_idx = 0; band_idx < wlc->pub->_nbands; band_idx++) {
		band = wlc->bandstate[band_idx];
		wlc_phy_chanspec_band_validch(band->pi, band->bandtype,
					      &sup_chan);
		if (band_idx == BAND_2G_INDEX)
			sband = wiphy->bands[IEEE80211_BAND_2GHZ];
		else
			sband = wiphy->bands[IEEE80211_BAND_5GHZ];
		for (i = 0; i < sband->n_channels; i++) {
			ch = &sband->channels[i];
			if (!isset(sup_chan.vec, ch->hw_value))
				ch->flags |= IEEE80211_CHAN_DISABLED;
		}
	}
	wlc->wiphy->reg_notifier = brcms_reg_notifier;
	wlc->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
			     WIPHY_FLAG_STRICT_REGULATORY;
	wiphy_apply_custom_regulatory(wlc->wiphy, regd->regdomain);
	brcms_reg_apply_beaconing_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER);
}
| gpl-2.0 |
Pesach85/ph85-p880-kernel-project | drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c | 3095 | 1864 | /* ati-tv-wonder-hd-600.h - Keytable for ati_tv_wonder_hd_600 Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
/* ATI TV Wonder HD 600 USB
Devin Heitmueller <devin.heitmueller@gmail.com>
*/
/*
 * Scancode-to-keycode table; scancodes 0x00-0x17 follow the remote's
 * physical layout, four buttons per row (see the row comments).
 */
static struct rc_map_table ati_tv_wonder_hd_600[] = {
	{ 0x00, KEY_RECORD},		/* Row 1 */
	{ 0x01, KEY_PLAYPAUSE},
	{ 0x02, KEY_STOP},
	{ 0x03, KEY_POWER},
	{ 0x04, KEY_PREVIOUS},	/* Row 2 */
	{ 0x05, KEY_REWIND},
	{ 0x06, KEY_FORWARD},
	{ 0x07, KEY_NEXT},
	{ 0x08, KEY_EPG},		/* Row 3 */
	{ 0x09, KEY_HOME},
	{ 0x0a, KEY_MENU},
	{ 0x0b, KEY_CHANNELUP},
	{ 0x0c, KEY_BACK},		/* Row 4 */
	{ 0x0d, KEY_UP},
	{ 0x0e, KEY_INFO},
	{ 0x0f, KEY_CHANNELDOWN},
	{ 0x10, KEY_LEFT},		/* Row 5 */
	{ 0x11, KEY_SELECT},
	{ 0x12, KEY_RIGHT},
	{ 0x13, KEY_VOLUMEUP},
	{ 0x14, KEY_LAST},		/* Row 6 */
	{ 0x15, KEY_DOWN},
	{ 0x16, KEY_MUTE},
	{ 0x17, KEY_VOLUMEDOWN},
};
/* rc-core registration wrapper for the keytable above */
static struct rc_map_list ati_tv_wonder_hd_600_map = {
	.map = {
		.scan = ati_tv_wonder_hd_600,
		.size = ARRAY_SIZE(ati_tv_wonder_hd_600),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name = RC_MAP_ATI_TV_WONDER_HD_600,
	}
};
/* Register the keymap with rc-core at module load. */
static int __init init_rc_map_ati_tv_wonder_hd_600(void)
{
	return rc_map_register(&ati_tv_wonder_hd_600_map);
}
/* Unregister the keymap at module unload. */
static void __exit exit_rc_map_ati_tv_wonder_hd_600(void)
{
	rc_map_unregister(&ati_tv_wonder_hd_600_map);
}
module_init(init_rc_map_ati_tv_wonder_hd_600)
module_exit(exit_rc_map_ati_tv_wonder_hd_600)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
haldric/I9300 | drivers/staging/tidspbridge/core/tiomap3430.c | 3095 | 53191 | /*
* tiomap.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Processor Manager Driver for TI OMAP3430 EVM.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <plat/dsp.h>
#include <linux/types.h>
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/drv.h>
#include <dspbridge/sync.h>
/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>
/* ----------------------------------- Link Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/dspdeh.h>
#include <dspbridge/dspio.h>
#include <dspbridge/dspmsg.h>
#include <dspbridge/pwr.h>
#include <dspbridge/io_sm.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/dspapi.h>
#include <dspbridge/dmm.h>
#include <dspbridge/wdt.h>
/* ----------------------------------- Local */
#include "_tiomap.h"
#include "_tiomap_pwr.h"
#include "tiomap_io.h"
/* Offset in shared mem to write to in order to synchronize start with DSP */
#define SHMSYNCOFFSET 4 /* GPP byte offset */
#define BUFFERSIZE 1024
#define TIHELEN_ACKTIMEOUT 10000
#define MMU_SECTION_ADDR_MASK 0xFFF00000
#define MMU_SSECTION_ADDR_MASK 0xFF000000
#define MMU_LARGE_PAGE_MASK 0xFFFF0000
#define MMU_SMALL_PAGE_MASK 0xFFFFF000
#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
#define PAGES_II_LVL_TABLE 512
#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)
/*
* This is a totally ugly layer violation, but needed until
* omap_ctrl_set_dsp_boot*() are provided.
*/
#define OMAP3_IVA2_BOOTMOD_IDLE 1
#define OMAP2_CONTROL_GENERAL 0x270
#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
#define OMAP343X_CTRL_REGADDR(reg) \
OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
/* Forward Declarations: */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
u8 *host_buff,
u32 dsp_addr, u32 ul_num_bytes,
u32 mem_type);
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
u32 dsp_addr);
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
int *board_state);
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
u8 *host_buff,
u32 dsp_addr, u32 ul_num_bytes,
u32 mem_type);
static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
u32 brd_state);
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
u32 dsp_dest_addr, u32 dsp_src_addr,
u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
u8 *host_buff, u32 dsp_addr,
u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
u32 ul_mpu_addr, u32 virt_addr,
u32 ul_num_bytes, u32 ul_map_attr,
struct page **mapped_pages);
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
u32 virt_addr, u32 ul_num_bytes);
static int bridge_dev_create(struct bridge_dev_context
**dev_cntxt,
struct dev_object *hdev_obj,
struct cfg_hostres *config_param);
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
u32 dw_cmd, void *pargs);
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
static u32 user_va2_pa(struct mm_struct *mm, u32 address);
static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
u32 va, u32 size,
struct hw_mmu_map_attrs_t *map_attrs);
static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
u32 size, struct hw_mmu_map_attrs_t *attrs);
static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
u32 ul_mpu_addr, u32 virt_addr,
u32 ul_num_bytes,
struct hw_mmu_map_attrs_t *hw_attrs);
bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
/* ----------------------------------- Globals */
/* Attributes of L2 page tables for DSP MMU */
/* Per-L2-page-table usage record (one per allocated L2 table). */
struct page_info {
	u32 num_entries;	/* Number of valid PTEs in the L2 PT */
};
/* Attributes used to manage the DSP MMU page tables */
/*
 * Attributes used to manage the DSP MMU page tables. Both the aligned
 * table addresses and the raw allocation addresses/sizes are tracked,
 * because the underlying allocations may not be aligned.
 */
struct pg_table_attrs {
	spinlock_t pg_lock;	/* Critical section object handle */
	u32 l1_base_pa;		/* Physical address of the L1 PT */
	u32 l1_base_va;		/* Virtual  address of the L1 PT */
	u32 l1_size;		/* Size of the L1 PT */
	u32 l1_tbl_alloc_pa;
	/* Physical address of Allocated mem for L1 table. May not be aligned */
	u32 l1_tbl_alloc_va;
	/* Virtual address of Allocated mem for L1 table. May not be aligned */
	u32 l1_tbl_alloc_sz;
	/* Size of consistent memory allocated for L1 table.
	 * May not be aligned */
	u32 l2_base_pa;		/* Physical address of the L2 PT */
	u32 l2_base_va;		/* Virtual  address of the L2 PT */
	u32 l2_size;		/* Size of the L2 PT */
	u32 l2_tbl_alloc_pa;
	/* Physical address of Allocated mem for L2 table. May not be aligned */
	u32 l2_tbl_alloc_va;
	/* Virtual address of Allocated mem for L2 table. May not be aligned */
	u32 l2_tbl_alloc_sz;
	/* Size of consistent memory allocated for L2 table.
	 * May not be aligned */
	u32 l2_num_pages;	/* Number of allocated L2 PT */
	/* Array [l2_num_pages] of L2 PT info structs */
	struct page_info *pg_info;
};
/*
* This Bridge driver's function interface table.
*/
/*
 * This Bridge driver's function interface table. Initializers are
 * positional, so the order must match the bridge_drv_interface
 * declaration exactly.
 */
static struct bridge_drv_interface drv_interface_fxns = {
	/* Bridge API ver. for which this bridge driver is built. */
	BRD_API_MAJOR_VERSION,
	BRD_API_MINOR_VERSION,
	bridge_dev_create,
	bridge_dev_destroy,
	bridge_dev_ctrl,
	bridge_brd_monitor,
	bridge_brd_start,
	bridge_brd_stop,
	bridge_brd_status,
	bridge_brd_read,
	bridge_brd_write,
	bridge_brd_set_state,
	bridge_brd_mem_copy,
	bridge_brd_mem_write,
	bridge_brd_mem_map,
	bridge_brd_mem_un_map,
	/* The following CHNL functions are provided by chnl_io.lib: */
	bridge_chnl_create,
	bridge_chnl_destroy,
	bridge_chnl_open,
	bridge_chnl_close,
	bridge_chnl_add_io_req,
	bridge_chnl_get_ioc,
	bridge_chnl_cancel_io,
	bridge_chnl_flush_io,
	bridge_chnl_get_info,
	bridge_chnl_get_mgr_info,
	bridge_chnl_idle,
	bridge_chnl_register_notify,
	/* The following IO functions are provided by chnl_io.lib: */
	bridge_io_create,
	bridge_io_destroy,
	bridge_io_on_loaded,
	bridge_io_get_proc_load,
	/* The following msg_ctrl functions are provided by chnl_io.lib: */
	bridge_msg_create,
	bridge_msg_create_queue,
	bridge_msg_delete,
	bridge_msg_delete_queue,
	bridge_msg_get,
	bridge_msg_put,
	bridge_msg_register_notify,
	bridge_msg_set_queue_id,
};
/* Mailbox notifier block: io_mbox_msg() is invoked on mailbox events. */
static struct notifier_block dsp_mbox_notifier = {
	.notifier_call = io_mbox_msg,
};
/* Flush the whole DSP MMU TLB, waking a hibernating DSP first. */
static inline void flush_all(struct bridge_dev_context *dev_context)
{
	if (dev_context->brd_state == BRD_HIBERNATION ||
	    dev_context->brd_state == BRD_DSP_HIBERNATION)
		wake_dsp(dev_context, NULL);

	hw_mmu_tlb_flush_all(dev_context->dsp_mmu_base);
}
/*
 * Emergency-log the state of a page whose refcount was observed as zero
 * during mapping, then dump a kernel backtrace for diagnosis.
 */
static void bad_page_dump(u32 pa, struct page *pg)
{
	pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
	pr_emerg("Bad page state in process '%s'\n"
		 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		 "Backtrace:\n",
		 current->comm, pg, (int)(2 * sizeof(unsigned long)),
		 (unsigned long)pg->flags, pg->mapping,
		 page_mapcount(pg), page_count(pg));
	dump_stack();
}
/*
* ======== bridge_drv_entry ========
* purpose:
* Bridge Driver entry point.
*/
/*
 * Bridge driver entry point: hand back this driver's function interface
 * table when the requested driver file name is "UMA".
 */
void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
		      const char *driver_file_name)
{
	DBC_REQUIRE(driver_file_name != NULL);

	if (strcmp(driver_file_name, "UMA") != 0) {
		dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
		return;
	}

	*drv_intf = &drv_interface_fxns;
}
/*
* ======== bridge_brd_monitor ========
* purpose:
* This bridge_brd_monitor puts DSP into a Loadable state.
* i.e Application can load and start the device.
*
* Preconditions:
* Device in 'OFF' state.
*/
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 temp;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;
	temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
		OMAP_POWERSTATEST_MASK;
	if (!(temp & 0x02)) {
		/* IVA2 is not in ON state */
		/* Read and set PM_PWSTCTRL_IVA2 to ON */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
			OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
		/* Wait until the state has moved to ON
		 * (busy-wait; no timeout — assumes the transition always
		 * completes. NOTE(review): consider a bounded wait.) */
		while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
			OMAP_INTRANSITION_MASK)
			;
		/* Disable Automatic transition */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
			OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}
	/* Release RST2 and enable the IVA2 clock */
	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
			OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
	dsp_clk_enable(DSP_CLK_IVA2);
	/* set the device state to IDLE */
	dev_context->brd_state = BRD_IDLE;
	return 0;
}
/*
* ======== bridge_brd_read ========
* purpose:
* Reads buffers for DSP memory.
*/
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
			   u8 *host_buff, u32 dsp_addr,
			   u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 offset;
	u32 dsp_base_addr = dev_ctxt->dsp_base_addr;
	/* reads below the DSP start address are not permitted */
	if (dsp_addr < dev_context->dsp_start_add) {
		status = -EPERM;
		return status;
	}
	/* change here to account for the 3 bands of the DSP internal memory */
	if ((dsp_addr - dev_context->dsp_start_add) <
	    dev_context->internal_size) {
		offset = dsp_addr - dev_context->dsp_start_add;
	} else {
		/* address is outside internal memory: use the external
		 * memory read path */
		status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
					   ul_num_bytes, mem_type);
		return status;
	}
	/* copy the data from DSP memory, */
	memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
	return status;
}
/*
* ======== bridge_brd_set_state ========
* purpose:
* This routine updates the Board status.
*/
/* Record the new board state in the device context; always succeeds. */
static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
				u32 brd_state)
{
	dev_ctxt->brd_state = brd_state;

	return 0;
}
/*
 * ======== bridge_brd_start ========
 * purpose:
 *      Initializes DSP MMU and Starts DSP.
 *
 *      Sequence: locate the shared-memory sync word, assert DSP RST1,
 *      program the boot address/mode, pulse RST2 to latch them, load the
 *      locked MMU TLB entries, enable the DSP MMU, enable BIOS/load-monitor
 *      timers and the mailbox, program PER-domain wakeup dependencies, then
 *      release RST1 and wait for the DSP to clear the sync word.
 *
 * Preconditions:
 *  a) DSP domain is 'ACTIVE'.
 *  b) DSP_RST1 is asserted.
 *  b) DSP_RST2 is released.
 */
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
			    u32 dsp_addr)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 dw_sync_addr = 0;
	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
	/* Offset of shm_base_virt from tlb_base_virt */
	u32 ul_shm_offset_virt;
	s32 entry_ndx;
	s32 itmp_entry_ndx = 0;	/* DSP-MMU TLB entry base address */
	struct cfg_hostres *resources = NULL;
	u32 temp;
	u32 ul_dsp_clk_rate;
	u32 ul_dsp_clk_addr;
	u32 ul_bios_gp_timer;
	u32 clk_cmd;
	struct io_mgr *hio_mgr;
	u32 ul_load_monitor_timer;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	/* The device context contains all the mmu setup info from when the
	 * last dsp base image was loaded. The first entry is always
	 * SHMMEM base. */
	/* Get SHM_BEG - convert to byte address */
	/* NOTE(review): dev_get_symbol's return is discarded; if the symbol
	 * lookup fails, ul_shm_base_virt is used uninitialized — TODO confirm
	 * that SHMBASENAME is always present in a loaded base image. */
	(void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME,
			     &ul_shm_base_virt);
	ul_shm_base_virt *= DSPWORDSIZE;
	DBC_ASSERT(ul_shm_base_virt != 0);
	/* DSP Virtual address */
	ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va;
	DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
	ul_shm_offset_virt =
	    ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
	/* Kernel logical address */
	ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;

	DBC_ASSERT(ul_shm_base != 0);
	/* 2nd wd is used as sync field */
	dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
	/* Write a signature into the shm base + offset; this will
	 * get cleared when the DSP program starts. */
	if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
		pr_err("%s: Illegal SM base\n", __func__);
		status = -EPERM;
	} else
		__raw_writel(0xffffffff, dw_sync_addr);

	if (!status) {
		resources = dev_context->resources;
		if (!resources)
			status = -EPERM;

		/* Assert RST1 i.e only the RST only for DSP megacell */
		if (!status) {
			(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
					OMAP3430_RST1_IVA2_MASK,
					OMAP3430_IVA2_MOD,
					OMAP2_RM_RSTCTRL);
			/* Mask address with 1K for compatibility */
			__raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
					OMAP343X_CTRL_REGADDR(
					OMAP343X_CONTROL_IVA2_BOOTADDR));
			/*
			 * Set bootmode to self loop if dsp_debug flag is true
			 */
			__raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
					OMAP343X_CTRL_REGADDR(
					OMAP343X_CONTROL_IVA2_BOOTMOD));
		}
	}
	if (!status) {
		/* Reset and Unreset the RST2, so that BOOTADDR is copied to
		 * IVA2 SYSC register */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
			OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD,
			OMAP2_RM_RSTCTRL);
		udelay(100);
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		udelay(100);

		/* Disable the DSP MMU */
		hw_mmu_disable(resources->dmmu_base);
		/* Disable TWL */
		hw_mmu_twl_disable(resources->dmmu_base);

		/* Only make TLB entry if both addresses are non-zero */
		for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
		     entry_ndx++) {
			struct bridge_ioctl_extproc *e =
					&dev_context->atlb_entry[entry_ndx];
			struct hw_mmu_map_attrs_t map_attrs = {
				.endianism = e->endianism,
				.element_size = e->elem_size,
				.mixed_size = e->mixed_mode,
			};

			if (!e->gpp_pa || !e->dsp_va)
				continue;

			dev_dbg(bridge,
				"MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
				itmp_entry_ndx,
				e->gpp_pa,
				e->dsp_va,
				e->size);

			/* itmp_entry_ndx counts only the entries actually
			 * programmed, so TLB slots stay densely packed */
			hw_mmu_tlb_add(dev_context->dsp_mmu_base,
				       e->gpp_pa,
				       e->dsp_va,
				       e->size,
				       itmp_entry_ndx,
				       &map_attrs, 1, 1);

			itmp_entry_ndx++;
		}
	}

	/* Lock the above TLB entries and get the BIOS and load monitor timer
	 * information */
	if (!status) {
		hw_mmu_num_locked_set(resources->dmmu_base, itmp_entry_ndx);
		hw_mmu_victim_num_set(resources->dmmu_base, itmp_entry_ndx);
		hw_mmu_ttb_set(resources->dmmu_base,
			       dev_context->pt_attrs->l1_base_pa);
		hw_mmu_twl_enable(resources->dmmu_base);
		/* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */

		temp = __raw_readl((resources->dmmu_base) + 0x10);
		temp = (temp & 0xFFFFFFEF) | 0x11;
		__raw_writel(temp, (resources->dmmu_base) + 0x10);

		/* Let the DSP MMU run */
		hw_mmu_enable(resources->dmmu_base);

		/* Enable the BIOS clock */
		/* NOTE(review): return values discarded; ul_bios_gp_timer /
		 * ul_load_monitor_timer may be uninitialized if a lookup
		 * fails, yet are compared against 0xFFFF below — the 0xFFFF
		 * sentinel is presumably what dev_get_symbol leaves on
		 * failure; verify against its implementation. */
		(void)dev_get_symbol(dev_context->dev_obj,
				     BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
		(void)dev_get_symbol(dev_context->dev_obj,
				     BRIDGEINIT_LOADMON_GPTIMER,
				     &ul_load_monitor_timer);
	}

	if (!status) {
		if (ul_load_monitor_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
			    ul_load_monitor_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge, "Not able to get the symbol for Load "
				"Monitor Timer\n");
		}
	}

	if (!status) {
		if (ul_bios_gp_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
			    ul_bios_gp_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge,
				"Not able to get the symbol for BIOS Timer\n");
		}
	}

	if (!status) {
		/* Set the DSP clock rate */
		(void)dev_get_symbol(dev_context->dev_obj,
				     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
		/*Set Autoidle Mode for IVA2 PLL */
		(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);

		if ((unsigned int *)ul_dsp_clk_addr != NULL) {
			/* Get the clock rate */
			ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
			dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x \n",
				__func__, ul_dsp_clk_rate);
			/* Publish the rate into DSP memory at the symbol's
			 * address so the DSP-side BIOS can read it */
			(void)bridge_brd_write(dev_context,
					       (u8 *) &ul_dsp_clk_rate,
					       ul_dsp_clk_addr, sizeof(u32), 0);
		}
		/*
		 * Enable Mailbox events and also drain any pending
		 * stale messages.
		 */
		dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier);
		if (IS_ERR(dev_context->mbox)) {
			dev_context->mbox = NULL;
			pr_err("%s: Failed to get dsp mailbox handle\n",
			       __func__);
			status = -EPERM;
		}
	}
	if (!status) {
		/* Route PER-domain wakeup events to IVA2 and drop them for
		 * the MPU, then allow automatic IVA2 clock transitions */
		/*PM_IVA2GRPSEL_PER = 0xC0;*/
		temp = readl(resources->per_pm_base + 0xA8);
		temp = (temp & 0xFFFFFF30) | 0xC0;
		writel(temp, resources->per_pm_base + 0xA8);

		/*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
		temp = readl(resources->per_pm_base + 0xA4);
		temp = (temp & 0xFFFFFF3F);
		writel(temp, resources->per_pm_base + 0xA4);
		/*CM_SLEEPDEP_PER |= 0x04; */
		temp = readl(resources->per_base + 0x44);
		temp = (temp & 0xFFFFFFFB) | 0x04;
		writel(temp, resources->per_base + 0x44);

		/*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic
		 * transitions */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
				OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

		/* Let DSP go */
		dev_dbg(bridge, "%s Unreset\n", __func__);
		/* Enable DSP MMU Interrupts */
		hw_mmu_event_enable(resources->dmmu_base,
				    HW_MMU_ALL_INTERRUPTS);
		/* release the RST1, DSP starts executing now .. */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

		dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
		dev_dbg(bridge, "DSP c_int00 Address =  0x%x\n", dsp_addr);
		/* In debug mode spin until the DSP clears the signature,
		 * so a debugger can be attached before timeout applies */
		if (dsp_debug)
			while (__raw_readw(dw_sync_addr))
				;

		/* Wait for DSP to clear word in shared memory */
		/* Read the Location */
		if (!wait_for_start(dev_context, dw_sync_addr))
			status = -ETIMEDOUT;

		/* Start wdt */
		dsp_wdt_sm_set((void *)ul_shm_base);
		dsp_wdt_enable(true);

		status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
		if (hio_mgr) {
			io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
			/* Write the synchronization bit to indicate the
			 * completion of OPP table update to DSP
			 */
			__raw_writel(0XCAFECAFE, dw_sync_addr);

			/* update board state */
			dev_context->brd_state = BRD_RUNNING;
			/* (void)chnlsm_enable_interrupt(dev_context); */
		} else {
			dev_context->brd_state = BRD_UNKNOWN;
		}
	}
	return status;
}
/*
 * ======== bridge_brd_stop ========
 * purpose:
 *      Puts DSP in self loop.
 *
 *      Drives the IVA2 to standby before gating clocks (per TRM guidance),
 *      clears the MMU page tables, releases the mailbox and asserts all
 *      IVA2 resets so the next image load starts from a clean state.
 *
 * Preconditions :
 * a) None
 */
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct pg_table_attrs *pt_attrs;
	u32 dsp_pwr_state;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	/* Already stopped: nothing to do */
	if (dev_context->brd_state == BRD_STOPPED)
		return status;

	/* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode,
	 * before turning off the clocks.. This is to ensure that there are no
	 * pending L3 or other transactons from IVA2 */
	dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
					OMAP2_PM_PWSTST) &
					OMAP_POWERSTATEST_MASK;
	if (dsp_pwr_state != PWRDM_POWER_OFF) {
		/* Release RST2 and ask the DSP (via mailbox) to go idle
		 * before forcing the power domain off */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
		mdelay(10);

		/* IVA2 is not in OFF state */
		/* Set PM_PWSTCTRL_IVA2 to OFF */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition for Sleep */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}
	udelay(10);
	/* Release the Ext Base virtual Address as the next DSP Program
	 * may have a different load address */
	if (dev_context->dsp_ext_base_addr)
		dev_context->dsp_ext_base_addr = 0;

	dev_context->brd_state = BRD_STOPPED;	/* update board state */

	dsp_wdt_enable(false);

	/* This is a good place to clear the MMU page tables as well */
	if (dev_context->pt_attrs) {
		pt_attrs = dev_context->pt_attrs;
		memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
		memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
		memset((u8 *) pt_attrs->pg_info, 0x00,
		       (pt_attrs->l2_num_pages * sizeof(struct page_info)));
	}
	/* Disable the mailbox interrupts */
	if (dev_context->mbox) {
		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
		omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier);
		dev_context->mbox = NULL;
	}
	/* Reset IVA2 clocks*/
	(*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK |
			OMAP3430_RST2_IVA2_MASK | OMAP3430_RST3_IVA2_MASK,
			OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	dsp_clock_disable_all(dev_context->dsp_per_clks);
	dsp_clk_disable(DSP_CLK_IVA2);

	return status;
}
/*
 * ======== bridge_brd_status ========
 * Returns the board status.
 */
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
			     int *board_state)
{
	/* Report the cached state; the hardware is not queried */
	*board_state = dev_ctxt->brd_state;
	return 0;
}
/*
 * ======== bridge_brd_write ========
 * Copies the buffers to DSP internal or external memory.
 */
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
			    u8 *host_buff, u32 dsp_addr,
			    u32 ul_num_bytes, u32 mem_type)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 offset;

	/* Addresses below the DSP memory window are not writable */
	if (dsp_addr < dev_context->dsp_start_add)
		return -EPERM;

	/* Dispatch on whether the target lies in internal memory */
	offset = dsp_addr - dev_context->dsp_start_add;
	if (offset < dev_context->internal_size)
		return write_dsp_data(dev_ctxt, host_buff, dsp_addr,
				      ul_num_bytes, mem_type);

	return write_ext_dsp_data(dev_context, host_buff, dsp_addr,
				  ul_num_bytes, mem_type, false);
}
/*
 * ======== bridge_dev_create ========
 * Creates a driver object. Puts DSP in self loop.
 *
 * Allocates the bridge device context, maps the DSP base address,
 * allocates and aligns the L1/L2 MMU page tables (retrying with a
 * double-sized allocation if the first physical block is misaligned),
 * then stops the board. On any failure all partial allocations are
 * released before returning.
 */
static int bridge_dev_create(struct bridge_dev_context
			     **dev_cntxt,
			     struct dev_object *hdev_obj,
			     struct cfg_hostres *config_param)
{
	int status = 0;
	struct bridge_dev_context *dev_context = NULL;
	s32 entry_ndx;
	struct cfg_hostres *resources = config_param;
	struct pg_table_attrs *pt_attrs;
	u32 pg_tbl_pa;
	u32 pg_tbl_va;
	u32 align_size;
	/* NOTE(review): drv_datap is dereferenced below without a NULL
	 * check (->tc_wordswapon); presumably probe guarantees it is set
	 * before this runs — verify against the platform driver. */
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* Allocate and initialize a data structure to contain the bridge
	 * driver state, which becomes the context for later calls into
	 * this driver */
	dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
	if (!dev_context) {
		status = -ENOMEM;
		goto func_end;
	}

	dev_context->dsp_start_add = (u32) OMAP_GEM_BASE;
	dev_context->self_loop = (u32) NULL;
	dev_context->dsp_per_clks = 0;
	dev_context->internal_size = OMAP_DSP_SIZE;
	/* Clear dev context MMU table entries.
	 * These get set on bridge_io_on_loaded() call after program loaded. */
	for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
		dev_context->atlb_entry[entry_ndx].gpp_pa =
		    dev_context->atlb_entry[entry_ndx].dsp_va = 0;
	}
	/* mem_base[3] is the kernel-linear view of the DSP memory window */
	dev_context->dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
							      (config_param->
							       mem_base
							       [3]),
							      config_param->
							      mem_length
							      [3]);
	if (!dev_context->dsp_base_addr)
		status = -EPERM;

	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
	if (pt_attrs != NULL) {
		pt_attrs->l1_size = SZ_16K;	/* 4096 entries of 32 bits */
		align_size = pt_attrs->l1_size;
		/* Align sizes are expected to be power of 2 */
		/* we like to get aligned on L1 table size */
		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
						     align_size, &pg_tbl_pa);

		/* Check if the PA is aligned for us */
		if ((pg_tbl_pa) & (align_size - 1)) {
			/* PA not aligned to page table size ,
			 * try with more allocation and align */
			mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
					  pt_attrs->l1_size);
			/* we like to get aligned on L1 table size */
			pg_tbl_va =
			    (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
						     align_size, &pg_tbl_pa);
			/* We should be able to get aligned table now */
			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
			/* Align the PA to the next 'align'  boundary */
			pt_attrs->l1_base_pa =
			    ((pg_tbl_pa) +
			     (align_size - 1)) & (~(align_size - 1));
			pt_attrs->l1_base_va =
			    pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
		} else {
			/* We got aligned PA, cool */
			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
			pt_attrs->l1_base_pa = pg_tbl_pa;
			pt_attrs->l1_base_va = pg_tbl_va;
		}
		if (pt_attrs->l1_base_va)
			memset((u8 *) pt_attrs->l1_base_va, 0x00,
			       pt_attrs->l1_size);

		/* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM +
		 * L4 pages */
		pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
		pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
		    pt_attrs->l2_num_pages;
		align_size = 4;	/* Make it u32 aligned */
		/* we like to get aligned on L1 table size */
		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
						     align_size, &pg_tbl_pa);
		pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
		pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
		pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
		pt_attrs->l2_base_pa = pg_tbl_pa;
		pt_attrs->l2_base_va = pg_tbl_va;

		if (pt_attrs->l2_base_va)
			memset((u8 *) pt_attrs->l2_base_va, 0x00,
			       pt_attrs->l2_size);

		pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
					sizeof(struct page_info), GFP_KERNEL);
		dev_dbg(bridge,
			"L1 pa %x, va %x, size %x\n L2 pa %x, va "
			"%x, size %x\n", pt_attrs->l1_base_pa,
			pt_attrs->l1_base_va, pt_attrs->l1_size,
			pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
			pt_attrs->l2_size);
		dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
			pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
	}
	/* Only accept the page tables when every piece allocated */
	if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
	    (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
		dev_context->pt_attrs = pt_attrs;
	else
		status = -ENOMEM;

	if (!status) {
		spin_lock_init(&pt_attrs->pg_lock);
		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
		/* Set the Clock Divisor for the DSP module */
		udelay(5);
		/* MMU address is obtained from the host
		 * resources struct */
		dev_context->dsp_mmu_base = resources->dmmu_base;
	}
	if (!status) {
		dev_context->dev_obj = hdev_obj;
		/* Store current board state. */
		dev_context->brd_state = BRD_UNKNOWN;
		dev_context->resources = resources;
		dsp_clk_enable(DSP_CLK_IVA2);
		bridge_brd_stop(dev_context);
		/* Return ptr to our device state to the DSP API for storage */
		*dev_cntxt = dev_context;
	} else {
		/* Failure: unwind the page-table allocations and context */
		if (pt_attrs != NULL) {
			kfree(pt_attrs->pg_info);

			if (pt_attrs->l2_tbl_alloc_va) {
				mem_free_phys_mem((void *)
						  pt_attrs->l2_tbl_alloc_va,
						  pt_attrs->l2_tbl_alloc_pa,
						  pt_attrs->l2_tbl_alloc_sz);
			}
			if (pt_attrs->l1_tbl_alloc_va) {
				mem_free_phys_mem((void *)
						  pt_attrs->l1_tbl_alloc_va,
						  pt_attrs->l1_tbl_alloc_pa,
						  pt_attrs->l1_tbl_alloc_sz);
			}
		}
		kfree(pt_attrs);
		kfree(dev_context);
	}
func_end:
	return status;
}
/*
 * ======== bridge_dev_ctrl ========
 * Receives device specific commands.
 *
 * Dispatches a BRDIOCTL_* command to the matching power/MMU handler.
 * pargs is command-specific: an array of bridge_ioctl_extproc entries
 * for SETMMUCONFIG, otherwise an opaque argument forwarded to the
 * handler. Returns 0 on success, -EPERM for unknown commands, or the
 * handler's error code.
 */
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
			   u32 dw_cmd, void *pargs)
{
	int status = 0;
	struct bridge_ioctl_extproc *pa_ext_proc =
					(struct bridge_ioctl_extproc *)pargs;
	s32 ndx;

	switch (dw_cmd) {
	case BRDIOCTL_CHNLREAD:
		break;
	case BRDIOCTL_CHNLWRITE:
		break;
	case BRDIOCTL_SETMMUCONFIG:
		/* store away dsp-mmu setup values for later use */
		for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
			dev_context->atlb_entry[ndx] = *pa_ext_proc;
		break;
	case BRDIOCTL_DEEPSLEEP:
	case BRDIOCTL_EMERGENCYSLEEP:
		/* Currently only DSP Idle is supported Need to update for
		 * later releases */
		status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
		break;
	case BRDIOCTL_WAKEUP:
		status = wake_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_CLK_CTRL:
		/* Looking For Baseport Fix for Clocks */
		/* (the redundant "status = 0" dead store that preceded this
		 * call has been removed; the call's result is what counts) */
		status = dsp_peripheral_clk_ctrl(dev_context, pargs);
		break;
	case BRDIOCTL_PWR_HIBERNATE:
		status = handle_hibernation_from_dsp(dev_context);
		break;
	case BRDIOCTL_PRESCALE_NOTIFY:
		status = pre_scale_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_POSTSCALE_NOTIFY:
		status = post_scale_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_CONSTRAINT_REQUEST:
		status = handle_constraints_set(dev_context, pargs);
		break;
	default:
		status = -EPERM;
		break;
	}
	return status;
}
/*
 * ======== bridge_dev_destroy ========
 * Destroys the driver object.
 *
 * Stops the board, frees the MMU page tables, releases/unmaps every
 * host resource region, then frees the driver data and the device
 * context itself. Mirrors the allocations done in bridge_dev_create.
 */
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
{
	struct pg_table_attrs *pt_attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
	    dev_ctxt;
	struct cfg_hostres *host_res;
	u32 shm_size;
	/* NOTE(review): drv_datap is dereferenced below (->shm_size,
	 * ->base_img) without a NULL check — presumably set at probe time;
	 * verify against the platform driver. */
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* It should never happen */
	if (!dev_ctxt)
		return -EFAULT;

	/* first put the device to stop state */
	bridge_brd_stop(dev_context);
	if (dev_context->pt_attrs) {
		pt_attrs = dev_context->pt_attrs;
		kfree(pt_attrs->pg_info);

		if (pt_attrs->l2_tbl_alloc_va) {
			mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
					  pt_attrs->l2_tbl_alloc_pa,
					  pt_attrs->l2_tbl_alloc_sz);
		}
		if (pt_attrs->l1_tbl_alloc_va) {
			mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
					  pt_attrs->l1_tbl_alloc_pa,
					  pt_attrs->l1_tbl_alloc_sz);
		}
		kfree(pt_attrs);

	}

	if (dev_context->resources) {
		host_res = dev_context->resources;
		shm_size = drv_datap->shm_size;
		/* mem_base[1]/mem_phys[1] hold the shared-memory block,
		 * which is only valid when shm_size is at least 64K */
		if (shm_size >= 0x10000) {
			if ((host_res->mem_base[1]) &&
			    (host_res->mem_phys[1])) {
				mem_free_phys_mem((void *)
						  host_res->mem_base
						  [1],
						  host_res->mem_phys
						  [1], shm_size);
			}
		} else {
			dev_dbg(bridge, "%s: Error getting shm size "
				"from registry: %x. Not calling "
				"mem_free_phys_mem\n", __func__,
				status);
		}
		host_res->mem_base[1] = 0;
		host_res->mem_phys[1] = 0;

		/* Remaining regions were ioremapped, so unmap them */
		if (host_res->mem_base[0])
			iounmap((void *)host_res->mem_base[0]);
		if (host_res->mem_base[2])
			iounmap((void *)host_res->mem_base[2]);
		if (host_res->mem_base[3])
			iounmap((void *)host_res->mem_base[3]);
		if (host_res->mem_base[4])
			iounmap((void *)host_res->mem_base[4]);
		if (host_res->dmmu_base)
			iounmap(host_res->dmmu_base);
		if (host_res->per_base)
			iounmap(host_res->per_base);
		if (host_res->per_pm_base)
			iounmap((void *)host_res->per_pm_base);
		if (host_res->core_pm_base)
			iounmap((void *)host_res->core_pm_base);

		host_res->mem_base[0] = (u32) NULL;
		host_res->mem_base[2] = (u32) NULL;
		host_res->mem_base[3] = (u32) NULL;
		host_res->mem_base[4] = (u32) NULL;
		host_res->dmmu_base = NULL;

		kfree(host_res);
	}

	/* Free the driver's device context: */
	kfree(drv_datap->base_img);
	kfree(drv_datap);
	dev_set_drvdata(bridge, NULL);
	kfree((void *)dev_ctxt);
	return status;
}
/*
 * DSP-to-DSP copy: bounce the data through a stack buffer, one
 * BUFFERSIZE chunk at a time. Source is always read via the external
 * memory path; destination is routed to the internal or external
 * write path depending on its address.
 */
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
			       u32 dsp_dest_addr, u32 dsp_src_addr,
			       u32 ul_num_bytes, u32 mem_type)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	u8 staging[BUFFERSIZE];
	u32 src = dsp_src_addr;
	u32 dst = dsp_dest_addr;
	u32 remaining = ul_num_bytes;
	int status = 0;

	while (remaining > 0 && !status) {
		u32 chunk = remaining > BUFFERSIZE ? BUFFERSIZE : remaining;

		/* Read from External memory */
		status = read_ext_dsp_data(dev_ctxt, staging, src,
					   chunk, mem_type);
		if (!status) {
			if (dst < (dev_context->dsp_start_add +
				   dev_context->internal_size))
				/* Write to Internal memory */
				status = write_dsp_data(dev_ctxt, staging,
							dst, chunk, mem_type);
			else
				/* Write to External memory */
				status = write_ext_dsp_data(dev_ctxt, staging,
							    dst, chunk,
							    mem_type, false);
		}
		remaining -= chunk;
		src += chunk;
		dst += chunk;
	}
	return status;
}
/* Mem Write does not halt the DSP to write unlike bridge_brd_write */
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
				u8 *host_buff, u32 dsp_addr,
				u32 ul_num_bytes, u32 mem_type)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 remaining = ul_num_bytes;
	int status = 0;

	/* Write at most BUFFERSIZE bytes per pass, routing each chunk to
	 * the internal or external path based on the current address */
	while (remaining > 0 && !status) {
		u32 chunk = remaining > BUFFERSIZE ? BUFFERSIZE : remaining;

		if (dsp_addr < (dev_context->dsp_start_add +
				dev_context->internal_size))
			status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
						chunk, mem_type);
		else
			status = write_ext_dsp_data(dev_ctxt, host_buff,
						    dsp_addr, chunk,
						    mem_type, true);
		remaining -= chunk;
		dsp_addr += chunk;
		host_buff += chunk;
	}
	return status;
}
/*
 * ======== bridge_brd_mem_map ========
 * This function maps MPU buffer to the DSP address space. It performs
 * linear to physical address translation if required. It translates each
 * page since linear addresses can be physically non-contiguous
 * All address & size arguments are assumed to be page aligned (in proc.c)
 *
 * Three paths: vmalloc buffers go through mem_map_vmalloc(); buffers
 * already given as physical addresses go through pte_update(); normal
 * user buffers are resolved page by page (VM_IO regions via a manual
 * page-table walk, others via get_user_pages). On partial failure,
 * everything mapped so far is rolled back.
 *
 * TODO: Disable MMU while updating the page tables (but that'll stall DSP)
 */
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
			      u32 ul_mpu_addr, u32 virt_addr,
			      u32 ul_num_bytes, u32 ul_map_attr,
			      struct page **mapped_pages)
{
	u32 attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct hw_mmu_map_attrs_t hw_attrs;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	u32 write = 0;
	u32 num_usr_pgs = 0;
	struct page *mapped_page, *pg;
	s32 pg_num;
	u32 va = virt_addr;
	struct task_struct *curr_task = current;
	u32 pg_i = 0;
	u32 mpu_addr, pa;

	dev_dbg(bridge,
		"%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
		__func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
		ul_map_attr);
	if (ul_num_bytes == 0)
		return -EINVAL;

	if (ul_map_attr & DSP_MAP_DIR_MASK) {
		attrs = ul_map_attr;
	} else {
		/* Assign default attributes */
		attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
	}
	/* Take mapping properties */
	if (attrs & DSP_MAPBIGENDIAN)
		hw_attrs.endianism = HW_BIG_ENDIAN;
	else
		hw_attrs.endianism = HW_LITTLE_ENDIAN;

	hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
	    ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
	/* Ignore element_size if mixed_size is enabled */
	if (hw_attrs.mixed_size == 0) {
		if (attrs & DSP_MAPELEMSIZE8) {
			/* Size is 8 bit */
			hw_attrs.element_size = HW_ELEM_SIZE8BIT;
		} else if (attrs & DSP_MAPELEMSIZE16) {
			/* Size is 16 bit */
			hw_attrs.element_size = HW_ELEM_SIZE16BIT;
		} else if (attrs & DSP_MAPELEMSIZE32) {
			/* Size is 32 bit */
			hw_attrs.element_size = HW_ELEM_SIZE32BIT;
		} else if (attrs & DSP_MAPELEMSIZE64) {
			/* Size is 64 bit */
			hw_attrs.element_size = HW_ELEM_SIZE64BIT;
		} else {
			/*
			 * Mixedsize isn't enabled, so size can't be
			 * zero here
			 */
			return -EINVAL;
		}
	}
	if (attrs & DSP_MAPDONOTLOCK)
		hw_attrs.donotlockmpupage = 1;
	else
		hw_attrs.donotlockmpupage = 0;

	if (attrs & DSP_MAPVMALLOCADDR) {
		return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
				       ul_num_bytes, &hw_attrs);
	}
	/*
	 * Do OS-specific user-va to pa translation.
	 * Combine physically contiguous regions to reduce TLBs.
	 * Pass the translated pa to pte_update.
	 */
	if ((attrs & DSP_MAPPHYSICALADDR)) {
		status = pte_update(dev_context, ul_mpu_addr, virt_addr,
				    ul_num_bytes, &hw_attrs);
		goto func_cont;
	}

	/*
	 * Important Note: ul_mpu_addr is mapped from user application process
	 * to current process - it must lie completely within the current
	 * virtual memory address space in order to be of use to us here!
	 */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ul_mpu_addr);
	if (vma)
		dev_dbg(bridge,
			"VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);

	/*
	 * It is observed that under some circumstances, the user buffer is
	 * spread across several VMAs. So loop through and check if the entire
	 * user buffer is covered
	 */
	while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
		/* jump to the next VMA region */
		vma = find_vma(mm, vma->vm_end + 1);
		dev_dbg(bridge,
			"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);
	}
	if (!vma) {
		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
		       __func__, ul_mpu_addr, ul_num_bytes);
		status = -EINVAL;
		up_read(&mm->mmap_sem);
		goto func_cont;
	}

	if (vma->vm_flags & VM_IO) {
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		mpu_addr = ul_mpu_addr;

		/* Get the physical addresses for user buffer */
		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pa = user_va2_pa(mm, mpu_addr);
			if (!pa) {
				status = -EPERM;
				pr_err("DSPBRIDGE: VM_IO mapping physical"
				       "address is invalid\n");
				break;
			}
			/* Take a reference only for RAM-backed pages;
			 * device memory has no struct page to pin */
			if (pfn_valid(__phys_to_pfn(pa))) {
				pg = PHYS_TO_PAGE(pa);
				get_page(pg);
				if (page_count(pg) < 1) {
					pr_err("Bad page in VM_IO buffer\n");
					bad_page_dump(pa, pg);
				}
			}
			status = pte_set(dev_context->pt_attrs, pa,
					 va, HW_PAGE_SIZE4KB, &hw_attrs);
			if (status)
				break;

			va += HW_PAGE_SIZE4KB;
			mpu_addr += HW_PAGE_SIZE4KB;
			pa += HW_PAGE_SIZE4KB;
		}
	} else {
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
			write = 1;

		/* Pin each user page and enter it into the DSP MMU */
		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
						write, 1, &mapped_page, NULL);
			if (pg_num > 0) {
				if (page_count(mapped_page) < 1) {
					pr_err("Bad page count after doing"
					       "get_user_pages on"
					       "user buffer\n");
					bad_page_dump(page_to_phys(mapped_page),
						      mapped_page);
				}
				status = pte_set(dev_context->pt_attrs,
						 page_to_phys(mapped_page), va,
						 HW_PAGE_SIZE4KB, &hw_attrs);
				if (status)
					break;

				if (mapped_pages)
					mapped_pages[pg_i] = mapped_page;

				va += HW_PAGE_SIZE4KB;
				ul_mpu_addr += HW_PAGE_SIZE4KB;
			} else {
				pr_err("DSPBRIDGE: get_user_pages FAILED,"
				       "MPU addr = 0x%x,"
				       "vma->vm_flags = 0x%lx,"
				       "get_user_pages Err"
				       "Value = %d, Buffer"
				       "size=0x%x\n", ul_mpu_addr,
				       vma->vm_flags, pg_num, ul_num_bytes);
				status = -EPERM;
				break;
			}
		}
	}
	up_read(&mm->mmap_sem);
func_cont:
	if (status) {
		/*
		 * Roll out the mapped pages incase it failed in middle of
		 * mapping
		 */
		if (pg_i) {
			bridge_brd_mem_un_map(dev_context, virt_addr,
					      (pg_i * PG_SIZE4K));
		}
		status = -EPERM;
	}
	/*
	 * In any case, flush the TLB
	 * This is called from here instead from pte_update to avoid unnecessary
	 * repetition while mapping non-contiguous physical regions of a virtual
	 * region
	 */
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
	return status;
}
/*
 * ======== bridge_brd_mem_un_map ========
 * Invalidate the PTEs for the DSP VA block to be unmapped.
 *
 * PTEs of a mapped memory block are contiguous in any page table
 * So, instead of looking up the PTE address for every 4K block,
 * we clear consecutive PTEs until we unmap all the bytes
 *
 * For each L1 entry: coarse (L2-backed) entries clear runs of L2 PTEs
 * (releasing each backing page's pin), then clear the L1 entry once its
 * L2 table is empty; section/supersection entries are cleared directly
 * via the skip_coarse_page path. The TLB is flushed once at exit.
 */
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
				 u32 virt_addr, u32 ul_num_bytes)
{
	u32 l1_base_va;
	u32 l2_base_va;
	u32 l2_base_pa;
	u32 l2_page_num;
	u32 pte_val;
	u32 pte_size;
	u32 pte_count;
	u32 pte_addr_l1;
	u32 pte_addr_l2 = 0;
	u32 rem_bytes;
	u32 rem_bytes_l2;
	u32 va_curr;
	struct page *pg = NULL;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct pg_table_attrs *pt = dev_context->pt_attrs;
	u32 temp;
	u32 paddr;
	u32 numof4k_pages = 0;

	va_curr = virt_addr;
	rem_bytes = ul_num_bytes;
	rem_bytes_l2 = 0;
	l1_base_va = pt->l1_base_va;
	pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
	dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
		"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
		ul_num_bytes, l1_base_va, pte_addr_l1);

	while (rem_bytes && !status) {
		u32 va_curr_orig = va_curr;
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
		pte_val = *(u32 *) pte_addr_l1;
		pte_size = hw_mmu_pte_size_l1(pte_val);

		if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
			goto skip_coarse_page;

		/*
		 * Get the L2 PA from the L1 PTE, and find
		 * corresponding L2 VA
		 */
		l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
		l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
		l2_page_num =
		    (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
		/*
		 * Find the L2 PTE address from which we will start
		 * clearing, the number of PTEs to be cleared on this
		 * page, and the size of VA space that needs to be
		 * cleared on this L2 page
		 */
		pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
		pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
		pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
		if (rem_bytes < (pte_count * PG_SIZE4K))
			pte_count = rem_bytes / PG_SIZE4K;
		rem_bytes_l2 = pte_count * PG_SIZE4K;

		/*
		 * Unmap the VA space on this L2 PT. A quicker way
		 * would be to clear pte_count entries starting from
		 * pte_addr_l2. However, below code checks that we don't
		 * clear invalid entries or less than 64KB for a 64KB
		 * entry. Similar checking is done for L1 PTEs too
		 * below
		 */
		while (rem_bytes_l2 && !status) {
			pte_val = *(u32 *) pte_addr_l2;
			pte_size = hw_mmu_pte_size_l2(pte_val);
			/* va_curr aligned to pte_size? */
			if (pte_size == 0 || rem_bytes_l2 < pte_size ||
			    va_curr & (pte_size - 1)) {
				status = -EPERM;
				break;
			}

			/* Collect Physical addresses from VA */
			paddr = (pte_val & ~(pte_size - 1));
			if (pte_size == HW_PAGE_SIZE64KB)
				numof4k_pages = 16;
			else
				numof4k_pages = 1;
			temp = 0;
			/* Release the pin taken at map time on every
			 * RAM-backed 4K page covered by this PTE */
			while (temp++ < numof4k_pages) {
				if (!pfn_valid(__phys_to_pfn(paddr))) {
					paddr += HW_PAGE_SIZE4KB;
					continue;
				}
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
				paddr += HW_PAGE_SIZE4KB;
			}
			if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
				status = -EPERM;
				goto EXIT_LOOP;
			}

			status = 0;
			rem_bytes_l2 -= pte_size;
			va_curr += pte_size;
			pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
		}
		/* pg_lock protects the shared per-L2-page entry counters */
		spin_lock(&pt->pg_lock);
		if (rem_bytes_l2 == 0) {
			pt->pg_info[l2_page_num].num_entries -= pte_count;
			if (pt->pg_info[l2_page_num].num_entries == 0) {
				/*
				 * Clear the L1 PTE pointing to the L2 PT
				 */
				if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
						     HW_MMU_COARSE_PAGE_SIZE))
					status = 0;
				else {
					status = -EPERM;
					spin_unlock(&pt->pg_lock);
					goto EXIT_LOOP;
				}
			}
			rem_bytes -= pte_count * PG_SIZE4K;
		} else
			status = -EPERM;

		spin_unlock(&pt->pg_lock);
		continue;
skip_coarse_page:
		/* va_curr aligned to pte_size? */
		/* pte_size = 1 MB or 16 MB */
		if (pte_size == 0 || rem_bytes < pte_size ||
		    va_curr & (pte_size - 1)) {
			status = -EPERM;
			break;
		}

		if (pte_size == HW_PAGE_SIZE1MB)
			numof4k_pages = 256;
		else
			numof4k_pages = 4096;
		temp = 0;
		/* Collect Physical addresses from VA */
		paddr = (pte_val & ~(pte_size - 1));
		while (temp++ < numof4k_pages) {
			if (pfn_valid(__phys_to_pfn(paddr))) {
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
			}
			paddr += HW_PAGE_SIZE4KB;
		}
		if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
			status = 0;
			rem_bytes -= pte_size;
			va_curr += pte_size;
		} else {
			status = -EPERM;
			goto EXIT_LOOP;
		}
	}
	/*
	 * It is better to flush the TLB here, so that any stale old entries
	 * get flushed
	 */
EXIT_LOOP:
	flush_all(dev_context);
	dev_dbg(bridge,
		"%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
		" rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
		pte_addr_l2, rem_bytes, rem_bytes_l2, status);
	return status;
}
/*
 * ======== user_va2_pa ========
 * Purpose:
 *      This function walks through the page tables to convert a userland
 *      virtual address to physical address
 *
 * Returns the page-aligned physical address, or 0 if any page-table
 * level is absent/bad or the PTE is not present.
 *
 * Fix: the original called pte_offset_map() without a matching
 * pte_unmap(), leaking the temporary atomic kmap on CONFIG_HIGHPTE
 * configurations. The mapping is now always released before returning.
 */
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;
	u32 pa = 0;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_map(pmd, address);
	if (ptep) {
		pte = *ptep;
		if (pte_present(pte))
			pa = pte & PAGE_MASK;
		/* pte_offset_map() takes a kmap with CONFIG_HIGHPTE;
		 * it must be dropped before we return */
		pte_unmap(ptep);
	}
	return pa;
}
/*
 * ======== pte_update ========
 * This function calculates the optimum page-aligned addresses and sizes
 * Caller must pass page-aligned values
 */
static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
		      u32 va, u32 size,
		      struct hw_mmu_map_attrs_t *map_attrs)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
	};
	u32 cur_pa = pa;
	u32 cur_va = va;
	u32 bytes_left = size;
	int status = 0;

	/* Greedily map with the largest page size that both the PA and VA
	 * alignment and the remaining length allow, until done or error */
	while (bytes_left && !status) {
		u32 align_mask = cur_pa | cur_va;
		u32 idx;

		for (idx = 0; idx < 4; idx++) {
			u32 sz = page_size[idx];

			if (bytes_left < sz || (align_mask & (sz - 1)))
				continue;

			status = pte_set(dev_context->pt_attrs, cur_pa,
					 cur_va, sz, map_attrs);
			cur_pa += sz;
			cur_va += sz;
			bytes_left -= sz;
			/* Don't try smaller sizes: the addresses are now
			 * aligned to at least this page size */
			break;
		}
	}
	return status;
}
/*
* ======== pte_set ========
* This function calculates PTE address (MPU virtual) to be updated
* It also manages the L2 page tables
*/
/*
 * ======== pte_set ========
 * Write one PTE mapping [va, va+size) -> pa.  16MB/1MB entries go
 * straight into the L1 table; 4KB/64KB entries need an L2 (coarse)
 * table, which is located here -- or allocated from the pool and hooked
 * into the L1 -- under pt->pg_lock.
 */
static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
		   u32 size, struct hw_mmu_map_attrs_t *attrs)
{
	u32 i;
	u32 pte_val;
	u32 pte_addr_l1;
	u32 pte_size;
	/* Base address of the PT that will be updated */
	u32 pg_tbl_va;
	u32 l1_base_va;
	/* Compiler warns that the next three variables might be used
	 * uninitialized in this function. Doesn't seem so. Working around,
	 * anyways. */
	u32 l2_base_va = 0;
	u32 l2_base_pa = 0;
	u32 l2_page_num = 0;
	int status = 0;
	l1_base_va = pt->l1_base_va;
	pg_tbl_va = l1_base_va;
	if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
		/* NOTE(review): '<=' also accepts an address exactly one
		 * entry past the table end -- looks like it should be '<';
		 * confirm against hw_mmu_pte_addr_l1() before changing. */
		if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
			pte_val = *(u32 *) pte_addr_l1;
			pte_size = hw_mmu_pte_size_l1(pte_val);
		} else {
			return -EPERM;
		}
		/* Serializes L2 table allocation and entry refcounting */
		spin_lock(&pt->pg_lock);
		if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
			/* Get the L2 PA from the L1 PTE, and find
			 * corresponding L2 VA */
			l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
			l2_base_va =
			    l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
			l2_page_num =
			    (l2_base_pa -
			     pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
		} else if (pte_size == 0) {
			/* L1 PTE is invalid. Allocate a L2 PT and
			 * point the L1 PTE to it */
			/* Find a free L2 PT. */
			for (i = 0; (i < pt->l2_num_pages) &&
			     (pt->pg_info[i].num_entries != 0); i++)
				;
			if (i < pt->l2_num_pages) {
				l2_page_num = i;
				l2_base_pa = pt->l2_base_pa + (l2_page_num *
						HW_MMU_COARSE_PAGE_SIZE);
				l2_base_va = pt->l2_base_va + (l2_page_num *
						HW_MMU_COARSE_PAGE_SIZE);
				/* Endianness attributes are ignored for
				 * HW_MMU_COARSE_PAGE_SIZE */
				status =
				    hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
						   HW_MMU_COARSE_PAGE_SIZE,
						   attrs);
			} else {
				status = -ENOMEM;
			}
		} else {
			/* Found valid L1 PTE of another size.
			 * Should not overwrite it. */
			status = -EPERM;
		}
		if (!status) {
			pg_tbl_va = l2_base_va;
			/* A 64KB mapping occupies 16 consecutive L2 slots */
			if (size == HW_PAGE_SIZE64KB)
				pt->pg_info[l2_page_num].num_entries += 16;
			else
				pt->pg_info[l2_page_num].num_entries++;
			dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
				"%x, num_entries %x\n", l2_base_va,
				l2_base_pa, l2_page_num,
				pt->pg_info[l2_page_num].num_entries);
		}
		spin_unlock(&pt->pg_lock);
	}
	if (!status) {
		dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
			pg_tbl_va, pa, va, size);
		dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
			"mixed_size %x\n", attrs->endianism,
			attrs->element_size, attrs->mixed_size);
		/* Write the actual PTE into L1 (large pages) or the L2
		 * table selected above (small pages) */
		status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
	}
	return status;
}
/* Memory map kernel VA -- memory allocated with vmalloc */
static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
			   u32 ul_mpu_addr, u32 virt_addr,
			   u32 ul_num_bytes,
			   struct hw_mmu_map_attrs_t *hw_attrs)
{
	int status = 0;
	struct page *page[1];
	u32 i;
	u32 pa_curr;
	u32 pa_next;
	u32 va_curr;
	u32 size_curr;
	u32 num_pages;
	u32 pa;
	u32 num_of4k_pages;
	u32 temp;
	/*
	 * Do Kernel va to pa translation.
	 * Combine physically contiguous regions to reduce TLBs.
	 * Pass the translated pa to pte_update.
	 */
	num_pages = ul_num_bytes / PAGE_SIZE;	/* PAGE_SIZE = OS page size */
	i = 0;
	va_curr = ul_mpu_addr;
	page[0] = vmalloc_to_page((void *)va_curr);
	pa_next = page_to_phys(page[0]);
	while (!status && (i < num_pages)) {
		/*
		 * Reuse pa_next from the previous iteration to avoid
		 * an extra va2pa call
		 */
		pa_curr = pa_next;
		size_curr = PAGE_SIZE;
		/*
		 * If the next page is physically contiguous,
		 * map it with the current one by increasing
		 * the size of the region to be mapped
		 */
		while (++i < num_pages) {
			page[0] =
			    vmalloc_to_page((void *)(va_curr + size_curr));
			pa_next = page_to_phys(page[0]);
			if (pa_next == (pa_curr + size_curr))
				size_curr += PAGE_SIZE;
			else
				break;
		}
		if (pa_next == 0) {
			status = -ENOMEM;
			break;
		}
		/*
		 * Pin every 4K page of this contiguous region.
		 * BUGFIX: the counter must restart at zero for each region.
		 * Previously 'temp' was initialised only once, so the pages
		 * of every region after the first were never get_page()'d
		 * and the later unmap's page_cache_release() underflowed
		 * their refcounts.
		 */
		pa = pa_curr;
		num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
		for (temp = 0; temp < num_of4k_pages; temp++) {
			get_page(PHYS_TO_PAGE(pa));
			pa += HW_PAGE_SIZE4KB;
		}
		status = pte_update(dev_context, pa_curr, virt_addr +
				    (va_curr - ul_mpu_addr), size_curr,
				    hw_attrs);
		va_curr += size_curr;
	}
	/*
	 * In any case, flush the TLB
	 * This is called from here instead from pte_update to avoid unnecessary
	 * repetition while mapping non-contiguous physical regions of a virtual
	 * region
	 */
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
	return status;
}
/*
* ======== wait_for_start ========
* Wait for the singal from DSP that it has started, or time out.
*/
/*
 * ======== wait_for_start ========
 * Poll the sync word until the DSP clears it (i.e. has started), giving
 * up after TIHELEN_ACKTIMEOUT polls.  Returns true on success.
 */
bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
{
	u16 polls_left = TIHELEN_ACKTIMEOUT;

	for (;;) {
		if (!__raw_readw(dw_sync_addr))
			return true;
		if (!--polls_left)
			break;
		udelay(10);
	}
	pr_err("%s: Timed out waiting DSP to Start\n", __func__);
	return false;
}
| gpl-2.0 |
MrHyde03/android_kernel_samsung_konawifixx | drivers/gpu/drm/i915/intel_opregion.c | 3863 | 14189 | /*
* Copyright 2008 Intel Corporation <hong.liu@intel.com>
* Copyright 2008 Red Hat <mjg@redhat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/acpi.h>
#include <linux/acpi_io.h>
#include <acpi/video.h>
#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_drv.h"
#define PCI_ASLE 0xe4
#define PCI_ASLS 0xfc
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
#define ACPI_CLID 0x01ac /* current lid state indicator */
#define ACPI_CDCK 0x01b0 /* current docking state indicator */
#define OPREGION_SWSCI_OFFSET 0x200
#define OPREGION_ASLE_OFFSET 0x300
#define OPREGION_VBT_OFFSET 0x400
#define OPREGION_SIGNATURE "IntelGraphicsMem"
#define MBOX_ACPI (1<<0)
#define MBOX_SWSCI (1<<1)
#define MBOX_ASLE (1<<2)
/* OpRegion header (offset 0): identifies the region and advertises
 * which mailboxes (MBOX_*) the system BIOS implements. */
struct opregion_header {
	u8 signature[16];	/* must match OPREGION_SIGNATURE */
	u32 size;		/* total region size; units per OpRegion spec -- TODO confirm (KB) */
	u32 opregion_ver;
	u8 bios_ver[32];
	u8 vbios_ver[16];
	u8 driver_ver[16];
	u32 mboxes;		/* supported-mailbox bitmask, tested in intel_opregion_setup() */
	u8 reserved[164];
} __attribute__((packed));
/* OpRegion mailbox #1: public ACPI methods */
struct opregion_acpi {
u32 drdy; /* driver readiness */
u32 csts; /* notification status */
u32 cevt; /* current event */
u8 rsvd1[20];
u32 didl[8]; /* supported display devices ID list */
u32 cpdl[8]; /* currently presented display list */
u32 cadl[8]; /* currently active display list */
u32 nadl[8]; /* next active devices list */
u32 aslp; /* ASL sleep time-out */
u32 tidx; /* toggle table index */
u32 chpd; /* current hotplug enable indicator */
u32 clid; /* current lid state*/
u32 cdck; /* current docking state */
u32 sxsw; /* Sx state resume */
u32 evts; /* ASL supported events */
u32 cnot; /* current OS notification */
u32 nrdy; /* driver status */
u8 rsvd2[60];
} __attribute__((packed));
/* OpRegion mailbox #2: SWSCI */
struct opregion_swsci {
u32 scic; /* SWSCI command|status|data */
u32 parm; /* command parameters */
u32 dslp; /* driver sleep time-out */
u8 rsvd[244];
} __attribute__((packed));
/* OpRegion mailbox #3: ASLE */
struct opregion_asle {
u32 ardy; /* driver readiness */
u32 aslc; /* ASLE interrupt command */
u32 tche; /* technology enabled indicator */
u32 alsi; /* current ALS illuminance reading */
u32 bclp; /* backlight brightness to set */
u32 pfit; /* panel fitting state */
u32 cblv; /* current brightness level */
u16 bclm[20]; /* backlight level duty cycle mapping table */
u32 cpfm; /* current panel fitting mode */
u32 epfm; /* enabled panel fitting modes */
u8 plut[74]; /* panel LUT and identifier */
u32 pfmb; /* PWM freq and min brightness */
u8 rsvd[102];
} __attribute__((packed));
/* ASLE irq request bits */
#define ASLE_SET_ALS_ILLUM (1 << 0)
#define ASLE_SET_BACKLIGHT (1 << 1)
#define ASLE_SET_PFIT (1 << 2)
#define ASLE_SET_PWM_FREQ (1 << 3)
#define ASLE_REQ_MSK 0xf
/* response bits of ASLE irq request */
#define ASLE_ALS_ILLUM_FAILED (1<<10)
#define ASLE_BACKLIGHT_FAILED (1<<12)
#define ASLE_PFIT_FAILED (1<<14)
#define ASLE_PWM_FREQ_FAILED (1<<16)
/* ASLE backlight brightness to set */
#define ASLE_BCLP_VALID (1<<31)
#define ASLE_BCLP_MSK (~(1<<31))
/* ASLE panel fitting request */
#define ASLE_PFIT_VALID (1<<31)
#define ASLE_PFIT_CENTER (1<<0)
#define ASLE_PFIT_STRETCH_TEXT (1<<1)
#define ASLE_PFIT_STRETCH_GFX (1<<2)
/* PWM frequency and minimum brightness */
#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
#define ASLE_PFMB_PWM_VALID (1<<31)
#define ASLE_CBLV_VALID (1<<31)
#define ACPI_OTHER_OUTPUT (0<<8)
#define ACPI_VGA_OUTPUT (1<<8)
#define ACPI_TV_OUTPUT (2<<8)
#define ACPI_DIGITAL_OUTPUT (3<<8)
#define ACPI_LVDS_OUTPUT (4<<8)
#ifdef CONFIG_ACPI
/* Handle an ASLE backlight request: scale the requested 0-255 level to
 * the panel's duty-cycle range, then report the applied level back to
 * the BIOS through CBLV as a 0-100 value. */
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct opregion_asle *asle = dev_priv->opregion.asle;
	u32 level, max_level;

	if (!(bclp & ASLE_BCLP_VALID))
		return ASLE_BACKLIGHT_FAILED;

	level = bclp & ASLE_BCLP_MSK;
	if (level > 255)
		return ASLE_BACKLIGHT_FAILED;

	max_level = intel_panel_get_max_backlight(dev);
	intel_panel_set_backlight(dev, level * max_level / 255);
	asle->cblv = (level * 0x64) / 0xff | ASLE_CBLV_VALID;
	return 0;
}
/* ASLE ambient-light request: deliberately a no-op; always reports
 * success (0) back to the BIOS. */
static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
{
	/* alsi is the current ALS reading in lux. 0 indicates below sensor
	   range, 0xffff indicates above sensor range. 1-0xfffe are valid */
	return 0;
}
/* ASLE PWM-frequency request: decodes the requested PWM value (bits
 * 30:9 of PFMB) but does not apply it yet -- see the FIXME.  Always
 * reports success. */
static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (pfmb & ASLE_PFMB_PWM_VALID) {
		u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
		u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
		blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
		pwm = pwm >> 9;
		/* FIXME - what do we do with the PWM? */
	}
	return 0;
}
/* ASLE panel-fitting request: panel fitting is currently controlled by
 * the X code, so only the request's valid bit is checked here; this is
 * a noop until modesetting support works fully. */
static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
{
	if (pfit & ASLE_PFIT_VALID)
		return 0;
	return ASLE_PFIT_FAILED;
}
/* ASLE interrupt service: dispatch each request bit set in the ASLC
 * word to its handler and write the combined status back to ASLC so
 * the BIOS can observe the result. */
void intel_opregion_asle_intr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct opregion_asle *asle = dev_priv->opregion.asle;
	u32 asle_stat = 0;
	u32 asle_req;
	/* No ASLE mailbox mapped -- nothing to service */
	if (!asle)
		return;
	asle_req = asle->aslc & ASLE_REQ_MSK;
	if (!asle_req) {
		DRM_DEBUG_DRIVER("non asle set request??\n");
		return;
	}
	if (asle_req & ASLE_SET_ALS_ILLUM)
		asle_stat |= asle_set_als_illum(dev, asle->alsi);
	if (asle_req & ASLE_SET_BACKLIGHT)
		asle_stat |= asle_set_backlight(dev, asle->bclp);
	if (asle_req & ASLE_SET_PFIT)
		asle_stat |= asle_set_pfit(dev, asle->pfit);
	if (asle_req & ASLE_SET_PWM_FREQ)
		asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
	/* Ack: report per-request status codes back through ASLC */
	asle->aslc = asle_stat;
}
/* GSE interrupt service: like intel_opregion_asle_intr(), but only the
 * backlight request is actually supported on this path; the other
 * request types are acked with their respective failure bits. */
void intel_opregion_gse_intr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct opregion_asle *asle = dev_priv->opregion.asle;
	u32 asle_stat = 0;
	u32 asle_req;
	if (!asle)
		return;
	asle_req = asle->aslc & ASLE_REQ_MSK;
	if (!asle_req) {
		DRM_DEBUG_DRIVER("non asle set request??\n");
		return;
	}
	if (asle_req & ASLE_SET_ALS_ILLUM) {
		DRM_DEBUG_DRIVER("Illum is not supported\n");
		asle_stat |= ASLE_ALS_ILLUM_FAILED;
	}
	if (asle_req & ASLE_SET_BACKLIGHT)
		asle_stat |= asle_set_backlight(dev, asle->bclp);
	if (asle_req & ASLE_SET_PFIT) {
		DRM_DEBUG_DRIVER("Pfit is not supported\n");
		asle_stat |= ASLE_PFIT_FAILED;
	}
	if (asle_req & ASLE_SET_PWM_FREQ) {
		DRM_DEBUG_DRIVER("PWM freq is not supported\n");
		asle_stat |= ASLE_PWM_FREQ_FAILED;
	}
	/* Ack the request with the accumulated status bits */
	asle->aslc = asle_stat;
}
#define ASLE_ALS_EN (1<<0)
#define ASLE_BLC_EN (1<<1)
#define ASLE_PFIT_EN (1<<2)
#define ASLE_PFMB_EN (1<<3)
/* Tell the BIOS which ASLE technologies the driver handles and flag the
 * driver as ready; enables the ASLE interrupt on mobile parts. */
void intel_opregion_enable_asle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct opregion_asle *asle = dev_priv->opregion.asle;

	if (!asle)
		return;

	if (IS_MOBILE(dev))
		intel_enable_asle(dev);

	asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | ASLE_PFMB_EN;
	asle->ardy = 1;
}
#define ACPI_EV_DISPLAY_SWITCH (1<<0)
#define ACPI_EV_LID (1<<1)
#define ACPI_EV_DOCK (1<<2)
static struct intel_opregion *system_opregion;
/* ACPI bus notifier: filters for video-class events and acks them via
 * the OpRegion status field. */
static int intel_opregion_video_event(struct notifier_block *nb,
				      unsigned long val, void *data)
{
	/* The only video events relevant to opregion are 0x80. These indicate
	   either a docking event, lid switch or display switch request. In
	   Linux, these are handled by the dock, button and video drivers.
	*/
	struct opregion_acpi *acpi;
	struct acpi_bus_event *event = data;
	int ret = NOTIFY_OK;
	if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
		return NOTIFY_DONE;
	if (!system_opregion)
		return NOTIFY_DONE;
	acpi = system_opregion->acpi;
	/* NOTE(review): NOTIFY_BAD when event 0x80 arrives without bit 0 of
	 * CEVT set -- presumably "not a display-switch request we handle";
	 * confirm the intended semantics before changing. */
	if (event->type == 0x80 && !(acpi->cevt & 0x1))
		ret = NOTIFY_BAD;
	/* Ack: clear the notification-status field */
	acpi->csts = 0;
	return ret;
}
static struct notifier_block intel_opregion_notifier = {
.notifier_call = intel_opregion_video_event,
};
/*
* Initialise the DIDL field in opregion. This passes a list of devices to
* the firmware. Values are defined by section B.4.2 of the ACPI specification
* (version 3)
*/
/* Fill opregion->acpi->didl[0..7] with the IDs of up to eight display
 * devices.  Primary source is the ACPI video bus children's _ADR
 * values; if any child reports an ID of 0, fall back ("blind_set") to
 * synthesising IDs from the DRM connector list. */
static void intel_didl_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	struct drm_connector *connector;
	acpi_handle handle;
	struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
	unsigned long long device_id;
	acpi_status status;
	int i = 0;
	handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
	if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
		return;
	/* The GPU device itself may be the video bus, or one of its
	 * ACPI children may be */
	if (acpi_is_video_device(acpi_dev))
		acpi_video_bus = acpi_dev;
	else {
		list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
			if (acpi_is_video_device(acpi_cdev)) {
				acpi_video_bus = acpi_cdev;
				break;
			}
		}
	}
	if (!acpi_video_bus) {
		printk(KERN_WARNING "No ACPI video bus found\n");
		return;
	}
	list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
		/* DIDL has room for exactly 8 entries */
		if (i >= 8) {
			dev_printk(KERN_ERR, &dev->pdev->dev,
				    "More than 8 outputs detected\n");
			return;
		}
		status =
		    acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
					  NULL, &device_id);
		if (ACPI_SUCCESS(status)) {
			if (!device_id)
				goto blind_set;
			opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
			i++;
		}
	}
end:
	/* If fewer than 8 outputs, the list must be null terminated */
	if (i < 8)
		opregion->acpi->didl[i] = 0;
	return;
blind_set:
	/* Fallback: derive entries from the DRM connector types instead */
	i = 0;
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		int output_type = ACPI_OTHER_OUTPUT;
		if (i >= 8) {
			dev_printk(KERN_ERR, &dev->pdev->dev,
				    "More than 8 outputs detected\n");
			return;
		}
		switch (connector->connector_type) {
		case DRM_MODE_CONNECTOR_VGA:
		case DRM_MODE_CONNECTOR_DVIA:
			output_type = ACPI_VGA_OUTPUT;
			break;
		case DRM_MODE_CONNECTOR_Composite:
		case DRM_MODE_CONNECTOR_SVIDEO:
		case DRM_MODE_CONNECTOR_Component:
		case DRM_MODE_CONNECTOR_9PinDIN:
			output_type = ACPI_TV_OUTPUT;
			break;
		case DRM_MODE_CONNECTOR_DVII:
		case DRM_MODE_CONNECTOR_DVID:
		case DRM_MODE_CONNECTOR_DisplayPort:
		case DRM_MODE_CONNECTOR_HDMIA:
		case DRM_MODE_CONNECTOR_HDMIB:
			output_type = ACPI_DIGITAL_OUTPUT;
			break;
		case DRM_MODE_CONNECTOR_LVDS:
			output_type = ACPI_LVDS_OUTPUT;
			break;
		}
		/* NOTE(review): '|=' merges with whatever the BIOS left in
		 * didl[i]; presumably intentional for this blind fallback --
		 * confirm before converting to plain assignment. */
		opregion->acpi->didl[i] |= (1<<31) | output_type | i;
		i++;
	}
	goto end;
}
/* Late init: populate DIDL (modeset only), advertise driver readiness
 * to the BIOS and register the ACPI video-event notifier; requires the
 * mailboxes already mapped by intel_opregion_setup(). */
void intel_opregion_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	if (!opregion->header)
		return;
	if (opregion->acpi) {
		if (drm_core_check_feature(dev, DRIVER_MODESET))
			intel_didl_outputs(dev);
		/* Notify BIOS we are ready to handle ACPI video ext notifs.
		 * Right now, all the events are handled by the ACPI video module.
		 * We don't actually need to do anything with them. */
		opregion->acpi->csts = 0;
		opregion->acpi->drdy = 1;
		system_opregion = opregion;
		register_acpi_notifier(&intel_opregion_notifier);
	}
	if (opregion->asle)
		intel_opregion_enable_asle(dev);
}
/* Teardown: withdraw driver readiness, drop the ACPI notifier and
 * unmap the OpRegion. */
void intel_opregion_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	if (!opregion->header)
		return;
	if (opregion->acpi) {
		/* Clear driver-ready so the BIOS stops sending events */
		opregion->acpi->drdy = 0;
		system_opregion = NULL;
		unregister_acpi_notifier(&intel_opregion_notifier);
	}
	/* just clear all opregion memory pointers now */
	iounmap(opregion->header);
	opregion->header = NULL;
	opregion->acpi = NULL;
	opregion->swsci = NULL;
	opregion->asle = NULL;
	opregion->vbt = NULL;
}
#endif
/* Locate and map the ACPI OpRegion shared with the system BIOS.  The
 * region's physical address comes from the ASLS dword in PCI config
 * space; each mailbox pointer is filled in only if the header's mailbox
 * mask says the BIOS supports it.  Returns 0, -ENOTSUPP, -ENOMEM or
 * -EINVAL. */
int intel_opregion_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_opregion *opregion = &dev_priv->opregion;
	void *base;
	u32 asls, mboxes;
	int err = 0;
	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
	if (asls == 0) {
		DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
		return -ENOTSUPP;
	}
	/* ACPI-declared memory: map via acpi_os_ioremap, not raw ioremap */
	base = acpi_os_ioremap(asls, OPREGION_SIZE);
	if (!base)
		return -ENOMEM;
	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
		err = -EINVAL;
		goto err_out;
	}
	opregion->header = base;
	opregion->vbt = base + OPREGION_VBT_OFFSET;
	opregion->lid_state = base + ACPI_CLID;
	mboxes = opregion->header->mboxes;
	if (mboxes & MBOX_ACPI) {
		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
		opregion->acpi = base + OPREGION_ACPI_OFFSET;
	}
	if (mboxes & MBOX_SWSCI) {
		DRM_DEBUG_DRIVER("SWSCI supported\n");
		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
	}
	if (mboxes & MBOX_ASLE) {
		DRM_DEBUG_DRIVER("ASLE supported\n");
		opregion->asle = base + OPREGION_ASLE_OFFSET;
	}
	return 0;
err_out:
	iounmap(base);
	return err;
}
| gpl-2.0 |
Flemmard/htc7x30-3.0 | arch/x86/lib/usercopy_64.c | 4119 | 4072 | /*
* User address space access functions.
*
* Copyright 1997 Andi Kleen <ak@muc.de>
* Copyright 1997 Linus Torvalds
* Copyright 2002 Andi Kleen <ak@suse.de>
*/
#include <linux/module.h>
#include <asm/uaccess.h>
/*
* Copy a null terminated string from userspace.
*/
/*
 * Copy at most 'count' bytes from user 'src' to kernel 'dst', stopping
 * after the terminating NUL.  'res' receives the number of bytes copied
 * excluding the NUL ('count' if no NUL was found), or -EFAULT via the
 * fixup at label 3 when a fault occurs.  Implemented with lodsb/stosb,
 * so rax/rcx/rsi/rdi are clobbered (hence the register constraints).
 */
#define __do_strncpy_from_user(dst,src,count,res) \
do { \
	long __d0, __d1, __d2; \
	might_fault(); \
	__asm__ __volatile__( \
		" testq %1,%1\n" \
		" jz 2f\n" \
		"0: lodsb\n" \
		" stosb\n" \
		" testb %%al,%%al\n" \
		" jz 1f\n" \
		" decq %1\n" \
		" jnz 0b\n" \
		"1: subq %1,%0\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3: movq %5,%0\n" \
		" jmp 2b\n" \
		".previous\n" \
		_ASM_EXTABLE(0b,3b) \
		: "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
		  "=&D" (__d2) \
		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
		: "memory"); \
} while (0)
/*
 * Copy a NUL-terminated string from userspace WITHOUT checking
 * access_ok() -- the caller must already have validated 'src'.
 * Returns the length copied (excluding the NUL) or -EFAULT.
 */
long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;
	__do_strncpy_from_user(dst, src, count, res);
	return res;
}
EXPORT_SYMBOL(__strncpy_from_user);
/*
 * Checked variant: validates the user pointer, then defers to
 * __strncpy_from_user().  Returns -EFAULT when 'src' fails access_ok().
 */
long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}
EXPORT_SYMBOL(strncpy_from_user);
/*
* Zero Userspace
*/
/*
 * Zero 'size' bytes at user address 'addr' without an access_ok()
 * check.  Writes whole qwords first, then the trailing <8 bytes.
 * Returns the number of bytes NOT cleared (0 on success); the fixup at
 * label 3 reconstructs that count from the remaining qword/byte
 * counters when a fault occurs.
 */
unsigned long __clear_user(void __user *addr, unsigned long size)
{
	long __d0;
	might_fault();
	/* no memory constraint because it doesn't change any memory gcc knows
	   about */
	asm volatile(
		" testq %[size8],%[size8]\n"
		" jz 4f\n"
		"0: movq %[zero],(%[dst])\n"
		" addq %[eight],%[dst]\n"
		" decl %%ecx ; jnz 0b\n"
		"4: movq %[size1],%%rcx\n"
		" testl %%ecx,%%ecx\n"
		" jz 2f\n"
		"1: movb %b[zero],(%[dst])\n"
		" incq %[dst]\n"
		" decl %%ecx ; jnz 1b\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: lea 0(%[size1],%[size8],8),%[size8]\n"
		" jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(0b,3b)
		_ASM_EXTABLE(1b,2b)
		: [size8] "=&c"(size), [dst] "=&D" (__d0)
		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
		  [zero] "r" (0UL), [eight] "r" (8UL));
	return size;
}
EXPORT_SYMBOL(__clear_user);
/*
 * Checked variant of __clear_user(): returns 'n' (nothing cleared) when
 * the destination range fails access_ok().
 */
unsigned long clear_user(void __user *to, unsigned long n)
{
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;
	return __clear_user(to, n);
}
EXPORT_SYMBOL(clear_user);
/*
* Return the size of a string (including the ending 0)
*
* Return 0 on exception, a value greater than N if too long
*/
/*
 * Length (including the NUL) of a user string, unchecked variant.
 * Returns 0 on fault, n+1 when the string is longer than n.
 */
long __strnlen_user(const char __user *s, long n)
{
	long len;
	char ch;

	for (len = 0; ; len++, s++) {
		if (len > n)
			return n + 1;
		if (__get_user(ch, s))
			return 0;
		if (ch == '\0')
			return len + 1;
	}
}
EXPORT_SYMBOL(__strnlen_user);
/*
 * Checked variant of __strnlen_user(); 0 means the pointer is invalid
 * or a fault occurred.
 */
long strnlen_user(const char __user *s, long n)
{
	return access_ok(VERIFY_READ, s, 1) ? __strnlen_user(s, n) : 0;
}
EXPORT_SYMBOL(strnlen_user);
/*
 * Unbounded user-string length, including the terminating NUL.
 * Returns 0 on fault.
 */
long strlen_user(const char __user *s)
{
	long len = 0;
	char ch;

	while (!get_user(ch, s)) {
		if (!ch)
			return len + 1;
		len++;
		s++;
	}
	return 0;
}
EXPORT_SYMBOL(strlen_user);
/*
 * Copy between two userspace buffers.  Returns the number of bytes NOT
 * copied ('len' when either pointer fails validation).
 */
unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
{
	if (!access_ok(VERIFY_WRITE, to, len) ||
	    !access_ok(VERIFY_READ, from, len))
		return len;
	return copy_user_generic((__force void *)to, (__force void *)from, len);
}
EXPORT_SYMBOL(copy_in_user);
/*
* Try to copy last bytes and clear the rest if needed.
* Since protection fault in copy_from/to_user is not a normal situation,
* it is not necessary to optimize tail handling.
*/
/*
 * Byte-by-byte fallback used after a faulting bulk copy: copies until
 * the faulting byte, then -- when 'zerorest' is set -- zero-fills the
 * destination for whatever could not be copied.  Returns the number of
 * bytes that were NOT copied.
 */
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
{
	char c;
	unsigned zero_len;
	for (; len; --len) {
		if (__get_user_nocheck(c, from++, sizeof(char)))
			break;
		if (__put_user_nocheck(c, to++, sizeof(char)))
			break;
	}
	/* c is 0 here; pad the uncopied remainder so kernel-visible buffers
	 * never keep stale data after a partial copy */
	for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
		if (__put_user_nocheck(c, to++, sizeof(char)))
			break;
	return len;
}
| gpl-2.0 |
ptmr3/GalaxyS2-GalaxyNote_Kernel | arch/ia64/kernel/machvec.c | 4631 | 1791 | #include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/machvec.h>
#include <asm/system.h>
#ifdef CONFIG_IA64_GENERIC
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/page.h>
struct ia64_machine_vector ia64_mv;
EXPORT_SYMBOL(ia64_mv);
/*
 * Scan the machine-vector table (machvec_start..machvec_end, laid out
 * contiguously by the linker script) for an entry named 'name'.
 * Returns the matching vector, or NULL if none matches.
 */
static struct ia64_machine_vector * __init
lookup_machvec (const char *name)
{
	extern struct ia64_machine_vector machvec_start[];
	extern struct ia64_machine_vector machvec_end[];
	struct ia64_machine_vector *mv;
	for (mv = machvec_start; mv < machvec_end; ++mv)
		if (strcmp (mv->name, name) == 0)
			return mv;
	/* idiom fix: return NULL (not the integer 0) from a
	 * pointer-returning function */
	return NULL;
}
/*
 * Select and install the machine vector named 'name' (or the one ACPI
 * reports when 'name' is NULL).  Panics when no matching vector was
 * linked into this generic kernel.
 */
void __init
machvec_init (const char *name)
{
	struct ia64_machine_vector *mv;
	if (!name)
		name = acpi_get_sysname();
	mv = lookup_machvec(name);
	if (!mv)
		panic("generic kernel failed to find machine vector for"
		      " platform %s!", name);
	/* Install by structure copy into the global vector */
	ia64_mv = *mv;
	printk(KERN_INFO "booting generic kernel on platform %s\n", name);
}
/*
 * Parse "machvec=<name>" out of the kernel command line and install the
 * named machine vector; fall back to ACPI autodetection (NULL) when the
 * option is absent.  The name is cut at the first space.
 */
void __init
machvec_init_from_cmdline(const char *cmdline)
{
	char name[64];
	const char *opt;
	char *space;

	opt = strstr(cmdline, "machvec=");
	if (!opt) {
		machvec_init(NULL);
		return;
	}
	strlcpy(name, opt + strlen("machvec="), sizeof(name));
	space = strchr(name, ' ');
	if (space)
		*space = '\0';
	machvec_init(name);
}
#endif /* CONFIG_IA64_GENERIC */
/* Default machine-vector hook: no platform-specific setup required. */
void
machvec_setup (char **arg)
{
}
EXPORT_SYMBOL(machvec_setup);
/* Default machine-vector hook: no extra work on timer interrupts. */
void
machvec_timer_interrupt (int irq, void *dev_id)
{
}
EXPORT_SYMBOL(machvec_timer_interrupt);
/* Default DMA sync for a single mapping: just a full memory barrier --
 * presumably sufficient on DMA-coherent platforms; platform vectors
 * override this where more is needed (TODO confirm per platform). */
void
machvec_dma_sync_single(struct device *hwdev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir)
{
	mb();
}
EXPORT_SYMBOL(machvec_dma_sync_single);
/* Default DMA sync for a scatter-gather list: same barrier-only scheme
 * as machvec_dma_sync_single(). */
void
machvec_dma_sync_sg(struct device *hwdev, struct scatterlist *sg, int n,
		    enum dma_data_direction dir)
{
	mb();
}
EXPORT_SYMBOL(machvec_dma_sync_sg);
| gpl-2.0 |
marlontoe/MAD-LEGACY | drivers/net/ethernet/amd/ni65.c | 5143 | 30577 | /*
* ni6510 (am7990 'lance' chip) driver for Linux-net-3
* BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
* copyrights (c) 1994,1995,1996 by M.Hipp
*
* This driver can handle the old ni6510 board and the newer ni6510
* EtherBlaster. (probably it also works with every full NE2100
* compatible card)
*
* driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
*
* This is an extension to the Linux operating system, and is covered by the
* same GNU General Public License that covers the Linux-kernel.
*
* comments/bugs/suggestions can be sent to:
* Michael Hipp
* email: hippm@informatik.uni-tuebingen.de
*
* sources:
* some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
* and from the original drivers by D.Becker
*
* known problems:
* - on some PCI boards (including my own) the card/board/ISA-bridge has
* problems with bus master DMA. This results in lotsa overruns.
* It may help to '#define RCV_PARANOIA_CHECK' or try to #undef
* the XMT and RCV_VIA_SKB option .. this reduces driver performance.
* Or just play with your BIOS options to optimize ISA-DMA access.
* Maybe you also wanna play with the LOW_PERFORAMCE and MID_PERFORMANCE
* defines -> please report me your experience then
* - Harald reported for ASUS SP3G mainboards, that you should use
* the 'optimal settings' from the user's manual on page 3-12!
*
* credits:
* thanx to Jason Sullivan for sending me a ni6510 card!
* lot of debug runs with ASUS SP3G Boards (Intel Saturn) by Harald Koenig
*
* simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
* average: FTP -> 8384421 bytes received in 8.5 seconds
* (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS)
* peak: FTP -> 8384421 bytes received in 7.5 seconds
* (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS)
*/
/*
* 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
* 96.Sept.29: virt_to_bus stuff added for new memory modell
* 96.April.29: Added Harald Koenig's Patches (MH)
* 96.April.13: enhanced error handling .. more tests (MH)
* 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
* 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH)
* 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
* hopefully no more 16MB limit
*
* 95.Nov.18: multicast tweaked (AC).
*
* 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
*
* 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include "ni65.h"
/*
* the current setting allows an acceptable performance
* for 'RCV_PARANOIA_CHECK' read the 'known problems' part in
* the header of this file
* 'invert' the defines for max. performance. This may cause DMA problems
* on some boards (e.g on my ASUS SP3G)
*/
#undef XMT_VIA_SKB
#undef RCV_VIA_SKB
#define RCV_PARANOIA_CHECK
#define MID_PERFORMANCE
#if defined( LOW_PERFORMANCE )
static int isa0=7,isa1=7,csr80=0x0c10;
#elif defined( MID_PERFORMANCE )
static int isa0=5,isa1=5,csr80=0x2810;
#else /* high performance */
static int isa0=4,isa1=4,csr80=0x0017;
#endif
/*
* a few card/vendor specific defines
*/
#define NI65_ID0 0x00
#define NI65_ID1 0x55
#define NI65_EB_ID0 0x52
#define NI65_EB_ID1 0x44
#define NE2100_ID0 0x57
#define NE2100_ID1 0x57
#define PORT p->cmdr_addr
/*
* buffer configuration
*/
#if 1
#define RMDNUM 16
#define RMDNUMMASK 0x80000000
#else
#define RMDNUM 8
#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
#endif
#if 0
#define TMDNUM 1
#define TMDNUMMASK 0x00000000
#else
#define TMDNUM 4
#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
#endif
/* slightly oversized */
#define R_BUF_SIZE 1544
#define T_BUF_SIZE 1544
/*
* lance register defines
*/
#define L_DATAREG 0x00
#define L_ADDRREG 0x02
#define L_RESET 0x04
#define L_CONFIG 0x05
#define L_BUSIF 0x06
/*
* to access the lance/am7990-regs, you have to write
* reg-number into L_ADDRREG, then you can access it using L_DATAREG
*/
#define CSR0 0x00
#define CSR1 0x01
#define CSR2 0x02
#define CSR3 0x03
#define INIT_RING_BEFORE_START 0x1
#define FULL_RESET_ON_ERROR 0x2
#if 0
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
inw(PORT+L_DATAREG))
#if 0
#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
#else
#define writedatareg(val) { writereg(val,CSR0); }
#endif
#else
#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
#define writedatareg(val) { writereg(val,CSR0); }
#endif
static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };
static struct card {
unsigned char id0,id1;
short id_offset;
short total_size;
short cmd_offset;
short addr_offset;
unsigned char *vendor_id;
char *cardname;
unsigned long config;
} cards[] = {
{
.id0 = NI65_ID0,
.id1 = NI65_ID1,
.id_offset = 0x0e,
.total_size = 0x10,
.cmd_offset = 0x0,
.addr_offset = 0x8,
.vendor_id = ni_vendor,
.cardname = "ni6510",
.config = 0x1,
},
{
.id0 = NI65_EB_ID0,
.id1 = NI65_EB_ID1,
.id_offset = 0x0e,
.total_size = 0x18,
.cmd_offset = 0x10,
.addr_offset = 0x0,
.vendor_id = ni_vendor,
.cardname = "ni6510 EtherBlaster",
.config = 0x2,
},
{
.id0 = NE2100_ID0,
.id1 = NE2100_ID1,
.id_offset = 0x0e,
.total_size = 0x18,
.cmd_offset = 0x10,
.addr_offset = 0x0,
.vendor_id = NULL,
.cardname = "generic NE2100",
.config = 0x0,
},
};
#define NUM_CARDS 3
struct priv
{
struct rmd rmdhead[RMDNUM];
struct tmd tmdhead[TMDNUM];
struct init_block ib;
int rmdnum;
int tmdnum,tmdlast;
#ifdef RCV_VIA_SKB
struct sk_buff *recv_skb[RMDNUM];
#else
void *recvbounce[RMDNUM];
#endif
#ifdef XMT_VIA_SKB
struct sk_buff *tmd_skb[TMDNUM];
#endif
void *tmdbounce[TMDNUM];
int tmdbouncenum;
int lock,xmit_queued;
void *self;
int cmdr_addr;
int cardno;
int features;
spinlock_t ring_lock;
};
static int ni65_probe1(struct net_device *dev,int);
static irqreturn_t ni65_interrupt(int irq, void * dev_id);
static void ni65_recv_intr(struct net_device *dev,int);
static void ni65_xmit_intr(struct net_device *dev,int);
static int ni65_open(struct net_device *dev);
static int ni65_lance_reinit(struct net_device *dev);
static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
struct net_device *dev);
static void ni65_timeout(struct net_device *dev);
static int ni65_close(struct net_device *dev);
static int ni65_alloc_buffer(struct net_device *dev);
static void ni65_free_buffer(struct priv *p);
static void set_multicast_list(struct net_device *dev);
static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */
static int debuglevel = 1;
/*
* set 'performance' registers .. we must STOP lance for that
*/
/*
 * set 'performance' registers .. we must STOP lance for that
 */
static void ni65_set_performance(struct priv *p)
{
    writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */

    /* only boards with config bit 1 set (EtherBlaster) have these regs */
    if( !(cards[p->cardno].config & 0x02) )
        return;

    outw(80,PORT+L_ADDRREG);
    /* if CSR80 cannot be selected the chip does not support it */
    if(inw(PORT+L_ADDRREG) != 80)
        return;

    writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
    outw(0,PORT+L_ADDRREG);
    outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
    outw(1,PORT+L_ADDRREG);
    outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */
    outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
}
/*
* open interface (up)
*/
/*
 * Bring the interface up: grab the IRQ line and (re)initialise the
 * LANCE.  If either step fails, everything acquired so far is released
 * and -EAGAIN is returned.
 */
static int ni65_open(struct net_device *dev)
{
    struct priv *p = dev->ml_priv;
    int ret;

    ret = request_irq(dev->irq, ni65_interrupt, 0,
                      cards[p->cardno].cardname, dev);
    if (ret) {
        printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
               dev->name, dev->irq, ret);
        return -EAGAIN;
    }

    if (!ni65_lance_reinit(dev)) {
        /* chip refused to come up: give the IRQ back */
        free_irq(dev->irq, dev);
        return -EAGAIN;
    }

    netif_start_queue(dev);
    return 0;
}
/*
* close interface (down)
*/
/*
 * Take the interface down: stop the queue, hard-reset the chip, drop
 * any skbs still owned by the transmit ring and release the IRQ.
 */
static int ni65_close(struct net_device *dev)
{
    netif_stop_queue(dev);

    outw(inw(PORT+L_RESET), PORT+L_RESET); /* that's the hard way */

#ifdef XMT_VIA_SKB
    {
        struct priv *p = dev->ml_priv;
        int slot;

        for (slot = 0; slot < TMDNUM; slot++) {
            struct sk_buff *skb = p->tmd_skb[slot];

            if (!skb)
                continue;
            p->tmd_skb[slot] = NULL;
            dev_kfree_skb(skb);
        }
    }
#endif

    free_irq(dev->irq, dev);
    return 0;
}
/*
 * Undo everything ni65_probe1() set up: the DMA channel, the I/O
 * region and the descriptor/buffer memory.  (The IRQ is released in
 * ni65_close().)
 */
static void cleanup_card(struct net_device *dev)
{
    struct priv *p = dev->ml_priv;
    disable_dma(dev->dma);
    free_dma(dev->dma);
    release_region(dev->base_addr, cards[p->cardno].total_size);
    ni65_free_buffer(p);
}
/* set: io,irq,dma or set it when calling insmod */
static int irq;
static int io;
static int dma;
/*
* Probe The Card (not the lance-chip)
*/
/*
 * Probe The Card (not the lance-chip)
 *
 * Allocates the net_device, probes a single user/boot-supplied I/O
 * address or autoprobes the default port list, then registers the
 * device.  Returns the net_device on success or an ERR_PTR().
 */
struct net_device * __init ni65_probe(int unit)
{
    struct net_device *dev = alloc_etherdev(0);
    static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };
    const int *port;
    int err = 0;

    if (!dev)
        return ERR_PTR(-ENOMEM);

    if (unit >= 0) {
        /* in-kernel probe: take settings from the boot-time setup */
        sprintf(dev->name, "eth%d", unit);
        netdev_boot_setup_check(dev);
        irq = dev->irq;
        dma = dev->dma;
    } else {
        /* module load: io comes from the module parameter */
        dev->base_addr = io;
    }

    if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
        err = ni65_probe1(dev, dev->base_addr);
    } else if (dev->base_addr > 0) { /* Don't probe at all. */
        err = -ENXIO;
    } else {
        /* autoprobe the port list; stop at the first success */
        for (port = ports; *port && ni65_probe1(dev, *port); port++)
            ;
        if (!*port)
            err = -ENODEV;
    }
    if (err)
        goto out;

    err = register_netdev(dev);
    if (err)
        goto out1;
    return dev;
out1:
    cleanup_card(dev);
out:
    free_netdev(dev);
    return ERR_PTR(err);
}
/* net_device callbacks for the ni65 driver */
static const struct net_device_ops ni65_netdev_ops = {
    .ndo_open               = ni65_open,
    .ndo_stop               = ni65_close,
    .ndo_start_xmit         = ni65_send_packet,
    .ndo_tx_timeout         = ni65_timeout,
    .ndo_set_rx_mode        = set_multicast_list,
    .ndo_change_mtu         = eth_change_mtu,
    .ndo_set_mac_address    = eth_mac_addr,
    .ndo_validate_addr      = eth_validate_addr,
};
/*
* this is the real card probe ..
*/
/*
 * this is the real card probe ..
 *
 * Identifies one of the supported boards at @ioaddr, reads the station
 * address, allocates the DMA-able buffers and autodetects IRQ/DMA when
 * they were not supplied.  Returns 0 on success or a negative errno.
 */
static int __init ni65_probe1(struct net_device *dev,int ioaddr)
{
    int i,j;
    struct priv *p;
    unsigned long flags;

    dev->irq = irq;
    dev->dma = dma;

    for(i=0;i<NUM_CARDS;i++) {
        if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
            continue;
        if(cards[i].id_offset >= 0) {
            if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
               inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
                release_region(ioaddr, cards[i].total_size);
                continue;
            }
        }
        if(cards[i].vendor_id) {
            /*
             * BUGFIX: compare the whole 3-byte vendor prefix before
             * deciding.  The old code did "release_region(); continue;"
             * inside this byte loop; the continue only advanced the
             * inner loop, so the region could be released once per
             * mismatching byte and control then fell through to the
             * break below, accepting a card whose I/O region had
             * already been given back.
             */
            int vendor_ok = 1;
            for(j=0;j<3;j++)
                if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j])
                    vendor_ok = 0;
            if(!vendor_ok) {
                release_region(ioaddr, cards[i].total_size);
                continue;
            }
        }
        break;
    }
    if(i == NUM_CARDS)
        return -ENODEV;

    /* station address lives right after the vendor prefix */
    for(j=0;j<6;j++)
        dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);

    if( (j=ni65_alloc_buffer(dev)) < 0) {
        release_region(ioaddr, cards[i].total_size);
        return j;
    }
    p = dev->ml_priv;
    p->cmdr_addr = ioaddr + cards[i].cmd_offset;
    p->cardno = i;
    spin_lock_init(&p->ring_lock);

    printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);

    outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
    /* after reset CSR0 must read STOP (0x4) */
    if( (j=readreg(CSR0)) != 0x4) {
        printk("failed.\n");
        printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
        ni65_free_buffer(p);
        release_region(ioaddr, cards[p->cardno].total_size);
        return -EAGAIN;
    }

    /* if CSR88/89 are selectable we have a newer chip with a version id */
    outw(88,PORT+L_ADDRREG);
    if(inw(PORT+L_ADDRREG) == 88) {
        unsigned long v;
        v = inw(PORT+L_DATAREG);
        v <<= 16;
        outw(89,PORT+L_ADDRREG);
        v |= inw(PORT+L_DATAREG);
        printk("Version %#08lx, ",v);
        p->features = INIT_RING_BEFORE_START;
    }
    else {
        printk("ancient LANCE, ");
        p->features = 0x0;
    }

    if(test_bit(0,&cards[i].config)) {
        /* card provides IRQ/DMA via its config register */
        dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
        dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
        printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
    }
    else {
        if(dev->dma == 0) {
            /* 'stuck test' from lance.c: try each channel in cascade
             * mode and see whether the chip manages a memory access */
            unsigned long dma_channels =
                ((inb(DMA1_STAT_REG) >> 4) & 0x0f)
                | (inb(DMA2_STAT_REG) & 0xf0);
            for(i=1;i<5;i++) {
                int dma = dmatab[i];
                if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
                    continue;

                flags=claim_dma_lock();
                disable_dma(dma);
                set_dma_mode(dma,DMA_MODE_CASCADE);
                enable_dma(dma);
                release_dma_lock(flags);

                ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */

                flags=claim_dma_lock();
                disable_dma(dma);
                free_dma(dma);
                release_dma_lock(flags);

                if(readreg(CSR0) & CSR0_IDON)
                    break;
            }
            if(i == 5) {
                printk("failed.\n");
                printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
                ni65_free_buffer(p);
                release_region(ioaddr, cards[p->cardno].total_size);
                return -EAGAIN;
            }
            dev->dma = dmatab[i];
            printk("DMA %d (autodetected), ",dev->dma);
        }
        else
            printk("DMA %d (assigned), ",dev->dma);

        if(dev->irq < 2)
        {
            /* provoke an interrupt and let the kernel tell us the line */
            unsigned long irq_mask;

            ni65_init_lance(p,dev->dev_addr,0,0);
            irq_mask = probe_irq_on();
            writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
            msleep(20);
            dev->irq = probe_irq_off(irq_mask);
            if(!dev->irq)
            {
                printk("Failed to detect IRQ line!\n");
                ni65_free_buffer(p);
                release_region(ioaddr, cards[p->cardno].total_size);
                return -EAGAIN;
            }
            printk("IRQ %d (autodetected).\n",dev->irq);
        }
        else
            printk("IRQ %d (assigned).\n",dev->irq);
    }

    if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
    {
        printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
        ni65_free_buffer(p);
        release_region(ioaddr, cards[p->cardno].total_size);
        return -EAGAIN;
    }

    dev->base_addr = ioaddr;
    dev->netdev_ops = &ni65_netdev_ops;
    dev->watchdog_timeo = HZ/2;

    return 0; /* everything is OK */
}
/*
* set lance register and trigger init
*/
/*
 * set lance register and trigger init
 *
 * Fills the init block (MAC address, logical-address filter, mode,
 * ring pointers), hands its bus address to the chip via CSR1/CSR2,
 * starts initialisation and busy-waits (up to ~128ms) for IDON/MERR.
 */
static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
{
    int i;
    u32 pib;

    writereg(CSR0_CLRALL|CSR0_STOP,CSR0); /* chip must be stopped first */

    for(i=0;i<6;i++)
        p->ib.eaddr[i] = daddr[i];

    for(i=0;i<8;i++)
        p->ib.filter[i] = filter;
    p->ib.mode = mode;

    /* ring base addresses with the ring-length code in the top bits */
    p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
    p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
    writereg(0,CSR3); /* busmaster/no word-swap */
    pib = (u32) isa_virt_to_bus(&p->ib);
    writereg(pib & 0xffff,CSR1);
    writereg(pib >> 16,CSR2);

    writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */

    for(i=0;i<32;i++)
    {
        mdelay(4);
        if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
            break; /* init ok ? */
    }
}
/*
* allocate memory area and check the 16MB border
*/
/*
 * allocate memory area and check the 16MB border
 *
 * @type != 0: allocate an skb (receive buffer), @type == 0: kmalloc a
 * bounce buffer.  The area must end below 16MB so ISA DMA can reach
 * it; otherwise it is freed again and NULL is returned.
 * NOTE(review): @size is only used for the 16MB boundary check — the
 * actual allocation sizes are the fixed R_BUF_SIZE / T_BUF_SIZE.
 */
static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
{
    struct sk_buff *skb=NULL;
    unsigned char *ptr;
    void *ret;

    if(type) {
        ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
        if(!skb) {
            printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
            return NULL;
        }
        skb_reserve(skb,2+16);
        skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
        ptr = skb->data;
    }
    else {
        ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
        if(!ret)
            return NULL;
    }
    /* ISA DMA cannot address memory at or above 16MB */
    if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
        printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
        if(type)
            kfree_skb(skb);
        else
            kfree(ptr);
        return NULL;
    }
    return ret;
}
/*
* allocate all memory structures .. send/recv buffers etc ...
*/
/*
 * allocate all memory structures .. send/recv buffers etc ...
 *
 * Allocates the private struct (8-byte aligned, below 16MB), the
 * transmit bounce buffers and the receive buffers.  On any failure,
 * everything allocated so far is released and -ENOMEM is returned.
 */
static int ni65_alloc_buffer(struct net_device *dev)
{
    unsigned char *ptr;
    struct priv *p;
    int i;

    /*
     * we need 8-aligned memory ..
     */
    ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
    if(!ptr)
        return -ENOMEM;

    /* round up to an 8-byte boundary; keep the raw pointer in
     * p->self so ni65_free_buffer() can kfree() it later */
    p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
    memset((char *)p, 0, sizeof(struct priv));

    p->self = ptr;

    for(i=0;i<TMDNUM;i++)
    {
#ifdef XMT_VIA_SKB
        p->tmd_skb[i] = NULL;
#endif
        p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
        if(!p->tmdbounce[i]) {
            ni65_free_buffer(p);
            return -ENOMEM;
        }
    }

    for(i=0;i<RMDNUM;i++)
    {
#ifdef RCV_VIA_SKB
        p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
        if(!p->recv_skb[i]) {
            ni65_free_buffer(p);
            return -ENOMEM;
        }
#else
        p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
        if(!p->recvbounce[i]) {
            ni65_free_buffer(p);
            return -ENOMEM;
        }
#endif
    }

    return 0; /* everything is OK */
}
/*
* free buffers and private struct
*/
/*
 * free buffers and private struct
 *
 * Safe to call with a partially initialised struct priv (as done from
 * ni65_alloc_buffer on error): NULL entries are handled by kfree()
 * and skipped for the skb cases.
 */
static void ni65_free_buffer(struct priv *p)
{
    int i;

    if(!p)
        return;

    for(i=0;i<TMDNUM;i++) {
        kfree(p->tmdbounce[i]);
#ifdef XMT_VIA_SKB
        if(p->tmd_skb[i])
            dev_kfree_skb(p->tmd_skb[i]);
#endif
    }

    for(i=0;i<RMDNUM;i++)
    {
#ifdef RCV_VIA_SKB
        if(p->recv_skb[i])
            dev_kfree_skb(p->recv_skb[i]);
#else
        kfree(p->recvbounce[i]);
#endif
    }
    kfree(p->self);
}
/*
* stop and (re)start lance .. e.g after an error
*/
/*
 * stop and (re)start lance .. e.g after an error
 *
 * Chips with INIT_RING_BEFORE_START lose the ring state across a
 * restart, so the transmit ring (buffer pointers, lengths, owning
 * skbs) is saved, the chip is restarted, and pending descriptors are
 * re-queued rotated so the oldest pending one sits in slot 0.
 */
static void ni65_stop_start(struct net_device *dev,struct priv *p)
{
    int csr0 = CSR0_INEA;

    writedatareg(CSR0_STOP);

    if(debuglevel > 1)
        printk(KERN_DEBUG "ni65_stop_start\n");

    if(p->features & INIT_RING_BEFORE_START) {
        int i;
#ifdef XMT_VIA_SKB
        struct sk_buff *skb_save[TMDNUM];
#endif
        unsigned long buffer[TMDNUM];
        short blen[TMDNUM];

        if(p->xmit_queued) {
            /* advance tmdlast past descriptors the chip already sent */
            while(1) {
                if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
                    break;
                p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
                if(p->tmdlast == p->tmdnum)
                    break;
            }
        }

        /* save the transmit ring and clear all status words */
        for(i=0;i<TMDNUM;i++) {
            struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
            skb_save[i] = p->tmd_skb[i];
#endif
            buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
            blen[i] = tmdp->blen;
            tmdp->u.s.status = 0x0;
        }

        /* hand every receive descriptor back to the chip */
        for(i=0;i<RMDNUM;i++) {
            struct rmd *rmdp = p->rmdhead + i;
            rmdp->u.s.status = RCV_OWN;
        }
        p->tmdnum = p->xmit_queued = 0;
        writedatareg(CSR0_STRT | csr0);

        /* restore the saved xmit ring, rotated by tmdlast */
        for(i=0;i<TMDNUM;i++) {
            int num = (i + p->tmdlast) & (TMDNUM-1);
            p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
            p->tmdhead[i].blen = blen[num];
            if(p->tmdhead[i].u.s.status & XMIT_OWN) {
                /* still pending: re-queue and kick the transmitter */
                p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
                p->xmit_queued = 1;
                writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
            }
#ifdef XMT_VIA_SKB
            p->tmd_skb[i] = skb_save[num];
#endif
        }
        p->rmdnum = p->tmdlast = 0;
        if(!p->lock)
            if (p->tmdnum || !p->xmit_queued)
                netif_wake_queue(dev);
        dev->trans_start = jiffies; /* prevent tx timeout */
    }
    else
        writedatareg(CSR0_STRT | csr0);
}
/*
* init lance (write init-values .. init-buffers) (open-helper)
*/
/*
 * init lance (write init-values .. init-buffers) (open-helper)
 *
 * Resets the chip, rebuilds both descriptor rings from scratch,
 * programs the multicast/promiscuous mode and starts the chip.
 * Returns 1 on success, 0 on failure (with DMA disabled again).
 */
static int ni65_lance_reinit(struct net_device *dev)
{
    int i;
    struct priv *p = dev->ml_priv;
    unsigned long flags;

    p->lock = 0;
    p->xmit_queued = 0;

    flags=claim_dma_lock();
    disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
    set_dma_mode(dev->dma,DMA_MODE_CASCADE);
    enable_dma(dev->dma);
    release_dma_lock(flags);

    outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
    /* after reset CSR0 must read STOP (0x4) */
    if( (i=readreg(CSR0) ) != 0x4)
    {
        printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
               cards[p->cardno].cardname,(int) i);
        flags=claim_dma_lock();
        disable_dma(dev->dma);
        release_dma_lock(flags);
        return 0;
    }

    p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
    /* rebuild the transmit ring: all slots free, no skbs attached */
    for(i=0;i<TMDNUM;i++)
    {
        struct tmd *tmdp = p->tmdhead + i;
#ifdef XMT_VIA_SKB
        if(p->tmd_skb[i]) {
            dev_kfree_skb(p->tmd_skb[i]);
            p->tmd_skb[i] = NULL;
        }
#endif
        tmdp->u.buffer = 0x0;
        tmdp->u.s.status = XMIT_START | XMIT_END;
        tmdp->blen = tmdp->status2 = 0;
    }

    /* rebuild the receive ring: all descriptors owned by the chip */
    for(i=0;i<RMDNUM;i++)
    {
        struct rmd *rmdp = p->rmdhead + i;
#ifdef RCV_VIA_SKB
        rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
#else
        rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
#endif
        rmdp->blen = -(R_BUF_SIZE-8);
        rmdp->mlen = 0;
        rmdp->u.s.status = RCV_OWN;
    }

    if(dev->flags & IFF_PROMISC)
        ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
    else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
        ni65_init_lance(p,dev->dev_addr,0xff,0x0);
    else
        ni65_init_lance(p,dev->dev_addr,0x00,0x00);

    /*
     * ni65_set_lance_mem() sets L_ADDRREG to CSR0
     * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
     */

    if(inw(PORT+L_DATAREG) & CSR0_IDON) {
        ni65_set_performance(p);
        /* init OK: start lance , enable interrupts */
        writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
        return 1; /* ->OK */
    }
    printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
    flags=claim_dma_lock();
    disable_dma(dev->dma);
    release_dma_lock(flags);
    return 0; /* ->Error */
}
/*
* interrupt handler
*/
/*
 * interrupt handler
 *
 * Loops up to 31 times acknowledging and servicing RINT/TINT/error
 * conditions, optionally resynchronises a corrupted receive ring
 * (RCV_PARANOIA_CHECK), and restarts the chip if RX or TX went
 * offline.
 */
static irqreturn_t ni65_interrupt(int irq, void * dev_id)
{
    int csr0 = 0;
    struct net_device *dev = dev_id;
    struct priv *p;
    int bcnt = 32;

    p = dev->ml_priv;

    spin_lock(&p->ring_lock);

    while(--bcnt) {
        csr0 = inw(PORT+L_DATAREG);

#if 0
        writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
#else
        writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
#endif

        if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
            break;

        if(csr0 & CSR0_RINT) /* RECV-int? */
            ni65_recv_intr(dev,csr0);
        if(csr0 & CSR0_TINT) /* XMIT-int? */
            ni65_xmit_intr(dev,csr0);

        if(csr0 & CSR0_ERR)
        {
            if(debuglevel > 1)
                printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
            if(csr0 & CSR0_BABL)
                dev->stats.tx_errors++;
            if(csr0 & CSR0_MISS) {
                int i;
                /* dump the receive ring status bytes for diagnosis */
                for(i=0;i<RMDNUM;i++)
                    printk("%02x ",p->rmdhead[i].u.s.status);
                printk("\n");
                dev->stats.rx_errors++;
            }
            if(csr0 & CSR0_MERR) {
                if(debuglevel > 1)
                    printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
                ni65_stop_start(dev,p);
            }
        }
    }

#ifdef RCV_PARANOIA_CHECK
/*
 * Detect the case where the chip owns descriptors "behind" rmdnum but
 * the one at rmdnum is still chip-owned: the software index has fallen
 * out of sync with the hardware.  Resynchronise rmdnum to the first
 * host-owned descriptor and re-run the receive handler.
 */
{
    int j;
    for(j=0;j<RMDNUM;j++)
    {
        int i, num2;
        for(i=RMDNUM-1;i>0;i--) {
            num2 = (p->rmdnum + i) & (RMDNUM-1);
            if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
                break;
        }

        if(i) {
            int k, num1;
            for(k=0;k<RMDNUM;k++) {
                num1 = (p->rmdnum + k) & (RMDNUM-1);
                if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
                    break;
            }
            if(!k)
                break;

            if(debuglevel > 0)
            {
                char buf[256],*buf1;
                buf1 = buf;
                for(k=0;k<RMDNUM;k++) {
                    sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
                    buf1 += 3;
                }
                *buf1 = 0;
                printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
            }

            p->rmdnum = num1;
            ni65_recv_intr(dev,csr0);
            if((p->rmdhead[num2].u.s.status & RCV_OWN))
                break; /* ok, we are 'in sync' again */
        }
        else
            break;
    }
}
#endif

    if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
        printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
        ni65_stop_start(dev,p);
    }
    else
        writedatareg(CSR0_INEA);

    spin_unlock(&p->ring_lock);
    return IRQ_HANDLED;
}
/*
* We have received an Xmit-Interrupt ..
* send a new packet if necessary
*/
/*
 * We have received an Xmit-Interrupt ..
 * send a new packet if necessary
 *
 * Reaps finished transmit descriptors starting at tmdlast, accounts
 * errors/statistics, frees the associated skbs and wakes the queue.
 */
static void ni65_xmit_intr(struct net_device *dev,int csr0)
{
    struct priv *p = dev->ml_priv;

    while(p->xmit_queued)
    {
        struct tmd *tmdp = p->tmdhead + p->tmdlast;
        int tmdstat = tmdp->u.s.status;

        /* still owned by the chip: nothing more to reap */
        if(tmdstat & XMIT_OWN)
            break;

        if(tmdstat & XMIT_ERR)
        {
#if 0
            if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
                printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
#endif
            /* checking some errors */
            if(tmdp->status2 & XMIT_RTRY)
                dev->stats.tx_aborted_errors++;
            if(tmdp->status2 & XMIT_LCAR)
                dev->stats.tx_carrier_errors++;
            if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
                /* this stops the xmitter */
                dev->stats.tx_fifo_errors++;
                if(debuglevel > 0)
                    printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
                if(p->features & INIT_RING_BEFORE_START) {
                    tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
                    ni65_stop_start(dev,p);
                    break; /* no more Xmit processing .. */
                }
                else
                    ni65_stop_start(dev,p);
            }
            if(debuglevel > 2)
                printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
            if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
                dev->stats.tx_errors++;
            tmdp->status2 = 0;
        }
        else {
            /* blen is stored negated, so subtracting adds the length */
            dev->stats.tx_bytes -= (short)(tmdp->blen);
            dev->stats.tx_packets++;
        }

#ifdef XMT_VIA_SKB
        if(p->tmd_skb[p->tmdlast]) {
            dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
            p->tmd_skb[p->tmdlast] = NULL;
        }
#endif

        p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
        if(p->tmdlast == p->tmdnum)
            p->xmit_queued = 0;
    }
    netif_wake_queue(dev);
}
/*
* We have received a packet
*/
/*
 * We have received a packet
 *
 * Walks the receive ring from rmdnum, passing good frames up the
 * stack (either by copying out of the DMA buffer or by swapping a
 * fresh skb into the ring), accounting errors, and returning each
 * descriptor to the chip.
 */
static void ni65_recv_intr(struct net_device *dev,int csr0)
{
    struct rmd *rmdp;
    int rmdstat,len;
    int cnt=0;
    struct priv *p = dev->ml_priv;

    rmdp = p->rmdhead + p->rmdnum;
    while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
    {
        cnt++;
        if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
        {
            if(!(rmdstat & RCV_ERR)) {
                /* START without END: frame spilled over one buffer */
                if(rmdstat & RCV_START)
                {
                    dev->stats.rx_length_errors++;
                    printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
                }
            }
            else {
                if(debuglevel > 2)
                    printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
                           dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
                if(rmdstat & RCV_FRAM)
                    dev->stats.rx_frame_errors++;
                if(rmdstat & RCV_OFLO)
                    dev->stats.rx_over_errors++;
                if(rmdstat & RCV_CRC)
                    dev->stats.rx_crc_errors++;
                if(rmdstat & RCV_BUF_ERR)
                    dev->stats.rx_fifo_errors++;
            }
            if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
                dev->stats.rx_errors++;
        }
        else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
        {
#ifdef RCV_VIA_SKB
            struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
            if (skb)
                skb_reserve(skb,16);
#else
            struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
#endif
            if(skb)
            {
                skb_reserve(skb,2);
#ifdef RCV_VIA_SKB
                if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
                    /* new skb not DMA-able: copy data out of the old one */
                    skb_put(skb,len);
                    skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
                }
                else {
                    /* swap the fresh skb into the ring, hand the full one up */
                    struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
                    skb_put(skb,R_BUF_SIZE);
                    p->recv_skb[p->rmdnum] = skb;
                    rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
                    skb = skb1;
                    skb_trim(skb,len);
                }
#else
                skb_put(skb,len);
                skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
#endif
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += len;
                skb->protocol=eth_type_trans(skb,dev);
                netif_rx(skb);
            }
            else
            {
                printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
                dev->stats.rx_dropped++;
            }
        }
        else {
            printk(KERN_INFO "%s: received runt packet\n",dev->name);
            dev->stats.rx_errors++;
        }
        /* reset the descriptor and hand it back to the chip */
        rmdp->blen = -(R_BUF_SIZE-8);
        rmdp->mlen = 0;
        rmdp->u.s.status = RCV_OWN; /* change owner */
        p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
        rmdp = p->rmdhead + p->rmdnum;
    }
}
/*
* kick xmitter ..
*/
/*
 * Transmit watchdog: dump the transmit-ring status bytes for
 * diagnosis, then reinitialise the chip and restart the queue.
 */
static void ni65_timeout(struct net_device *dev)
{
    struct priv *p = dev->ml_priv;
    int idx;

    printk(KERN_ERR "%s: xmitter timed out, try to restart!\n", dev->name);
    for (idx = 0; idx < TMDNUM; idx++)
        printk("%02x ", p->tmdhead[idx].u.s.status);
    printk("\n");

    ni65_lance_reinit(dev);
    dev->trans_start = jiffies; /* prevent tx timeout */
    netif_wake_queue(dev);
}
/*
* Send a packet
*/
/*
 * Send a packet
 *
 * Frames whose data sits above the ISA 16MB limit (or always, when
 * XMT_VIA_SKB is off) are copied into a bounce buffer and the skb is
 * freed immediately; otherwise the skb is mapped directly and kept
 * until the transmit interrupt reaps it.  p->lock guards against
 * re-entry, ring_lock against the interrupt handler.
 */
static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
                                    struct net_device *dev)
{
    struct priv *p = dev->ml_priv;

    netif_stop_queue(dev);

    if (test_and_set_bit(0, (void*)&p->lock)) {
        printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
        return NETDEV_TX_BUSY;
    }

    {
        /* pad short frames to the Ethernet minimum */
        short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
        struct tmd *tmdp;
        unsigned long flags;

#ifdef XMT_VIA_SKB
        if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
#endif
            /* bounce-buffer path: copy, zero the padding, drop the skb */
            skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
                                      skb->len > T_BUF_SIZE ? T_BUF_SIZE :
                                      skb->len);
            if (len > skb->len)
                memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
            dev_kfree_skb (skb);

            spin_lock_irqsave(&p->ring_lock, flags);
            tmdp = p->tmdhead + p->tmdnum;
            tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
            p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);

#ifdef XMT_VIA_SKB
        }
        else {
            /* direct path: chip DMAs straight out of the skb */
            spin_lock_irqsave(&p->ring_lock, flags);

            tmdp = p->tmdhead + p->tmdnum;
            tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
            p->tmd_skb[p->tmdnum] = skb;
        }
#endif
        tmdp->blen = -len;

        tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
        writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */

        p->xmit_queued = 1;
        p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);

        if(p->tmdnum != p->tmdlast)
            netif_wake_queue(dev);

        p->lock = 0;

        spin_unlock_irqrestore(&p->ring_lock, flags);
    }

    return NETDEV_TX_OK;
}
/*
 * Multicast/promiscuous changes require a full chip reinit; the new
 * filter mode is picked up from dev->flags inside ni65_lance_reinit().
 */
static void set_multicast_list(struct net_device *dev)
{
    int ok = ni65_lance_reinit(dev);

    if (!ok)
        printk(KERN_ERR "%s: Can't switch card into MC mode!\n", dev->name);
    netif_wake_queue(dev);
}
#ifdef MODULE
static struct net_device *dev_ni65;
module_param(irq, int, 0);
module_param(io, int, 0);
module_param(dma, int, 0);
MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
MODULE_PARM_DESC(io, "ni6510 I/O base address");
MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
/*
 * Module entry point: probe using the io/irq/dma module parameters
 * (unit -1 selects module-parameter mode in ni65_probe()).
 */
int __init init_module(void)
{
    dev_ni65 = ni65_probe(-1);
    return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
}
/*
 * Module exit: unregister the device, release its hardware resources
 * and free the net_device itself.
 */
void __exit cleanup_module(void)
{
    unregister_netdev(dev_ni65);
    cleanup_card(dev_ni65);
    free_netdev(dev_ni65);
}
#endif /* MODULE */
MODULE_LICENSE("GPL");
| gpl-2.0 |
profglavcho/tesr1 | arch/arm/mach-msm/board-trout-gpio.c | 9495 | 6251 | /*
* linux/arch/arm/mach-msm/gpio.c
*
* Copyright (C) 2005 HP Labs
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2009 Pavel Machek <pavel@ucw.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include "board-trout.h"
/* cached per-bank interrupt masks; the mask/unmask handlers modify the
 * cache and write the whole byte back (NOTE(review): presumably the
 * hardware mask registers are write-only — confirm against the CPLD
 * spec) */
static uint8_t trout_int_mask[2] = {
    [0] = 0xff, /* mask all interrupts */
    [1] = 0xff,
};
/* mask to apply while suspended; maintained by trout_gpio_irq_set_wake() */
static uint8_t trout_sleep_int_mask[] = {
    [0] = 0xff,
    [1] = 0xff,
};
/* One 8-bit CPLD register bank exposed as a gpio_chip. */
struct msm_gpio_chip {
    struct gpio_chip        chip;
    void __iomem            *reg;       /* Base of register bank */
    u8                      shadow;     /* last value written (register is not read-modify-write safe) */
};

/* recover the containing msm_gpio_chip from the embedded gpio_chip */
#define to_msm_gpio_chip(c) container_of(c, struct msm_gpio_chip, chip)
/* Read the current level of one GPIO line from the bank register. */
static int msm_gpiolib_get(struct gpio_chip *chip, unsigned offset)
{
    struct msm_gpio_chip *bank = to_msm_gpio_chip(chip);

    return (readb(bank->reg) >> offset) & 1;
}
static void msm_gpiolib_set(struct gpio_chip *chip, unsigned offset, int val)
{
struct msm_gpio_chip *msm_gpio = to_msm_gpio_chip(chip);
unsigned mask = 1 << offset;
if (val)
msm_gpio->shadow |= mask;
else
msm_gpio->shadow &= ~mask;
writeb(msm_gpio->shadow, msm_gpio->reg);
}
/* Configure a line as input.  There is no separate direction register;
 * writing 0 selects input mode (NOTE(review): inferred from existing
 * behaviour — confirm against the CPLD documentation). */
static int msm_gpiolib_direction_input(struct gpio_chip *chip,
                                       unsigned offset)
{
    msm_gpiolib_set(chip, offset, 0);
    return 0;
}
/* Configure a line as output with initial value @val (same register
 * write as set(); direction is implied by the value). */
static int msm_gpiolib_direction_output(struct gpio_chip *chip,
                                        unsigned offset, int val)
{
    msm_gpiolib_set(chip, offset, val);
    return 0;
}
/* Map a GPIO number (bank base + offset) to its virtual irq number. */
static int trout_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
    return TROUT_GPIO_TO_INT(offset + chip->base);
}
/*
 * Initialiser for one 8-line CPLD bank: @reg_num is the register
 * offset from TROUT_CPLD_BASE, @base_gpio the first global GPIO
 * number, @shadow_val the initial shadow register contents.
 */
#define TROUT_GPIO_BANK(name, reg_num, base_gpio, shadow_val) \
    { \
        .chip = { \
            .label            = name, \
            .direction_input  = msm_gpiolib_direction_input,\
            .direction_output = msm_gpiolib_direction_output, \
            .get              = msm_gpiolib_get, \
            .set              = msm_gpiolib_set, \
            .to_irq           = trout_gpio_to_irq, \
            .base             = base_gpio, \
            .ngpio            = 8, \
        }, \
        .reg = (void *) reg_num + TROUT_CPLD_BASE, \
        .shadow = shadow_val, \
    }
/* All CPLD GPIO banks on the trout board; shadow values encode the
 * required power-on pin states (see the per-bank comments). */
static struct msm_gpio_chip msm_gpio_banks[] = {
#if defined(CONFIG_MSM_DEBUG_UART1)
    /* H2W pins <-> UART1 */
    TROUT_GPIO_BANK("MISC2", 0x00, TROUT_GPIO_MISC2_BASE, 0x40),
#else
    /* H2W pins <-> UART3, Bluetooth <-> UART1 */
    TROUT_GPIO_BANK("MISC2", 0x00, TROUT_GPIO_MISC2_BASE, 0x80),
#endif
    /* I2C pull */
    TROUT_GPIO_BANK("MISC3", 0x02, TROUT_GPIO_MISC3_BASE, 0x04),
    TROUT_GPIO_BANK("MISC4", 0x04, TROUT_GPIO_MISC4_BASE, 0),
    /* mmdi 32k en */
    TROUT_GPIO_BANK("MISC5", 0x06, TROUT_GPIO_MISC5_BASE, 0x04),
    TROUT_GPIO_BANK("INT2", 0x08, TROUT_GPIO_INT2_BASE, 0),
    TROUT_GPIO_BANK("MISC1", 0x0a, TROUT_GPIO_MISC1_BASE, 0),
    TROUT_GPIO_BANK("VIRTUAL", 0x12, TROUT_GPIO_VIRTUAL_BASE, 0),
};
/* Acknowledge one GPIO interrupt by writing its bit to the bank's
 * status register. */
static void trout_gpio_irq_ack(struct irq_data *d)
{
    int bank = TROUT_INT_TO_BANK(d->irq);
    uint8_t mask = TROUT_INT_TO_MASK(d->irq);
    int reg = TROUT_BANK_TO_STAT_REG(bank);
    /*printk(KERN_INFO "trout_gpio_irq_ack irq %d\n", d->irq);*/
    writeb(mask, TROUT_CPLD_BASE + reg);
}
/*
 * Mask one GPIO interrupt: set its bit in the cached bank mask and
 * write the whole mask byte back.  Interrupts are disabled around the
 * read-modify-write of the cache so the handler can't race us.
 */
static void trout_gpio_irq_mask(struct irq_data *d)
{
    unsigned long flags;
    uint8_t reg_val;
    int bank = TROUT_INT_TO_BANK(d->irq);
    uint8_t mask = TROUT_INT_TO_MASK(d->irq);
    int reg = TROUT_BANK_TO_MASK_REG(bank);

    local_irq_save(flags);
    reg_val = trout_int_mask[bank] |= mask;
    /*printk(KERN_INFO "trout_gpio_irq_mask irq %d => %d:%02x\n",
           d->irq, bank, reg_val);*/
    writeb(reg_val, TROUT_CPLD_BASE + reg);
    local_irq_restore(flags);
}
/*
 * Unmask one GPIO interrupt: clear its bit in the cached bank mask and
 * write the whole mask byte back (mirror image of trout_gpio_irq_mask).
 */
static void trout_gpio_irq_unmask(struct irq_data *d)
{
    unsigned long flags;
    uint8_t reg_val;
    int bank = TROUT_INT_TO_BANK(d->irq);
    uint8_t mask = TROUT_INT_TO_MASK(d->irq);
    int reg = TROUT_BANK_TO_MASK_REG(bank);

    local_irq_save(flags);
    reg_val = trout_int_mask[bank] &= ~mask;
    /*printk(KERN_INFO "trout_gpio_irq_unmask irq %d => %d:%02x\n",
           d->irq, bank, reg_val);*/
    writeb(reg_val, TROUT_CPLD_BASE + reg);
    local_irq_restore(flags);
}
/*
 * Record whether this GPIO interrupt may wake the system: wake sources
 * are left unmasked in trout_sleep_int_mask, everything else is masked
 * while asleep.  Only updates the software copy; always succeeds.
 */
int trout_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
{
    unsigned long flags;
    int bank = TROUT_INT_TO_BANK(d->irq);
    uint8_t bit = TROUT_INT_TO_MASK(d->irq);

    local_irq_save(flags);
    if (on)
        trout_sleep_int_mask[bank] &= ~bit; /* keep enabled during sleep */
    else
        trout_sleep_int_mask[bank] |= bit;  /* masked during sleep */
    local_irq_restore(flags);

    return 0;
}
/*
 * Chained handler for the CPLD's summary interrupt line.  For each
 * bank: read the status register, ack (and warn about) any bits that
 * fired while masked, then dispatch each pending unmasked bit to its
 * virtual irq.  Finally ack the parent interrupt.
 */
static void trout_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
    int j, m;
    unsigned v;
    int bank;
    int stat_reg;
    int int_base = TROUT_INT_START;
    uint8_t int_mask;

    for (bank = 0; bank < 2; bank++) {
        stat_reg = TROUT_BANK_TO_STAT_REG(bank);
        v = readb(TROUT_CPLD_BASE + stat_reg);
        int_mask = trout_int_mask[bank];
        if (v & int_mask) {
            /* spurious: masked sources should never assert status */
            writeb(v & int_mask, TROUT_CPLD_BASE + stat_reg);
            printk(KERN_ERR "trout_gpio_irq_handler: got masked "
                   "interrupt: %d:%02x\n", bank, v & int_mask);
        }
        v &= ~int_mask;
        while (v) {
            m = v & -v;      /* isolate lowest pending bit */
            j = fls(m) - 1;  /* its bit index */
            /*printk(KERN_INFO "msm_gpio_irq_handler %d:%02x %02x b"
                   "it %d irq %d\n", bank, v, m, j, int_base + j);*/
            v &= ~m;
            generic_handle_irq(int_base + j);
        }
        int_base += TROUT_INT_BANK0_COUNT;
    }
    desc->irq_data.chip->irq_ack(&desc->irq_data);
}
/* irq_chip driving the per-GPIO virtual interrupts */
static struct irq_chip trout_gpio_irq_chip = {
    .name          = "troutgpio",
    .irq_ack       = trout_gpio_irq_ack,
    .irq_mask      = trout_gpio_irq_mask,
    .irq_unmask    = trout_gpio_irq_unmask,
    .irq_set_wake  = trout_gpio_irq_set_wake,
};
/*
* Called from the processor-specific init to enable GPIO pin support.
*/
/*
 * Called from the processor-specific init to enable GPIO pin support.
 *
 * Installs the per-GPIO irq_chip, registers every CPLD bank with
 * gpiolib and chains the CPLD summary line (MSM GPIO 17, level-high,
 * wake-capable) to trout_gpio_irq_handler().
 */
int __init trout_init_gpio(void)
{
    int i;
    for(i = TROUT_INT_START; i <= TROUT_INT_END; i++) {
        irq_set_chip_and_handler(i, &trout_gpio_irq_chip,
                                 handle_edge_irq);
        set_irq_flags(i, IRQF_VALID);
    }

    for (i = 0; i < ARRAY_SIZE(msm_gpio_banks); i++)
        gpiochip_add(&msm_gpio_banks[i].chip);

    irq_set_irq_type(MSM_GPIO_TO_INT(17), IRQF_TRIGGER_HIGH);
    irq_set_chained_handler(MSM_GPIO_TO_INT(17), trout_gpio_irq_handler);
    irq_set_irq_wake(MSM_GPIO_TO_INT(17), 1);

    return 0;
}

postcore_initcall(trout_init_gpio);
| gpl-2.0 |
scue/android_kernel_lenovo_stuttgart-base | arch/mips/sgi-ip22/ip22-hpc.c | 11799 | 1644 | /*
* ip22-hpc.c: Routines for generic manipulation of the HPC controllers.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1998 Ralf Baechle
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/sgi/hpc3.h>
#include <asm/sgi/ioc.h>
#include <asm/sgi/ip22.h>
/* mapped register blocks for the two HPC3 chips and the IOC */
struct hpc3_regs *hpc3c0, *hpc3c1;
EXPORT_SYMBOL(hpc3c0);
EXPORT_SYMBOL(hpc3c1);

struct sgioc_regs *sgioc;
EXPORT_SYMBOL(sgioc);

/* We need software copies of these because they are write only. */
u8 sgi_ioc_reset, sgi_ioc_write;

extern char *system_type;
/*
 * Map the two HPC3 controllers, locate the IOC and interrupt
 * controller (INT2 on Fullhouse/Indigo2, INT3 inside the IOC on
 * Guinness/Indy) and program the initial reset/write register values.
 */
void __init sgihpc_init(void)
{
    /* ioremap can't fail */
    hpc3c0 = (struct hpc3_regs *)
             ioremap(HPC3_CHIP0_BASE, sizeof(struct hpc3_regs));
    hpc3c1 = (struct hpc3_regs *)
             ioremap(HPC3_CHIP1_BASE, sizeof(struct hpc3_regs));
    /* IOC lives in PBUS PIO channel 6 */
    sgioc = (struct sgioc_regs *)hpc3c0->pbus_extregs[6];
    hpc3c0->pbus_piocfg[6][0] |= HPC3_PIOCFG_DS16;
    if (ip22_is_fullhouse()) {
        /* Full House comes with INT2 which lives in PBUS PIO
         * channel 4 */
        sgint = (struct sgint_regs *)hpc3c0->pbus_extregs[4];
        system_type = "SGI Indigo2";
    } else {
        /* Guiness comes with INT3 which is part of IOC */
        sgint = &sgioc->int3;
        system_type = "SGI Indy";
    }

    /* initial values for the write-only reset/write registers, kept
     * in the software shadow copies declared above */
    sgi_ioc_reset = (SGIOC_RESET_PPORT | SGIOC_RESET_KBDMOUSE |
                     SGIOC_RESET_EISA | SGIOC_RESET_ISDN |
                     SGIOC_RESET_LC0OFF);

    sgi_ioc_write = (SGIOC_WRITE_EASEL | SGIOC_WRITE_NTHRESH |
                     SGIOC_WRITE_TPSPEED | SGIOC_WRITE_EPSEL |
                     SGIOC_WRITE_U0AMODE | SGIOC_WRITE_U1AMODE);

    sgioc->reset = sgi_ioc_reset;
    sgioc->write = sgi_ioc_write;
}
| gpl-2.0 |
spezi77/android_kernel_google_mako | drivers/infiniband/hw/amso1100/c2_mm.c | 13335 | 8887 | /*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/slab.h>
#include "c2.h"
#include "c2_vq.h"
#define PBL_VIRT 1
#define PBL_PHYS 2
/*
* Send all the PBL messages to convey the remainder of the PBL
* Wait for the adapter's reply on the last one.
* This is indicated by setting the MEM_PBL_COMPLETE in the flags.
*
* NOTE: vq_req is _not_ freed by this function. The VQ Host
* Reply buffer _is_ freed by this function.
*/
/*
 * Send all the PBL messages to convey the remainder of the PBL.
 * Wait for the adapter's reply on the last one; that final message is
 * marked by setting MEM_PBL_COMPLETE in the flags.
 *
 * NOTE: vq_req is _not_ freed by this function. The VQ Host
 * Reply buffer _is_ freed by this function.
 *
 * Returns 0 on success or a negative errno.
 */
static int
send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
		  unsigned long va, u32 pbl_depth,
		  struct c2_vq_req *vq_req, int pbl_type)
{
	u32 pbe_count;			/* amt that fits in a PBL msg */
	u32 count;			/* amt in this PBL MSG. */
	struct c2wr_nsmr_pbl_req *wr;	/* PBL WR ptr */
	struct c2wr_nsmr_pbl_rep *reply;	/* reply ptr */
	int err, pbl_virt, pbl_index, i;

	switch (pbl_type) {
	case PBL_VIRT:
		pbl_virt = 1;
		break;
	case PBL_PHYS:
		pbl_virt = 0;
		break;
	default:
		/* unreachable "break" after return removed */
		return -EINVAL;
	}

	/* how many page-buffer entries fit in one VQ message */
	pbe_count = (c2dev->req_vq.msg_size -
		     sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr)
		return -ENOMEM;

	c2_wr_set_id(wr, CCWR_NSMR_PBL);

	/*
	 * Only the last PBL message will generate a reply from the verbs,
	 * so we set the context to 0 indicating there is no kernel verbs
	 * handler blocked awaiting this reply.
	 */
	wr->hdr.context = 0;
	wr->rnic_handle = c2dev->adapter_handle;
	wr->stag_index = stag_index;	/* already swapped */
	wr->flags = 0;
	pbl_index = 0;

	while (pbl_depth) {
		count = min(pbe_count, pbl_depth);
		wr->addrs_length = cpu_to_be32(count);

		/*
		 * If this is the last message, then reference the
		 * vq request struct because we are going to wait for a
		 * reply, and mark this PBL msg as the last one.
		 */
		if (count == pbl_depth) {
			/*
			 * reference the request struct. dereferenced in the
			 * int handler.
			 */
			vq_req_get(c2dev, vq_req);
			wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);

			/*
			 * This is the last PBL message.
			 * Set the context to our VQ Request Object so we can
			 * wait for the reply.
			 */
			wr->hdr.context = (unsigned long) vq_req;
		}

		/*
		 * If pbl_virt is set then va is a virtual address
		 * that describes a virtually contiguous memory
		 * allocation. The wr needs the start of each virtual page
		 * to be converted to the corresponding physical address
		 * of the page. If pbl_virt is not set then va is an array
		 * of physical addresses and there is no conversion to do.
		 * Just fill in the wr with what is in the array.
		 *
		 * NOTE(review): the pbl_virt branch only advances va and
		 * never fills wr->paddrs[] -- the virtual-to-physical
		 * translation appears to have been removed. The only
		 * caller visible here uses PBL_PHYS; confirm before
		 * using PBL_VIRT.
		 */
		for (i = 0; i < count; i++) {
			if (pbl_virt) {
				va += PAGE_SIZE;
			} else {
				wr->paddrs[i] =
				    cpu_to_be64(((u64 *)va)[pbl_index + i]);
			}
		}

		/*
		 * Send WR to adapter
		 */
		err = vq_send_wr(c2dev, (union c2wr *) wr);
		if (err) {
			/*
			 * Bug fix: only drop the reference if one was
			 * actually taken above, i.e. when this was the
			 * final message (count == pbl_depth).  The old
			 * check (count <= pbe_count) was always true
			 * since count = min(pbe_count, pbl_depth), so a
			 * failure on a non-final message unbalanced the
			 * vq_req refcount.
			 */
			if (count == pbl_depth)
				vq_req_put(c2dev, vq_req);
			goto bail0;
		}
		pbl_depth -= count;
		pbl_index += count;
	}

	/*
	 * Now wait for the reply...
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	/*
	 * Process reply
	 */
	reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);

	vq_repbuf_free(c2dev, reply);
bail0:
	kfree(wr);
	return err;
}
/* Maximum number of page-buffer entries accepted per registration. */
#define C2_PBL_MAX_DEPTH 131072

/*
 * Register a physical memory region with the adapter
 * (CCWR_NSMR_REGISTER).  As many PBEs as fit are sent in the initial
 * message; any remainder is streamed with send_pbl_messages().
 * On success mr->ibmr.lkey/rkey are set from the adapter's stag_index.
 *
 * Returns 0 on success or a negative errno.
 */
int
c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
			   int page_size, int pbl_depth, u32 length,
			   u32 offset, u64 *va, enum c2_acf acf,
			   struct c2_mr *mr)
{
	struct c2_vq_req *vq_req;
	struct c2wr_nsmr_register_req *wr;
	struct c2wr_nsmr_register_rep *reply;
	u16 flags;
	int i, pbe_count, count;
	int err;

	/* Bug fix: invalid arguments are -EINVAL, not -EINTR
	 * (EINTR means "interrupted system call"). */
	if (!va || !length || !addr_list || !pbl_depth)
		return -EINVAL;

	/*
	 * Verify PBL depth is within rnic max
	 */
	if (pbl_depth > C2_PBL_MAX_DEPTH)
		return -EINVAL;

	/*
	 * allocate verbs request object
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
	if (!wr) {
		err = -ENOMEM;
		goto bail0;
	}

	/*
	 * build the WR
	 */
	c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
	wr->hdr.context = (unsigned long) vq_req;
	wr->rnic_handle = c2dev->adapter_handle;

	flags = (acf | MEM_VA_BASED | MEM_REMOTE);

	/*
	 * compute how many pbes can fit in the message
	 */
	pbe_count = (c2dev->req_vq.msg_size -
		     sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);

	/* everything fits in one message -> no follow-up PBL messages */
	if (pbl_depth <= pbe_count)
		flags |= MEM_PBL_COMPLETE;

	wr->flags = cpu_to_be16(flags);
	wr->stag_key = 0;	/* stag_key */
	wr->va = cpu_to_be64(*va);
	wr->pd_id = mr->pd->pd_id;
	wr->pbe_size = cpu_to_be32(page_size);
	wr->length = cpu_to_be32(length);
	wr->pbl_depth = cpu_to_be32(pbl_depth);
	wr->fbo = cpu_to_be32(offset);
	count = min(pbl_depth, pbe_count);
	wr->addrs_length = cpu_to_be32(count);

	/*
	 * fill out the PBL for this message
	 */
	for (i = 0; i < count; i++)
		wr->paddrs[i] = cpu_to_be64(addr_list[i]);

	/*
	 * reference the request struct
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * send the WR to the adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail1;
	}

	/*
	 * wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail1;

	/*
	 * process reply
	 */
	reply =
	    (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail1;
	}
	if ((err = c2_errno(reply)))
		goto bail2;

	/* adapter-assigned STag is used for both local and remote keys */
	/* *p_pb_entries = be32_to_cpu(reply->pbl_depth); */
	mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
	vq_repbuf_free(c2dev, reply);

	/*
	 * if there are still more PBEs we need to send them to
	 * the adapter and wait for a reply on the final one.
	 * reuse vq_req for this purpose.
	 */
	pbl_depth -= count;
	if (pbl_depth) {
		vq_req->reply_msg = (unsigned long) NULL;
		atomic_set(&vq_req->reply_ready, 0);
		err = send_pbl_messages(c2dev,
					cpu_to_be32(mr->ibmr.lkey),
					(unsigned long) &addr_list[i],
					pbl_depth, vq_req, PBL_PHYS);
		if (err)
			goto bail1;
	}

	vq_req_free(c2dev, vq_req);
	kfree(wr);

	return err;

bail2:
	vq_repbuf_free(c2dev, reply);
bail1:
	kfree(wr);
bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
/*
 * Deallocate a STag on the adapter via a CCWR_STAG_DEALLOC verbs
 * request and wait for the adapter's reply.
 *
 * Returns 0 on success or a negative errno.
 */
int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
{
	struct c2wr_stag_dealloc_req req;	/* work request (stack) */
	struct c2wr_stag_dealloc_rep *rep;	/* adapter reply */
	struct c2_vq_req *vq_req;		/* verbs request object */
	int err;

	/* allocate verbs request object */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	/* build the work request */
	c2_wr_set_id(&req, CCWR_STAG_DEALLOC);
	req.hdr.context = (u64) (unsigned long) vq_req;
	req.rnic_handle = c2dev->adapter_handle;
	req.stag_index = cpu_to_be32(stag_index);

	/* reference the request struct; dereferenced in the int handler */
	vq_req_get(c2dev, vq_req);

	/* send the WR to the adapter */
	err = vq_send_wr(c2dev, (union c2wr *) &req);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto out;
	}

	/* wait for the adapter's reply */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto out;

	/* process the reply */
	rep = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	err = c2_errno(rep);
	vq_repbuf_free(c2dev, rep);
out:
	vq_req_free(c2dev, vq_req);
	return err;
}
| gpl-2.0 |
hgl888/linux | drivers/staging/fbtft/fb_watterott.c | 24 | 7504 | /*
* FB driver for the Watterott LCD Controller
*
* Copyright (C) 2013 Noralf Tronnes
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include "fbtft.h"
#define DRVNAME "fb_watterott"
#define WIDTH 320
#define HEIGHT 240
#define FPS 5
#define TXBUFLEN 1024
#define DEFAULT_BRIGHTNESS 50
#define CMD_VERSION 0x01
#define CMD_LCD_LED 0x10
#define CMD_LCD_RESET 0x11
#define CMD_LCD_ORIENTATION 0x20
#define CMD_LCD_DRAWIMAGE 0x27
#define COLOR_RGB323 8
#define COLOR_RGB332 9
#define COLOR_RGB233 10
#define COLOR_RGB565 16
static short mode = 565;
module_param(mode, short, 0000);
MODULE_PARM_DESC(mode, "RGB color transfer mode: 332, 565 (default)");
/*
 * Gather the variadic register byte arguments into par->buf and push
 * them over the 8-bit bus in a single write.
 */
static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
{
	va_list ap;
	int i;
	int ret;
	u8 *out = par->buf;

	/* copy each byte argument into the shared command buffer */
	va_start(ap, len);
	for (i = 0; i < len; i++) {
		*out = (u8)va_arg(ap, unsigned int);
		out++;
	}
	va_end(ap);

	fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par,
		par->info->device, u8, par->buf, len, "%s: ", __func__);

	ret = par->fbtftops.write(par, par->buf, len);
	if (ret < 0)
		dev_err(par->info->device,
			"write() failed and returned %d\n", ret);
}
/*
 * Transfer dirty video memory to the display, one line per
 * CMD_LCD_DRAWIMAGE command, in native big-endian RGB565.
 *
 * Command layout in txbuf (10-byte header + pixel payload):
 *   byte 0:     CMD_LCD_DRAWIMAGE
 *   bytes 1-8:  x, y, w, h as big-endian u16 (pos[0..3])
 *   byte 9:     color format (COLOR_RGB565)
 *
 * NOTE(review): assumes offset/len are line-aligned (fbtft core
 * appears to guarantee this) -- confirm against the caller.
 */
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
	unsigned int start_line, end_line;
	u16 *vmem16 = (u16 *)(par->info->screen_buffer + offset);
	u16 *pos = par->txbuf.buf + 1;	/* x/y/w/h slots at byte offset 1 */
	u16 *buf16 = par->txbuf.buf + 10;	/* pixel payload */
	int i, j;
	int ret = 0;

	start_line = offset / par->info->fix.line_length;
	end_line = start_line + (len / par->info->fix.line_length) - 1;

	/* Set command header. pos: x, y, w, h */
	((u8 *)par->txbuf.buf)[0] = CMD_LCD_DRAWIMAGE;
	pos[0] = 0;
	pos[2] = cpu_to_be16(par->info->var.xres);
	pos[3] = cpu_to_be16(1);	/* one line per command */
	((u8 *)par->txbuf.buf)[9] = COLOR_RGB565;

	for (i = start_line; i <= end_line; i++) {
		pos[1] = cpu_to_be16(i);
		/* pixels are sent big-endian */
		for (j = 0; j < par->info->var.xres; j++)
			buf16[j] = cpu_to_be16(*vmem16++);
		ret = par->fbtftops.write(par,
			par->txbuf.buf, 10 + par->info->fix.line_length);
		if (ret < 0)
			return ret;
		/* controller needs time to draw the line */
		udelay(300);
	}

	return 0;
}
/*
 * RGB565 -> low bit-depth conversion helpers.
 * RGB565 layout: R = bits 15:11, G = bits 10:5, B = bits 4:0.
 * Each macro keeps the top bits of each channel and packs them into
 * one byte.
 *
 * Bug fix: the green masks were written as OCTAL literals (0600 =
 * 0x180, 0700 = 0x1C0), which select bits straddling the red/green
 * boundary instead of the top bits of the green field.  Correct hex
 * masks: 0x0600 (top 2 green bits), 0x0700 (top 3 green bits).
 * Arguments are also parenthesized against operator-precedence
 * surprises.
 */
#define RGB565toRGB323(c) ((((c)&0xE000)>>8) | (((c)&0x0600)>>6) | (((c)&0x001C)>>2))
#define RGB565toRGB332(c) ((((c)&0xE000)>>8) | (((c)&0x0700)>>6) | (((c)&0x0018)>>3))
#define RGB565toRGB233(c) ((((c)&0xC000)>>8) | (((c)&0x0700)>>5) | (((c)&0x001C)>>2))
/*
 * Transfer video memory in 8-bit RGB332 mode (module param mode=332),
 * one line per CMD_LCD_DRAWIMAGE command.  Same 10-byte header layout
 * as write_vmem(), but byte 9 selects COLOR_RGB332 and each RGB565
 * pixel is down-converted with RGB565toRGB332(), halving the payload.
 */
static int write_vmem_8bit(struct fbtft_par *par, size_t offset, size_t len)
{
	unsigned int start_line, end_line;
	u16 *vmem16 = (u16 *)(par->info->screen_buffer + offset);
	u16 *pos = par->txbuf.buf + 1;	/* x/y/w/h header slots */
	u8 *buf8 = par->txbuf.buf + 10;	/* 8-bit pixel payload */
	int i, j;
	int ret = 0;

	start_line = offset / par->info->fix.line_length;
	end_line = start_line + (len / par->info->fix.line_length) - 1;

	/* Set command header. pos: x, y, w, h */
	((u8 *)par->txbuf.buf)[0] = CMD_LCD_DRAWIMAGE;
	pos[0] = 0;
	pos[2] = cpu_to_be16(par->info->var.xres);
	pos[3] = cpu_to_be16(1);	/* one line per command */
	((u8 *)par->txbuf.buf)[9] = COLOR_RGB332;

	for (i = start_line; i <= end_line; i++) {
		pos[1] = cpu_to_be16(i);
		for (j = 0; j < par->info->var.xres; j++) {
			buf8[j] = RGB565toRGB332(*vmem16);
			vmem16++;
		}
		/* one byte per pixel -> payload is xres bytes */
		ret = par->fbtftops.write(par,
			par->txbuf.buf, 10 + par->info->var.xres);
		if (ret < 0)
			return ret;
		/* controller needs time to draw the line */
		udelay(700);
	}

	return 0;
}
/*
 * Query the display MCU firmware version via CMD_VERSION.
 * The 4-byte reply is expected as ASCII "M.mm"; the digits are packed
 * into nibbles of the return value (major << 8 | minor digits).
 * Returns 0 if the reply does not look valid.
 */
static unsigned int firmware_version(struct fbtft_par *par)
{
	u8 rxbuf[4] = {0, };

	write_reg(par, CMD_VERSION);
	par->fbtftops.read(par, rxbuf, 4);
	/* sanity check: second byte must be the '.' separator */
	if (rxbuf[1] != '.')
		return 0;

	return (rxbuf[0] - '0') << 8 | (rxbuf[2] - '0') << 4 | (rxbuf[3] - '0');
}
/*
 * Initialize the display controller.
 *
 * The controller selects its SPI interface when CS and MOSI are low
 * during reset, so CS polarity is temporarily inverted (SPI_CS_HIGH)
 * around the reset and then restored.  The statement order here is
 * significant -- do not reorder.  Afterwards the firmware version is
 * logged and, if mode=332 was requested, the 8-bit transfer path is
 * hooked up.
 */
static int init_display(struct fbtft_par *par)
{
	int ret;
	unsigned int version;
	u8 save_mode;

	/* enable SPI interface by having CS and MOSI low during reset */
	save_mode = par->spi->mode;
	par->spi->mode |= SPI_CS_HIGH;
	ret = spi_setup(par->spi); /* set CS inactive low */
	if (ret) {
		dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
		return ret;
	}
	write_reg(par, 0x00); /* make sure mode is set */

	mdelay(50);
	par->fbtftops.reset(par);
	mdelay(1000);

	/* restore the original CS polarity */
	par->spi->mode = save_mode;
	ret = spi_setup(par->spi);
	if (ret) {
		dev_err(par->info->device, "Could not restore SPI mode\n");
		return ret;
	}
	write_reg(par, 0x00);

	version = firmware_version(par);
	fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Firmware version: %x.%02x\n",
		version >> 8, version & 0xFF);

	if (mode == 332)
		par->fbtftops.write_vmem = write_vmem_8bit;
	return 0;
}
/*
 * Required fbtft callback; window addressing is carried per-command
 * in the CMD_LCD_DRAWIMAGE header instead, so this is a no-op.
 */
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
	/* not used on this controller */
}
/*
 * Map the framebuffer rotation (degrees) to the controller's
 * orientation code and send it with CMD_LCD_ORIENTATION.
 * Unknown rotations fall back to 0 (no rotation).
 */
static int set_var(struct fbtft_par *par)
{
	u8 orientation;

	/* this controller rotates clock wise */
	if (par->info->var.rotate == 90)
		orientation = 27;
	else if (par->info->var.rotate == 180)
		orientation = 18;
	else if (par->info->var.rotate == 270)
		orientation = 9;
	else
		orientation = 0;

	write_reg(par, CMD_LCD_ORIENTATION, orientation);
	return 0;
}
/*
 * The Watterott controller cannot be driven without a reset GPIO;
 * fail the probe if one was not provided.
 */
static int verify_gpios(struct fbtft_par *par)
{
	if (par->gpio.reset >= 0)
		return 0;

	dev_err(par->info->device, "Missing 'reset' gpio. Aborting.\n");
	return -EINVAL;
}
#ifdef CONFIG_FB_BACKLIGHT
/*
 * Backlight update callback: forward the requested brightness to the
 * display MCU with CMD_LCD_LED.  A powered-down or blanked framebuffer
 * forces brightness 0 regardless of the requested value.
 */
static int backlight_chip_update_status(struct backlight_device *bd)
{
	struct fbtft_par *par = bl_get_data(bd);
	int brightness = bd->props.brightness;

	fbtft_par_dbg(DEBUG_BACKLIGHT, par,
		"%s: brightness=%d, power=%d, fb_blank=%d\n",
		__func__, bd->props.brightness, bd->props.power,
		bd->props.fb_blank);

	if (bd->props.power != FB_BLANK_UNBLANK)
		brightness = 0;
	if (bd->props.fb_blank != FB_BLANK_UNBLANK)
		brightness = 0;

	write_reg(par, CMD_LCD_LED, brightness);
	return 0;
}
/* Backlight ops: brightness changes are pushed to the display MCU. */
static const struct backlight_ops bl_ops = {
	.update_status = backlight_chip_update_status,
};
/*
 * Register a backlight device controlled by the display MCU
 * (brightness range 0-100, defaulting to DEFAULT_BRIGHTNESS).
 * Registration failure is logged but non-fatal.
 */
static void register_chip_backlight(struct fbtft_par *par)
{
	struct backlight_device *bd;
	struct backlight_properties bl_props = { 0, };

	bl_props.type = BACKLIGHT_RAW;
	bl_props.power = FB_BLANK_POWERDOWN;	/* off until unblank */
	bl_props.max_brightness = 100;
	bl_props.brightness = DEFAULT_BRIGHTNESS;

	bd = backlight_device_register(dev_driver_string(par->info->device),
		par->info->device, par, &bl_ops, &bl_props);
	if (IS_ERR(bd)) {
		dev_err(par->info->device,
			"cannot register backlight device (%ld)\n",
			PTR_ERR(bd));
		return;
	}
	par->info->bl_dev = bd;

	/* let fbtft core tear the device down unless already overridden */
	if (!par->fbtftops.unregister_backlight)
		par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
}
#else
#define register_chip_backlight NULL
#endif
/*
 * fbtft display description: 320x240 panel behind the Watterott MCU,
 * 8-bit register/bus width.  FPS is low (5) and the tx buffer small
 * because all pixel data is tunneled through MCU commands.
 */
static struct fbtft_display display = {
	.regwidth = 8,
	.buswidth = 8,
	.width = WIDTH,
	.height = HEIGHT,
	.fps = FPS,
	.txbuflen = TXBUFLEN,
	.fbtftops = {
		.write_register = write_reg8_bus8,
		.write_vmem = write_vmem,	/* swapped for RGB332 in init_display */
		.init_display = init_display,
		.set_addr_win = set_addr_win,
		.set_var = set_var,
		.verify_gpios = verify_gpios,
		.register_backlight = register_chip_backlight,
	},
};
FBTFT_REGISTER_DRIVER(DRVNAME, "watterott,openlcd", &display);
MODULE_ALIAS("spi:" DRVNAME);
MODULE_DESCRIPTION("FB driver for the Watterott LCD Controller");
MODULE_AUTHOR("Noralf Tronnes");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Glurmo/obs-studio | libobs/obs-cocoa.c | 24 | 40081 | /******************************************************************************
Copyright (C) 2013 by Ruwen Hahn <palana@stunned.de>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
#include "util/platform.h"
#include "util/dstr.h"
#include "obs.h"
#include "obs-internal.h"
#include <unistd.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <objc/objc.h>
#include <Carbon/Carbon.h>
#include <IOKit/hid/IOHIDDevice.h>
#include <IOKit/hid/IOHIDManager.h>
/* File extension used by loadable OBS plugin modules on this platform. */
const char *get_module_extension(void)
{
	static const char extension[] = ".so";

	return extension;
}
/*
 * Candidate plugin locations: the relative path (portable/development
 * layouts) is tried first, then the configured install prefix.
 * module_bin[i] pairs with module_data[i].
 */
static const char *module_bin[] = {
	"../obs-plugins",
	OBS_INSTALL_PREFIX "obs-plugins",
};

static const char *module_data[] = {
	"../data/obs-plugins/%module%",
	OBS_INSTALL_DATA_PATH "obs-plugins/%module%",
};

/* number of bin/data pairs above (arrays must stay the same length) */
static const int module_patterns_size =
	sizeof(module_bin)/sizeof(module_bin[0]);
/* Register every default bin/data plugin path pair with libobs. */
void add_default_module_paths(void)
{
	for (int i = 0; i < module_patterns_size; i++)
		obs_add_module_path(module_bin[i], module_data[i]);
}
/*
 * Build the absolute path of a libobs data file under the install
 * data directory.  Returns a heap string; the caller is expected to
 * take ownership and free it (bfree).
 */
char *find_libobs_data_file(const char *file)
{
	struct dstr path;
	dstr_init_copy(&path, OBS_INSTALL_DATA_PATH "/libobs/");
	dstr_cat(&path, file);
	return path.array;
}
/*
 * Log the CPU brand string (sysctl machdep.cpu.brand_string).
 * Best-effort: silently returns if the sysctl or the allocation fails.
 */
static void log_processor_name(void)
{
	char *name = NULL;
	size_t size;
	int ret;

	/* first call with a NULL buffer queries the required size */
	ret = sysctlbyname("machdep.cpu.brand_string", NULL, &size, NULL, 0);
	if (ret != 0)
		return;

	name = malloc(size);
	if (!name)	/* bug fix: malloc result was used unchecked */
		return;

	ret = sysctlbyname("machdep.cpu.brand_string", name, &size, NULL, 0);
	if (ret == 0)
		blog(LOG_INFO, "CPU Name: %s", name);

	free(name);
}
/* Log the nominal CPU frequency (sysctl hw.cpufrequency) in MHz. */
static void log_processor_speed(void)
{
	size_t size;
	long long freq;
	int ret;

	size = sizeof(freq);
	ret = sysctlbyname("hw.cpufrequency", &freq, &size, NULL, 0);
	if (ret == 0)
		blog(LOG_INFO, "CPU Speed: %lldMHz", freq / 1000000);
}
/*
 * Log physical and logical core counts
 * (sysctl machdep.cpu.core_count / machdep.cpu.thread_count).
 * Logs nothing unless both queries succeed.
 */
static void log_processor_cores(void)
{
	size_t size;
	int physical_cores = 0, logical_cores = 0;
	int ret;

	size = sizeof(physical_cores);
	ret = sysctlbyname("machdep.cpu.core_count", &physical_cores,
		&size, NULL, 0);
	if (ret != 0)
		return;

	ret = sysctlbyname("machdep.cpu.thread_count", &logical_cores,
		&size, NULL, 0);
	if (ret != 0)
		return;

	blog(LOG_INFO, "Physical Cores: %d, Logical Cores: %d",
		physical_cores, logical_cores);
}
/* Log total physical memory (sysctl hw.memsize) in MB. */
static void log_available_memory(void)
{
	size_t size;
	long long memory_available;
	int ret;

	size = sizeof(memory_available);
	ret = sysctlbyname("hw.memsize", &memory_available, &size, NULL, 0);
	if (ret == 0)
		blog(LOG_INFO, "Physical Memory: %lldMB Total",
			memory_available / 1024 / 1024);
}
/*
 * Log the OS name using NSProcessInfo via the Objective-C runtime.
 *
 * Bug fix: objc_msgSend must be cast to the method's actual function
 * signature before calling; relying on the bare declaration is
 * undefined behavior on arm64 and fails to compile with SDKs that
 * declare objc_msgSend without a prototype.
 */
static void log_os_name(id pi, SEL UTF8String)
{
	unsigned long (*send_ulong)(id, SEL) =
		(unsigned long (*)(id, SEL))objc_msgSend;
	id (*send_id)(id, SEL) = (id (*)(id, SEL))objc_msgSend;
	const char *(*send_cstr)(id, SEL) =
		(const char *(*)(id, SEL))objc_msgSend;

	unsigned long os_id = send_ulong(pi,
			sel_registerName("operatingSystem"));

	id os = send_id(pi, sel_registerName("operatingSystemName"));
	const char *name = send_cstr(os, UTF8String);

	if (os_id == 5 /*NSMACHOperatingSystem*/) {
		blog(LOG_INFO, "OS Name: Mac OS X (%s)", name);
		return;
	}

	blog(LOG_INFO, "OS Name: %s", name ? name : "Unknown");
}
/*
 * Log the OS version string from NSProcessInfo.
 *
 * Bug fix: objc_msgSend is cast to the proper function-pointer type
 * before calling (required for correctness on arm64 / modern SDKs).
 */
static void log_os_version(id pi, SEL UTF8String)
{
	id (*send_id)(id, SEL) = (id (*)(id, SEL))objc_msgSend;
	const char *(*send_cstr)(id, SEL) =
		(const char *(*)(id, SEL))objc_msgSend;

	id vs = send_id(pi, sel_registerName("operatingSystemVersionString"));
	const char *version = send_cstr(vs, UTF8String);

	blog(LOG_INFO, "OS Version: %s", version ? version : "Unknown");
}
static void log_os(void)
{
Class NSProcessInfo = objc_getClass("NSProcessInfo");
id pi = objc_msgSend((id)NSProcessInfo,
sel_registerName("processInfo"));
SEL UTF8String = sel_registerName("UTF8String");
log_os_name(pi, UTF8String);
log_os_version(pi, UTF8String);
}
/* Log the Darwin kernel release (sysctl kern.osrelease). */
static void log_kernel_version(void)
{
	char kernel_version[1024];
	size_t size = sizeof(kernel_version);
	int ret;

	ret = sysctlbyname("kern.osrelease", kernel_version, &size,
		NULL, 0);
	if (ret == 0)
		blog(LOG_INFO, "Kernel Version: %s", kernel_version);
}
/* Log a one-time summary of the host system (CPU, memory, OS, kernel). */
void log_system_info(void)
{
	log_processor_name();
	log_processor_speed();
	log_processor_cores();
	log_available_memory();
	log_os();
	log_kernel_version();
}
/*
 * Convert a CFString to UTF-8 into `str`.
 * Returns true on success, false if the conversion failed.
 *
 * Bug fix: CFStringGetMaximumSizeForEncoding() does not account for
 * the NUL terminator that CFStringGetCString() writes, so a string
 * needing exactly max_size bytes would fail to convert -- reserve and
 * pass one extra byte.
 */
static bool dstr_from_cfstring(struct dstr *str, CFStringRef ref)
{
	CFIndex length = CFStringGetLength(ref);
	CFIndex max_size = CFStringGetMaximumSizeForEncoding(length,
			kCFStringEncodingUTF8) + 1;

	dstr_reserve(str, max_size);

	if (!CFStringGetCString(ref, str->array, max_size,
				kCFStringEncodingUTF8))
		return false;

	str->len = strlen(str->array);
	return true;
}
/*
 * Per-process hotkey platform state for macOS.
 * Shared via refcount between the hotkey thread and key-name lookups;
 * see hotkeys_retain()/hotkeys_release().
 */
struct obs_hotkeys_platform {
	volatile long refs;	/* extra references; freed when it drops below 0 */
	TISInputSourceRef tis;	/* current keyboard input source */
	CFDataRef layout_data;	/* raw layout data backing `layout` */
	UCKeyboardLayout *layout;	/* layout used by UCKeyTranslate */
	IOHIDManagerRef manager;	/* HID manager for keyboard devices */
	/* HID elements observed for each OBS key code */
	DARRAY(IOHIDElementRef) keys[OBS_KEY_LAST_VALUE];
};
/* Take an additional reference on the hotkey platform data. */
static void hotkeys_retain(struct obs_hotkeys_platform *plat)
{
	os_atomic_inc_long(&plat->refs);
}
static inline void free_hotkeys_platform(obs_hotkeys_platform_t *plat);

/*
 * Drop a reference; frees the platform data when the count falls
 * below zero (refs == 0 represents a single owner).
 */
static void hotkeys_release(struct obs_hotkeys_platform *plat)
{
	if (os_atomic_dec_long(&plat->refs) == -1)
		free_hotkeys_platform(plat);
}
/* sentinel for "no Carbon virtual key exists for this OBS key" */
#define INVALID_KEY 0xff

/*
 * Map an OBS key code to the corresponding Carbon virtual key code
 * (kVK_*).  Returns INVALID_KEY for keys with no macOS equivalent.
 * Note: only the left modifier variants are returned; a case label
 * can map to one value only (right variants left commented below).
 */
int obs_key_to_virtual_key(obs_key_t code)
{
	switch (code) {
	case OBS_KEY_A: return kVK_ANSI_A;
	case OBS_KEY_B: return kVK_ANSI_B;
	case OBS_KEY_C: return kVK_ANSI_C;
	case OBS_KEY_D: return kVK_ANSI_D;
	case OBS_KEY_E: return kVK_ANSI_E;
	case OBS_KEY_F: return kVK_ANSI_F;
	case OBS_KEY_G: return kVK_ANSI_G;
	case OBS_KEY_H: return kVK_ANSI_H;
	case OBS_KEY_I: return kVK_ANSI_I;
	case OBS_KEY_J: return kVK_ANSI_J;
	case OBS_KEY_K: return kVK_ANSI_K;
	case OBS_KEY_L: return kVK_ANSI_L;
	case OBS_KEY_M: return kVK_ANSI_M;
	case OBS_KEY_N: return kVK_ANSI_N;
	case OBS_KEY_O: return kVK_ANSI_O;
	case OBS_KEY_P: return kVK_ANSI_P;
	case OBS_KEY_Q: return kVK_ANSI_Q;
	case OBS_KEY_R: return kVK_ANSI_R;
	case OBS_KEY_S: return kVK_ANSI_S;
	case OBS_KEY_T: return kVK_ANSI_T;
	case OBS_KEY_U: return kVK_ANSI_U;
	case OBS_KEY_V: return kVK_ANSI_V;
	case OBS_KEY_W: return kVK_ANSI_W;
	case OBS_KEY_X: return kVK_ANSI_X;
	case OBS_KEY_Y: return kVK_ANSI_Y;
	case OBS_KEY_Z: return kVK_ANSI_Z;
	case OBS_KEY_1: return kVK_ANSI_1;
	case OBS_KEY_2: return kVK_ANSI_2;
	case OBS_KEY_3: return kVK_ANSI_3;
	case OBS_KEY_4: return kVK_ANSI_4;
	case OBS_KEY_5: return kVK_ANSI_5;
	case OBS_KEY_6: return kVK_ANSI_6;
	case OBS_KEY_7: return kVK_ANSI_7;
	case OBS_KEY_8: return kVK_ANSI_8;
	case OBS_KEY_9: return kVK_ANSI_9;
	case OBS_KEY_0: return kVK_ANSI_0;
	case OBS_KEY_RETURN: return kVK_Return;
	case OBS_KEY_ESCAPE: return kVK_Escape;
	case OBS_KEY_BACKSPACE: return kVK_Delete;
	case OBS_KEY_TAB: return kVK_Tab;
	case OBS_KEY_SPACE: return kVK_Space;
	case OBS_KEY_MINUS: return kVK_ANSI_Minus;
	case OBS_KEY_EQUAL: return kVK_ANSI_Equal;
	case OBS_KEY_BRACKETLEFT: return kVK_ANSI_LeftBracket;
	case OBS_KEY_BRACKETRIGHT: return kVK_ANSI_RightBracket;
	case OBS_KEY_BACKSLASH: return kVK_ANSI_Backslash;
	case OBS_KEY_SEMICOLON: return kVK_ANSI_Semicolon;
	case OBS_KEY_QUOTE: return kVK_ANSI_Quote;
	case OBS_KEY_DEAD_GRAVE: return kVK_ANSI_Grave;
	case OBS_KEY_COMMA: return kVK_ANSI_Comma;
	case OBS_KEY_PERIOD: return kVK_ANSI_Period;
	case OBS_KEY_SLASH: return kVK_ANSI_Slash;
	case OBS_KEY_CAPSLOCK: return kVK_CapsLock;
	case OBS_KEY_SECTION: return kVK_ISO_Section;
	case OBS_KEY_F1: return kVK_F1;
	case OBS_KEY_F2: return kVK_F2;
	case OBS_KEY_F3: return kVK_F3;
	case OBS_KEY_F4: return kVK_F4;
	case OBS_KEY_F5: return kVK_F5;
	case OBS_KEY_F6: return kVK_F6;
	case OBS_KEY_F7: return kVK_F7;
	case OBS_KEY_F8: return kVK_F8;
	case OBS_KEY_F9: return kVK_F9;
	case OBS_KEY_F10: return kVK_F10;
	case OBS_KEY_F11: return kVK_F11;
	case OBS_KEY_F12: return kVK_F12;
	case OBS_KEY_HELP: return kVK_Help;
	case OBS_KEY_HOME: return kVK_Home;
	case OBS_KEY_PAGEUP: return kVK_PageUp;
	case OBS_KEY_DELETE: return kVK_ForwardDelete;
	case OBS_KEY_END: return kVK_End;
	case OBS_KEY_PAGEDOWN: return kVK_PageDown;
	case OBS_KEY_RIGHT: return kVK_RightArrow;
	case OBS_KEY_LEFT: return kVK_LeftArrow;
	case OBS_KEY_DOWN: return kVK_DownArrow;
	case OBS_KEY_UP: return kVK_UpArrow;
	case OBS_KEY_CLEAR: return kVK_ANSI_KeypadClear;
	case OBS_KEY_NUMSLASH: return kVK_ANSI_KeypadDivide;
	case OBS_KEY_NUMASTERISK: return kVK_ANSI_KeypadMultiply;
	case OBS_KEY_NUMMINUS: return kVK_ANSI_KeypadMinus;
	case OBS_KEY_NUMPLUS: return kVK_ANSI_KeypadPlus;
	case OBS_KEY_ENTER: return kVK_ANSI_KeypadEnter;
	case OBS_KEY_NUM1: return kVK_ANSI_Keypad1;
	case OBS_KEY_NUM2: return kVK_ANSI_Keypad2;
	case OBS_KEY_NUM3: return kVK_ANSI_Keypad3;
	case OBS_KEY_NUM4: return kVK_ANSI_Keypad4;
	case OBS_KEY_NUM5: return kVK_ANSI_Keypad5;
	case OBS_KEY_NUM6: return kVK_ANSI_Keypad6;
	case OBS_KEY_NUM7: return kVK_ANSI_Keypad7;
	case OBS_KEY_NUM8: return kVK_ANSI_Keypad8;
	case OBS_KEY_NUM9: return kVK_ANSI_Keypad9;
	case OBS_KEY_NUM0: return kVK_ANSI_Keypad0;
	case OBS_KEY_NUMPERIOD: return kVK_ANSI_KeypadDecimal;
	case OBS_KEY_NUMEQUAL: return kVK_ANSI_KeypadEquals;
	case OBS_KEY_F13: return kVK_F13;
	case OBS_KEY_F14: return kVK_F14;
	case OBS_KEY_F15: return kVK_F15;
	case OBS_KEY_F16: return kVK_F16;
	case OBS_KEY_F17: return kVK_F17;
	case OBS_KEY_F18: return kVK_F18;
	case OBS_KEY_F19: return kVK_F19;
	case OBS_KEY_F20: return kVK_F20;
	case OBS_KEY_CONTROL: return kVK_Control;
	case OBS_KEY_SHIFT: return kVK_Shift;
	case OBS_KEY_ALT: return kVK_Option;
	case OBS_KEY_META: return kVK_Command;
	//case OBS_KEY_CONTROL: return kVK_RightControl;
	//case OBS_KEY_SHIFT: return kVK_RightShift;
	//case OBS_KEY_ALT: return kVK_RightOption;
	//case OBS_KEY_META: return 0x36;
	case OBS_KEY_NONE:
	case OBS_KEY_LAST_VALUE:
	default:
		break;
	}
	return INVALID_KEY;
}
/*
 * Produce a localized, human readable name for keys whose
 * layout-based translation would be wrong or empty: space, the
 * keypad keys and the mouse buttons.  Returns true if `str` was
 * filled, false to fall through to layout translation.
 */
static bool localized_key_to_str(obs_key_t key, struct dstr *str)
{
#define MAP_KEY(k, s) case k: \
	dstr_copy(str, obs_get_hotkey_translation(k, s)); \
	return true

#define MAP_BUTTON(i) case OBS_KEY_MOUSE ## i: \
	dstr_copy(str, obs_get_hotkey_translation(key, "Mouse " #i)); \
	return true

	switch (key) {
	MAP_KEY(OBS_KEY_SPACE, "Space");
	MAP_KEY(OBS_KEY_NUMEQUAL, "= (Keypad)");
	MAP_KEY(OBS_KEY_NUMASTERISK, "* (Keypad)");
	MAP_KEY(OBS_KEY_NUMPLUS, "+ (Keypad)");
	MAP_KEY(OBS_KEY_NUMMINUS, "- (Keypad)");
	MAP_KEY(OBS_KEY_NUMPERIOD, ". (Keypad)");
	MAP_KEY(OBS_KEY_NUMSLASH, "/ (Keypad)");
	MAP_KEY(OBS_KEY_NUM0, "0 (Keypad)");
	MAP_KEY(OBS_KEY_NUM1, "1 (Keypad)");
	MAP_KEY(OBS_KEY_NUM2, "2 (Keypad)");
	MAP_KEY(OBS_KEY_NUM3, "3 (Keypad)");
	MAP_KEY(OBS_KEY_NUM4, "4 (Keypad)");
	MAP_KEY(OBS_KEY_NUM5, "5 (Keypad)");
	MAP_KEY(OBS_KEY_NUM6, "6 (Keypad)");
	MAP_KEY(OBS_KEY_NUM7, "7 (Keypad)");
	MAP_KEY(OBS_KEY_NUM8, "8 (Keypad)");
	MAP_KEY(OBS_KEY_NUM9, "9 (Keypad)");

	MAP_BUTTON(1);
	MAP_BUTTON(2);
	MAP_BUTTON(3);
	MAP_BUTTON(4);
	MAP_BUTTON(5);
	MAP_BUTTON(6);
	MAP_BUTTON(7);
	MAP_BUTTON(8);
	MAP_BUTTON(9);
	MAP_BUTTON(10);
	MAP_BUTTON(11);
	MAP_BUTTON(12);
	MAP_BUTTON(13);
	MAP_BUTTON(14);
	MAP_BUTTON(15);
	MAP_BUTTON(16);
	MAP_BUTTON(17);
	MAP_BUTTON(18);
	MAP_BUTTON(19);
	MAP_BUTTON(20);
	MAP_BUTTON(21);
	MAP_BUTTON(22);
	MAP_BUTTON(23);
	MAP_BUTTON(24);
	MAP_BUTTON(25);
	MAP_BUTTON(26);
	MAP_BUTTON(27);
	MAP_BUTTON(28);
	MAP_BUTTON(29);
	default: break;
	}
#undef MAP_BUTTON
#undef MAP_KEY

	return false;
}
/*
 * Give non-printing virtual keys a fixed representation -- either the
 * standard macOS glyph (e.g. the return/escape/arrow symbols) or a
 * plain name for the function keys.  Returns true if `str` was
 * filled; keys not handled here fall through to UCKeyTranslate.
 */
static bool code_to_str(int code, struct dstr *str)
{
#define MAP_GLYPH(c, g) \
	case c: dstr_from_wcs(str, (wchar_t[]){g, 0}); return true
#define MAP_STR(c, s) case c: dstr_copy(str, s); return true
	switch (code) {
	MAP_GLYPH(kVK_Return, 0x21A9);
	MAP_GLYPH(kVK_Escape, 0x238B);
	MAP_GLYPH(kVK_Delete, 0x232B);
	MAP_GLYPH(kVK_Tab, 0x21e5);
	MAP_GLYPH(kVK_CapsLock, 0x21EA);
	MAP_GLYPH(kVK_ANSI_KeypadClear, 0x2327);
	MAP_GLYPH(kVK_ANSI_KeypadEnter, 0x2305);
	MAP_GLYPH(kVK_Help, 0x003F);
	MAP_GLYPH(kVK_Home, 0x2196);
	MAP_GLYPH(kVK_PageUp, 0x21de);
	MAP_GLYPH(kVK_ForwardDelete, 0x2326);
	MAP_GLYPH(kVK_End, 0x2198);
	MAP_GLYPH(kVK_PageDown, 0x21df);
	MAP_GLYPH(kVK_RightArrow, 0x2192);
	MAP_GLYPH(kVK_LeftArrow, 0x2190);
	MAP_GLYPH(kVK_DownArrow, 0x2193);
	MAP_GLYPH(kVK_UpArrow, 0x2191);

	MAP_STR (kVK_F1, "F1");
	MAP_STR (kVK_F2, "F2");
	MAP_STR (kVK_F3, "F3");
	MAP_STR (kVK_F4, "F4");
	MAP_STR (kVK_F5, "F5");
	MAP_STR (kVK_F6, "F6");
	MAP_STR (kVK_F7, "F7");
	MAP_STR (kVK_F8, "F8");
	MAP_STR (kVK_F9, "F9");
	MAP_STR (kVK_F10, "F10");
	MAP_STR (kVK_F11, "F11");
	MAP_STR (kVK_F12, "F12");
	MAP_STR (kVK_F13, "F13");
	MAP_STR (kVK_F14, "F14");
	MAP_STR (kVK_F15, "F15");
	MAP_STR (kVK_F16, "F16");
	MAP_STR (kVK_F17, "F17");
	MAP_STR (kVK_F18, "F18");
	MAP_STR (kVK_F19, "F19");
	MAP_STR (kVK_F20, "F20");
	MAP_GLYPH(kVK_Control, kControlUnicode);
	MAP_GLYPH(kVK_Shift, kShiftUnicode);
	MAP_GLYPH(kVK_Option, kOptionUnicode);
	MAP_GLYPH(kVK_Command, kCommandUnicode);
	MAP_GLYPH(kVK_RightControl, kControlUnicode);
	MAP_GLYPH(kVK_RightShift, kShiftUnicode);
	MAP_GLYPH(kVK_RightOption, kOptionUnicode);
	}
#undef MAP_STR
#undef MAP_GLYPH

	return false;
}
/*
 * Translate an obs_key_t into a human readable string.
 * Attempted in order:
 *   1. localized names (space, keypad, mouse buttons),
 *   2. fixed glyphs/names for non-printing keys (code_to_str),
 *   3. the active keyboard layout via UCKeyTranslate,
 *   4. fall back to obs_key_to_name() on any failure.
 */
void obs_key_to_str(obs_key_t key, struct dstr *str)
{
	if (localized_key_to_str(key, str))
		return;

	int code = obs_key_to_virtual_key(key);
	if (code_to_str(code, str))
		return;

	if (code == INVALID_KEY) {
		blog(LOG_ERROR, "hotkey-cocoa: Got invalid key while "
				"translating key '%d' (%s)",
				key, obs_key_to_name(key));
		goto err;
	}

	/* take a reference on the platform data that holds the layout.
	 * NOTE(review): hotkeys_retain(plat) runs before the NULL check
	 * below -- if platform_context can ever be NULL here this
	 * dereferences NULL; verify the initialization order. */
	struct obs_hotkeys_platform *plat = NULL;
	if (obs) {
		pthread_mutex_lock(&obs->hotkeys.mutex);
		plat = obs->hotkeys.platform_context;
		hotkeys_retain(plat);
		pthread_mutex_unlock(&obs->hotkeys.mutex);
	}
	if (!plat) {
		blog(LOG_ERROR, "hotkey-cocoa: Could not get hotkey platform "
				"while translating key '%d' (%s)",
				key, obs_key_to_name(key));
		goto err;
	}

	const UniCharCount max_length = 16;
	UInt32 dead_key_state = 0;
	UniChar buffer[max_length];
	UniCharCount len = 0;

	OSStatus err = UCKeyTranslate(plat->layout,
			code,
			kUCKeyActionDown,
			0x104, //caps lock for upper case letters
			LMGetKbdType(),
			kUCKeyTranslateNoDeadKeysBit,
			&dead_key_state,
			max_length,
			&len,
			buffer);

	/* a dead key produced no output; translate a following space to
	 * obtain the dead key's standalone character */
	if (err == noErr && len <= 0 && dead_key_state) {
		err = UCKeyTranslate(plat->layout,
				kVK_Space,
				kUCKeyActionDown,
				0x104,
				LMGetKbdType(),
				kUCKeyTranslateNoDeadKeysBit,
				&dead_key_state,
				max_length,
				&len,
				buffer);
	}

	hotkeys_release(plat);

	if (err != noErr) {
		blog(LOG_ERROR, "hotkey-cocoa: Error while translating key '%d'"
				" (0x%x, %s) to string: %d", key, code,
				obs_key_to_name(key), err);
		goto err;
	}

	if (len == 0) {
		blog(LOG_ERROR, "hotkey-cocoa: Got 0 length string while "
				"translating '%d' (0x%x, %s) to string",
				key, code, obs_key_to_name(key));
		goto err;
	}

	/* wrap the stack buffer without copying; released below */
	CFStringRef string = CFStringCreateWithCharactersNoCopy(NULL,
			buffer, len, kCFAllocatorNull);
	if (!string) {
		blog(LOG_ERROR, "hotkey-cocoa: Could not create CFStringRef "
				"while translating '%d' (0x%x, %s) to string",
				key, code, obs_key_to_name(key));
		goto err;
	}

	if (!dstr_from_cfstring(str, string)) {
		blog(LOG_ERROR, "hotkey-cocoa: Could not translate CFStringRef "
				"to CString while translating '%d' (0x%x, %s)",
				key, code, obs_key_to_name(key));
		goto release;
	}

	CFRelease(string);
	return;

release:
	CFRelease(string);
err:
	/* fall back to the internal key name */
	dstr_copy(str, obs_key_to_name(key));
}
/* byte size of the modifier glyph buffers below (UTF-8 bytes + NUL) */
#define OBS_COCOA_MODIFIER_SIZE 7

/*
 * Convert a zero-terminated UniChar pair (one glyph + 0) to UTF-8
 * into `buff` (at least OBS_COCOA_MODIFIER_SIZE bytes).
 * NOTE(review): the CFString is created with length 2, i.e. it
 * includes the trailing 0 UniChar -- appears harmless since the
 * result is used as a C string, but confirm it is intentional.
 */
static void unichar_to_utf8(const UniChar *c, char *buff)
{
	CFStringRef string = CFStringCreateWithCharactersNoCopy(NULL, c, 2,
			kCFAllocatorNull);
	if (!string) {
		blog(LOG_ERROR, "hotkey-cocoa: Could not create CFStringRef "
				"while populating modifier strings");
		return;
	}

	if (!CFStringGetCString(string, buff, OBS_COCOA_MODIFIER_SIZE,
				kCFStringEncodingUTF8))
		blog(LOG_ERROR, "hotkey-cocoa: Error while populating "
				" modifier string with glyph %d (0x%x)",
				c[0], c[0]);

	CFRelease(string);
}
/* UTF-8 renderings of the macOS modifier glyphs (control, option,
 * shift, command), filled exactly once by init_utf_8_strings(). */
static char ctrl_str[OBS_COCOA_MODIFIER_SIZE];
static char opt_str[OBS_COCOA_MODIFIER_SIZE];
static char shift_str[OBS_COCOA_MODIFIER_SIZE];
static char cmd_str[OBS_COCOA_MODIFIER_SIZE];
/* One-time initializer (via pthread_once) for the modifier glyph
 * strings above. */
static void init_utf_8_strings(void)
{
	const UniChar ctrl_uni[] = {kControlUnicode, 0};
	const UniChar opt_uni[] = {kOptionUnicode, 0};
	const UniChar shift_uni[] = {kShiftUnicode, 0};
	const UniChar cmd_uni[] = {kCommandUnicode, 0};

	unichar_to_utf8(ctrl_uni, ctrl_str);
	unichar_to_utf8(opt_uni, opt_str);
	unichar_to_utf8(shift_uni, shift_str);
	unichar_to_utf8(cmd_uni, cmd_str);
}
/* guards one-time initialization of the modifier glyph strings */
static pthread_once_t strings_token = PTHREAD_ONCE_INIT;

/*
 * Render a key combination as "<modifier glyphs><key name>" in the
 * conventional macOS order (control, option, shift, command).
 * Falls back to just the key name if the modifier strings could not
 * be initialized.
 */
void obs_key_combination_to_str(obs_key_combination_t key, struct dstr *str)
{
	struct dstr key_str = {0};
	if (key.key != OBS_KEY_NONE)
		obs_key_to_str(key.key, &key_str);

	int res = pthread_once(&strings_token, init_utf_8_strings);
	if (res) {
		blog(LOG_ERROR, "hotkeys-cocoa: Error while translating "
				"modifiers %d (0x%x)", res, res);
		dstr_move(str, &key_str);
		return;
	}

#define CHECK_MODIFIER(mod, str) ((key.modifiers & mod) ? str : "")
	dstr_printf(str, "%s%s%s%s%s",
			CHECK_MODIFIER(INTERACT_CONTROL_KEY, ctrl_str),
			CHECK_MODIFIER(INTERACT_ALT_KEY, opt_str),
			CHECK_MODIFIER(INTERACT_SHIFT_KEY, shift_str),
			CHECK_MODIFIER(INTERACT_COMMAND_KEY, cmd_str),
			key_str.len ? key_str.array : "");
#undef CHECK_MODIFIER

	dstr_free(&key_str);
}
/*
 * Build an IOHID device-matching dictionary for the given usage
 * page/usage pair.  The caller owns (and must CFRelease) the result.
 */
static inline CFDictionaryRef copy_device_mask(UInt32 page, UInt32 usage)
{
	CFMutableDictionaryRef dict = CFDictionaryCreateMutable(
			kCFAllocatorDefault, 2,
			&kCFTypeDictionaryKeyCallBacks,
			&kCFTypeDictionaryValueCallBacks);

	CFNumberRef value;

	// Add the page value.
	value = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &page);
	CFDictionarySetValue(dict, CFSTR(kIOHIDDeviceUsagePageKey), value);
	CFRelease(value);

	// Add the usage value (which is only valid if page value exists).
	value = CFNumberCreate(kCFAllocatorDefault, kCFNumberIntType, &usage);
	CFDictionarySetValue(dict, CFSTR(kIOHIDDeviceUsageKey), value);
	CFRelease(value);

	return dict;
}
/*
 * Return the set of HID devices matching the usage page/usage pair,
 * or NULL if none match.  The caller owns (CFRelease) the result.
 */
static CFSetRef copy_devices(obs_hotkeys_platform_t *plat,
		UInt32 page, UInt32 usage)
{
	CFDictionaryRef mask = copy_device_mask(page, usage);
	IOHIDManagerSetDeviceMatching(plat->manager, mask);
	CFRelease(mask);

	CFSetRef devices = IOHIDManagerCopyDevices(plat->manager);
	if (!devices)
		return NULL;

	/* treat an empty set the same as no devices */
	if (CFSetGetCount(devices) < 1) {
		CFRelease(devices);
		return NULL;
	}

	return devices;
}
/* Translate a HID keyboard/keypad usage id (kHIDPage_KeyboardOrKeypad)
 * to the corresponding Carbon virtual key code, or INVALID_KEY when the
 * usage has no usable Carbon equivalent. Pure lookup table. */
static UInt16 usage_to_carbon(UInt32 usage)
{
    switch (usage)
    {
    case kHIDUsage_KeyboardErrorRollOver: return INVALID_KEY;
    case kHIDUsage_KeyboardPOSTFail: return INVALID_KEY;
    case kHIDUsage_KeyboardErrorUndefined: return INVALID_KEY;
    case kHIDUsage_KeyboardA: return kVK_ANSI_A;
    case kHIDUsage_KeyboardB: return kVK_ANSI_B;
    case kHIDUsage_KeyboardC: return kVK_ANSI_C;
    case kHIDUsage_KeyboardD: return kVK_ANSI_D;
    case kHIDUsage_KeyboardE: return kVK_ANSI_E;
    case kHIDUsage_KeyboardF: return kVK_ANSI_F;
    case kHIDUsage_KeyboardG: return kVK_ANSI_G;
    case kHIDUsage_KeyboardH: return kVK_ANSI_H;
    case kHIDUsage_KeyboardI: return kVK_ANSI_I;
    case kHIDUsage_KeyboardJ: return kVK_ANSI_J;
    case kHIDUsage_KeyboardK: return kVK_ANSI_K;
    case kHIDUsage_KeyboardL: return kVK_ANSI_L;
    case kHIDUsage_KeyboardM: return kVK_ANSI_M;
    case kHIDUsage_KeyboardN: return kVK_ANSI_N;
    case kHIDUsage_KeyboardO: return kVK_ANSI_O;
    case kHIDUsage_KeyboardP: return kVK_ANSI_P;
    case kHIDUsage_KeyboardQ: return kVK_ANSI_Q;
    case kHIDUsage_KeyboardR: return kVK_ANSI_R;
    case kHIDUsage_KeyboardS: return kVK_ANSI_S;
    case kHIDUsage_KeyboardT: return kVK_ANSI_T;
    case kHIDUsage_KeyboardU: return kVK_ANSI_U;
    case kHIDUsage_KeyboardV: return kVK_ANSI_V;
    case kHIDUsage_KeyboardW: return kVK_ANSI_W;
    case kHIDUsage_KeyboardX: return kVK_ANSI_X;
    case kHIDUsage_KeyboardY: return kVK_ANSI_Y;
    case kHIDUsage_KeyboardZ: return kVK_ANSI_Z;
    case kHIDUsage_Keyboard1: return kVK_ANSI_1;
    case kHIDUsage_Keyboard2: return kVK_ANSI_2;
    case kHIDUsage_Keyboard3: return kVK_ANSI_3;
    case kHIDUsage_Keyboard4: return kVK_ANSI_4;
    case kHIDUsage_Keyboard5: return kVK_ANSI_5;
    case kHIDUsage_Keyboard6: return kVK_ANSI_6;
    case kHIDUsage_Keyboard7: return kVK_ANSI_7;
    case kHIDUsage_Keyboard8: return kVK_ANSI_8;
    case kHIDUsage_Keyboard9: return kVK_ANSI_9;
    case kHIDUsage_Keyboard0: return kVK_ANSI_0;
    case kHIDUsage_KeyboardReturnOrEnter: return kVK_Return;
    case kHIDUsage_KeyboardEscape: return kVK_Escape;
    case kHIDUsage_KeyboardDeleteOrBackspace: return kVK_Delete;
    case kHIDUsage_KeyboardTab: return kVK_Tab;
    case kHIDUsage_KeyboardSpacebar: return kVK_Space;
    case kHIDUsage_KeyboardHyphen: return kVK_ANSI_Minus;
    case kHIDUsage_KeyboardEqualSign: return kVK_ANSI_Equal;
    case kHIDUsage_KeyboardOpenBracket: return kVK_ANSI_LeftBracket;
    case kHIDUsage_KeyboardCloseBracket: return kVK_ANSI_RightBracket;
    case kHIDUsage_KeyboardBackslash: return kVK_ANSI_Backslash;
    case kHIDUsage_KeyboardNonUSPound: return INVALID_KEY;
    case kHIDUsage_KeyboardSemicolon: return kVK_ANSI_Semicolon;
    case kHIDUsage_KeyboardQuote: return kVK_ANSI_Quote;
    case kHIDUsage_KeyboardGraveAccentAndTilde: return kVK_ANSI_Grave;
    case kHIDUsage_KeyboardComma: return kVK_ANSI_Comma;
    case kHIDUsage_KeyboardPeriod: return kVK_ANSI_Period;
    case kHIDUsage_KeyboardSlash: return kVK_ANSI_Slash;
    case kHIDUsage_KeyboardCapsLock: return kVK_CapsLock;
    case kHIDUsage_KeyboardF1: return kVK_F1;
    case kHIDUsage_KeyboardF2: return kVK_F2;
    case kHIDUsage_KeyboardF3: return kVK_F3;
    case kHIDUsage_KeyboardF4: return kVK_F4;
    case kHIDUsage_KeyboardF5: return kVK_F5;
    case kHIDUsage_KeyboardF6: return kVK_F6;
    case kHIDUsage_KeyboardF7: return kVK_F7;
    case kHIDUsage_KeyboardF8: return kVK_F8;
    case kHIDUsage_KeyboardF9: return kVK_F9;
    case kHIDUsage_KeyboardF10: return kVK_F10;
    case kHIDUsage_KeyboardF11: return kVK_F11;
    case kHIDUsage_KeyboardF12: return kVK_F12;
    case kHIDUsage_KeyboardPrintScreen: return INVALID_KEY;
    case kHIDUsage_KeyboardScrollLock: return INVALID_KEY;
    case kHIDUsage_KeyboardPause: return INVALID_KEY;
    /* Insert is mapped onto the legacy Help key position here --
     * NOTE(review): presumably intentional for Mac keyboards; confirm */
    case kHIDUsage_KeyboardInsert: return kVK_Help;
    case kHIDUsage_KeyboardHome: return kVK_Home;
    case kHIDUsage_KeyboardPageUp: return kVK_PageUp;
    case kHIDUsage_KeyboardDeleteForward: return kVK_ForwardDelete;
    case kHIDUsage_KeyboardEnd: return kVK_End;
    case kHIDUsage_KeyboardPageDown: return kVK_PageDown;
    case kHIDUsage_KeyboardRightArrow: return kVK_RightArrow;
    case kHIDUsage_KeyboardLeftArrow: return kVK_LeftArrow;
    case kHIDUsage_KeyboardDownArrow: return kVK_DownArrow;
    case kHIDUsage_KeyboardUpArrow: return kVK_UpArrow;
    case kHIDUsage_KeypadNumLock: return kVK_ANSI_KeypadClear;
    case kHIDUsage_KeypadSlash: return kVK_ANSI_KeypadDivide;
    case kHIDUsage_KeypadAsterisk: return kVK_ANSI_KeypadMultiply;
    case kHIDUsage_KeypadHyphen: return kVK_ANSI_KeypadMinus;
    case kHIDUsage_KeypadPlus: return kVK_ANSI_KeypadPlus;
    case kHIDUsage_KeypadEnter: return kVK_ANSI_KeypadEnter;
    case kHIDUsage_Keypad1: return kVK_ANSI_Keypad1;
    case kHIDUsage_Keypad2: return kVK_ANSI_Keypad2;
    case kHIDUsage_Keypad3: return kVK_ANSI_Keypad3;
    case kHIDUsage_Keypad4: return kVK_ANSI_Keypad4;
    case kHIDUsage_Keypad5: return kVK_ANSI_Keypad5;
    case kHIDUsage_Keypad6: return kVK_ANSI_Keypad6;
    case kHIDUsage_Keypad7: return kVK_ANSI_Keypad7;
    case kHIDUsage_Keypad8: return kVK_ANSI_Keypad8;
    case kHIDUsage_Keypad9: return kVK_ANSI_Keypad9;
    case kHIDUsage_Keypad0: return kVK_ANSI_Keypad0;
    case kHIDUsage_KeypadPeriod: return kVK_ANSI_KeypadDecimal;
    case kHIDUsage_KeyboardNonUSBackslash: return INVALID_KEY;
    /* NOTE(review): Application (context-menu key) mapped to kVK_F13,
     * same code as the F13 case below -- confirm this is intended */
    case kHIDUsage_KeyboardApplication: return kVK_F13;
    case kHIDUsage_KeyboardPower: return INVALID_KEY;
    case kHIDUsage_KeypadEqualSign: return kVK_ANSI_KeypadEquals;
    case kHIDUsage_KeyboardF13: return kVK_F13;
    case kHIDUsage_KeyboardF14: return kVK_F14;
    case kHIDUsage_KeyboardF15: return kVK_F15;
    case kHIDUsage_KeyboardF16: return kVK_F16;
    case kHIDUsage_KeyboardF17: return kVK_F17;
    case kHIDUsage_KeyboardF18: return kVK_F18;
    case kHIDUsage_KeyboardF19: return kVK_F19;
    case kHIDUsage_KeyboardF20: return kVK_F20;
    case kHIDUsage_KeyboardF21: return INVALID_KEY;
    case kHIDUsage_KeyboardF22: return INVALID_KEY;
    case kHIDUsage_KeyboardF23: return INVALID_KEY;
    case kHIDUsage_KeyboardF24: return INVALID_KEY;
    case kHIDUsage_KeyboardExecute: return INVALID_KEY;
    case kHIDUsage_KeyboardHelp: return INVALID_KEY;
    /* 0x7F: no kVK_ constant available for the Menu key -- TODO confirm */
    case kHIDUsage_KeyboardMenu: return 0x7F;
    case kHIDUsage_KeyboardSelect: return kVK_ANSI_KeypadEnter;
    case kHIDUsage_KeyboardStop: return INVALID_KEY;
    case kHIDUsage_KeyboardAgain: return INVALID_KEY;
    case kHIDUsage_KeyboardUndo: return INVALID_KEY;
    case kHIDUsage_KeyboardCut: return INVALID_KEY;
    case kHIDUsage_KeyboardCopy: return INVALID_KEY;
    case kHIDUsage_KeyboardPaste: return INVALID_KEY;
    case kHIDUsage_KeyboardFind: return INVALID_KEY;
    case kHIDUsage_KeyboardMute: return kVK_Mute;
    case kHIDUsage_KeyboardVolumeUp: return kVK_VolumeUp;
    case kHIDUsage_KeyboardVolumeDown: return kVK_VolumeDown;
    case kHIDUsage_KeyboardLockingCapsLock: return INVALID_KEY;
    case kHIDUsage_KeyboardLockingNumLock: return INVALID_KEY;
    case kHIDUsage_KeyboardLockingScrollLock: return INVALID_KEY;
    case kHIDUsage_KeypadComma: return INVALID_KEY;
    case kHIDUsage_KeypadEqualSignAS400: return INVALID_KEY;
    case kHIDUsage_KeyboardInternational1: return INVALID_KEY;
    case kHIDUsage_KeyboardInternational2: return INVALID_KEY;
    case kHIDUsage_KeyboardInternational3: return INVALID_KEY;
    case kHIDUsage_KeyboardInternational4: return INVALID_KEY;
    case kHIDUsage_KeyboardInternational5: return INVALID_KEY;
    case kHIDUsage_KeyboardInternational6: return INVALID_KEY;
    case kHIDUsage_KeyboardInternational7: return INVALID_KEY;
    case kHIDUsage_KeyboardInternational8: return INVALID_KEY;
    case kHIDUsage_KeyboardInternational9: return INVALID_KEY;
    case kHIDUsage_KeyboardLANG1: return INVALID_KEY;
    case kHIDUsage_KeyboardLANG2: return INVALID_KEY;
    case kHIDUsage_KeyboardLANG3: return INVALID_KEY;
    case kHIDUsage_KeyboardLANG4: return INVALID_KEY;
    case kHIDUsage_KeyboardLANG5: return INVALID_KEY;
    case kHIDUsage_KeyboardLANG6: return INVALID_KEY;
    case kHIDUsage_KeyboardLANG7: return INVALID_KEY;
    case kHIDUsage_KeyboardLANG8: return INVALID_KEY;
    case kHIDUsage_KeyboardLANG9: return INVALID_KEY;
    case kHIDUsage_KeyboardAlternateErase: return INVALID_KEY;
    case kHIDUsage_KeyboardSysReqOrAttention: return INVALID_KEY;
    case kHIDUsage_KeyboardCancel: return INVALID_KEY;
    case kHIDUsage_KeyboardClear: return INVALID_KEY;
    case kHIDUsage_KeyboardPrior: return INVALID_KEY;
    case kHIDUsage_KeyboardReturn: return INVALID_KEY;
    case kHIDUsage_KeyboardSeparator: return INVALID_KEY;
    case kHIDUsage_KeyboardOut: return INVALID_KEY;
    case kHIDUsage_KeyboardOper: return INVALID_KEY;
    case kHIDUsage_KeyboardClearOrAgain: return INVALID_KEY;
    case kHIDUsage_KeyboardCrSelOrProps: return INVALID_KEY;
    case kHIDUsage_KeyboardExSel: return INVALID_KEY;
    /* 0xa5-0xdf Reserved */
    case kHIDUsage_KeyboardLeftControl: return kVK_Control;
    case kHIDUsage_KeyboardLeftShift: return kVK_Shift;
    case kHIDUsage_KeyboardLeftAlt: return kVK_Option;
    case kHIDUsage_KeyboardLeftGUI: return kVK_Command;
    case kHIDUsage_KeyboardRightControl: return kVK_RightControl;
    case kHIDUsage_KeyboardRightShift: return kVK_RightShift;
    case kHIDUsage_KeyboardRightAlt: return kVK_RightOption;
    /* 0x36: right Command; no kVK_ constant in this SDK */
    case kHIDUsage_KeyboardRightGUI: return 0x36; //??
    /* 0xe8-0xffff Reserved */
    case kHIDUsage_Keyboard_Reserved: return INVALID_KEY;
    default: return INVALID_KEY;
    }
    /* Unreachable: the default case above already returns */
    return INVALID_KEY;
}
/* Translate a Carbon virtual key code to the corresponding obs_key_t,
 * or OBS_KEY_NONE when the code has no hotkey mapping. Pure lookup. */
obs_key_t obs_key_from_virtual_key(int code)
{
    switch (code) {
    case kVK_ANSI_A: return OBS_KEY_A;
    case kVK_ANSI_B: return OBS_KEY_B;
    case kVK_ANSI_C: return OBS_KEY_C;
    case kVK_ANSI_D: return OBS_KEY_D;
    case kVK_ANSI_E: return OBS_KEY_E;
    case kVK_ANSI_F: return OBS_KEY_F;
    case kVK_ANSI_G: return OBS_KEY_G;
    case kVK_ANSI_H: return OBS_KEY_H;
    case kVK_ANSI_I: return OBS_KEY_I;
    case kVK_ANSI_J: return OBS_KEY_J;
    case kVK_ANSI_K: return OBS_KEY_K;
    case kVK_ANSI_L: return OBS_KEY_L;
    case kVK_ANSI_M: return OBS_KEY_M;
    case kVK_ANSI_N: return OBS_KEY_N;
    case kVK_ANSI_O: return OBS_KEY_O;
    case kVK_ANSI_P: return OBS_KEY_P;
    case kVK_ANSI_Q: return OBS_KEY_Q;
    case kVK_ANSI_R: return OBS_KEY_R;
    case kVK_ANSI_S: return OBS_KEY_S;
    case kVK_ANSI_T: return OBS_KEY_T;
    case kVK_ANSI_U: return OBS_KEY_U;
    case kVK_ANSI_V: return OBS_KEY_V;
    case kVK_ANSI_W: return OBS_KEY_W;
    case kVK_ANSI_X: return OBS_KEY_X;
    case kVK_ANSI_Y: return OBS_KEY_Y;
    case kVK_ANSI_Z: return OBS_KEY_Z;
    case kVK_ANSI_1: return OBS_KEY_1;
    case kVK_ANSI_2: return OBS_KEY_2;
    case kVK_ANSI_3: return OBS_KEY_3;
    case kVK_ANSI_4: return OBS_KEY_4;
    case kVK_ANSI_5: return OBS_KEY_5;
    case kVK_ANSI_6: return OBS_KEY_6;
    case kVK_ANSI_7: return OBS_KEY_7;
    case kVK_ANSI_8: return OBS_KEY_8;
    case kVK_ANSI_9: return OBS_KEY_9;
    case kVK_ANSI_0: return OBS_KEY_0;
    case kVK_Return: return OBS_KEY_RETURN;
    case kVK_Escape: return OBS_KEY_ESCAPE;
    case kVK_Delete: return OBS_KEY_BACKSPACE;
    case kVK_Tab: return OBS_KEY_TAB;
    case kVK_Space: return OBS_KEY_SPACE;
    case kVK_ANSI_Minus: return OBS_KEY_MINUS;
    case kVK_ANSI_Equal: return OBS_KEY_EQUAL;
    case kVK_ANSI_LeftBracket: return OBS_KEY_BRACKETLEFT;
    case kVK_ANSI_RightBracket: return OBS_KEY_BRACKETRIGHT;
    case kVK_ANSI_Backslash: return OBS_KEY_BACKSLASH;
    case kVK_ANSI_Semicolon: return OBS_KEY_SEMICOLON;
    case kVK_ANSI_Quote: return OBS_KEY_QUOTE;
    case kVK_ANSI_Grave: return OBS_KEY_DEAD_GRAVE;
    case kVK_ANSI_Comma: return OBS_KEY_COMMA;
    case kVK_ANSI_Period: return OBS_KEY_PERIOD;
    case kVK_ANSI_Slash: return OBS_KEY_SLASH;
    case kVK_CapsLock: return OBS_KEY_CAPSLOCK;
    case kVK_ISO_Section: return OBS_KEY_SECTION;
    case kVK_F1: return OBS_KEY_F1;
    case kVK_F2: return OBS_KEY_F2;
    case kVK_F3: return OBS_KEY_F3;
    case kVK_F4: return OBS_KEY_F4;
    case kVK_F5: return OBS_KEY_F5;
    case kVK_F6: return OBS_KEY_F6;
    case kVK_F7: return OBS_KEY_F7;
    case kVK_F8: return OBS_KEY_F8;
    case kVK_F9: return OBS_KEY_F9;
    case kVK_F10: return OBS_KEY_F10;
    case kVK_F11: return OBS_KEY_F11;
    case kVK_F12: return OBS_KEY_F12;
    case kVK_Help: return OBS_KEY_HELP;
    case kVK_Home: return OBS_KEY_HOME;
    case kVK_PageUp: return OBS_KEY_PAGEUP;
    case kVK_ForwardDelete: return OBS_KEY_DELETE;
    case kVK_End: return OBS_KEY_END;
    case kVK_PageDown: return OBS_KEY_PAGEDOWN;
    case kVK_RightArrow: return OBS_KEY_RIGHT;
    case kVK_LeftArrow: return OBS_KEY_LEFT;
    case kVK_DownArrow: return OBS_KEY_DOWN;
    case kVK_UpArrow: return OBS_KEY_UP;
    case kVK_ANSI_KeypadClear: return OBS_KEY_CLEAR;
    case kVK_ANSI_KeypadDivide: return OBS_KEY_NUMSLASH;
    case kVK_ANSI_KeypadMultiply: return OBS_KEY_NUMASTERISK;
    case kVK_ANSI_KeypadMinus: return OBS_KEY_NUMMINUS;
    case kVK_ANSI_KeypadPlus: return OBS_KEY_NUMPLUS;
    case kVK_ANSI_KeypadEnter: return OBS_KEY_ENTER;
    case kVK_ANSI_Keypad1: return OBS_KEY_NUM1;
    case kVK_ANSI_Keypad2: return OBS_KEY_NUM2;
    case kVK_ANSI_Keypad3: return OBS_KEY_NUM3;
    case kVK_ANSI_Keypad4: return OBS_KEY_NUM4;
    case kVK_ANSI_Keypad5: return OBS_KEY_NUM5;
    case kVK_ANSI_Keypad6: return OBS_KEY_NUM6;
    case kVK_ANSI_Keypad7: return OBS_KEY_NUM7;
    case kVK_ANSI_Keypad8: return OBS_KEY_NUM8;
    case kVK_ANSI_Keypad9: return OBS_KEY_NUM9;
    case kVK_ANSI_Keypad0: return OBS_KEY_NUM0;
    case kVK_ANSI_KeypadDecimal: return OBS_KEY_NUMPERIOD;
    case kVK_ANSI_KeypadEquals: return OBS_KEY_NUMEQUAL;
    case kVK_F13: return OBS_KEY_F13;
    case kVK_F14: return OBS_KEY_F14;
    case kVK_F15: return OBS_KEY_F15;
    case kVK_F16: return OBS_KEY_F16;
    case kVK_F17: return OBS_KEY_F17;
    case kVK_F18: return OBS_KEY_F18;
    case kVK_F19: return OBS_KEY_F19;
    case kVK_F20: return OBS_KEY_F20;
    /* Left/right variants of each modifier collapse onto one obs key */
    case kVK_Control: return OBS_KEY_CONTROL;
    case kVK_Shift: return OBS_KEY_SHIFT;
    case kVK_Option: return OBS_KEY_ALT;
    case kVK_Command: return OBS_KEY_META;
    case kVK_RightControl: return OBS_KEY_CONTROL;
    case kVK_RightShift: return OBS_KEY_SHIFT;
    case kVK_RightOption: return OBS_KEY_ALT;
    /* 0x36: right Command; no kVK_ constant in this SDK */
    case 0x36: return OBS_KEY_META;
    /* Not representable as obs hotkeys */
    case kVK_Function:
    case kVK_Mute:
    case kVK_VolumeDown:
    case kVK_VolumeUp:
        break;
    }
    return OBS_KEY_NONE;
}
/* Map one HID key element to its obs key and remember the element for
 * later state polling. Elements with no Carbon/obs mapping are skipped. */
static inline void load_key(obs_hotkeys_platform_t *plat, IOHIDElementRef key)
{
    UInt32 usage_code = IOHIDElementGetUsage(key);
    UInt16 carbon_code = usage_to_carbon(usage_code);

    if (carbon_code == INVALID_KEY) return;

    obs_key_t obs_key = obs_key_from_virtual_key(carbon_code);
    if (obs_key == OBS_KEY_NONE)
        return;

    /* Store the element and take a CF reference on the stored copy;
     * released again in free_hotkeys_platform() or when the element
     * goes stale in obs_hotkeys_platform_is_pressed() */
    da_push_back(plat->keys[obs_key], &key);
    CFRetain(*(IOHIDElementRef*)da_end(plat->keys[obs_key]));
}
/* Register every keyboard/keypad element of one HID keyboard device. */
static inline void load_keyboard(obs_hotkeys_platform_t *plat,
        IOHIDDeviceRef keyboard)
{
    CFArrayRef elements = IOHIDDeviceCopyMatchingElements(keyboard, NULL,
            kIOHIDOptionsTypeNone);
    if (!elements) {
        blog(LOG_ERROR, "hotkeys-cocoa: Getting keyboard keys failed");
        return;
    }

    CFIndex num = CFArrayGetCount(elements);
    if (!num) {
        blog(LOG_ERROR, "hotkeys-cocoa: Keyboard has no keys");
    } else {
        for (CFIndex idx = 0; idx < num; idx++) {
            IOHIDElementRef elem = (IOHIDElementRef)
                CFArrayGetValueAtIndex(elements, idx);

            /* Only keyboard/keypad usage-page elements are keys */
            if (IOHIDElementGetUsagePage(elem) ==
                    kHIDPage_KeyboardOrKeypad)
                load_key(plat, elem);
        }
    }

    CFRelease(elements);
}
/* Collect all connected HID keyboards and load their key elements into
 * plat->keys. Returns false when no keyboard device is present. */
static bool init_keyboard(obs_hotkeys_platform_t *plat)
{
    CFSetRef keyboards = copy_devices(plat, kHIDPage_GenericDesktop,
            kHIDUsage_GD_Keyboard);
    if (!keyboards)
        return false;

    CFIndex count = CFSetGetCount(keyboards);
    /* Heap-allocate instead of the previous `CFTypeRef devices[count]`
     * VLA: the device count is externally determined, and an unbounded
     * VLA risks stack overflow */
    CFTypeRef *devices = bzalloc(sizeof(CFTypeRef) * count);
    CFSetGetValues(keyboards, devices);

    for (CFIndex i = 0; i < count; i++)
        load_keyboard(plat, (IOHIDDeviceRef)devices[i]);

    bfree(devices);
    CFRelease(keyboards);
    return true;
}
/* Release all CF references held by the platform context and free it.
 * Safe to call with NULL. */
static inline void free_hotkeys_platform(obs_hotkeys_platform_t *plat)
{
    if (!plat)
        return;

    /* Drop the layout input source, the retained layout data, and the
     * HID manager, clearing each pointer as we go */
    if (plat->tis != NULL) {
        CFRelease(plat->tis);
        plat->tis = NULL;
    }

    if (plat->layout_data != NULL) {
        CFRelease(plat->layout_data);
        plat->layout_data = NULL;
    }

    if (plat->manager != NULL) {
        CFRelease(plat->manager);
        plat->manager = NULL;
    }

    /* Release every key element retained in load_key(), then the
     * per-key arrays themselves */
    for (size_t k = 0; k < OBS_KEY_LAST_VALUE; k++) {
        for (size_t e = 0; e < plat->keys[k].num; e++)
            CFRelease(plat->keys[k].array[e]);

        da_free(plat->keys[k]);
    }

    bfree(plat);
}
/* Log the InputSourceID of the given keyboard layout.
 * Returns false when the id cannot be obtained or converted. */
static bool log_layout_name(TISInputSourceRef tis)
{
    bool success = false;
    struct dstr layout_name = {0};
    CFStringRef sid = (CFStringRef)TISGetInputSourceProperty(tis,
            kTISPropertyInputSourceID);

    if (!sid) {
        blog(LOG_ERROR, "hotkeys-cocoa: Failed getting InputSourceID");
        return false;
    }

    if (dstr_from_cfstring(&layout_name, sid)) {
        blog(LOG_INFO, "hotkeys-cocoa: Using layout '%s'",
                layout_name.array);
        success = true;
    } else {
        blog(LOG_ERROR, "hotkeys-cocoa: Could not convert InputSourceID"
                " to CString");
    }

    dstr_free(&layout_name);
    return success;
}
/* Build a fresh platform context: the current keyboard layout data (for
 * key-name translation) plus an opened IOHID manager with all keyboard
 * key elements loaded (for state polling). On success *plat_ owns the
 * context; on any failure *plat_ is set to NULL. */
static bool init_hotkeys_platform(obs_hotkeys_platform_t **plat_)
{
    if (!plat_)
        return false;

    *plat_ = bzalloc(sizeof(obs_hotkeys_platform_t));
    obs_hotkeys_platform_t *plat = *plat_;
    if (!plat) {
        *plat_ = NULL;
        return false;
    }

    /* NOTE(review): plat->tis is not NULL-checked before being passed to
     * TISGetInputSourceProperty -- presumably the copy cannot fail for
     * the current layout; confirm */
    plat->tis = TISCopyCurrentKeyboardLayoutInputSource();
    plat->layout_data = (CFDataRef)TISGetInputSourceProperty(plat->tis,
            kTISPropertyUnicodeKeyLayoutData);

    if (!plat->layout_data) {
        blog(LOG_ERROR, "hotkeys-cocoa: Failed getting LayoutData");
        goto fail;
    }

    /* Get-rule property is not owned; retain so the layout bytes stay
     * valid for the lifetime of plat */
    CFRetain(plat->layout_data);
    plat->layout = (UCKeyboardLayout*)CFDataGetBytePtr(plat->layout_data);

    plat->manager = IOHIDManagerCreate(kCFAllocatorDefault,
            kIOHIDOptionsTypeNone);

    IOReturn openStatus = IOHIDManagerOpen(plat->manager,
            kIOHIDOptionsTypeNone);

    if (openStatus != kIOReturnSuccess) {
        blog(LOG_ERROR, "hotkeys-cocoa: Failed opening HIDManager");
        goto fail;
    }

    init_keyboard(plat);

    return true;

fail:
    /* Releases whatever was set up so far and frees plat */
    hotkeys_release(plat);
    *plat_ = NULL;
    return false;
}
/* Distributed-notification callback fired when the user switches the
 * keyboard layout. Builds a new platform context, swaps it in under the
 * hotkeys mutex, signals "hotkey_layout_change", and releases the old
 * context. Also invoked directly (with NULL notification arguments) to
 * prime the initial context.
 *
 * Fix: the signal_handler_signal() argument was the mis-encoded token
 * `¶ms` (a mangled `&params`), which does not compile. */
static void input_method_changed(CFNotificationCenterRef nc, void *observer,
        CFStringRef name, const void *object, CFDictionaryRef user_info)
{
    UNUSED_PARAMETER(nc);
    UNUSED_PARAMETER(name);
    UNUSED_PARAMETER(object);
    UNUSED_PARAMETER(user_info);

    struct obs_core_hotkeys *hotkeys = observer;
    obs_hotkeys_platform_t *new_plat;

    if (init_hotkeys_platform(&new_plat)) {
        obs_hotkeys_platform_t *plat;

        pthread_mutex_lock(&hotkeys->mutex);
        plat = hotkeys->platform_context;

        /* Same layout data: nothing actually changed; keep the old
         * context and discard the new one */
        if (new_plat && plat &&
                new_plat->layout_data == plat->layout_data) {
            pthread_mutex_unlock(&hotkeys->mutex);
            hotkeys_release(new_plat);
            return;
        }

        hotkeys->platform_context = new_plat;
        if (new_plat)
            log_layout_name(new_plat->tis);
        pthread_mutex_unlock(&hotkeys->mutex);

        calldata_t params = {0};
        signal_handler_signal(hotkeys->signals,
                "hotkey_layout_change", &params);
        /* Release the replaced context outside the mutex */
        if (plat)
            hotkeys_release(plat);
    }
}
/* Register for keyboard-layout-change notifications and build the
 * initial platform context via a synthetic first callback invocation. */
bool obs_hotkeys_platform_init(struct obs_core_hotkeys *hotkeys)
{
    CFNotificationCenterAddObserver(
            CFNotificationCenterGetDistributedCenter(),
            hotkeys, input_method_changed,
            kTISNotifySelectedKeyboardInputSourceChanged, NULL,
            CFNotificationSuspensionBehaviorDeliverImmediately);

    /* Prime the context now instead of waiting for a layout change */
    input_method_changed(NULL, hotkeys, NULL, NULL, NULL);

    return hotkeys->platform_context != NULL;
}
/* Unregister the layout-change observer and drop the platform context. */
void obs_hotkeys_platform_free(struct obs_core_hotkeys *hotkeys)
{
    CFNotificationCenterRemoveEveryObserver(
            CFNotificationCenterGetDistributedCenter(),
            hotkeys);

    hotkeys_release(hotkeys->platform_context);
}
/* NSEvent is reached via the ObjC runtime only, so declare the one type
 * needed here instead of pulling AppKit headers into this file. */
typedef unsigned long NSUInteger;

/* If `key` is one of OBS_KEY_MOUSE1..29, set *pressed from NSEvent's
 * current pressed-buttons mask and return true; return false for any
 * non-mouse key (so the caller falls back to keyboard polling). */
static bool mouse_button_pressed(obs_key_t key, bool *pressed)
{
    int button = 0;
    switch (key) {
    /* Map OBS_KEY_MOUSEn to the 0-based bit index used by
     * +[NSEvent pressedMouseButtons] */
#define MAP_BUTTON(n) case OBS_KEY_MOUSE ## n: button = n - 1; break
    MAP_BUTTON(1);
    MAP_BUTTON(2);
    MAP_BUTTON(3);
    MAP_BUTTON(4);
    MAP_BUTTON(5);
    MAP_BUTTON(6);
    MAP_BUTTON(7);
    MAP_BUTTON(8);
    MAP_BUTTON(9);
    MAP_BUTTON(10);
    MAP_BUTTON(11);
    MAP_BUTTON(12);
    MAP_BUTTON(13);
    MAP_BUTTON(14);
    MAP_BUTTON(15);
    MAP_BUTTON(16);
    MAP_BUTTON(17);
    MAP_BUTTON(18);
    MAP_BUTTON(19);
    MAP_BUTTON(20);
    MAP_BUTTON(21);
    MAP_BUTTON(22);
    MAP_BUTTON(23);
    MAP_BUTTON(24);
    MAP_BUTTON(25);
    MAP_BUTTON(26);
    MAP_BUTTON(27);
    MAP_BUTTON(28);
    MAP_BUTTON(29);
    break;
#undef MAP_BUTTON

    default:
        return false;
    }

    Class NSEvent = objc_getClass("NSEvent");
    SEL pressedMouseButtons = sel_registerName("pressedMouseButtons");
    /* NOTE(review): calling objc_msgSend through its generic prototype
     * and casting the returned id to an integer relies on ABI behavior;
     * a properly-cast function pointer would be stricter -- confirm the
     * supported target architectures */
    NSUInteger buttons = (NSUInteger)objc_msgSend((id)NSEvent,
            pressedMouseButtons);

    *pressed = (buttons & (1 << button)) != 0;
    return true;
}
/* Poll whether `key` is currently held down. Mouse buttons are answered
 * via NSEvent; keyboard keys are polled through the cached IOHID key
 * elements collected in init_keyboard(). */
bool obs_hotkeys_platform_is_pressed(obs_hotkeys_platform_t *plat,
        obs_key_t key)
{
    bool mouse_pressed = false;
    if (mouse_button_pressed(key, &mouse_pressed))
        return mouse_pressed;

    if (!plat)
        return false;

    if (key >= OBS_KEY_LAST_VALUE)
        return false;

    /* No i++ in the loop header: when a stale element is erased we stay
     * at the same index */
    for (size_t i = 0; i < plat->keys[key].num;) {
        IOHIDElementRef element = plat->keys[key].array[i];
        IOHIDValueRef value = 0;
        IOHIDDeviceRef device = IOHIDElementGetDevice(element);

        IOHIDDeviceGetValue(device, element, &value);
        if (!value) {
            /* Read failed -- presumably the device was unplugged;
             * drop the element (retained in load_key) */
            CFRelease(element);
            da_erase(plat->keys[key], i);
            continue;
        }

        if (IOHIDValueGetIntegerValue(value) == 1)
            return true;

        i += 1;
    }

    return false;
}
| gpl-2.0 |
cubieboard/openbox_external_bison | src/symlist.c | 24 | 3546 | /* Lists of symbols for Bison
Copyright (C) 2002, 2005, 2006 Free Software Foundation, Inc.
This file is part of Bison, the GNU Compiler Compiler.
Bison is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
Bison is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Bison; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */
#include <config.h>
#include "system.h"
#include "complain.h"
#include "symlist.h"
/*--------------------------------------.
| Create a list containing SYM at LOC. |
`--------------------------------------*/
symbol_list *
symbol_list_new (symbol *sym, location loc)
{
  /* Allocate a fresh one-element list cell for SYM at LOC; every rule
     attribute starts out empty/unset.  */
  symbol_list *node = xmalloc (sizeof *node);

  node->sym = sym;
  node->location = loc;

  node->midrule = NULL;
  node->action = NULL;
  node->used = false;

  node->ruleprec = NULL;
  node->dprec = 0;
  node->merger = 0;

  node->next = NULL;

  return node;
}
/*------------------.
| Print this list. |
`------------------*/
/* Print the list L on stream F, each symbol followed by its "used"
   status, separated by ", ".

   Fixes: the used/unused marker was written to stderr instead of the F
   parameter the caller chose, and the ", " separator was emitted after
   every element (the old test `l && l->sym` is always true inside the
   loop), leaving a trailing separator.  */
void
symbol_list_print (const symbol_list *l, FILE *f)
{
  for (/* Nothing. */; l && l->sym; l = l->next)
    {
      symbol_print (l->sym, f);
      /* Write to F, not stderr: honor the caller's stream.  */
      fprintf (f, l->used ? " used" : " unused");
      /* Separator only between elements, not after the last one.  */
      if (l->next && l->next->sym)
	fprintf (f, ", ");
    }
}
/*---------------------------------.
| Prepend SYM at LOC to the LIST. |
`---------------------------------*/
symbol_list *
symbol_list_prepend (symbol_list *list, symbol *sym, location loc)
{
  /* The new cell becomes the head; the old list hangs off its tail.  */
  symbol_list *head = symbol_list_new (sym, loc);
  head->next = list;
  return head;
}
/*-------------------------------------------------.
| Free the LIST, but not the symbols it contains. |
`-------------------------------------------------*/
void
symbol_list_free (symbol_list *list)
{
  /* Release every cell of LIST; the symbols it points at are left
     untouched (see the banner comment above).  */
  LIST_FREE (symbol_list, list);
}
/*--------------------.
| Return its length. |
`--------------------*/
/* Return the number of cells in L.

   Fix: accumulate in the same unsigned type we return, instead of a
   signed int that was implicitly converted on return.  */
unsigned int
symbol_list_length (const symbol_list *l)
{
  unsigned int res = 0;
  for (/* Nothing. */; l; l = l->next)
    ++res;
  return res;
}
/*--------------------------------.
| Get symbol N in symbol list L. |
`--------------------------------*/
symbol_list *
symbol_list_n_get (symbol_list *l, int n)
{
  /* Negative positions never exist.  */
  if (n < 0)
    return NULL;

  /* Advance N links, bailing out if the list (or its payload) runs
     out first.  */
  while (n-- > 0)
    {
      l = l->next;
      if (!l || !l->sym)
	return NULL;
    }

  return l;
}
/*--------------------------------------------------------------.
| Get the data type (alternative in the union) of the value for |
| symbol N in symbol list L. |
`--------------------------------------------------------------*/
uniqstr
symbol_list_n_type_name_get (symbol_list *l, location loc, int n)
{
  /* Locate symbol N; its type name is the answer.  */
  symbol_list *node = symbol_list_n_get (l, n);
  if (node)
    return node->sym->type_name;

  /* No such symbol: report the bad $N reference at LOC.  */
  complain_at (loc, _("invalid $ value: $%d"), n);
  return NULL;
}
/*----------------------------------------.
| The symbol N in symbol list L is USED. |
`----------------------------------------*/
void
symbol_list_n_used_set (symbol_list *l, int n, bool used)
{
  /* Mark symbol N (if it exists) with the given used flag.  */
  symbol_list *node = symbol_list_n_get (l, n);
  if (node)
    node->used = used;
}
| gpl-2.0 |
ntddk/pemu | plugins/glibc-2.13-new/iconvdata/brf.c | 24 | 1146 | /* Conversion from and to BRF.
Copyright (C) 2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Samuel Thibault <samuel.thibault@ens-lyon.org>, 2006.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
#include <stdint.h>
/* Get the conversion table. */
#define TABLES <brf.h>
#define CHARSET_NAME "BRF//"
#define HAS_HOLES 1 /* Not all 256 character are defined. */
#include <8bit-gap.c>
| gpl-2.0 |
weimenlove/linux-2.6.24.4 | net/ipv4/tcp_cong.c | 24 | 9877 | /*
* Plugable TCP congestion control support and newReno
* congestion control.
* Based on ideas from I/O scheduler suport and Web100.
*
* Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <net/tcp.h>
/* RFC3742 Limited Slow Start threshold; 0 disables the limit
 * (see tcp_slow_start below). */
int sysctl_tcp_max_ssthresh = 0;

/* Serializes writers of tcp_cong_list; readers traverse it under RCU. */
static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);
/* Simple linear search, don't expect many entries! */
/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
    struct tcp_congestion_ops *ca;

    /* Callers hold either tcp_cong_list_lock or rcu_read_lock() */
    list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
        if (!strcmp(ca->name, name))
            return ca;
    }

    return NULL;
}
/*
* Attach new congestion control algorithm to the list
* of available options.
*/
/* Register @ca as an available congestion control algorithm.
 * Returns 0, -EINVAL if mandatory ops are missing, or -EEXIST if the
 * name is already taken. */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
    int ret = 0;

    /* all algorithms must implement ssthresh and cong_avoid ops */
    if (!ca->ssthresh || !ca->cong_avoid) {
        printk(KERN_ERR "TCP %s does not implement required ops\n",
               ca->name);
        return -EINVAL;
    }

    /* Writers serialize on the spinlock; readers use RCU */
    spin_lock(&tcp_cong_list_lock);
    if (tcp_ca_find(ca->name)) {
        /* Duplicate names would make lookups ambiguous */
        printk(KERN_NOTICE "TCP %s already registered\n", ca->name);
        ret = -EEXIST;
    } else {
        list_add_tail_rcu(&ca->list, &tcp_cong_list);
        printk(KERN_INFO "TCP %s registered\n", ca->name);
    }
    spin_unlock(&tcp_cong_list_lock);

    return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
/*
* Remove congestion control algorithm, called from
* the module's remove function. Module ref counts are used
* to ensure that this can't be done till all sockets using
* that method are closed.
*/
/* Remove @ca from the list of available algorithms. As the comment
 * above notes, module reference counts guarantee no socket is still
 * using it at this point. */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
    spin_lock(&tcp_cong_list_lock);
    list_del_rcu(&ca->list);
    spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
/* Assign choice of congestion control. */
void tcp_init_congestion_control(struct sock *sk)
{
    struct inet_connection_sock *icsk = inet_csk(sk);
    struct tcp_congestion_ops *ca;

    /* if no choice made yet assign the current value set as default;
     * the sentinel tcp_init_congestion_ops is detected by pointer
     * identity */
    if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) {
        rcu_read_lock();
        list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
            /* Pin the owning module so the algorithm cannot be
             * unloaded while this socket uses it */
            if (try_module_get(ca->owner)) {
                icsk->icsk_ca_ops = ca;
                break;
            }

            /* fallback to next available */
        }
        rcu_read_unlock();
    }

    /* Per-socket initialization hook of the chosen algorithm */
    if (icsk->icsk_ca_ops->init)
        icsk->icsk_ca_ops->init(sk);
}
/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
    struct inet_connection_sock *icsk = inet_csk(sk);

    /* Let the algorithm tear down per-socket state, then drop the
     * module reference taken when it was attached to the socket */
    if (icsk->icsk_ca_ops->release)
        icsk->icsk_ca_ops->release(sk);
    module_put(icsk->icsk_ca_ops->owner);
}
/* Used by sysctl to change default congestion control */
/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
    struct tcp_congestion_ops *ca;
    int ret = -ENOENT;

    spin_lock(&tcp_cong_list_lock);
    ca = tcp_ca_find(name);
#ifdef CONFIG_KMOD
    /* Not registered: drop the lock, try to autoload tcp_<name>,
     * then look it up again under the lock */
    if (!ca && capable(CAP_SYS_MODULE)) {
        spin_unlock(&tcp_cong_list_lock);

        request_module("tcp_%s", name);
        spin_lock(&tcp_cong_list_lock);
        ca = tcp_ca_find(name);
    }
#endif

    if (ca) {
        ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
        /* The list head is treated as the default algorithm (see
         * tcp_get_default_congestion_control), so move it there */
        list_move(&ca->list, &tcp_cong_list);
        ret = 0;
    }
    spin_unlock(&tcp_cong_list_lock);

    return ret;
}
/* Set default value from kernel configuration at bootup */
/* Set default value from kernel configuration at bootup.
 * late_initcall: presumably so the built-in algorithms have already
 * registered themselves -- TODO confirm. */
static int __init tcp_congestion_default(void)
{
    return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);
/* Build string with list of available congestion control values */
/* Build string with list of available congestion control values.
 *
 * Fix: snprintf() returns the length that *would* have been written, so
 * offs could overtake maxlen; the size_t expression "maxlen - offs"
 * would then wrap to a huge value and allow writes past the buffer.
 * Stop as soon as the buffer is exhausted. */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
    struct tcp_congestion_ops *ca;
    size_t offs = 0;

    rcu_read_lock();
    list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
        offs += snprintf(buf + offs, maxlen - offs,
                 "%s%s",
                 offs == 0 ? "" : " ", ca->name);

        if (offs >= maxlen)
            break;
    }
    rcu_read_unlock();
}
/* Get current default congestion control */
/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
    struct tcp_congestion_ops *ca;

    /* We will always have reno... */
    BUG_ON(list_empty(&tcp_cong_list));
    rcu_read_lock();
    /* The default is kept at the head of the list (see the list_move
     * in tcp_set_default_congestion_control) */
    ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
    /* NOTE(review): strncpy does not NUL-terminate when ca->name fills
     * TCP_CA_NAME_MAX exactly -- confirm registered names are always
     * shorter and callers pass a TCP_CA_NAME_MAX-sized buffer */
    strncpy(name, ca->name, TCP_CA_NAME_MAX);
    rcu_read_unlock();
}
/* Built list of non-restricted congestion control values */
/* Built list of non-restricted congestion control values.
 *
 * Fix: guard against size_t wrap of "maxlen - offs" once the
 * (untruncated) snprintf output length reaches the buffer size, which
 * would otherwise permit writes past the end of buf. */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
    struct tcp_congestion_ops *ca;
    size_t offs = 0;

    *buf = '\0';
    rcu_read_lock();
    list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
        if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
            continue;
        offs += snprintf(buf + offs, maxlen - offs,
                 "%s%s",
                 offs == 0 ? "" : " ", ca->name);

        if (offs >= maxlen)
            break;
    }
    rcu_read_unlock();
}
/* Change list of non-restricted congestion control */
/* Change list of non-restricted congestion control.
 *
 * Fix: the kstrdup()ed buffer was leaked -- strsep() advances `clone`,
 * so the original pointer was lost and never kfree()d. Keep the
 * original pointer in saved_clone and free it on every exit path. */
int tcp_set_allowed_congestion_control(char *val)
{
    struct tcp_congestion_ops *ca;
    char *saved_clone, *clone, *name;
    int ret = 0;

    saved_clone = clone = kstrdup(val, GFP_USER);
    if (!clone)
        return -ENOMEM;

    spin_lock(&tcp_cong_list_lock);
    /* pass 1 check for bad entries */
    while ((name = strsep(&clone, " ")) && *name) {
        ca = tcp_ca_find(name);
        if (!ca) {
            ret = -ENOENT;
            goto out;
        }
    }

    /* pass 2 clear old values */
    list_for_each_entry_rcu(ca, &tcp_cong_list, list)
        ca->flags &= ~TCP_CONG_NON_RESTRICTED;

    /* pass 3 mark as allowed */
    while ((name = strsep(&val, " ")) && *name) {
        ca = tcp_ca_find(name);
        WARN_ON(!ca);
        if (ca)
            ca->flags |= TCP_CONG_NON_RESTRICTED;
    }
out:
    spin_unlock(&tcp_cong_list_lock);
    kfree(saved_clone);

    return ret;
}
/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
    struct inet_connection_sock *icsk = inet_csk(sk);
    struct tcp_congestion_ops *ca;
    int err = 0;

    rcu_read_lock();
    ca = tcp_ca_find(name);

    /* no change asking for existing value */
    if (ca == icsk->icsk_ca_ops)
        goto out;

#ifdef CONFIG_KMOD
    /* not found attempt to autoload module */
    if (!ca && capable(CAP_SYS_MODULE)) {
        rcu_read_unlock();
        request_module("tcp_%s", name);
        rcu_read_lock();
        ca = tcp_ca_find(name);
    }
#endif
    if (!ca)
        err = -ENOENT;

    /* Restricted algorithms require CAP_NET_ADMIN */
    else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN)))
        err = -EPERM;

    /* Pin the owning module before pointing the socket at it */
    else if (!try_module_get(ca->owner))
        err = -EBUSY;

    else {
        /* Drop the old algorithm (state + module ref), attach the
         * new one, and (re)initialize it on live sockets */
        tcp_cleanup_congestion_control(sk);
        icsk->icsk_ca_ops = ca;
        if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
            icsk->icsk_ca_ops->init(sk);
    }
 out:
    rcu_read_unlock();
    return err;
}
/*
* Slow start is used when congestion window is less than slow start
* threshold. This version implements the basic RFC2581 version
* and optionally supports:
* RFC3742 Limited Slow Start - growth limited to max_ssthresh
* RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged
*/
void tcp_slow_start(struct tcp_sock *tp)
{
    int cnt; /* increase in packets */

    /* RFC3465: ABC Slow start
     * Increase only after a full MSS of bytes is acked
     *
     * TCP sender SHOULD increase cwnd by the number of
     * previously unacknowledged bytes ACKed by each incoming
     * acknowledgment, provided the increase is not more than L
     */
    if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
        return;

    /* RFC3742: above max_ssthresh, grow only by max_ssthresh/2 per RTT */
    if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
        cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */
    else
        cnt = tp->snd_cwnd; /* exponential increase */

    /* RFC3465: ABC
     * We MAY increase by 2 if discovered delayed ack
     */
    if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
        cnt <<= 1;
    tp->bytes_acked = 0;

    /* snd_cwnd_cnt accumulates fractional growth; convert whole
     * multiples of cwnd into real window increments, capped at
     * snd_cwnd_clamp */
    tp->snd_cwnd_cnt += cnt;
    while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
        tp->snd_cwnd_cnt -= tp->snd_cwnd;
        if (tp->snd_cwnd < tp->snd_cwnd_clamp)
            tp->snd_cwnd++;
    }
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
/*
* TCP Reno congestion control
* This is special case used for fallback as well.
*/
/* This is Jacobson's slow start and congestion avoidance.
* SIGCOMM '88, p. 328.
*/
/*
 * Reno congestion avoidance: exponential growth below ssthresh,
 * linear (one MSS per RTT) above it, optionally driven by RFC 3465
 * Appropriate Byte Counting.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In "safe" area: exponential increase. */
	if (tp->snd_cwnd <= tp->snd_ssthresh) {
		tcp_slow_start(tp);
		return;
	}

	if (sysctl_tcp_abc) {
		/* RFC 3465: one window increase per full cwnd of acked
		 * bytes.
		 */
		if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
			tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
		return;
	}

	/* Classic counter form of tp->snd_cwnd += 1 / tp->snd_cwnd. */
	if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	} else {
		tp->snd_cwnd_cnt++;
	}
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
/* Lower bound on congestion window with halving. */
u32 tcp_reno_min_cwnd(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
return tp->snd_ssthresh/2;
}
EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);
/* The Reno congestion-control ops table; also serves as the fallback
 * algorithm.  TCP_CONG_NON_RESTRICTED allows unprivileged processes
 * to select it (see the CAP_NET_ADMIN check in the setter above).
 */
struct tcp_congestion_ops tcp_reno = {
	.flags = TCP_CONG_NON_RESTRICTED,
	.name = "reno",
	.owner = THIS_MODULE,
	.ssthresh = tcp_reno_ssthresh,
	.cong_avoid = tcp_reno_cong_avoid,
	.min_cwnd = tcp_reno_min_cwnd,
};
/* Initial congestion-control ops, used until the connection is set up
 * (SYN exchange).  Functionally identical to Reno, but registered under
 * an empty name so tcp_set_default_congestion_control() can tell the
 * two apart.
 */
struct tcp_congestion_ops tcp_init_congestion_ops  = {
	.name = "",
	.owner = THIS_MODULE,
	.ssthresh = tcp_reno_ssthresh,
	.cong_avoid = tcp_reno_cong_avoid,
	.min_cwnd = tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);
| gpl-2.0 |
AndroidDeveloperAlliance/kernel_samsung_smdk4210 | drivers/usb/core/devio.c | 24 | 53381 | /*****************************************************************************/
/*
* devio.c -- User space communication with USB devices.
*
* Copyright (C) 1999-2000 Thomas Sailer (sailer@ife.ee.ethz.ch)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* This file implements the usbfs/x/y files, where
* x is the bus number and y the device number.
*
* It allows user space programs/"drivers" to communicate directly
* with USB devices without intervening kernel driver.
*
* Revision history
* 22.12.1999 0.1 Initial release (split from proc_usb.c)
* 04.01.2000 0.2 Turned into its own filesystem
* 30.09.2005 0.3 Fix user-triggerable oops in async URB delivery
* (CAN-2005-3055)
*/
/*****************************************************************************/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
#include <linux/usb/hcd.h> /* for usbcore internals */
#include <linux/cdev.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
#include <linux/moduleparam.h>
#include "usb.h"
#define USB_MAXBUS 64
/* Parenthesize the expansion: the unparenthesized form (USB_MAXBUS * 128)
 * miscomputes in any surrounding expression with equal-or-higher
 * precedence, e.g. `x / USB_DEVICE_MAX` would expand to `x / 64 * 128`.
 */
#define USB_DEVICE_MAX (USB_MAXBUS * 128)
/* Mutual exclusion for removal, open, and release */
DEFINE_MUTEX(usbfs_mutex);
/* Per-open-file usbfs state, hung off file->private_data. */
struct dev_state {
	struct list_head list;      /* state list (linked into dev->filelist) */
	struct usb_device *dev;
	struct file *file;
	spinlock_t lock;            /* protects the async urb lists */
	struct list_head async_pending;
	struct list_head async_completed;
	wait_queue_head_t wait;     /* wake up if a request completed */
	unsigned int discsignr;     /* signal number, 0 = none (set to 0 at open; presumably raised on disconnect — set via an ioctl outside this view) */
	struct pid *disc_pid;       /* opener's pid, captured at open */
	uid_t disc_uid, disc_euid;  /* opener's credentials, captured at open */
	void __user *disccontext;
	unsigned long ifclaimed;    /* bitmap of interfaces claimed through usbfs */
	u32 secid;                  /* LSM security id captured at open */
	u32 disabled_bulk_eps;      /* bitmap of bulk endpoints disabled after an error (see cancel_bulk_urbs) */
};
/* One outstanding asynchronous URB submitted through usbfs. */
struct async {
	struct list_head asynclist;     /* on ps->async_pending, then ps->async_completed */
	struct dev_state *ps;
	struct pid *pid;                /* submitter; target of signr on completion */
	uid_t uid, euid;                /* submitter's credentials */
	unsigned int signr;             /* signal raised at completion, 0 = none */
	unsigned int ifnum;
	void __user *userbuffer;        /* destination for IN data when reaped */
	void __user *userurb;           /* userspace usbdevfs_urb; also the unlink handle */
	struct urb *urb;
	int status;                     /* snapshot of urb->status at completion */
	u32 secid;
	u8 bulk_addr;                   /* packed bulk ep number + direction bit (see proc_do_submiturb) */
	u8 bulk_status;                 /* AS_CONTINUATION or AS_UNLINK */
};
/* Module parameter: when true, log all usbfs traffic via dev_info. */
static int usbfs_snoop;
module_param(usbfs_snoop, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usbfs_snoop, "true to log all usbfs traffic");

#define snoop(dev, format, arg...) \
	do { \
		if (usbfs_snoop) \
			dev_info(dev , format , ## arg); \
	} while (0)

/* Which side of a transfer a snoop line describes. */
enum snoop_when {
	SUBMIT, COMPLETE
};

#define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0)

/* Cap on the kernel bounce buffer for a single usbfs transfer. */
#define MAX_USBFS_BUFFER_SIZE 16384
/* True while this dev_state is still linked to a present device. */
static int connected(struct dev_state *ps)
{
	if (list_empty(&ps->list))
		return 0;
	return ps->dev->state != USB_STATE_NOTATTACHED;
}
/* Seek within the descriptor image exposed by usbdev_read().
 * Only SEEK_SET (0) and SEEK_CUR (1) are supported; anything else,
 * including SEEK_END, yields -EINVAL and leaves the position alone.
 */
static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret = -EINVAL;

	mutex_lock(&file->f_dentry->d_inode->i_mutex);
	if (orig == 0)
		ret = file->f_pos = offset;
	else if (orig == 1)
		ret = file->f_pos += offset;
	mutex_unlock(&file->f_dentry->d_inode->i_mutex);
	return ret;
}
/* Read the descriptor image of the device: bytes 0..17 are the device
 * descriptor (multi-byte fields converted to host order), followed by
 * the raw configuration descriptors concatenated in order.
 */
static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
			   loff_t *ppos)
{
	struct dev_state *ps = file->private_data;
	struct usb_device *dev = ps->dev;
	ssize_t ret = 0;
	unsigned len;
	loff_t pos;
	int i;

	pos = *ppos;
	usb_lock_device(dev);
	if (!connected(ps)) {
		ret = -ENODEV;
		goto err;
	} else if (pos < 0) {
		ret = -EINVAL;
		goto err;
	}
	if (pos < sizeof(struct usb_device_descriptor)) {
		/* 18 bytes - fits on the stack */
		struct usb_device_descriptor temp_desc;
		memcpy(&temp_desc, &dev->descriptor, sizeof(dev->descriptor));
		/* byte-swap the wire-format (little-endian) fields */
		le16_to_cpus(&temp_desc.bcdUSB);
		le16_to_cpus(&temp_desc.idVendor);
		le16_to_cpus(&temp_desc.idProduct);
		le16_to_cpus(&temp_desc.bcdDevice);
		len = sizeof(struct usb_device_descriptor) - pos;
		if (len > nbytes)
			len = nbytes;
		if (copy_to_user(buf, ((char *)&temp_desc) + pos, len)) {
			ret = -EFAULT;
			goto err;
		}
		*ppos += len;
		buf += len;
		nbytes -= len;
		ret += len;
	}
	/* Config descriptors start right after the device descriptor. */
	pos = sizeof(struct usb_device_descriptor);
	for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
		struct usb_config_descriptor *config =
			(struct usb_config_descriptor *)dev->rawdescriptors[i];
		unsigned int length = le16_to_cpu(config->wTotalLength);
		if (*ppos < pos + length) {
			/* The descriptor may claim to be longer than it
			 * really is. Here is the actual allocated length. */
			unsigned alloclen =
				le16_to_cpu(dev->config[i].desc.wTotalLength);
			len = length - (*ppos - pos);
			if (len > nbytes)
				len = nbytes;
			/* Simply don't write (skip over) unallocated parts */
			if (alloclen > (*ppos - pos)) {
				alloclen -= (*ppos - pos);
				if (copy_to_user(buf,
				    dev->rawdescriptors[i] + (*ppos - pos),
				    min(len, alloclen))) {
					ret = -EFAULT;
					goto err;
				}
			}
			*ppos += len;
			buf += len;
			nbytes -= len;
			ret += len;
		}
		pos += length;
	}
err:
	usb_unlock_device(dev);
	return ret;
}
/*
* async list handling
*/
/* Allocate a zeroed async tracker plus its URB (with room for
 * numisoframes iso frame descriptors).  Returns NULL on failure.
 */
static struct async *alloc_async(unsigned int numisoframes)
{
	struct async *as = kzalloc(sizeof(struct async), GFP_KERNEL);

	if (!as)
		return NULL;
	as->urb = usb_alloc_urb(numisoframes, GFP_KERNEL);
	if (!as->urb) {
		kfree(as);
		as = NULL;
	}
	return as;
}
static void free_async(struct async *as)
{
put_pid(as->pid);
kfree(as->urb->transfer_buffer);
kfree(as->urb->setup_packet);
usb_free_urb(as->urb);
kfree(as);
}
/* Queue a freshly submitted request on its owner's pending list. */
static void async_newpending(struct async *as)
{
	unsigned long flags;
	struct dev_state *ps = as->ps;

	spin_lock_irqsave(&ps->lock, flags);
	list_add_tail(&as->asynclist, &ps->async_pending);
	spin_unlock_irqrestore(&ps->lock, flags);
}
/* Unlink a request from whichever async list it sits on. */
static void async_removepending(struct async *as)
{
	unsigned long flags;
	struct dev_state *ps = as->ps;

	spin_lock_irqsave(&ps->lock, flags);
	list_del_init(&as->asynclist);
	spin_unlock_irqrestore(&ps->lock, flags);
}
/* Pop the oldest completed request, or NULL if none has finished. */
static struct async *async_getcompleted(struct dev_state *ps)
{
	struct async *as = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ps->lock, flags);
	if (!list_empty(&ps->async_completed)) {
		as = list_first_entry(&ps->async_completed, struct async,
				      asynclist);
		list_del_init(&as->asynclist);
	}
	spin_unlock_irqrestore(&ps->lock, flags);
	return as;
}
/* Find and unlink the pending request whose userspace URB pointer
 * matches userurb; returns NULL when there is no match.
 * NOTE: walks ps->async_pending without taking ps->lock — the caller
 * must already hold it (proc_unlinkurb does).
 */
static struct async *async_getpending(struct dev_state *ps,
					     void __user *userurb)
{
	struct async *as;
	list_for_each_entry(as, &ps->async_pending, asynclist)
		if (as->userurb == userurb) {
			list_del_init(&as->asynclist);
			return as;
		}
	return NULL;
}
/* Log one transfer (submission or completion) when usbfs_snoop is set.
 * userurb distinguishes async URBs (non-NULL) from synchronous
 * transfers; data/data_len optionally hex-dump the payload.
 */
static void snoop_urb(struct usb_device *udev,
		void __user *userurb, int pipe, unsigned length,
		int timeout_or_status, enum snoop_when when,
		unsigned char *data, unsigned data_len)
{
	static const char *types[] = {"isoc", "int", "ctrl", "bulk"};
	static const char *dirs[] = {"out", "in"};
	int ep;
	const char *t, *d;
	if (!usbfs_snoop)
		return;
	ep = usb_pipeendpoint(pipe);
	t = types[usb_pipetype(pipe)];
	d = dirs[!!usb_pipein(pipe)];
	if (userurb) {		/* Async */
		if (when == SUBMIT)
			dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
					"length %u\n",
					userurb, ep, t, d, length);
		else
			dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
					"actual_length %u status %d\n",
					userurb, ep, t, d, length,
					timeout_or_status);
	} else {
		if (when == SUBMIT)
			dev_info(&udev->dev, "ep%d %s-%s, length %u, "
					"timeout %d\n",
					ep, t, d, length, timeout_or_status);
		else
			dev_info(&udev->dev, "ep%d %s-%s, actual_length %u, "
					"status %d\n",
					ep, t, d, length, timeout_or_status);
	}
	if (data && data_len > 0) {
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1,
			data, data_len, 1);
	}
}
/* bulk_status values for continuation tracking */
#define AS_CONTINUATION	1
#define AS_UNLINK	2

/* After a bulk URB fails, unlink the continuation URBs queued behind it
 * on the same endpoint.  Called (and returns) with ps->lock held, but
 * temporarily drops the lock around each usb_unlink_urb() so completions
 * can run — hence the rescan-from-the-top loop after every unlink.
 */
static void cancel_bulk_urbs(struct dev_state *ps, unsigned bulk_addr)
__releases(ps->lock)
__acquires(ps->lock)
{
	struct urb *urb;
	struct async *as;
	/* Mark all the pending URBs that match bulk_addr, up to but not
	 * including the first one without AS_CONTINUATION.  If such an
	 * URB is encountered then a new transfer has already started so
	 * the endpoint doesn't need to be disabled; otherwise it does.
	 */
	list_for_each_entry(as, &ps->async_pending, asynclist) {
		if (as->bulk_addr == bulk_addr) {
			if (as->bulk_status != AS_CONTINUATION)
				goto rescan;
			as->bulk_status = AS_UNLINK;
			as->bulk_addr = 0;
		}
	}
	ps->disabled_bulk_eps |= (1 << bulk_addr);
	/* Now carefully unlink all the marked pending URBs */
 rescan:
	list_for_each_entry(as, &ps->async_pending, asynclist) {
		if (as->bulk_status == AS_UNLINK) {
			as->bulk_status = 0;		/* Only once */
			urb = as->urb;
			usb_get_urb(urb);
			spin_unlock(&ps->lock);		/* Allow completions */
			usb_unlink_urb(urb);
			usb_put_urb(urb);
			spin_lock(&ps->lock);
			goto rescan;
		}
	}
}
/* URB completion callback: move the request to the completed list,
 * snapshot its status, optionally raise the submitter's signal, cancel
 * dependent bulk continuations on error, and wake any reaper.
 * NOTE(review): uses plain spin_lock (no irqsave); presumably safe in
 * the context URB completions run in — confirm before changing.
 */
static void async_completed(struct urb *urb)
{
	struct async *as = urb->context;
	struct dev_state *ps = as->ps;
	struct siginfo sinfo;
	struct pid *pid = NULL;
	uid_t uid = 0;
	uid_t euid = 0;
	u32 secid = 0;
	int signr;
	spin_lock(&ps->lock);
	list_move_tail(&as->asynclist, &ps->async_completed);
	as->status = urb->status;
	signr = as->signr;
	if (signr) {
		/* capture signal parameters under the lock */
		sinfo.si_signo = as->signr;
		sinfo.si_errno = as->status;
		sinfo.si_code = SI_ASYNCIO;
		sinfo.si_addr = as->userurb;
		pid = get_pid(as->pid);
		uid = as->uid;
		euid = as->euid;
		secid = as->secid;
	}
	snoop(&urb->dev->dev, "urb complete\n");
	snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
			as->status, COMPLETE,
			((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_OUT) ?
				NULL : urb->transfer_buffer, urb->actual_length);
	/* an error on a bulk URB poisons queued continuation URBs */
	if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
			as->status != -ENOENT)
		cancel_bulk_urbs(ps, as->bulk_addr);
	spin_unlock(&ps->lock);
	if (signr) {
		kill_pid_info_as_uid(sinfo.si_signo, &sinfo, pid, uid,
				      euid, secid);
		put_pid(pid);
	}
	wake_up(&ps->wait);
}
/* Kill every URB on the given list.  The list is drained one entry at
 * a time because the lock must be dropped around usb_kill_urb() to let
 * the completion handler (which also takes ps->lock) run.
 */
static void destroy_async(struct dev_state *ps, struct list_head *list)
{
	struct urb *urb;
	struct async *as;
	unsigned long flags;
	spin_lock_irqsave(&ps->lock, flags);
	while (!list_empty(list)) {
		as = list_entry(list->next, struct async, asynclist);
		list_del_init(&as->asynclist);
		urb = as->urb;
		usb_get_urb(urb);
		/* drop the spinlock so the completion handler can run */
		spin_unlock_irqrestore(&ps->lock, flags);
		usb_kill_urb(urb);
		usb_put_urb(urb);
		spin_lock_irqsave(&ps->lock, flags);
	}
	spin_unlock_irqrestore(&ps->lock, flags);
}
static void destroy_async_on_interface(struct dev_state *ps,
unsigned int ifnum)
{
struct list_head *p, *q, hitlist;
unsigned long flags;
INIT_LIST_HEAD(&hitlist);
spin_lock_irqsave(&ps->lock, flags);
list_for_each_safe(p, q, &ps->async_pending)
if (ifnum == list_entry(p, struct async, asynclist)->ifnum)
list_move_tail(p, &hitlist);
spin_unlock_irqrestore(&ps->lock, flags);
destroy_async(ps, &hitlist);
}
/* Kill every still-pending URB belonging to this open file. */
static void destroy_all_async(struct dev_state *ps)
{
	destroy_async(ps, &ps->async_pending);
}
/*
 * interface claims are made only at the request of user level code,
 * which can also release them (explicitly or by closing files).
 * they're also undone when devices disconnect.
 */

/* usbfs never auto-binds: claims happen only via claimintf(). */
static int driver_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	return -ENODEV;
}
/* Undo a usbfs claim when the interface goes away: clear the claim bit
 * and force completion of any async requests pending on it.
 */
static void driver_disconnect(struct usb_interface *intf)
{
	struct dev_state *ps = usb_get_intfdata(intf);
	unsigned int ifnum = intf->altsetting->desc.bInterfaceNumber;
	if (!ps)
		return;
	/* NOTE: this relies on usbcore having canceled and completed
	 * all pending I/O requests; 2.6 does that.
	 */
	if (likely(ifnum < 8*sizeof(ps->ifclaimed)))
		clear_bit(ifnum, &ps->ifclaimed);
	else
		dev_warn(&intf->dev, "interface number %u out of range\n",
			 ifnum);
	usb_set_intfdata(intf, NULL);
	/* force async requests to complete */
	destroy_async_on_interface(ps, ifnum);
}
/* The following routines are merely placeholders.  There is no way
 * to inform a user task about suspend or resumes.
 */
static int driver_suspend(struct usb_interface *intf, pm_message_t msg)
{
	return 0;
}

static int driver_resume(struct usb_interface *intf)
{
	return 0;
}
/* The pseudo-driver under which usbfs claims interfaces on behalf of
 * user space (see claimintf/releaseintf below).
 */
struct usb_driver usbfs_driver = {
	.name = "usbfs",
	.probe = driver_probe,
	.disconnect = driver_disconnect,
	.suspend = driver_suspend,
	.resume = driver_resume,
};
/* Claim interface ifnum for this usbfs file via the usbfs pseudo-driver.
 * Claiming an interface we already own is a successful no-op.
 */
static int claimintf(struct dev_state *ps, unsigned int ifnum)
{
	struct usb_interface *intf;
	int err;

	if (ifnum >= 8*sizeof(ps->ifclaimed))
		return -EINVAL;
	/* already claimed */
	if (test_bit(ifnum, &ps->ifclaimed))
		return 0;

	intf = usb_ifnum_to_if(ps->dev, ifnum);
	if (!intf)
		return -ENOENT;
	err = usb_driver_claim_interface(&usbfs_driver, intf, ps);
	if (!err)
		set_bit(ifnum, &ps->ifclaimed);
	return err;
}
/* Release a previously claimed interface.  -EINVAL for an out-of-range
 * or unclaimed interface, -ENOENT when it no longer exists.
 */
static int releaseintf(struct dev_state *ps, unsigned int ifnum)
{
	struct usb_interface *intf;

	if (ifnum >= 8*sizeof(ps->ifclaimed))
		return -EINVAL;
	intf = usb_ifnum_to_if(ps->dev, ifnum);
	if (!intf)
		return -ENOENT;
	if (!test_and_clear_bit(ifnum, &ps->ifclaimed))
		return -EINVAL;
	usb_driver_release_interface(&usbfs_driver, intf);
	return 0;
}
/* Make sure interface ifnum is claimed by this file before it is used,
 * claiming it implicitly (with a warning) if the process never did.
 */
static int checkintf(struct dev_state *ps, unsigned int ifnum)
{
	if (ps->dev->state != USB_STATE_CONFIGURED)
		return -EHOSTUNREACH;
	if (ifnum >= 8*sizeof(ps->ifclaimed))
		return -EINVAL;
	if (test_bit(ifnum, &ps->ifclaimed))
		return 0;

	/* Be lenient, but let the admin know who misbehaved. */
	dev_warn(&ps->dev->dev, "usbfs: process %d (%s) did not claim "
		 "interface %u before use\n", task_pid_nr(current),
		 current->comm, ifnum);
	return claimintf(ps, ifnum);
}
/* Map an endpoint address onto the interface number that carries it in
 * the active configuration.  Returns the bInterfaceNumber, or -EINVAL
 * for a malformed address, -ESRCH when unconfigured, -ENOENT otherwise.
 */
static int findintfep(struct usb_device *dev, unsigned int ep)
{
	unsigned int ifno, altno, epno;

	if (ep & ~(USB_DIR_IN|0xf))
		return -EINVAL;
	if (!dev->actconfig)
		return -ESRCH;

	for (ifno = 0; ifno < dev->actconfig->desc.bNumInterfaces; ifno++) {
		struct usb_interface *intf = dev->actconfig->interface[ifno];

		for (altno = 0; altno < intf->num_altsetting; altno++) {
			struct usb_host_interface *alts =
					&intf->altsetting[altno];

			for (epno = 0; epno < alts->desc.bNumEndpoints; epno++)
				if (alts->endpoint[epno].desc.bEndpointAddress
						== ep)
					return alts->desc.bInterfaceNumber;
		}
	}
	return -ENOENT;
}
/* Permission check for a control transfer: when the request targets an
 * endpoint or interface, make sure the corresponding interface is (or
 * becomes) claimed by this file.  Vendor requests are always allowed.
 */
static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
			   unsigned int request, unsigned int index)
{
	int ret = 0;
	struct usb_host_interface *alt_setting;

	if (ps->dev->state != USB_STATE_UNAUTHENTICATED
	 && ps->dev->state != USB_STATE_ADDRESS
	 && ps->dev->state != USB_STATE_CONFIGURED)
		return -EHOSTUNREACH;
	if (USB_TYPE_VENDOR == (USB_TYPE_MASK & requesttype))
		return 0;

	/*
	 * check for the special corner case 'get_device_id' in the printer
	 * class specification, where wIndex is (interface << 8 | altsetting)
	 * instead of just interface
	 */
	if (requesttype == 0xa1 && request == 0) {
		alt_setting = usb_find_alt_setting(ps->dev->actconfig,
						   index >> 8, index & 0xff);
		if (alt_setting
		 && alt_setting->desc.bInterfaceClass == USB_CLASS_PRINTER)
			index >>= 8;
	}
	index &= 0xff;
	switch (requesttype & USB_RECIP_MASK) {
	case USB_RECIP_ENDPOINT:
		ret = findintfep(ps->dev, index);
		if (ret >= 0)
			ret = checkintf(ps, ret);
		break;

	case USB_RECIP_INTERFACE:
		ret = checkintf(ps, index);
		break;
	}
	return ret;
}
static int match_devt(struct device *dev, void *data)
{
return dev->devt == (dev_t) (unsigned long) data;
}
/* Look up the usb_device whose char-device number is devt.  On success
 * the caller owns the device reference taken by bus_find_device().
 */
static struct usb_device *usbdev_lookup_by_devt(dev_t devt)
{
	struct device *dev = bus_find_device(&usb_bus_type, NULL,
					     (void *) (unsigned long) devt,
					     match_devt);

	return dev ? container_of(dev, struct usb_device, dev) : NULL;
}
/*
* file operations
*/
static int usbdev_open(struct inode *inode, struct file *file)
{
struct usb_device *dev = NULL;
struct dev_state *ps;
const struct cred *cred = current_cred();
int ret;
ret = -ENOMEM;
ps = kmalloc(sizeof(struct dev_state), GFP_KERNEL);
if (!ps)
goto out_free_ps;
ret = -ENODEV;
/* Protect against simultaneous removal or release */
mutex_lock(&usbfs_mutex);
/* usbdev device-node */
if (imajor(inode) == USB_DEVICE_MAJOR)
dev = usbdev_lookup_by_devt(inode->i_rdev);
#ifdef CONFIG_USB_DEVICEFS
/* procfs file */
if (!dev) {
dev = inode->i_private;
if (dev && dev->usbfs_dentry &&
dev->usbfs_dentry->d_inode == inode)
usb_get_dev(dev);
else
dev = NULL;
}
#endif
mutex_unlock(&usbfs_mutex);
if (!dev)
goto out_free_ps;
usb_lock_device(dev);
if (dev->state == USB_STATE_NOTATTACHED)
goto out_unlock_device;
#if defined(CONFIG_LINK_DEVICE_HSIC) || defined(CONFIG_LINK_DEVICE_USB)
pr_debug("mif: modem usbdev_open, skip usb_autoresume_device\n");
ret = 0;
#else
ret = usb_autoresume_device(dev);
if (ret)
goto out_unlock_device;
#endif
ps->dev = dev;
ps->file = file;
spin_lock_init(&ps->lock);
INIT_LIST_HEAD(&ps->list);
INIT_LIST_HEAD(&ps->async_pending);
INIT_LIST_HEAD(&ps->async_completed);
init_waitqueue_head(&ps->wait);
ps->discsignr = 0;
ps->disc_pid = get_pid(task_pid(current));
ps->disc_uid = cred->uid;
ps->disc_euid = cred->euid;
ps->disccontext = NULL;
ps->ifclaimed = 0;
security_task_getsecid(current, &ps->secid);
smp_wmb();
list_add_tail(&ps->list, &dev->filelist);
file->private_data = ps;
usb_unlock_device(dev);
snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current),
current->comm);
return ret;
out_unlock_device:
usb_unlock_device(dev);
usb_put_dev(dev);
out_free_ps:
kfree(ps);
return ret;
}
/* Close a usbfs node: release claimed interfaces, kill pending URBs,
 * drop the autoresume count and device reference, and free all state
 * (including any completed-but-never-reaped requests).
 */
static int usbdev_release(struct inode *inode, struct file *file)
{
	struct dev_state *ps = file->private_data;
	struct usb_device *dev = ps->dev;
	unsigned int ifnum;
	struct async *as;

	usb_lock_device(dev);
	usb_hub_release_all_ports(dev, ps);

	list_del_init(&ps->list);

	for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed);
			ifnum++) {
		if (test_bit(ifnum, &ps->ifclaimed))
			releaseintf(ps, ifnum);
	}
	destroy_all_async(ps);
#if defined(CONFIG_LINK_DEVICE_HSIC) || defined(CONFIG_LINK_DEVICE_USB)
	/* message previously said "usbdev_open"; fixed to name this path */
	pr_debug("mif: modem usbdev_release, skip usb_autosuspend_device\n");
#else
	usb_autosuspend_device(dev);
#endif
	usb_unlock_device(dev);
	usb_put_dev(dev);
	put_pid(ps->disc_pid);

	/* drain completed requests that user space never reaped */
	as = async_getcompleted(ps);
	while (as) {
		free_async(as);
		as = async_getcompleted(ps);
	}
	kfree(ps);
	return 0;
}
/* USBDEVFS_CONTROL: synchronous control transfer, bounced through one
 * freshly allocated page.  The device lock is dropped around the
 * blocking usb_control_msg() so other usbfs operations may proceed.
 */
static int proc_control(struct dev_state *ps, void __user *arg)
{
	struct usb_device *dev = ps->dev;
	struct usbdevfs_ctrltransfer ctrl;
	unsigned int tmo;
	unsigned char *tbuf;
	unsigned wLength;
	int i, pipe, ret;

	if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
		return -EFAULT;
	ret = check_ctrlrecip(ps, ctrl.bRequestType, ctrl.bRequest,
			      ctrl.wIndex);
	if (ret)
		return ret;
	wLength = ctrl.wLength;		/* To suppress 64k PAGE_SIZE warning */
	if (wLength > PAGE_SIZE)
		return -EINVAL;
	tbuf = (unsigned char *)__get_free_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;
	tmo = ctrl.timeout;
	snoop(&dev->dev, "control urb: bRequestType=%02x "
		"bRequest=%02x wValue=%04x "
		"wIndex=%04x wLength=%04x\n",
		ctrl.bRequestType, ctrl.bRequest,
		__le16_to_cpup(&ctrl.wValue),
		__le16_to_cpup(&ctrl.wIndex),
		__le16_to_cpup(&ctrl.wLength));
	if (ctrl.bRequestType & 0x80) {
		/* IN transfer: validate the user buffer up front so the
		 * result copy cannot fail for access reasons afterwards */
		if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data,
					       ctrl.wLength)) {
			free_page((unsigned long)tbuf);
			return -EINVAL;
		}
		pipe = usb_rcvctrlpipe(dev, 0);
		snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, NULL, 0);
		usb_unlock_device(dev);
		i = usb_control_msg(dev, pipe, ctrl.bRequest,
				    ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
				    tbuf, ctrl.wLength, tmo);
		usb_lock_device(dev);
		snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE,
			  tbuf, max(i, 0));
		if ((i > 0) && ctrl.wLength) {
			if (copy_to_user(ctrl.data, tbuf, i)) {
				free_page((unsigned long)tbuf);
				return -EFAULT;
			}
		}
	} else {
		/* OUT transfer: stage the payload before submitting */
		if (ctrl.wLength) {
			if (copy_from_user(tbuf, ctrl.data, ctrl.wLength)) {
				free_page((unsigned long)tbuf);
				return -EFAULT;
			}
		}
		pipe = usb_sndctrlpipe(dev, 0);
		snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT,
			tbuf, ctrl.wLength);
		usb_unlock_device(dev);
		i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest,
				    ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
				    tbuf, ctrl.wLength, tmo);
		usb_lock_device(dev);
		snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, NULL, 0);
	}
	free_page((unsigned long)tbuf);
	if (i < 0 && i != -EPIPE) {
		dev_printk(KERN_DEBUG, &dev->dev, "usbfs: USBDEVFS_CONTROL "
			   "failed cmd %s rqt %u rq %u len %u ret %d\n",
			   current->comm, ctrl.bRequestType, ctrl.bRequest,
			   ctrl.wLength, i);
	}
	return i;
}
/* USBDEVFS_BULK: synchronous bulk (or interrupt-pipe) transfer through
 * a kmalloc'd bounce buffer, capped at MAX_USBFS_BUFFER_SIZE.  As with
 * proc_control(), the device lock is dropped while blocking.
 */
static int proc_bulk(struct dev_state *ps, void __user *arg)
{
	struct usb_device *dev = ps->dev;
	struct usbdevfs_bulktransfer bulk;
	unsigned int tmo, len1, pipe;
	int len2;
	unsigned char *tbuf;
	int i, ret;

	if (copy_from_user(&bulk, arg, sizeof(bulk)))
		return -EFAULT;
	ret = findintfep(ps->dev, bulk.ep);
	if (ret < 0)
		return ret;
	ret = checkintf(ps, ret);
	if (ret)
		return ret;
	if (bulk.ep & USB_DIR_IN)
		pipe = usb_rcvbulkpipe(dev, bulk.ep & 0x7f);
	else
		pipe = usb_sndbulkpipe(dev, bulk.ep & 0x7f);
	/* reject endpoints that don't exist in the current altsetting */
	if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN)))
		return -EINVAL;
	len1 = bulk.len;
	if (len1 > MAX_USBFS_BUFFER_SIZE)
		return -EINVAL;
	if (!(tbuf = kmalloc(len1, GFP_KERNEL)))
		return -ENOMEM;
	tmo = bulk.timeout;
	if (bulk.ep & 0x80) {
		/* IN: pre-validate the user buffer for the result copy */
		if (len1 && !access_ok(VERIFY_WRITE, bulk.data, len1)) {
			kfree(tbuf);
			return -EINVAL;
		}
		snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0);

		usb_unlock_device(dev);
		i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
		usb_lock_device(dev);
		snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, tbuf, len2);

		if (!i && len2) {
			if (copy_to_user(bulk.data, tbuf, len2)) {
				kfree(tbuf);
				return -EFAULT;
			}
		}
	} else {
		/* OUT: stage the payload before submitting */
		if (len1) {
			if (copy_from_user(tbuf, bulk.data, len1)) {
				kfree(tbuf);
				return -EFAULT;
			}
		}
		snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1);

		usb_unlock_device(dev);
		i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
		usb_lock_device(dev);
		snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0);
	}
	kfree(tbuf);
	if (i < 0)
		return i;
	return len2;
}
/* USBDEVFS_RESETEP: reset the data toggle / state of one endpoint,
 * after verifying the endpoint exists and its interface is claimed.
 */
static int proc_resetep(struct dev_state *ps, void __user *arg)
{
	unsigned int ep;
	int ret;

	if (get_user(ep, (unsigned int __user *)arg))
		return -EFAULT;
	ret = findintfep(ps->dev, ep);
	if (ret >= 0)
		ret = checkintf(ps, ret);
	if (ret)
		return ret;
	usb_reset_endpoint(ps->dev, ep);
	return 0;
}
/* USBDEVFS_CLEAR_HALT: clear a halt condition on one bulk endpoint,
 * after verifying the endpoint exists and its interface is claimed.
 */
static int proc_clearhalt(struct dev_state *ps, void __user *arg)
{
	unsigned int ep;
	int ret;

	if (get_user(ep, (unsigned int __user *)arg))
		return -EFAULT;
	ret = findintfep(ps->dev, ep);
	if (ret >= 0)
		ret = checkintf(ps, ret);
	if (ret)
		return ret;

	return usb_clear_halt(ps->dev, (ep & USB_DIR_IN) ?
			usb_rcvbulkpipe(ps->dev, ep & 0x7f) :
			usb_sndbulkpipe(ps->dev, ep & 0x7f));
}
/* USBDEVFS_GETDRIVER: report the name of the driver bound to an
 * interface; -ENODATA when none is bound.
 */
static int proc_getdriver(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_getdriver gd;
	struct usb_interface *intf;
	int ret;

	if (copy_from_user(&gd, arg, sizeof(gd)))
		return -EFAULT;
	intf = usb_ifnum_to_if(ps->dev, gd.interface);
	if (!intf || !intf->dev.driver)
		ret = -ENODATA;
	else {
		strncpy(gd.driver, intf->dev.driver->name,
				sizeof(gd.driver));
		/* strncpy() does not NUL-terminate when the source fills
		 * the buffer; terminate explicitly so userspace never
		 * receives an unterminated string.
		 */
		gd.driver[sizeof(gd.driver) - 1] = '\0';
		ret = (copy_to_user(arg, &gd, sizeof(gd)) ? -EFAULT : 0);
	}
	return ret;
}
/* USBDEVFS_CONNECTINFO: report the device number and whether the
 * device runs at low speed.  The structure is cleared first so no
 * uninitialized padding reaches user space (the original's designated
 * initializer zero-filled the remainder).
 */
static int proc_connectinfo(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_connectinfo ci;

	memset(&ci, 0, sizeof(ci));
	ci.devnum = ps->dev->devnum;
	ci.slow = (ps->dev->speed == USB_SPEED_LOW);

	return copy_to_user(arg, &ci, sizeof(ci)) ? -EFAULT : 0;
}
/* USBDEVFS_RESET: port-reset the device. */
static int proc_resetdevice(struct dev_state *ps)
{
	return usb_reset_device(ps->dev);
}
/* USBDEVFS_SETINTERFACE: select an alternate setting on an interface
 * this file has (or implicitly acquires) a claim on.
 */
static int proc_setintf(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_setinterface setintf;
	int ret;

	if (copy_from_user(&setintf, arg, sizeof(setintf)))
		return -EFAULT;
	ret = checkintf(ps, setintf.interface);
	if (ret)
		return ret;
	return usb_set_interface(ps->dev, setintf.interface,
			setintf.altsetting);
}
/* USBDEVFS_SETCONFIGURATION: select configuration u, refusing while any
 * interface of the current configuration is claimed by a driver.
 * Re-selecting the active configuration is handled as a reset instead.
 */
static int proc_setconfig(struct dev_state *ps, void __user *arg)
{
	int u;
	int status = 0;
	struct usb_host_config *actconfig;

	if (get_user(u, (int __user *)arg))
		return -EFAULT;

	actconfig = ps->dev->actconfig;

	/* Don't touch the device if any interfaces are claimed.
	 * It could interfere with other drivers' operations, and if
	 * an interface is claimed by usbfs it could easily deadlock.
	 */
	if (actconfig) {
		int i;

		for (i = 0; i < actconfig->desc.bNumInterfaces; ++i) {
			if (usb_interface_claimed(actconfig->interface[i])) {
				dev_warn(&ps->dev->dev,
					"usbfs: interface %d claimed by %s "
					"while '%s' sets config #%d\n",
					actconfig->interface[i]
						->cur_altsetting
						->desc.bInterfaceNumber,
					actconfig->interface[i]
						->dev.driver->name,
					current->comm, u);
				status = -EBUSY;
				break;
			}
		}
	}

	/* SET_CONFIGURATION is often abused as a "cheap" driver reset,
	 * so avoid usb_set_configuration()'s kick to sysfs
	 */
	if (status == 0) {
		if (actconfig && actconfig->desc.bConfigurationValue == u)
			status = usb_reset_configuration(ps->dev);
		else
			status = usb_set_configuration(ps->dev, u);
	}

	return status;
}
/* Core of USBDEVFS_SUBMITURB: validate the userspace URB, build a
 * kernel URB around bounce buffers, and submit it asynchronously.
 * On success the async tracker is queued on ps->async_pending and the
 * URB completes via async_completed(); on failure everything allocated
 * here is torn down before returning.
 */
static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
			struct usbdevfs_iso_packet_desc __user *iso_frame_desc,
			void __user *arg)
{
	struct usbdevfs_iso_packet_desc *isopkt = NULL;
	struct usb_host_endpoint *ep;
	struct async *as;
	struct usb_ctrlrequest *dr = NULL;
	const struct cred *cred = current_cred();
	unsigned int u, totlen, isofrmlen;
	int ret, ifnum = -1;
	int is_in;

	if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
				USBDEVFS_URB_SHORT_NOT_OK |
				USBDEVFS_URB_BULK_CONTINUATION |
				USBDEVFS_URB_NO_FSBR |
				USBDEVFS_URB_ZERO_PACKET |
				USBDEVFS_URB_NO_INTERRUPT))
		return -EINVAL;
	if (uurb->buffer_length > 0 && !uurb->buffer)
		return -EINVAL;
	/* everything except ep0 control requires a claimed interface */
	if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL &&
	    (uurb->endpoint & ~USB_ENDPOINT_DIR_MASK) == 0)) {
		ifnum = findintfep(ps->dev, uurb->endpoint);
		if (ifnum < 0)
			return ifnum;
		ret = checkintf(ps, ifnum);
		if (ret)
			return ret;
	}
	if ((uurb->endpoint & USB_ENDPOINT_DIR_MASK) != 0) {
		is_in = 1;
		ep = ps->dev->ep_in[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK];
	} else {
		is_in = 0;
		ep = ps->dev->ep_out[uurb->endpoint & USB_ENDPOINT_NUMBER_MASK];
	}
	if (!ep)
		return -ENOENT;
	switch(uurb->type) {
	case USBDEVFS_URB_TYPE_CONTROL:
		if (!usb_endpoint_xfer_control(&ep->desc))
			return -EINVAL;
		/* min 8 byte setup packet,
		 * max 8 byte setup plus an arbitrary data stage */
		if (uurb->buffer_length < 8 ||
		    uurb->buffer_length > (8 + MAX_USBFS_BUFFER_SIZE))
			return -EINVAL;
		dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
		if (!dr)
			return -ENOMEM;
		if (copy_from_user(dr, uurb->buffer, 8)) {
			kfree(dr);
			return -EFAULT;
		}
		if (uurb->buffer_length < (le16_to_cpup(&dr->wLength) + 8)) {
			kfree(dr);
			return -EINVAL;
		}
		ret = check_ctrlrecip(ps, dr->bRequestType, dr->bRequest,
				      le16_to_cpup(&dr->wIndex));
		if (ret) {
			kfree(dr);
			return ret;
		}
		/* the setup packet is consumed; the data stage (if any)
		 * becomes the URB's transfer buffer, with the direction
		 * taken from bRequestType */
		uurb->number_of_packets = 0;
		uurb->buffer_length = le16_to_cpup(&dr->wLength);
		uurb->buffer += 8;
		if ((dr->bRequestType & USB_DIR_IN) && uurb->buffer_length) {
			is_in = 1;
			uurb->endpoint |= USB_DIR_IN;
		} else {
			is_in = 0;
			uurb->endpoint &= ~USB_DIR_IN;
		}
		snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
			"bRequest=%02x wValue=%04x "
			"wIndex=%04x wLength=%04x\n",
			dr->bRequestType, dr->bRequest,
			__le16_to_cpup(&dr->wValue),
			__le16_to_cpup(&dr->wIndex),
			__le16_to_cpup(&dr->wLength));
		break;

	case USBDEVFS_URB_TYPE_BULK:
		switch (usb_endpoint_type(&ep->desc)) {
		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_ISOC:
			return -EINVAL;
		case USB_ENDPOINT_XFER_INT:
			/* allow single-shot interrupt transfers */
			uurb->type = USBDEVFS_URB_TYPE_INTERRUPT;
			goto interrupt_urb;
		}
		uurb->number_of_packets = 0;
		if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
			return -EINVAL;
		break;

	case USBDEVFS_URB_TYPE_INTERRUPT:
		if (!usb_endpoint_xfer_int(&ep->desc))
			return -EINVAL;
 interrupt_urb:
		uurb->number_of_packets = 0;
		if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
			return -EINVAL;
		break;

	case USBDEVFS_URB_TYPE_ISO:
		/* arbitrary limit */
		if (uurb->number_of_packets < 1 ||
		    uurb->number_of_packets > 128)
			return -EINVAL;
		if (!usb_endpoint_xfer_isoc(&ep->desc))
			return -EINVAL;
		isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) *
				   uurb->number_of_packets;
		if (!(isopkt = kmalloc(isofrmlen, GFP_KERNEL)))
			return -ENOMEM;
		if (copy_from_user(isopkt, iso_frame_desc, isofrmlen)) {
			kfree(isopkt);
			return -EFAULT;
		}
		for (totlen = u = 0; u < uurb->number_of_packets; u++) {
			/* arbitrary limit,
			 * sufficient for USB 2.0 high-bandwidth iso */
			if (isopkt[u].length > 8192) {
				kfree(isopkt);
				return -EINVAL;
			}
			totlen += isopkt[u].length;
		}
		/* 3072 * 64 microframes */
		if (totlen > 196608) {
			kfree(isopkt);
			return -EINVAL;
		}
		uurb->buffer_length = totlen;
		break;

	default:
		return -EINVAL;
	}
	if (uurb->buffer_length > 0 &&
			!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
				uurb->buffer, uurb->buffer_length)) {
		kfree(isopkt);
		kfree(dr);
		return -EFAULT;
	}
	as = alloc_async(uurb->number_of_packets);
	if (!as) {
		kfree(isopkt);
		kfree(dr);
		return -ENOMEM;
	}
	if (uurb->buffer_length > 0) {
		as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
				GFP_KERNEL);
		if (!as->urb->transfer_buffer) {
			kfree(isopkt);
			kfree(dr);
			free_async(as);
			return -ENOMEM;
		}
		/* Isochronous input data may end up being discontiguous
		 * if some of the packets are short. Clear the buffer so
		 * that the gaps don't leak kernel data to userspace.
		 */
		if (is_in && uurb->type == USBDEVFS_URB_TYPE_ISO)
			memset(as->urb->transfer_buffer, 0,
					uurb->buffer_length);
	}
	as->urb->dev = ps->dev;
	as->urb->pipe = (uurb->type << 30) |
			__create_pipe(ps->dev, uurb->endpoint & 0xf) |
			(uurb->endpoint & USB_DIR_IN);

	/* This tedious sequence is necessary because the URB_* flags
	 * are internal to the kernel and subject to change, whereas
	 * the USBDEVFS_URB_* flags are a user API and must not be changed.
	 */
	u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
	if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
		u |= URB_ISO_ASAP;
	if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
		u |= URB_SHORT_NOT_OK;
	if (uurb->flags & USBDEVFS_URB_NO_FSBR)
		u |= URB_NO_FSBR;
	if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
		u |= URB_ZERO_PACKET;
	if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
		u |= URB_NO_INTERRUPT;
	as->urb->transfer_flags = u;

	as->urb->transfer_buffer_length = uurb->buffer_length;
	as->urb->setup_packet = (unsigned char *)dr;
	as->urb->start_frame = uurb->start_frame;
	as->urb->number_of_packets = uurb->number_of_packets;
	if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
			ps->dev->speed == USB_SPEED_HIGH)
		as->urb->interval = 1 << min(15, ep->desc.bInterval - 1);
	else
		as->urb->interval = ep->desc.bInterval;
	as->urb->context = as;
	as->urb->complete = async_completed;
	for (totlen = u = 0; u < uurb->number_of_packets; u++) {
		as->urb->iso_frame_desc[u].offset = totlen;
		as->urb->iso_frame_desc[u].length = isopkt[u].length;
		totlen += isopkt[u].length;
	}
	kfree(isopkt);
	as->ps = ps;
	as->userurb = arg;
	if (is_in && uurb->buffer_length > 0)
		as->userbuffer = uurb->buffer;
	else
		as->userbuffer = NULL;
	as->signr = uurb->signr;
	as->ifnum = ifnum;
	as->pid = get_pid(task_pid(current));
	as->uid = cred->uid;
	as->euid = cred->euid;
	security_task_getsecid(current, &as->secid);
	if (!is_in && uurb->buffer_length > 0) {
		if (copy_from_user(as->urb->transfer_buffer, uurb->buffer,
				uurb->buffer_length)) {
			free_async(as);
			return -EFAULT;
		}
	}
	snoop_urb(ps->dev, as->userurb, as->urb->pipe,
			as->urb->transfer_buffer_length, 0, SUBMIT,
			is_in ? NULL : as->urb->transfer_buffer,
			uurb->buffer_length);
	async_newpending(as);
	if (usb_endpoint_xfer_bulk(&ep->desc)) {
		spin_lock_irq(&ps->lock);

		/* Not exactly the endpoint address; the direction bit is
		 * shifted to the 0x10 position so that the value will be
		 * between 0 and 31.
		 */
		as->bulk_addr = usb_endpoint_num(&ep->desc) |
			((ep->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK)
				>> 3);

		/* If this bulk URB is the start of a new transfer, re-enable
		 * the endpoint.  Otherwise mark it as a continuation URB.
		 */
		if (uurb->flags & USBDEVFS_URB_BULK_CONTINUATION)
			as->bulk_status = AS_CONTINUATION;
		else
			ps->disabled_bulk_eps &= ~(1 << as->bulk_addr);

		/* Don't accept continuation URBs if the endpoint is
		 * disabled because of an earlier error.
		 */
		if (ps->disabled_bulk_eps & (1 << as->bulk_addr))
			ret = -EREMOTEIO;
		else
			ret = usb_submit_urb(as->urb, GFP_ATOMIC);
		spin_unlock_irq(&ps->lock);
	} else {
		ret = usb_submit_urb(as->urb, GFP_KERNEL);
	}

	if (ret) {
		dev_printk(KERN_DEBUG, &ps->dev->dev,
			   "usbfs: usb_submit_urb returned %d\n", ret);
		snoop_urb(ps->dev, as->userurb, as->urb->pipe,
				0, ret, COMPLETE, NULL, 0);
		async_removepending(as);
		free_async(as);
		return ret;
	}
	return 0;
}
/* USBDEVFS_SUBMITURB handler: copy the user's URB descriptor into the
 * kernel and forward it to the common submission path.  The original
 * user pointer is passed along as the token returned later by REAPURB.
 */
static int proc_submiturb(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_urb uurb;
	struct usbdevfs_urb __user *uarg = arg;

	if (copy_from_user(&uurb, uarg, sizeof(uurb)))
		return -EFAULT;

	/* iso_frame_desc is an array member, so this only forms the
	 * userspace address of the descriptor array (no dereference). */
	return proc_do_submiturb(ps, &uurb, uarg->iso_frame_desc, arg);
}
/* USBDEVFS_DISCARDURB handler: cancel a pending URB identified by its
 * userspace usbdevfs_urb pointer (arg).
 * Returns 0 on success, -EINVAL if no matching pending URB is found.
 */
static int proc_unlinkurb(struct dev_state *ps, void __user *arg)
{
	struct urb *urb;
	struct async *as;
	unsigned long flags;
	spin_lock_irqsave(&ps->lock, flags);
	as = async_getpending(ps, arg);
	if (!as) {
		spin_unlock_irqrestore(&ps->lock, flags);
		return -EINVAL;
	}
	urb = as->urb;
	/* Take a reference while still under the lock so the URB cannot be
	 * freed by a concurrent completion before we kill it below. */
	usb_get_urb(urb);
	spin_unlock_irqrestore(&ps->lock, flags);
	/* usb_kill_urb() may sleep, so it must run outside the spinlock. */
	usb_kill_urb(urb);
	usb_put_urb(urb);
	return 0;
}
/* Copy the results of a completed URB back to userspace.
 * @as:  completed async request (kernel URB plus the user's original
 *       usbdevfs_urb pointer)
 * @arg: user location that receives the address of the completed
 *       userurb (the value REAPURB hands back to the caller)
 * Returns 0 on success, -EFAULT if any userspace copy faults.
 */
static int processcompl(struct async *as, void __user * __user *arg)
{
	struct urb *urb = as->urb;
	struct usbdevfs_urb __user *userurb = as->userurb;
	void __user *addr = as->userurb;
	unsigned int i;
	if (as->userbuffer && urb->actual_length) {
		/* For iso transfers each packet sits at a fixed offset, so
		 * the whole transfer buffer is copied, not just the bytes
		 * actually received. */
		if (urb->number_of_packets > 0) /* Isochronous */
			i = urb->transfer_buffer_length;
		else /* Non-Isoc */
			i = urb->actual_length;
		if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
			goto err_out;
	}
	/* Mirror the completion status fields into the user's URB. */
	if (put_user(as->status, &userurb->status))
		goto err_out;
	if (put_user(urb->actual_length, &userurb->actual_length))
		goto err_out;
	if (put_user(urb->error_count, &userurb->error_count))
		goto err_out;
	if (usb_endpoint_xfer_isoc(&urb->ep->desc)) {
		/* Per-packet status/length for isochronous transfers. */
		for (i = 0; i < urb->number_of_packets; i++) {
			if (put_user(urb->iso_frame_desc[i].actual_length,
				     &userurb->iso_frame_desc[i].actual_length))
				goto err_out;
			if (put_user(urb->iso_frame_desc[i].status,
				     &userurb->iso_frame_desc[i].status))
				goto err_out;
		}
	}
	/* Finally hand the userurb pointer back as the reap result. */
	if (put_user(addr, (void __user * __user *)arg))
		return -EFAULT;
	return 0;
err_out:
	return -EFAULT;
}
/* Block until a completed async request is available (or a signal
 * arrives).  Returns the completed request, or NULL when interrupted
 * by a signal before anything completed.
 * NOTE: called with the device lock held; it is dropped around
 * schedule() so completions/disconnect can make progress.
 */
static struct async *reap_as(struct dev_state *ps)
{
	DECLARE_WAITQUEUE(wait, current);
	struct async *as = NULL;
	struct usb_device *dev = ps->dev;
	add_wait_queue(&ps->wait, &wait);
	for (;;) {
		/* Set state before checking the condition to avoid losing
		 * a wakeup that races with the check. */
		__set_current_state(TASK_INTERRUPTIBLE);
		as = async_getcompleted(ps);
		if (as)
			break;
		if (signal_pending(current))
			break;
		usb_unlock_device(dev);
		schedule();
		usb_lock_device(dev);
	}
	remove_wait_queue(&ps->wait, &wait);
	set_current_state(TASK_RUNNING);
	return as;
}
/* USBDEVFS_REAPURB handler: wait for a completed URB, copy its results
 * to userspace and release it.  -EINTR if interrupted by a signal,
 * -EIO if the wait ended without a completion for any other reason.
 */
static int proc_reapurb(struct dev_state *ps, void __user *arg)
{
	struct async *as;

	as = reap_as(ps);
	if (as != NULL) {
		int rv;

		rv = processcompl(as, (void __user * __user *)arg);
		free_async(as);
		return rv;
	}
	return signal_pending(current) ? -EINTR : -EIO;
}
/* USBDEVFS_REAPURBNDELAY handler: non-blocking reap.  Returns -EAGAIN
 * when no completed URB is queued, otherwise the processcompl() result.
 */
static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg)
{
	struct async *as = async_getcompleted(ps);
	int rv = -EAGAIN;

	if (as != NULL) {
		rv = processcompl(as, (void __user * __user *)arg);
		free_async(as);
	}
	return rv;
}
#ifdef CONFIG_COMPAT
/* 32-bit compat wrapper for USBDEVFS_CONTROL: rebuild a native
 * usbdevfs_ctrltransfer on the compat user stack, widening the 32-bit
 * data pointer, then reuse the native proc_control() path.
 */
static int proc_control_compat(struct dev_state *ps,
			struct usbdevfs_ctrltransfer32 __user *p32)
{
	struct usbdevfs_ctrltransfer __user *p;
	__u32 udata;
	p = compat_alloc_user_space(sizeof(*p));
	/* Everything except the trailing pointer is layout-identical, so
	 * copy it verbatim, then convert the pointer separately. */
	if (copy_in_user(p, p32, (sizeof(*p32) - sizeof(compat_caddr_t))) ||
	    get_user(udata, &p32->data) ||
	    put_user(compat_ptr(udata), &p->data))
		return -EFAULT;
	return proc_control(ps, p);
}
/* 32-bit compat wrapper for USBDEVFS_BULK: field-by-field translation
 * of usbdevfs_bulktransfer32 into a native struct on the compat user
 * stack, then delegate to proc_bulk().
 */
static int proc_bulk_compat(struct dev_state *ps,
			struct usbdevfs_bulktransfer32 __user *p32)
{
	struct usbdevfs_bulktransfer __user *p;
	compat_uint_t n;
	compat_caddr_t addr;
	p = compat_alloc_user_space(sizeof(*p));
	if (get_user(n, &p32->ep) || put_user(n, &p->ep) ||
	    get_user(n, &p32->len) || put_user(n, &p->len) ||
	    get_user(n, &p32->timeout) || put_user(n, &p->timeout) ||
	    get_user(addr, &p32->data) || put_user(compat_ptr(addr), &p->data))
		return -EFAULT;
	return proc_bulk(ps, p);
}
/* 32-bit compat handler for USBDEVFS_DISCSIGNAL: record which signal
 * (and which user context cookie) to deliver on device disconnect.
 */
static int proc_disconnectsignal_compat(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_disconnectsignal32 ds;

	if (copy_from_user(&ds, arg, sizeof(ds)))
		return -EFAULT;

	ps->disccontext = compat_ptr(ds.context);
	ps->discsignr = ds.signr;
	return 0;
}
/* Translate a 32-bit usbdevfs_urb32 from userspace into a native
 * kernel usbdevfs_urb, widening the two embedded pointers.
 * Returns 0 on success, -EFAULT on any access failure.
 */
static int get_urb32(struct usbdevfs_urb *kurb,
		     struct usbdevfs_urb32 __user *uurb)
{
	__u32 uptr;
	/* One access_ok() up front permits the cheaper __get_user()
	 * variants for the individual fields. */
	if (!access_ok(VERIFY_READ, uurb, sizeof(*uurb)) ||
	    __get_user(kurb->type, &uurb->type) ||
	    __get_user(kurb->endpoint, &uurb->endpoint) ||
	    __get_user(kurb->status, &uurb->status) ||
	    __get_user(kurb->flags, &uurb->flags) ||
	    __get_user(kurb->buffer_length, &uurb->buffer_length) ||
	    __get_user(kurb->actual_length, &uurb->actual_length) ||
	    __get_user(kurb->start_frame, &uurb->start_frame) ||
	    __get_user(kurb->number_of_packets, &uurb->number_of_packets) ||
	    __get_user(kurb->error_count, &uurb->error_count) ||
	    __get_user(kurb->signr, &uurb->signr))
		return -EFAULT;
	/* Pointers need explicit compat widening, not a raw copy. */
	if (__get_user(uptr, &uurb->buffer))
		return -EFAULT;
	kurb->buffer = compat_ptr(uptr);
	if (__get_user(uptr, &uurb->usercontext))
		return -EFAULT;
	kurb->usercontext = compat_ptr(uptr);
	return 0;
}
/* 32-bit compat handler for USBDEVFS_SUBMITURB: convert the URB with
 * get_urb32() and submit through the common path.  The original compat
 * user pointer is kept as the reap token.
 */
static int proc_submiturb_compat(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_urb uurb;
	if (get_urb32(&uurb, (struct usbdevfs_urb32 __user *)arg))
		return -EFAULT;
	return proc_do_submiturb(ps, &uurb,
			((struct usbdevfs_urb32 __user *)arg)->iso_frame_desc,
			arg);
}
/* 32-bit counterpart of processcompl(): copy a completed URB's results
 * into the user's usbdevfs_urb32 and return its (compat-shrunk) address
 * through *arg.  Returns 0 or -EFAULT.
 * NOTE(review): unlike processcompl(), this copies only actual_length
 * bytes even for iso URBs — presumably matching the old compat ABI;
 * confirm before changing.
 */
static int processcompl_compat(struct async *as, void __user * __user *arg)
{
	struct urb *urb = as->urb;
	struct usbdevfs_urb32 __user *userurb = as->userurb;
	void __user *addr = as->userurb;
	unsigned int i;
	if (as->userbuffer && urb->actual_length)
		if (copy_to_user(as->userbuffer, urb->transfer_buffer,
				 urb->actual_length))
			return -EFAULT;
	if (put_user(as->status, &userurb->status))
		return -EFAULT;
	if (put_user(urb->actual_length, &userurb->actual_length))
		return -EFAULT;
	if (put_user(urb->error_count, &userurb->error_count))
		return -EFAULT;
	if (usb_endpoint_xfer_isoc(&urb->ep->desc)) {
		for (i = 0; i < urb->number_of_packets; i++) {
			if (put_user(urb->iso_frame_desc[i].actual_length,
				     &userurb->iso_frame_desc[i].actual_length))
				return -EFAULT;
			if (put_user(urb->iso_frame_desc[i].status,
				     &userurb->iso_frame_desc[i].status))
				return -EFAULT;
		}
	}
	/* Hand back the completed userurb as a 32-bit pointer value. */
	if (put_user(ptr_to_compat(addr), (u32 __user *)arg))
		return -EFAULT;
	return 0;
}
/* 32-bit compat handler for USBDEVFS_REAPURB: blocking reap via
 * reap_as(), results delivered through processcompl_compat().
 */
static int proc_reapurb_compat(struct dev_state *ps, void __user *arg)
{
	struct async *as;

	as = reap_as(ps);
	if (as != NULL) {
		int rv;

		rv = processcompl_compat(as, (void __user * __user *)arg);
		free_async(as);
		return rv;
	}
	return signal_pending(current) ? -EINTR : -EIO;
}
/* 32-bit compat handler for USBDEVFS_REAPURBNDELAY: non-blocking reap;
 * -EAGAIN when nothing has completed yet.
 */
static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg)
{
	struct async *as = async_getcompleted(ps);
	int rv = -EAGAIN;

	if (as != NULL) {
		rv = processcompl_compat(as, (void __user * __user *)arg);
		free_async(as);
	}
	return rv;
}
#endif
/* USBDEVFS_DISCSIGNAL handler: record the signal number and user
 * context cookie to deliver when the device is disconnected.
 */
static int proc_disconnectsignal(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_disconnectsignal ds;

	if (copy_from_user(&ds, arg, sizeof(ds)))
		return -EFAULT;

	ps->disccontext = ds.context;
	ps->discsignr = ds.signr;
	return 0;
}
/* USBDEVFS_CLAIMINTERFACE handler: read the interface number from
 * userspace and try to claim it for this file handle.
 */
static int proc_claiminterface(struct dev_state *ps, void __user *arg)
{
	unsigned int ifnum;

	if (get_user(ifnum, (unsigned int __user *)arg))
		return -EFAULT;

	return claimintf(ps, ifnum);
}
/* USBDEVFS_RELEASEINTERFACE handler: release a claimed interface and
 * kill any async URBs still pending on it.
 * Returns 0 on success, -EFAULT on a bad user pointer, or the error
 * from releaseintf() (e.g. when the interface was never claimed).
 */
static int proc_releaseinterface(struct dev_state *ps, void __user *arg)
{
	unsigned int ifnum;
	int ret;

	if (get_user(ifnum, (unsigned int __user *)arg))
		return -EFAULT;
	/* Idiom fix: no assignment buried inside the if-condition. */
	ret = releaseintf(ps, ifnum);
	if (ret < 0)
		return ret;
	/* Cancel in-flight URBs that target the released interface. */
	destroy_async_on_interface(ps, ifnum);
	return 0;
}
/* Core of USBDEVFS_IOCTL: driver-directed ioctls plus the special
 * DISCONNECT/CONNECT sub-commands.  A kernel bounce buffer of
 * _IOC_SIZE(ioctl_code) bytes shuttles data in (_IOC_WRITE) and/or
 * out (_IOC_READ) of userspace.
 * Returns 0/positive from the driver on success or a negative errno.
 */
static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
{
	int size;
	void *buf = NULL;
	int retval = 0;
	struct usb_interface *intf = NULL;
	struct usb_driver *driver = NULL;
	/* alloc buffer */
	if ((size = _IOC_SIZE(ctl->ioctl_code)) > 0) {
		if ((buf = kmalloc(size, GFP_KERNEL)) == NULL)
			return -ENOMEM;
		if ((_IOC_DIR(ctl->ioctl_code) & _IOC_WRITE)) {
			if (copy_from_user(buf, ctl->data, size)) {
				kfree(buf);
				return -EFAULT;
			}
		} else {
			/* Read-only ioctl: hand the driver zeroed memory. */
			memset(buf, 0, size);
		}
	}
	if (!connected(ps)) {
		kfree(buf);
		return -ENODEV;
	}
	if (ps->dev->state != USB_STATE_CONFIGURED)
		retval = -EHOSTUNREACH;
	else if (!(intf = usb_ifnum_to_if(ps->dev, ctl->ifno)))
		retval = -EINVAL;
	else switch (ctl->ioctl_code) {
	/* disconnect kernel driver from interface */
	case USBDEVFS_DISCONNECT:
		if (intf->dev.driver) {
			driver = to_usb_driver(intf->dev.driver);
			dev_dbg(&intf->dev, "disconnect by usbfs\n");
			usb_driver_release_interface(driver, intf);
		} else
			retval = -ENODATA;
		break;
	/* let kernel drivers try to (re)bind to the interface */
	case USBDEVFS_CONNECT:
		if (!intf->dev.driver)
			retval = device_attach(&intf->dev);
		else
			retval = -EBUSY;
		break;
	/* talk directly to the interface's driver */
	default:
		if (intf->dev.driver)
			driver = to_usb_driver(intf->dev.driver);
		if (driver == NULL || driver->unlocked_ioctl == NULL) {
			retval = -ENOTTY;
		} else {
			retval = driver->unlocked_ioctl(intf, ctl->ioctl_code, buf);
			if (retval == -ENOIOCTLCMD)
				retval = -ENOTTY;
		}
	}
	/* cleanup and return */
	/* Only copy results back for successful _IOC_READ ioctls. */
	if (retval >= 0
			&& (_IOC_DIR(ctl->ioctl_code) & _IOC_READ) != 0
			&& size > 0
			&& copy_to_user(ctl->data, buf, size) != 0)
		retval = -EFAULT;
	kfree(buf);
	return retval;
}
/* Native USBDEVFS_IOCTL entry: copy the descriptor from userspace and
 * delegate to proc_ioctl(). */
static int proc_ioctl_default(struct dev_state *ps, void __user *arg)
{
	struct usbdevfs_ioctl ctrl;
	if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
		return -EFAULT;
	return proc_ioctl(ps, &ctrl);
}
#ifdef CONFIG_COMPAT
/* 32-bit USBDEVFS_IOCTL32 entry: read the compat descriptor field by
 * field, widening the data pointer, then delegate to proc_ioctl(). */
static int proc_ioctl_compat(struct dev_state *ps, compat_uptr_t arg)
{
	struct usbdevfs_ioctl32 __user *uioc;
	struct usbdevfs_ioctl ctrl;
	u32 udata;
	uioc = compat_ptr((long)arg);
	/* Single access_ok() allows the unchecked __get_user() reads. */
	if (!access_ok(VERIFY_READ, uioc, sizeof(*uioc)) ||
	    __get_user(ctrl.ifno, &uioc->ifno) ||
	    __get_user(ctrl.ioctl_code, &uioc->ioctl_code) ||
	    __get_user(udata, &uioc->data))
		return -EFAULT;
	ctrl.data = compat_ptr(udata);
	return proc_ioctl(ps, &ctrl);
}
#endif
/* USBDEVFS_CLAIM_PORT handler: claim a hub port for this file handle
 * so no other usbfs opener (or the hub driver) manages it. */
static int proc_claim_port(struct dev_state *ps, void __user *arg)
{
	unsigned portnum;
	int rc;
	if (get_user(portnum, (unsigned __user *) arg))
		return -EFAULT;
	rc = usb_hub_claim_port(ps->dev, portnum, ps);
	if (rc == 0)
		snoop(&ps->dev->dev, "port %d claimed by process %d: %s\n",
		      portnum, task_pid_nr(current), current->comm);
	return rc;
}
/* USBDEVFS_RELEASE_PORT handler: give a previously claimed hub port
 * back to normal management. */
static int proc_release_port(struct dev_state *ps, void __user *arg)
{
	unsigned portnum;

	if (get_user(portnum, (unsigned __user *)arg))
		return -EFAULT;

	return usb_hub_release_port(ps->dev, portnum, ps);
}
/*
* NOTE: All requests here that have interface numbers as parameters
* are assuming that somehow the configuration has been prevented from
* changing. But there's no mechanism to ensure that...
*/
/* Common ioctl dispatcher for /dev/bus/usb device nodes: validates the
 * open mode and device state, takes the device lock, then routes each
 * USBDEVFS_* command to its proc_* handler.  Commands that can modify
 * the device bump i_mtime; any successful command bumps i_atime.
 */
static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
				void __user *p)
{
	struct dev_state *ps = file->private_data;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct usb_device *dev = ps->dev;
	int ret = -ENOTTY;
	/* All usbfs ioctls require the node to be open for writing. */
	if (!(file->f_mode & FMODE_WRITE))
		return -EPERM;
	usb_lock_device(dev);
	if (!connected(ps)) {
		usb_unlock_device(dev);
		return -ENODEV;
	}
	switch (cmd) {
	case USBDEVFS_CONTROL:
		snoop(&dev->dev, "%s: CONTROL\n", __func__);
		ret = proc_control(ps, p);
		if (ret >= 0)
			inode->i_mtime = CURRENT_TIME;
		break;
	case USBDEVFS_BULK:
		snoop(&dev->dev, "%s: BULK\n", __func__);
		ret = proc_bulk(ps, p);
		if (ret >= 0)
			inode->i_mtime = CURRENT_TIME;
		break;
	case USBDEVFS_RESETEP:
		snoop(&dev->dev, "%s: RESETEP\n", __func__);
		ret = proc_resetep(ps, p);
		if (ret >= 0)
			inode->i_mtime = CURRENT_TIME;
		break;
	case USBDEVFS_RESET:
		snoop(&dev->dev, "%s: RESET\n", __func__);
		ret = proc_resetdevice(ps);
		break;
	case USBDEVFS_CLEAR_HALT:
		snoop(&dev->dev, "%s: CLEAR_HALT\n", __func__);
		ret = proc_clearhalt(ps, p);
		if (ret >= 0)
			inode->i_mtime = CURRENT_TIME;
		break;
	case USBDEVFS_GETDRIVER:
		snoop(&dev->dev, "%s: GETDRIVER\n", __func__);
		ret = proc_getdriver(ps, p);
		break;
	case USBDEVFS_CONNECTINFO:
		snoop(&dev->dev, "%s: CONNECTINFO\n", __func__);
		ret = proc_connectinfo(ps, p);
		break;
	case USBDEVFS_SETINTERFACE:
		snoop(&dev->dev, "%s: SETINTERFACE\n", __func__);
		ret = proc_setintf(ps, p);
		break;
	case USBDEVFS_SETCONFIGURATION:
		snoop(&dev->dev, "%s: SETCONFIGURATION\n", __func__);
		ret = proc_setconfig(ps, p);
		break;
	case USBDEVFS_SUBMITURB:
		snoop(&dev->dev, "%s: SUBMITURB\n", __func__);
		ret = proc_submiturb(ps, p);
		if (ret >= 0)
			inode->i_mtime = CURRENT_TIME;
		break;
#ifdef CONFIG_COMPAT
	/* 32-bit userspace variants of the commands above. */
	case USBDEVFS_CONTROL32:
		snoop(&dev->dev, "%s: CONTROL32\n", __func__);
		ret = proc_control_compat(ps, p);
		if (ret >= 0)
			inode->i_mtime = CURRENT_TIME;
		break;
	case USBDEVFS_BULK32:
		snoop(&dev->dev, "%s: BULK32\n", __func__);
		ret = proc_bulk_compat(ps, p);
		if (ret >= 0)
			inode->i_mtime = CURRENT_TIME;
		break;
	case USBDEVFS_DISCSIGNAL32:
		snoop(&dev->dev, "%s: DISCSIGNAL32\n", __func__);
		ret = proc_disconnectsignal_compat(ps, p);
		break;
	case USBDEVFS_SUBMITURB32:
		snoop(&dev->dev, "%s: SUBMITURB32\n", __func__);
		ret = proc_submiturb_compat(ps, p);
		if (ret >= 0)
			inode->i_mtime = CURRENT_TIME;
		break;
	case USBDEVFS_REAPURB32:
		snoop(&dev->dev, "%s: REAPURB32\n", __func__);
		ret = proc_reapurb_compat(ps, p);
		break;
	case USBDEVFS_REAPURBNDELAY32:
		snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
		ret = proc_reapurbnonblock_compat(ps, p);
		break;
	case USBDEVFS_IOCTL32:
		snoop(&dev->dev, "%s: IOCTL32\n", __func__);
		ret = proc_ioctl_compat(ps, ptr_to_compat(p));
		break;
#endif
	case USBDEVFS_DISCARDURB:
		snoop(&dev->dev, "%s: DISCARDURB\n", __func__);
		ret = proc_unlinkurb(ps, p);
		break;
	case USBDEVFS_REAPURB:
		snoop(&dev->dev, "%s: REAPURB\n", __func__);
		ret = proc_reapurb(ps, p);
		break;
	case USBDEVFS_REAPURBNDELAY:
		snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
		ret = proc_reapurbnonblock(ps, p);
		break;
	case USBDEVFS_DISCSIGNAL:
		snoop(&dev->dev, "%s: DISCSIGNAL\n", __func__);
		ret = proc_disconnectsignal(ps, p);
		break;
	case USBDEVFS_CLAIMINTERFACE:
		snoop(&dev->dev, "%s: CLAIMINTERFACE\n", __func__);
		ret = proc_claiminterface(ps, p);
		break;
	case USBDEVFS_RELEASEINTERFACE:
		snoop(&dev->dev, "%s: RELEASEINTERFACE\n", __func__);
		ret = proc_releaseinterface(ps, p);
		break;
	case USBDEVFS_IOCTL:
		snoop(&dev->dev, "%s: IOCTL\n", __func__);
		ret = proc_ioctl_default(ps, p);
		break;
	case USBDEVFS_CLAIM_PORT:
		snoop(&dev->dev, "%s: CLAIM_PORT\n", __func__);
		ret = proc_claim_port(ps, p);
		break;
	case USBDEVFS_RELEASE_PORT:
		snoop(&dev->dev, "%s: RELEASE_PORT\n", __func__);
		ret = proc_release_port(ps, p);
		break;
	}
	usb_unlock_device(dev);
	if (ret >= 0)
		inode->i_atime = CURRENT_TIME;
	return ret;
}
/* Native unlocked_ioctl entry point: forward to the shared dispatcher
 * with the argument reinterpreted as a user pointer. */
static long usbdev_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return usbdev_do_ioctl(file, cmd, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
/* compat_ioctl entry point: widen the 32-bit argument with
 * compat_ptr() before handing it to the shared dispatcher. */
static long usbdev_compat_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	return usbdev_do_ioctl(file, cmd, compat_ptr(arg));
}
#endif
/* No kernel lock - fine */
/* poll() for /dev/bus/usb nodes: writable openers get POLLOUT when a
 * completed URB is waiting to be reaped; a disconnected device reports
 * POLLERR|POLLHUP. */
static unsigned int usbdev_poll(struct file *file,
				struct poll_table_struct *wait)
{
	struct dev_state *ps = file->private_data;
	unsigned int mask = 0;
	poll_wait(file, &ps->wait, wait);
	if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed))
		mask |= POLLOUT | POLLWRNORM;
	if (!connected(ps))
		mask |= POLLERR | POLLHUP;
	return mask;
}
/* file_operations for usbfs device nodes (/dev/bus/usb/BBB/DDD). */
const struct file_operations usbdev_file_operations = {
	.owner =	  THIS_MODULE,
	.llseek =	  usbdev_lseek,
	.read =		  usbdev_read,
	.poll =		  usbdev_poll,
	.unlocked_ioctl = usbdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =   usbdev_compat_ioctl,
#endif
	.open =		  usbdev_open,
	.release =	  usbdev_release,
};
/* Device-removal hook: for every open usbfs handle on the departing
 * device, kill outstanding URBs, wake blocked waiters, detach the
 * handle from the device, and deliver the disconnect signal the opener
 * registered via USBDEVFS_DISCSIGNAL (if any). */
static void usbdev_remove(struct usb_device *udev)
{
	struct dev_state *ps;
	struct siginfo sinfo;
	while (!list_empty(&udev->filelist)) {
		ps = list_entry(udev->filelist.next, struct dev_state, list);
		destroy_all_async(ps);
		wake_up_all(&ps->wait);
		/* list_del_init() lets usbdev_release() detect that the
		 * handle is already unlinked from the device. */
		list_del_init(&ps->list);
		if (ps->discsignr) {
			sinfo.si_signo = ps->discsignr;
			sinfo.si_errno = EPIPE;
			sinfo.si_code = SI_ASYNCIO;
			sinfo.si_addr = ps->disccontext;
			/* Deliver with the credentials captured at open
			 * time, not those of the current context. */
			kill_pid_info_as_uid(ps->discsignr, &sinfo,
					ps->disc_pid, ps->disc_uid,
					ps->disc_euid, ps->secid);
		}
	}
}
#ifdef CONFIG_USB_DEVICE_CLASS
static struct class *usb_classdev_class;
/* Create the legacy "usbdevB.D" class device that shadows the usbfs
 * char device for this USB device.  Returns 0 or a PTR_ERR code. */
static int usb_classdev_add(struct usb_device *dev)
{
	struct device *cldev;
	cldev = device_create(usb_classdev_class, &dev->dev, dev->dev.devt,
			      NULL, "usbdev%d.%d", dev->bus->busnum,
			      dev->devnum);
	if (IS_ERR(cldev))
		return PTR_ERR(cldev);
	dev->usb_classdev = cldev;
	return 0;
}
/* Tear down the legacy class device created by usb_classdev_add(),
 * if one exists. */
static void usb_classdev_remove(struct usb_device *dev)
{
	if (dev->usb_classdev)
		device_unregister(dev->usb_classdev);
}
#else
#define usb_classdev_add(dev) 0
#define usb_classdev_remove(dev) do {} while (0)
#endif
/* USB core notifier: mirror device add/remove into the usbfs view.
 * NOTIFY_BAD on add aborts device registration. */
static int usbdev_notify(struct notifier_block *self,
			 unsigned long action, void *dev)
{
	switch (action) {
	case USB_DEVICE_ADD:
		if (usb_classdev_add(dev))
			return NOTIFY_BAD;
		break;
	case USB_DEVICE_REMOVE:
		usb_classdev_remove(dev);
		usbdev_remove(dev);
		break;
	}
	return NOTIFY_OK;
}

/* Registered with the USB core in usb_devio_init(). */
static struct notifier_block usbdev_nb = {
	.notifier_call = 	usbdev_notify,
};

/* Char device backing the /dev/bus/usb minor range. */
static struct cdev usb_device_cdev;
/* Module init for usbfs device I/O: reserve the usb_device minor
 * range, register the char device, optionally create the legacy
 * class, and hook into USB core notifications.  Unwinds via goto on
 * partial failure. */
int __init usb_devio_init(void)
{
	int retval;
	retval = register_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX,
					"usb_device");
	if (retval) {
		printk(KERN_ERR "Unable to register minors for usb_device\n");
		goto out;
	}
	cdev_init(&usb_device_cdev, &usbdev_file_operations);
	retval = cdev_add(&usb_device_cdev, USB_DEVICE_DEV, USB_DEVICE_MAX);
	if (retval) {
		printk(KERN_ERR "Unable to get usb_device major %d\n",
		       USB_DEVICE_MAJOR);
		goto error_cdev;
	}
#ifdef CONFIG_USB_DEVICE_CLASS
	usb_classdev_class = class_create(THIS_MODULE, "usb_device");
	if (IS_ERR(usb_classdev_class)) {
		printk(KERN_ERR "Unable to register usb_device class\n");
		retval = PTR_ERR(usb_classdev_class);
		cdev_del(&usb_device_cdev);
		usb_classdev_class = NULL;
		goto out;
	}
	/* devices of this class shadow the major:minor of their parent
	 * device, so clear ->dev_kobj to prevent adding duplicate entries
	 * to /sys/dev
	 */
	usb_classdev_class->dev_kobj = NULL;
#endif
	usb_register_notify(&usbdev_nb);
out:
	return retval;
error_cdev:
	/* cdev_add() failed: release the minor region reserved above. */
	unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX);
	goto out;
}
/* Module teardown: mirror of usb_devio_init() in reverse order —
 * stop notifications first so no new class devices appear while the
 * class and cdev are being destroyed. */
void usb_devio_cleanup(void)
{
	usb_unregister_notify(&usbdev_nb);
#ifdef CONFIG_USB_DEVICE_CLASS
	class_destroy(usb_classdev_class);
#endif
	cdev_del(&usb_device_cdev);
	unregister_chrdev_region(USB_DEVICE_DEV, USB_DEVICE_MAX);
}
| gpl-2.0 |
iocellnetworks/ndas4linux | 2.6.32/platform/linux/xixfs/xixcore/core/volume.c | 24 | 10169 | /*
-------------------------------------------------------------------------
Copyright (c) 2012 IOCELL Networks, Plainsboro, NJ, USA.
All rights reserved.
LICENSE TERMS
The free distribution and use of this software in both source and binary
form is allowed (with or without changes) provided that:
1. distributions of this source code include the above copyright
notice, this list of conditions and the following disclaimer;
2. distributions in binary form include the above copyright
notice, this list of conditions and the following disclaimer
in the documentation and/or other associated materials;
3. the copyright holder's name is not used to endorse products
built using this software without specific written permission.
ALTERNATIVELY, provided that this notice is retained in full, this product
may be distributed under the terms of the GNU General Public License (GPL v2),
in which case the provisions of the GPL apply INSTEAD OF those given above.
DISCLAIMER
This software is provided 'as is' with no explcit or implied warranties
in respect of any properties, including, but not limited to, correctness
and fitness for purpose.
-------------------------------------------------------------------------
*/
#include "xcsystem/debug.h"
#include "xcsystem/errinfo.h"
#include "xcsystem/system.h"
#include "xixcore/callback.h"
#include "xixcore/layouts.h"
#include "xixcore/buffer.h"
#include "xixcore/ondisk.h"
#include "xixcore/lotlock.h"
#include "xixcore/dir.h"
#include "xixcore/volume.h"
/* Define module name */
#undef __XIXCORE_MODULE__
#define __XIXCORE_MODULE__ "XCVOL"
#if defined(XIXCORE_DEBUG)
long XixcoreDebugLevel;
long XixcoreDebugTarget;
#endif
XIXCORE_GLOBAL xixcore_global;
/*
 * One-time initialization of the xixcore_global singleton:
 * temporary lot-lock list + its spinlock, the default upcase table,
 * and the 16-byte host UUID.
 * @uuid:                  16-byte host identifier, copied into the global.
 * @AuxLockListSpinLock:   caller-owned spinlock used for the tmp lot-lock list.
 * Returns XCCODE_SUCCESS, or XCCODE_UNSUCCESS if the upcase table
 * cannot be generated.
 */
int
xixcore_call
xixcore_IntializeGlobalData(
	xc_uint8 uuid[],
	PXIXCORE_SPINLOCK AuxLockListSpinLock
)
{
	memset((void *)&xixcore_global, 0, sizeof(XIXCORE_GLOBAL));
	xixcore_global.tmp_lot_lock_list_lock = AuxLockListSpinLock;
	xixcore_InitializeSpinLock(xixcore_global.tmp_lot_lock_list_lock);
	xixcore_InitializeListHead(&(xixcore_global.tmp_lot_lock_list));
	xixcore_global.xixcore_case_table_length = XIXCORE_DEFAULT_UPCASE_NAME_LEN;
	xixcore_global.xixcore_case_table = xixcore_GenerateDefaultUpcaseTable();
	if(xixcore_global.xixcore_case_table == NULL) {
		return XCCODE_UNSUCCESS;
	}
	memcpy(xixcore_global.HostId, uuid, 16);
	/* Mark the global as ready only after everything above succeeded. */
	xixcore_global.IsInitialized = 1;
	return XCCODE_SUCCESS;
}
/*
* Initialize Xixcore VCB
*/
/*
 * Initialize a Xixcore VCB (volume control block) with device geometry,
 * the backing block device, the child-entry cache (LRU list plus a
 * 10-bucket hash), and host/volume identifiers.
 * @XixcoreChidcacheSpinLock: caller-owned lock protecting the child cache.
 * @IsReadOnly:               nonzero marks the volume write-protected.
 */
void
xixcore_call
xixcore_InitializeVolume(
	PXIXCORE_VCB XixcoreVcb,
	PXIXCORE_BLOCK_DEVICE XixcoreBlockDevice,
	PXIXCORE_SPINLOCK XixcoreChidcacheSpinLock,
	xc_uint8 IsReadOnly,
	xc_uint16 SectorSize,
	xc_uint16 SectorSizeBit,
	xc_uint32 AddrLotSize,
	xc_uint8 VolumeId[]
)
{
	xc_uint32 i = 0;
	memset((void *)XixcoreVcb, 0, sizeof(XIXCORE_VCB));
	XixcoreVcb->NodeType.Type = XIXCORE_NODE_TYPE_VCB;
	XixcoreVcb->NodeType.Size = sizeof(XIXCORE_VCB);
	XixcoreVcb->IsVolumeWriteProtected = IsReadOnly;
	XixcoreVcb->SectorSize = SectorSize;
	XixcoreVcb->SectorSizeBit = SectorSizeBit;
	XixcoreVcb->AddrLotSize = AddrLotSize;
	// Set block device to Xixcore VCB
	XixcoreVcb->XixcoreBlockDevice = XixcoreBlockDevice;
	XixcoreVcb->childCacheCount = 0;
	XixcoreVcb->childEntryCacheSpinlock = XixcoreChidcacheSpinLock;
	xixcore_InitializeSpinLock(XixcoreVcb->childEntryCacheSpinlock);
	xixcore_InitializeListHead(&(XixcoreVcb->LRUchildEntryCacheHeader));
	/* Hash table for child-entry lookup: 10 bucket list heads. */
	for(i = 0; i< 10; i++){
		xixcore_InitializeListHead(&(XixcoreVcb->HASHchildEntryCacheHeader[i]));
	}
	/*
	 * Host information
	 */
	memcpy(XixcoreVcb->HostId, xixcore_global.HostId, 16);
	memcpy(XixcoreVcb->HostMac, xixcore_global.HostMac, 32);
	memcpy(XixcoreVcb->VolumeId, VolumeId, 16);
}
/*
 * Prepare a metadata update context: zero it, bind it to its owning
 * VCB, and attach (and initialize) the caller-supplied meta lock.
 */
void
xixcore_call
xixcore_InitializeMetaContext(
	PXIXCORE_META_CTX MetaContext,
	PXIXCORE_VCB XixcoreVcb,
	PXIXCORE_SPINLOCK MetaLock
)
{
	memset((void *)MetaContext, 0, sizeof(XIXCORE_META_CTX));

	xixcore_InitializeSpinLock(MetaLock);
	MetaContext->MetaLock = MetaLock;
	MetaContext->XixcoreVCB = XixcoreVcb;
}
/*
 * Validate that the lot at @LotNumber is a Xixfs volume lot:
 * the lot header must be LOT_INFO_TYPE_VOLUME, the volume header must
 * carry XIFS_VOLUME_SIGNATURE, and the on-disk version must not be
 * newer than this driver supports.  On success the 16-byte volume id
 * is copied out through @VolumeId.
 * @VolumeInfo/@LotHeader: caller-supplied scratch buffers, each large
 *                         enough for the duplicated on-disk structures.
 * Returns 0 on success or a negative XCCODE error.
 */
int
xixcore_call
xixcore_checkVolume(
	PXIXCORE_BLOCK_DEVICE xixBlkDev,
	xc_uint32 sectorsize,
	xc_uint32 sectorsizeBit,
	xc_uint32 LotSize,
	xc_sector_t LotNumber,
	PXIXCORE_BUFFER VolumeInfo,
	PXIXCORE_BUFFER LotHeader,
	xc_uint8 *VolumeId
)
{
	int RC = 0;
	PXIDISK_COMMON_LOT_HEADER pLotHeader = NULL;
	PXIDISK_VOLUME_INFO pVolumeInfo = NULL;
	xc_int32 reason = 0;
	DebugTrace(DEBUG_LEVEL_TRACE, DEBUG_TARGET_VOLINFO,
		("Enter xixcore_checkVolume .\n" ));
	XIXCORE_ASSERT(VolumeInfo);
	XIXCORE_ASSERT(LotHeader);
	XIXCORE_ASSERT(xixcore_GetBufferSizeWithOffset(VolumeInfo) >= XIDISK_DUP_VOLUME_INFO_SIZE);
	XIXCORE_ASSERT(xixcore_GetBufferSizeWithOffset(LotHeader) >= XIDISK_DUP_COMMON_LOT_HEADER_SIZE);
	memset(xixcore_GetDataBuffer(VolumeInfo),0,XIDISK_DUP_VOLUME_INFO_SIZE);
	memset(xixcore_GetDataBuffer(LotHeader),0,XIDISK_DUP_COMMON_LOT_HEADER_SIZE);
	/* Step 1: read and type-check the common lot header. */
	RC = xixcore_RawReadLotHeader(
			xixBlkDev,
			LotSize,
			sectorsize,
			sectorsizeBit,
			LotNumber,
			LotHeader,
			&reason
			);
	if( RC < 0 ) {
		DebugTrace(DEBUG_LEVEL_ERROR, DEBUG_TARGET_ALL,
			("FAIL xixcore_checkVolume : xixcore_RawReadLotHeader %x .\n", RC ));
		goto error_out;
	}
	pLotHeader = (PXIDISK_COMMON_LOT_HEADER)xixcore_GetDataBufferWithOffset(LotHeader);
	if(pLotHeader->LotInfo.Type != LOT_INFO_TYPE_VOLUME)
	{
		DebugTrace(DEBUG_LEVEL_ERROR, DEBUG_TARGET_ALL,
			("Fail(0x%x) Is Not LOT_INFO_TYPE_VOLUME .\n",
			pLotHeader->LotInfo.Type));
		RC = XCCODE_EINVAL;
		goto error_out;
	}
	/* Step 2: read the volume header and verify signature/version. */
	RC = xixcore_RawReadVolumeHeader(xixBlkDev,
			LotSize,
			sectorsize,
			sectorsizeBit,
			LotNumber,
			VolumeInfo,
			&reason
			);
	if( RC < 0 ) {
		DebugTrace(DEBUG_LEVEL_ERROR, DEBUG_TARGET_ALL,
			("FAIL xixcore_checkVolume : xixcore_RawReadVolumeHeader %x .\n", RC ));
		goto error_out;
	}
	pVolumeInfo = (PXIDISK_VOLUME_INFO)xixcore_GetDataBufferWithOffset(VolumeInfo);
	if(pVolumeInfo->VolumeSignature != XIFS_VOLUME_SIGNATURE)
	{
		DebugTrace(DEBUG_LEVEL_ERROR, DEBUG_TARGET_ALL,
			("Fail (0x%llx) Is XIFS_VOLUME_SIGNATURE .\n",
			pVolumeInfo->VolumeSignature));
		RC = XCCODE_EINVAL;
		goto error_out;
	}
	/* Reject volumes formatted by a newer, unknown on-disk version. */
	if((pVolumeInfo->XifsVesion > XIFS_CURRENT_VERSION))
	{
		DebugTrace(DEBUG_LEVEL_ERROR, DEBUG_TARGET_ALL,
			("Fail(0x%x) Is XIFS_CURRENT_VERSION .\n",
			pVolumeInfo->XifsVesion ));
		RC = XCCODE_EINVAL;
		goto error_out;
	}
	memcpy(VolumeId, pVolumeInfo->VolumeId, 16);
	RC = 0;
error_out:
	DebugTrace(DEBUG_LEVEL_TRACE, DEBUG_TARGET_VOLINFO,
		("End xixcore_checkVolume %x.\n", RC ));
	return RC;
}
/*
 * Read the on-disk volume header at @VolumeIndex and populate the VCB
 * with superblock information (lot map indexes, lot count/size,
 * signature, volume id, timestamps, and the volume label).
 * Returns 0 on success or a negative XCCODE error.
 *
 * BUG FIX: the previous version freed pVCB->VolumeName without
 * clearing the pointer, then re-tested the now-dangling pointer below:
 * depending on the label length it either freed the same allocation a
 * second time (double free) or memcpy'd the new label into freed
 * memory (use-after-free).  The name buffer is now freed exactly once,
 * the pointer NULLed, and fresh storage allocated when a label exists.
 */
int
xixcore_call
xixcore_GetSuperBlockInformation(
	PXIXCORE_VCB pVCB,
	xc_uint32 LotSize,
	xc_sector_t VolumeIndex
)
{
	int RC = 0;
	PXIDISK_VOLUME_INFO VolInfo = NULL;
	PXIXCORE_BUFFER xbuf = NULL;
	xc_int32 reason = 0;

	DebugTrace(DEBUG_LEVEL_TRACE, DEBUG_TARGET_VOLINFO,
		("Enter xixcore_GetSuperBlockInformation .\n" ));

	/* Scratch buffer for the duplicated on-disk volume header. */
	xbuf = xixcore_AllocateBuffer(XIDISK_DUP_VOLUME_INFO_SIZE);
	if(!xbuf){
		DebugTrace(DEBUG_LEVEL_ERROR, DEBUG_TARGET_ALL,
			("FAIL xixcore_GetSuperBlockInformation : can't allocat xbuf .\n" ));
		return XCCODE_ENOMEM;
	}

	memset(xixcore_GetDataBuffer(xbuf), 0, XIDISK_DUP_VOLUME_INFO_SIZE);

	RC = xixcore_RawReadVolumeHeader(pVCB->XixcoreBlockDevice,
			LotSize,
			pVCB->SectorSize,
			pVCB->SectorSizeBit,
			VolumeIndex,
			xbuf,
			&reason
			);
	if( RC < 0 ) {
		DebugTrace(DEBUG_LEVEL_ERROR, DEBUG_TARGET_ALL,
			("FAIL xixcore_checkVolume : xixcore_RawReadVolumeHeader %x .\n", RC ));
		goto error_out;
	}

	VolInfo = (PXIDISK_VOLUME_INFO)xixcore_GetDataBufferWithOffset(xbuf);

	DebugTrace(DEBUG_LEVEL_INFO, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO ),
		("VolInfo HostRegLotMap %lld:RootLotMap %lld: LotSize: %d : TotalLotNumber %lld .\n",
		VolInfo->HostRegLotMapIndex, VolInfo->RootDirectoryLotIndex, VolInfo->LotSize, VolInfo->NumLots));

	/* Mirror the superblock fields into the in-memory VCB. */
	pVCB->MetaContext.HostRegLotMapIndex = VolInfo->HostRegLotMapIndex;
	pVCB->RootDirectoryLotIndex = VolInfo->RootDirectoryLotIndex;
	pVCB->NumLots = VolInfo->NumLots;
	pVCB->LotSize = VolInfo->LotSize;
	pVCB->VolumeLotSignature = VolInfo->LotSignature;
	// Changed by ILGU HONG
	memcpy(pVCB->VolumeId, VolInfo->VolumeId,16);
	pVCB->VolCreateTime = VolInfo->VolCreationTime;
	pVCB->VolSerialNumber = VolInfo->VolSerialNumber;

	DebugTrace(DEBUG_LEVEL_INFO, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO ),
		("VCB HostRegLotMap %lld:RootLotMap %lld: LotSize: %d : TotalLotNumber %lld .\n",
		pVCB->MetaContext.HostRegLotMapIndex, pVCB->RootDirectoryLotIndex, pVCB->LotSize, pVCB->NumLots));

	DebugTrace(DEBUG_LEVEL_INFO, (DEBUG_TARGET_FSCTL|DEBUG_TARGET_VOLINFO ),
		("VCB Volume Signature (0x%x).\n",
		pVCB->VolumeLotSignature));

	/* Drop any previous label buffer exactly once and clear the
	 * pointer so it cannot be freed or reused again below. */
	if(pVCB->VolumeName != NULL) {
		xixcore_FreeMem(pVCB->VolumeName, XCTAG_VOLNAME);
		pVCB->VolumeName = NULL;
		pVCB->VolumeNameLength = 0;
	}

	if(VolInfo->VolLabelLength > 0 ) {
		pVCB->VolumeName = xixcore_AllocateMem(
			SECTORALIGNSIZE_512(VolInfo->VolLabelLength), 0, XCTAG_VOLNAME);
		if(pVCB->VolumeName == NULL) {
			DebugTrace(DEBUG_LEVEL_ERROR, DEBUG_TARGET_ALL,
				("FAIL xixcore_checkVolume : can't allocate volume name.\n"));
			RC = XCCODE_ENOMEM;
			goto error_out;
		}
		pVCB->VolumeNameLength = VolInfo->VolLabelLength;
		memset(pVCB->VolumeName, 0, SECTORALIGNSIZE_512(pVCB->VolumeNameLength));
		memcpy(pVCB->VolumeName, VolInfo->VolLabel, pVCB->VolumeNameLength);
	}

error_out:
	xixcore_FreeBuffer(xbuf);
	return RC;
}
| gpl-2.0 |
EdwinMoq/android_kernel_lge_omap4-common | drivers/misc/mpu3050/mpuirq.c | 24 | 8135 | /*
$License:
Copyright (C) 2010 InvenSense Corporation, All Rights Reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
$
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/irq.h>
#include <linux/signal.h>
#include <linux/miscdevice.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <linux/workqueue.h>
#include <linux/poll.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <linux/io.h>
//#include <linux/gpio.h> //eugene.goh
#include "mpu.h"
#include "mpuirq.h"
#include "mldl_cfg.h"
#include "mpu-i2c.h"
#define MPUIRQ_NAME "mpuirq"
/* function which gets accel data and sends it to MPU */
DECLARE_WAIT_QUEUE_HEAD(mpuirq_wait);
struct mpuirq_dev_data {
struct work_struct work;
struct i2c_client *mpu_client;
struct miscdevice *dev;
int irq;
int pid;
int accel_divider;
int data_ready;
int timeout;
};
static struct mpuirq_dev_data mpuirq_dev_data;
static struct mpuirq_data mpuirq_data;
static char *interface = MPUIRQ_NAME;
static void mpu_accel_data_work_fcn(struct work_struct *work);
/* open() for /dev/mpuirq: record the opener's pid in the (single,
 * global) device state and expose that state via private_data.
 * NOTE(review): no exclusion — a second opener silently overwrites the
 * recorded pid; confirm single-opener usage is intended. */
static int mpuirq_open(struct inode *inode, struct file *file)
{
	dev_dbg(mpuirq_dev_data.dev->this_device,
		"%s current->pid %d\n", __func__, current->pid);
	mpuirq_dev_data.pid = current->pid;
	file->private_data = &mpuirq_dev_data;
	/* we could do some checking on the flags supplied by "open" */
	/* i.e. O_NONBLOCK */
	/* -> set some flag to disable interruptible_sleep_on in mpuirq_read */
	return 0;
}
/* close function - called when the "file" /dev/mpuirq is closed in userspace */
/* close function - called when the "file" /dev/mpuirq is closed in
 * userspace; nothing to tear down beyond a debug trace. */
static int mpuirq_release(struct inode *inode, struct file *file)
{
	dev_dbg(mpuirq_dev_data.dev->this_device, "mpuirq_release\n");
	return 0;
}
/* read function called when from /dev/mpuirq is read */
/* read() for /dev/mpuirq: optionally wait (up to the configured
 * timeout) for interrupt data, then copy the mpuirq_data snapshot to
 * userspace.  Returns sizeof(mpuirq_data), 0 when no data is available
 * or the buffer is too small, or -EFAULT on a copy failure. */
static ssize_t mpuirq_read(struct file *file,
			   char *buf, size_t count, loff_t *ppos)
{
	int len, err;
	struct mpuirq_dev_data *p_mpuirq_dev_data = file->private_data;
	if (!mpuirq_dev_data.data_ready &&
		mpuirq_dev_data.timeout > 0) {
		/* Sleep until the IRQ handler flags data_ready or the
		 * timeout (jiffies) expires. */
		wait_event_interruptible_timeout(mpuirq_wait,
						 mpuirq_dev_data.
						 data_ready,
						 mpuirq_dev_data.timeout);
	}
	if (mpuirq_dev_data.data_ready && NULL != buf
	    && count >= sizeof(mpuirq_data)) {
		err = copy_to_user(buf, &mpuirq_data, sizeof(mpuirq_data));
		mpuirq_data.data_type = 0;
	} else {
		return 0;
	}
	/* err holds the number of bytes copy_to_user failed to copy. */
	if (err != 0) {
		dev_err(p_mpuirq_dev_data->dev->this_device,
			"Copy to user returned %d\n", err);
		return -EFAULT;
	}
	mpuirq_dev_data.data_ready = 0;
	len = sizeof(mpuirq_data);
	return len;
}
/* poll() for /dev/mpuirq: readable whenever the IRQ handler has
 * flagged fresh interrupt data. */
unsigned int mpuirq_poll(struct file *file, struct poll_table_struct *poll)
{
	unsigned int events = 0;

	poll_wait(file, &mpuirq_wait, poll);
	if (mpuirq_dev_data.data_ready)
		events = POLLIN | POLLRDNORM;
	return events;
}
/* ioctl - I/O control */
/* ioctl() for /dev/mpuirq: configure the read timeout and accel
 * divider, and query interrupt count / timestamp.  Returns 0,
 * -EFAULT on copy failure, or -EINVAL for unknown commands. */
static long mpuirq_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int retval = 0;
	int data;
	switch (cmd) {
	case MPUIRQ_SET_TIMEOUT:
		/* Timeout (in jiffies) used by mpuirq_read's wait. */
		mpuirq_dev_data.timeout = arg;
		break;
	case MPUIRQ_GET_INTERRUPT_CNT:
		/* Report interrupts seen since the last query, then
		 * reset the running count to 1. */
		data = mpuirq_data.interruptcount - 1;
		if (mpuirq_data.interruptcount > 1)
			mpuirq_data.interruptcount = 1;
		if (copy_to_user((int *) arg, &data, sizeof(int)))
			return -EFAULT;
		break;
	case MPUIRQ_GET_IRQ_TIME:
		/* Hand back the last IRQ timestamp and clear it. */
		if (copy_to_user((int *) arg, &mpuirq_data.irqtime,
				 sizeof(mpuirq_data.irqtime)))
			return -EFAULT;
		mpuirq_data.irqtime = 0;
		break;
	case MPUIRQ_SET_FREQUENCY_DIVIDER:
		/* Accel work is scheduled every (divider + 1) IRQs. */
		mpuirq_dev_data.accel_divider = arg;
		break;
	default:
		retval = -EINVAL;
	}
	return retval;
}
/* Workqueue function: read the slave accelerometer over its own I2C
 * adapter, fix up byte order, and push the sample (plus a semaphore
 * flag) into MPU memory at 0x0108.
 * NOTE(review): the cast from work_struct* to mpuirq_dev_data* is only
 * valid because 'work' is the FIRST member of struct mpuirq_dev_data;
 * container_of() would be the robust form. */
static void mpu_accel_data_work_fcn(struct work_struct *work)
{
	struct mpuirq_dev_data *mpuirq_dev_data =
	    (struct mpuirq_dev_data *) work;
	struct mldl_cfg *mldl_cfg =
	    (struct mldl_cfg *)
	    i2c_get_clientdata(mpuirq_dev_data->mpu_client);
	struct i2c_adapter *accel_adapter;
	unsigned char wbuff[16];
	unsigned char rbuff[16];
	int ii;
	accel_adapter = i2c_get_adapter(mldl_cfg->pdata->accel.adapt_num);
	mldl_cfg->accel->read(accel_adapter,
			      mldl_cfg->accel,
			      &mldl_cfg->pdata->accel, rbuff);
	/* @todo add other data formats here as well */
	if (EXT_SLAVE_BIG_ENDIAN == mldl_cfg->accel->endian) {
		/* Swap each 16-bit axis sample into little-endian order,
		 * offset by one byte in the write buffer. */
		for (ii = 0; ii < 3; ii++) {
			wbuff[2 * ii + 1] = rbuff[2 * ii + 1];
			wbuff[2 * ii + 2] = rbuff[2 * ii + 0];
		}
	} else {
		memcpy(wbuff + 1, rbuff, mldl_cfg->accel->len);
	}
	wbuff[7] = 0;
	wbuff[8] = 1;		/*set semaphore */
	mpu_memory_write(mpuirq_dev_data->mpu_client->adapter,
			 mldl_cfg->addr, 0x0108, 8, wbuff);
}
/* Top-half IRQ handler: timestamp the interrupt, flag data ready, and
 * every (accel_divider + 1) interrupts schedule the accelerometer work. */
static irqreturn_t mpuirq_handler(int irq, void *dev_id)
{
	static int mycount;	/* total interrupts seen by this handler */
	struct timeval irqtime;
	mycount++;

	mpuirq_data.interruptcount++;

	/* wake up (unblock) for reading data from userspace */
	/* and ignore first interrupt generated in module init */
	mpuirq_dev_data.data_ready = 1;

	/* Pack seconds into the high 32 bits and microseconds into the
	 * low half of the 64-bit irqtime exported to userspace. */
	do_gettimeofday(&irqtime);
	mpuirq_data.irqtime = (((long long) irqtime.tv_sec) << 32);
	mpuirq_data.irqtime += irqtime.tv_usec;

	/* accel_divider < 0 disables the deferred accelerometer read */
	if ((mpuirq_dev_data.accel_divider >= 0) &&
	    (0 == (mycount % (mpuirq_dev_data.accel_divider + 1)))) {
		schedule_work((struct work_struct
			       *) (&mpuirq_dev_data));
	}

	wake_up_interruptible(&mpuirq_wait);
	return IRQ_HANDLED;
}
/* File operations supported by /dev/mpuirq; the same ioctl handler is
 * wired up for both native and 32-bit-compat entry points. */
const struct file_operations mpuirq_fops = {
	.owner = THIS_MODULE,
	.read = mpuirq_read,
	.poll = mpuirq_poll,
#if HAVE_COMPAT_IOCTL
	.compat_ioctl = mpuirq_ioctl,
#endif
#if HAVE_UNLOCKED_IOCTL
	.unlocked_ioctl = mpuirq_ioctl,
#endif
	.open = mpuirq_open,
	.release = mpuirq_release,
};
/* Misc char device exposing the IRQ stream as /dev/<MPUIRQ_NAME> */
static struct miscdevice mpuirq_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = MPUIRQ_NAME,
	.fops = &mpuirq_fops,
};
/*
 * Set up the MPU interrupt path: initialize the shared device state,
 * request the (optional) IRQ line, and register the misc device.
 * Returns 0 on success or a negative errno.
 */
int mpuirq_init(struct i2c_client *mpu_client)
{
	int res;
	struct mldl_cfg *mldl_cfg =
	    (struct mldl_cfg *) i2c_get_clientdata(mpu_client);

	/* work_struct initialization */
	INIT_WORK((struct work_struct *) &mpuirq_dev_data,
		  mpu_accel_data_work_fcn);
	mpuirq_dev_data.mpu_client = mpu_client;

	dev_info(&mpu_client->adapter->dev,
		 "Module Param interface = %s\n", interface);

	/* Defaults: divider -1 disables the accel work; timeout 0 makes
	 * reads non-blocking until userspace sets one via ioctl. */
	mpuirq_dev_data.irq = mpu_client->irq;
	mpuirq_dev_data.pid = 0;
	mpuirq_dev_data.accel_divider = -1;
	mpuirq_dev_data.data_ready = 0;
	mpuirq_dev_data.timeout = 0;
	mpuirq_dev_data.dev = &mpuirq_device;

	if (mpuirq_dev_data.irq) {
		unsigned long flags;
		/* Edge polarity follows the platform's ACTL configuration */
		if (BIT_ACTL_LOW ==
		    ((mldl_cfg->pdata->int_config) & BIT_ACTL))
			flags = IRQF_TRIGGER_FALLING;
		else
			flags = IRQF_TRIGGER_RISING;
		/* &mpuirq_dev_data.irq is the dev_id cookie; it must match
		 * the free_irq() calls below and in mpuirq_exit(). */
		res =
		    request_irq(mpuirq_dev_data.irq, mpuirq_handler, flags,
				interface, &mpuirq_dev_data.irq);	//eugene.goh
		if (res) {
			dev_err(&mpu_client->adapter->dev,
				"myirqtest: cannot register IRQ %d\n",
				mpuirq_dev_data.irq);
		} else {
			res = misc_register(&mpuirq_device);
			if (res < 0) {
				/* Roll back the IRQ on registration failure */
				dev_err(&mpu_client->adapter->dev,
					"misc_register returned %d\n",
					res);
				free_irq(mpuirq_dev_data.irq,
					 &mpuirq_dev_data.irq);
			}
		}
	} else {
		/* No IRQ wired up: nothing to register, report success */
		res = 0;
	}
	return res;
}
/* Tear down the interrupt path set up by mpuirq_init(). */
void mpuirq_exit(void)
{
	int irq = mpuirq_dev_data.irq;

	/* The IRQ must be released before flushing the work it schedules */
	if (irq > 0)
		free_irq(irq, &mpuirq_dev_data.irq);
	flush_scheduled_work();

	dev_info(mpuirq_device.this_device, "Unregistering %s\n",
		 MPUIRQ_NAME);
	misc_deregister(&mpuirq_device);
}
module_param(interface, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(interface, "The Interface name");
| gpl-2.0 |
mcdope/pf-kernel | drivers/acpi/acpica/dsargs.c | 536 | 12104 | /******************************************************************************
*
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2015, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "amlcode.h"
#include "acdispat.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsargs")
/* Local prototypes */
static acpi_status
acpi_ds_execute_arguments(struct acpi_namespace_node *node,
struct acpi_namespace_node *scope_node,
u32 aml_length, u8 *aml_start);
/*******************************************************************************
*
* FUNCTION: acpi_ds_execute_arguments
*
* PARAMETERS: node - Object NS node
* scope_node - Parent NS node
* aml_length - Length of executable AML
* aml_start - Pointer to the AML
*
* RETURN: Status.
*
* DESCRIPTION: Late (deferred) execution of region or field arguments
*
******************************************************************************/
/*
 * Late (deferred) execution of region/field arguments: the AML at
 * aml_start is first parsed (load pass 1) and then executed, each time
 * with a fresh parse tree rooted at scope_node.  'node' is recorded on
 * each walk as the deferred node that owns the arguments.
 */
static acpi_status
acpi_ds_execute_arguments(struct acpi_namespace_node *node,
			  struct acpi_namespace_node *scope_node,
			  u32 aml_length, u8 *aml_start)
{
	acpi_status status;
	union acpi_parse_object *op;
	struct acpi_walk_state *walk_state;

	ACPI_FUNCTION_TRACE(ds_execute_arguments);

	/* Allocate a new parser op to be the root of the parsed tree */
	op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
	if (!op) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Save the Node for use in acpi_ps_parse_aml */
	op->common.node = scope_node;

	/* Create and initialize a new parser state */
	walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
	if (!walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
				       aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
	if (ACPI_FAILURE(status)) {
		/* init failed: the walk state was not consumed by the walk */
		acpi_ds_delete_walk_state(walk_state);
		goto cleanup;
	}

	/* Mark this parse as a deferred opcode */
	walk_state->parse_flags = ACPI_PARSE_DEFERRED_OP;
	walk_state->deferred_node = node;

	/* Pass1: Parse the entire declaration */
	status = acpi_ps_parse_aml(walk_state);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/* Get and init the Op created above, then discard the pass-1 tree */
	op->common.node = node;
	acpi_ps_delete_parse_tree(op);

	/* Evaluate the deferred arguments: second pass, execute mode */
	op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
	if (!op) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	op->common.node = scope_node;

	/* Create and initialize a new parser state */
	walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
	if (!walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/* Execute the opcode and arguments */
	status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
				       aml_length, NULL, ACPI_IMODE_EXECUTE);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		goto cleanup;
	}

	/* Mark this execution as a deferred opcode */
	walk_state->deferred_node = node;
	status = acpi_ps_parse_aml(walk_state);

cleanup:
	acpi_ps_delete_parse_tree(op);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_get_buffer_field_arguments
*
* PARAMETERS: obj_desc - A valid buffer_field object
*
* RETURN: Status.
*
* DESCRIPTION: Get buffer_field Buffer and Index. This implements the late
* evaluation of these field attributes.
*
******************************************************************************/
/*
 * Late evaluation of a buffer_field's Buffer and Index operands.
 * Returns AE_OK immediately when the arguments were already evaluated.
 */
acpi_status
acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
{
	struct acpi_namespace_node *node;
	union acpi_operand_object *extra_desc;

	ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);

	/* Nothing to do if the arguments were already evaluated */
	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
		return_ACPI_STATUS(AE_OK);
	}

	/* The deferred AML lives in the secondary (Extra) object */
	node = obj_desc->buffer_field.node;
	extra_desc = acpi_ns_get_secondary_object(obj_desc);

	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname(ACPI_TYPE_BUFFER_FIELD,
						      node, NULL));
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
			  acpi_ut_get_node_name(node)));

	/* Run the deferred AML that produces the Buffer and Index args */
	return_ACPI_STATUS(acpi_ds_execute_arguments(node, node->parent,
						     extra_desc->extra.
						     aml_length,
						     extra_desc->extra.
						     aml_start));
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_get_bank_field_arguments
*
* PARAMETERS: obj_desc - A valid bank_field object
*
* RETURN: Status.
*
* DESCRIPTION: Get bank_field bank_value. This implements the late
* evaluation of these field attributes.
*
******************************************************************************/
/*
 * Late evaluation of a bank_field's bank_value argument.
 *
 * obj_desc - A valid bank_field object.
 * Returns the status of the deferred AML execution.
 *
 * Fix: the original followed the argument execution with a call to
 * acpi_ut_add_address_range(obj_desc->region.space_id, ...).  obj_desc
 * is a bank_field here, so that read took the "region" arm of the
 * operand-object union on a non-region object and registered a garbage
 * address range.  Later ACPICA releases dropped the call for bank
 * fields; this version does the same and simply returns the execution
 * status.
 */
acpi_status
acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
{
	union acpi_operand_object *extra_desc;
	struct acpi_namespace_node *node;
	acpi_status status;

	ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc);

	/* Already evaluated? */
	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
		return_ACPI_STATUS(AE_OK);
	}

	/* Get the AML pointer (method object) and bank_field node */
	extra_desc = acpi_ns_get_secondary_object(obj_desc);
	node = obj_desc->bank_field.node;

	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
			(ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n",
			  acpi_ut_get_node_name(node)));

	/* Execute the AML code for the term_arg arguments */
	status = acpi_ds_execute_arguments(node, node->parent,
					   extra_desc->extra.aml_length,
					   extra_desc->extra.aml_start);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_get_buffer_arguments
*
* PARAMETERS: obj_desc - A valid Buffer object
*
* RETURN: Status.
*
* DESCRIPTION: Get Buffer length and initializer byte list. This implements
* the late evaluation of these attributes.
*
******************************************************************************/
/*
 * Late evaluation of a Buffer's length and initializer byte list.
 * Returns AE_OK when already evaluated, AE_AML_INTERNAL when the object
 * has lost its back-pointer to the namespace.
 */
acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
{
	struct acpi_namespace_node *buffer_node;

	ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);

	/* Arguments already evaluated for this buffer? */
	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
		return_ACPI_STATUS(AE_OK);
	}

	buffer_node = obj_desc->buffer.node;
	if (!buffer_node) {
		ACPI_ERROR((AE_INFO,
			    "No pointer back to namespace node in buffer object %p",
			    obj_desc));
		return_ACPI_STATUS(AE_AML_INTERNAL);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Buffer Arg Init\n"));

	/* Evaluate the deferred AML, scoped at the buffer's own node */
	return_ACPI_STATUS(acpi_ds_execute_arguments(buffer_node, buffer_node,
						     obj_desc->buffer.
						     aml_length,
						     obj_desc->buffer.
						     aml_start));
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_get_package_arguments
*
* PARAMETERS: obj_desc - A valid Package object
*
* RETURN: Status.
*
* DESCRIPTION: Get Package length and initializer byte list. This implements
* the late evaluation of these attributes.
*
******************************************************************************/
/*
 * Late evaluation of a Package's length and element list.
 * Returns AE_OK when already evaluated, AE_AML_INTERNAL when the object
 * has lost its back-pointer to the namespace.
 */
acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
{
	struct acpi_namespace_node *package_node;

	ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);

	/* Arguments already evaluated for this package? */
	if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
		return_ACPI_STATUS(AE_OK);
	}

	package_node = obj_desc->package.node;
	if (!package_node) {
		ACPI_ERROR((AE_INFO,
			    "No pointer back to namespace node in package %p",
			    obj_desc));
		return_ACPI_STATUS(AE_AML_INTERNAL);
	}

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Arg Init\n"));

	/* Evaluate the deferred AML, scoped at the package's own node */
	return_ACPI_STATUS(acpi_ds_execute_arguments(package_node,
						     package_node,
						     obj_desc->package.
						     aml_length,
						     obj_desc->package.
						     aml_start));
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_get_region_arguments
*
* PARAMETERS: obj_desc - A valid region object
*
* RETURN: Status.
*
* DESCRIPTION: Get region address and length. This implements the late
* evaluation of these region attributes.
*
******************************************************************************/
/*
 * Late evaluation of an operation region's address and length, followed
 * by registration of the resulting address range for conflict detection.
 */
acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
{
	struct acpi_namespace_node *node;
	acpi_status status;
	union acpi_operand_object *extra_desc;

	ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);

	/* Address/length already evaluated? */
	if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
		return_ACPI_STATUS(AE_OK);
	}

	/* The deferred AML lives in the secondary (Extra) object */
	extra_desc = acpi_ns_get_secondary_object(obj_desc);
	if (!extra_desc) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	/* Get the Region node */
	node = obj_desc->region.node;

	ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
			(ACPI_TYPE_REGION, node, NULL));
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
			  acpi_ut_get_node_name(node),
			  extra_desc->extra.aml_start));

	/* Execute the argument AML */
	status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node,
					   extra_desc->extra.aml_length,
					   extra_desc->extra.aml_start);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Track the now-known region span for overlap detection */
	status = acpi_ut_add_address_range(obj_desc->region.space_id,
					   obj_desc->region.address,
					   obj_desc->region.length, node);
	return_ACPI_STATUS(status);
}
| gpl-2.0 |
wgoossens/linux-nios2 | drivers/i2c/i2c-dev.c | 536 | 18521 | /*
i2c-dev.c - i2c-bus driver, char device interface
Copyright (C) 1995-97 Simon G. Vogl
Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl>
Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301 USA.
*/
/* Note that this is a complete rewrite of Simon Vogl's i2c-dev module.
But I have used so much of his original code and ideas that it seems
only fair to recognize him as co-author -- Frodo */
/* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
/*
* An i2c_dev represents an i2c_adapter ... an I2C or SMBus master, not a
* slave (i2c_client) with which messages will be exchanged. It's coupled
* with a character special file which is accessed by user mode drivers.
*
* The list of i2c_dev structures is parallel to the i2c_adapter lists
* maintained by the driver model, and is updated using bus notifications.
*/
/* One entry per registered adapter, linking the adapter to the char
 * device node created for it.  List membership is guarded by
 * i2c_dev_list_lock. */
struct i2c_dev {
	struct list_head list;		/* node in i2c_dev_list */
	struct i2c_adapter *adap;	/* adapter exposed via this minor */
	struct device *dev;		/* the created i2c-%d device */
};
#define I2C_MINORS 256
static LIST_HEAD(i2c_dev_list);
static DEFINE_SPINLOCK(i2c_dev_list_lock);
static struct i2c_dev *i2c_dev_get_by_minor(unsigned index)
{
struct i2c_dev *i2c_dev;
spin_lock(&i2c_dev_list_lock);
list_for_each_entry(i2c_dev, &i2c_dev_list, list) {
if (i2c_dev->adap->nr == index)
goto found;
}
i2c_dev = NULL;
found:
spin_unlock(&i2c_dev_list_lock);
return i2c_dev;
}
/*
 * Allocate an i2c_dev for 'adap' and add it to the global list.
 * Returns the new entry, or an ERR_PTR on minor exhaustion / OOM.
 */
static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap)
{
	struct i2c_dev *i2c_dev;

	/* Adapter numbers map 1:1 onto char-device minors */
	if (adap->nr >= I2C_MINORS) {
		printk(KERN_ERR "i2c-dev: Out of device minors (%d)\n",
		       adap->nr);
		return ERR_PTR(-ENODEV);
	}

	i2c_dev = kzalloc(sizeof(*i2c_dev), GFP_KERNEL);
	if (!i2c_dev)
		return ERR_PTR(-ENOMEM);
	i2c_dev->adap = adap;

	spin_lock(&i2c_dev_list_lock);
	list_add_tail(&i2c_dev->list, &i2c_dev_list);
	spin_unlock(&i2c_dev_list_lock);
	return i2c_dev;
}
/* Unlink an entry from the global list under the lock, then free it. */
static void return_i2c_dev(struct i2c_dev *entry)
{
	spin_lock(&i2c_dev_list_lock);
	list_del(&entry->list);
	spin_unlock(&i2c_dev_list_lock);

	kfree(entry);
}
/* sysfs 'name' attribute: report the underlying adapter's name */
static ssize_t name_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(dev->devt));

	if (!i2c_dev)
		return -ENODEV;
	return sprintf(buf, "%s\n", i2c_dev->adap->name);
}
static DEVICE_ATTR_RO(name);
/* Attribute group attached to every i2c-%d device (via dev_groups) */
static struct attribute *i2c_attrs[] = {
	&dev_attr_name.attr,
	NULL,
};
ATTRIBUTE_GROUPS(i2c);
/* ------------------------------------------------------------------------- */
/*
* After opening an instance of this character special file, a file
* descriptor starts out associated only with an i2c_adapter (and bus).
*
* Using the I2C_RDWR ioctl(), you can then *immediately* issue i2c_msg
* traffic to any devices on the bus used by that adapter. That's because
* the i2c_msg vectors embed all the addressing information they need, and
* are submitted directly to an i2c_adapter. However, SMBus-only adapters
* don't support that interface.
*
* To use read()/write() system calls on that file descriptor, or to use
* SMBus interfaces (and work with SMBus-only hosts!), you must first issue
* an I2C_SLAVE (or I2C_SLAVE_FORCE) ioctl. That configures an anonymous
* (never registered) i2c_client so it holds the addressing information
* needed by those system calls and by this SMBus interface.
*/
/*
 * read() on an i2c-dev fd: perform a master-receive from the currently
 * configured slave address and copy the result to userspace.  Returns
 * the number of bytes received or a negative errno.
 */
static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count,
		loff_t *offset)
{
	char *tmp;
	int ret;

	struct i2c_client *client = file->private_data;

	/* Cap a single transfer at 8 KiB */
	if (count > 8192)
		count = 8192;

	tmp = kmalloc(count, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	pr_debug("i2c-dev: i2c-%d reading %zu bytes.\n",
		iminor(file_inode(file)), count);

	ret = i2c_master_recv(client, tmp, count);
	/*
	 * Copy back only the bytes actually received.  The previous code
	 * copied the full 'count' even on a short transfer, leaking
	 * uninitialized kmalloc() heap memory to userspace.
	 */
	if (ret >= 0)
		ret = copy_to_user(buf, tmp, ret) ? -EFAULT : ret;
	kfree(tmp);
	return ret;
}
/*
 * write() on an i2c-dev fd: copy the user buffer in and perform a
 * master-send to the currently configured slave address.
 */
static ssize_t i2cdev_write(struct file *file, const char __user *buf,
		size_t count, loff_t *offset)
{
	struct i2c_client *client = file->private_data;
	char *kbuf;
	int ret;

	/* Cap a single transfer at 8 KiB */
	if (count > 8192)
		count = 8192;

	kbuf = memdup_user(buf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	pr_debug("i2c-dev: i2c-%d writing %zu bytes.\n",
		iminor(file_inode(file)), count);

	ret = i2c_master_send(client, kbuf, count);
	kfree(kbuf);
	return ret;
}
/* Busy-check callback: -EBUSY only when a client at the probed address
 * exists AND has a driver bound to it. */
static int i2cdev_check(struct device *dev, void *addrp)
{
	unsigned int addr = *(unsigned int *)addrp;
	struct i2c_client *client = i2c_verify_client(dev);

	if (client && client->addr == addr && dev->driver)
		return -EBUSY;
	return 0;
}
/* walk up mux tree: check this adapter's children, then recurse toward
 * the root adapter, stopping at the first busy result. */
static int i2cdev_check_mux_parents(struct i2c_adapter *adapter, int addr)
{
	struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
	int result;

	result = device_for_each_child(&adapter->dev, &addr, i2cdev_check);
	if (!result && parent)
		result = i2cdev_check_mux_parents(parent, addr);

	return result;
}
/* recurse down mux tree: adapters are descended into, leaf devices are
 * checked directly for a bound driver at the probed address. */
static int i2cdev_check_mux_children(struct device *dev, void *addrp)
{
	if (dev->type == &i2c_adapter_type)
		return device_for_each_child(dev, addrp,
					     i2cdev_check_mux_children);

	return i2cdev_check(dev, addrp);
}
/* This address checking function differs from the one in i2c-core
   in that it considers an address with a registered device, but no
   driver bound to it, as NOT busy. */
static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr)
{
	struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
	int result = 0;

	/* Cover both directions of a mux topology: ancestors first... */
	if (parent)
		result = i2cdev_check_mux_parents(parent, addr);

	/* ...then this adapter and everything below it */
	if (!result)
		result = device_for_each_child(&adapter->dev, &addr,
					       i2cdev_check_mux_children);

	return result;
}
/*
 * Handle the I2C_RDWR ioctl: copy a vector of i2c_msg descriptors (and
 * their buffers) in from userspace, run them as one combined transfer,
 * and copy any read buffers back out.  On any marshalling error all
 * kernel copies made so far are freed before returning.
 */
static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
		unsigned long arg)
{
	struct i2c_rdwr_ioctl_data rdwr_arg;
	struct i2c_msg *rdwr_pa;
	u8 __user **data_ptrs;	/* saved user buffer pointers, per msg */
	int i, res;

	if (copy_from_user(&rdwr_arg,
			   (struct i2c_rdwr_ioctl_data __user *)arg,
			   sizeof(rdwr_arg)))
		return -EFAULT;

	/* Put an arbitrary limit on the number of messages that can
	 * be sent at once */
	if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS)
		return -EINVAL;

	rdwr_pa = memdup_user(rdwr_arg.msgs,
			      rdwr_arg.nmsgs * sizeof(struct i2c_msg));
	if (IS_ERR(rdwr_pa))
		return PTR_ERR(rdwr_pa);

	data_ptrs = kmalloc(rdwr_arg.nmsgs * sizeof(u8 __user *), GFP_KERNEL);
	if (data_ptrs == NULL) {
		kfree(rdwr_pa);
		return -ENOMEM;
	}

	res = 0;
	for (i = 0; i < rdwr_arg.nmsgs; i++) {
		/* Limit the size of the message to a sane amount */
		if (rdwr_pa[i].len > 8192) {
			res = -EINVAL;
			break;
		}

		/* Swap the user pointer for a kernel copy of the buffer;
		 * the user pointer is kept in data_ptrs[] for copy-back. */
		data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
		rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
		if (IS_ERR(rdwr_pa[i].buf)) {
			res = PTR_ERR(rdwr_pa[i].buf);
			break;
		}

		/*
		 * If the message length is received from the slave (similar
		 * to SMBus block read), we must ensure that the buffer will
		 * be large enough to cope with a message length of
		 * I2C_SMBUS_BLOCK_MAX as this is the maximum underlying bus
		 * drivers allow. The first byte in the buffer must be
		 * pre-filled with the number of extra bytes, which must be
		 * at least one to hold the message length, but can be
		 * greater (for example to account for a checksum byte at
		 * the end of the message.)
		 */
		if (rdwr_pa[i].flags & I2C_M_RECV_LEN) {
			if (!(rdwr_pa[i].flags & I2C_M_RD) ||
			    rdwr_pa[i].buf[0] < 1 ||
			    rdwr_pa[i].len < rdwr_pa[i].buf[0] +
					     I2C_SMBUS_BLOCK_MAX) {
				res = -EINVAL;
				break;
			}

			rdwr_pa[i].len = rdwr_pa[i].buf[0];
		}
	}
	if (res < 0) {
		/* Marshalling failed: free the 0..i-1 buffers copied so far */
		int j;
		for (j = 0; j < i; ++j)
			kfree(rdwr_pa[j].buf);
		kfree(data_ptrs);
		kfree(rdwr_pa);
		return res;
	}

	res = i2c_transfer(client->adapter, rdwr_pa, rdwr_arg.nmsgs);
	/* Copy read data back and free each kernel buffer, last to first */
	while (i-- > 0) {
		if (res >= 0 && (rdwr_pa[i].flags & I2C_M_RD)) {
			if (copy_to_user(data_ptrs[i], rdwr_pa[i].buf,
					 rdwr_pa[i].len))
				res = -EFAULT;
		}
		kfree(rdwr_pa[i].buf);
	}
	kfree(data_ptrs);
	kfree(rdwr_pa);
	return res;
}
/*
 * Handle the I2C_SMBUS ioctl: validate the request, marshal the data
 * union across the user/kernel boundary, and run one SMBus transaction.
 */
static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
		unsigned long arg)
{
	struct i2c_smbus_ioctl_data data_arg;
	union i2c_smbus_data temp;
	int datasize, res;

	if (copy_from_user(&data_arg,
			   (struct i2c_smbus_ioctl_data __user *) arg,
			   sizeof(struct i2c_smbus_ioctl_data)))
		return -EFAULT;
	/* Reject transaction sizes this interface does not implement */
	if ((data_arg.size != I2C_SMBUS_BYTE) &&
	    (data_arg.size != I2C_SMBUS_QUICK) &&
	    (data_arg.size != I2C_SMBUS_BYTE_DATA) &&
	    (data_arg.size != I2C_SMBUS_WORD_DATA) &&
	    (data_arg.size != I2C_SMBUS_PROC_CALL) &&
	    (data_arg.size != I2C_SMBUS_BLOCK_DATA) &&
	    (data_arg.size != I2C_SMBUS_I2C_BLOCK_BROKEN) &&
	    (data_arg.size != I2C_SMBUS_I2C_BLOCK_DATA) &&
	    (data_arg.size != I2C_SMBUS_BLOCK_PROC_CALL)) {
		dev_dbg(&client->adapter->dev,
			"size out of range (%x) in ioctl I2C_SMBUS.\n",
			data_arg.size);
		return -EINVAL;
	}
	/* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1,
	   so the check is valid if size==I2C_SMBUS_QUICK too. */
	if ((data_arg.read_write != I2C_SMBUS_READ) &&
	    (data_arg.read_write != I2C_SMBUS_WRITE)) {
		dev_dbg(&client->adapter->dev,
			"read_write out of range (%x) in ioctl I2C_SMBUS.\n",
			data_arg.read_write);
		return -EINVAL;
	}

	/* Note that command values are always valid! */

	if ((data_arg.size == I2C_SMBUS_QUICK) ||
	    ((data_arg.size == I2C_SMBUS_BYTE) &&
	     (data_arg.read_write == I2C_SMBUS_WRITE)))
		/* These are special: we do not use data */
		return i2c_smbus_xfer(client->adapter, client->addr,
				      client->flags, data_arg.read_write,
				      data_arg.command, data_arg.size, NULL);

	if (data_arg.data == NULL) {
		dev_dbg(&client->adapter->dev,
			"data is NULL pointer in ioctl I2C_SMBUS.\n");
		return -EINVAL;
	}

	/* Pick the union arm that must cross the user/kernel boundary */
	if ((data_arg.size == I2C_SMBUS_BYTE_DATA) ||
	    (data_arg.size == I2C_SMBUS_BYTE))
		datasize = sizeof(data_arg.data->byte);
	else if ((data_arg.size == I2C_SMBUS_WORD_DATA) ||
		 (data_arg.size == I2C_SMBUS_PROC_CALL))
		datasize = sizeof(data_arg.data->word);
	else /* size == smbus block, i2c block, or block proc. call */
		datasize = sizeof(data_arg.data->block);

	/* Writes and proc-calls carry data INTO the kernel */
	if ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
	    (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
	    (data_arg.size == I2C_SMBUS_I2C_BLOCK_DATA) ||
	    (data_arg.read_write == I2C_SMBUS_WRITE)) {
		if (copy_from_user(&temp, data_arg.data, datasize))
			return -EFAULT;
	}
	if (data_arg.size == I2C_SMBUS_I2C_BLOCK_BROKEN) {
		/* Convert old I2C block commands to the new
		   convention. This preserves binary compatibility. */
		data_arg.size = I2C_SMBUS_I2C_BLOCK_DATA;
		if (data_arg.read_write == I2C_SMBUS_READ)
			temp.block[0] = I2C_SMBUS_BLOCK_MAX;
	}
	res = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
	      data_arg.read_write, data_arg.command, data_arg.size, &temp);
	/* Reads and proc-calls carry data back OUT to the user */
	if (!res && ((data_arg.size == I2C_SMBUS_PROC_CALL) ||
		     (data_arg.size == I2C_SMBUS_BLOCK_PROC_CALL) ||
		     (data_arg.read_write == I2C_SMBUS_READ))) {
		if (copy_to_user(data_arg.data, &temp, datasize))
			return -EFAULT;
	}
	return res;
}
/* Per-fd ioctl handler: slave address / flag configuration on the
 * anonymous client, plus the I2C_RDWR and I2C_SMBUS transfer paths. */
static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct i2c_client *client = file->private_data;
	unsigned long funcs;

	dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n",
		cmd, arg);

	switch (cmd) {
	case I2C_SLAVE:
	case I2C_SLAVE_FORCE:
		/* NOTE: devices set up to work with "new style" drivers
		 * can't use I2C_SLAVE, even when the device node is not
		 * bound to a driver.  Only I2C_SLAVE_FORCE will work.
		 *
		 * Setting the PEC flag here won't affect kernel drivers,
		 * which will be using the i2c_client node registered with
		 * the driver model core.  Likewise, when that client has
		 * the PEC flag already set, the i2c-dev driver won't see
		 * (or use) this setting.
		 */
		/* 7-bit addresses unless the fd is in 10-bit mode */
		if ((arg > 0x3ff) ||
		    (((client->flags & I2C_M_TEN) == 0) && arg > 0x7f))
			return -EINVAL;
		if (cmd == I2C_SLAVE && i2cdev_check_addr(client->adapter, arg))
			return -EBUSY;
		/* REVISIT: address could become busy later */
		client->addr = arg;
		return 0;
	case I2C_TENBIT:
		if (arg)
			client->flags |= I2C_M_TEN;
		else
			client->flags &= ~I2C_M_TEN;
		return 0;
	case I2C_PEC:
		if (arg)
			client->flags |= I2C_CLIENT_PEC;
		else
			client->flags &= ~I2C_CLIENT_PEC;
		return 0;
	case I2C_FUNCS:
		funcs = i2c_get_functionality(client->adapter);
		return put_user(funcs, (unsigned long __user *)arg);

	case I2C_RDWR:
		return i2cdev_ioctl_rdrw(client, arg);

	case I2C_SMBUS:
		return i2cdev_ioctl_smbus(client, arg);

	case I2C_RETRIES:
		client->adapter->retries = arg;
		break;
	case I2C_TIMEOUT:
		/* For historical reasons, user-space sets the timeout
		 * value in units of 10 ms.
		 */
		client->adapter->timeout = msecs_to_jiffies(arg * 10);
		break;
	default:
		/* NOTE: returning a fault code here could cause trouble
		 * in buggy userspace code.  Some old kernel bugs returned
		 * zero in this case, and userspace code might accidentally
		 * have depended on that bug.
		 */
		return -ENOTTY;
	}
	return 0;
}
/* open(): resolve the minor to an adapter, take a reference on it, and
 * attach an anonymous i2c_client to the file for later addressing. */
static int i2cdev_open(struct inode *inode, struct file *file)
{
	unsigned int minor = iminor(inode);
	struct i2c_client *client;
	struct i2c_adapter *adap;
	struct i2c_dev *i2c_dev;

	i2c_dev = i2c_dev_get_by_minor(minor);
	if (!i2c_dev)
		return -ENODEV;

	/* Pins the adapter (module refcount) for the lifetime of this fd */
	adap = i2c_get_adapter(i2c_dev->adap->nr);
	if (!adap)
		return -ENODEV;

	/* This creates an anonymous i2c_client, which may later be
	 * pointed to some address using I2C_SLAVE or I2C_SLAVE_FORCE.
	 *
	 * This client is ** NEVER REGISTERED ** with the driver model
	 * or I2C core code!!  It just holds private copies of addressing
	 * information and maybe a PEC flag.
	 */
	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client) {
		i2c_put_adapter(adap);
		return -ENOMEM;
	}
	snprintf(client->name, I2C_NAME_SIZE, "i2c-dev %d", adap->nr);

	client->adapter = adap;
	file->private_data = client;

	return 0;
}
/* release(): balance i2cdev_open() — drop the adapter reference and
 * free the anonymous client attached to this fd. */
static int i2cdev_release(struct inode *inode, struct file *file)
{
	struct i2c_client *anon_client = file->private_data;

	file->private_data = NULL;
	i2c_put_adapter(anon_client->adapter);
	kfree(anon_client);

	return 0;
}
/* File operations backing every /dev/i2c-%d node */
static const struct file_operations i2cdev_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= i2cdev_read,
	.write		= i2cdev_write,
	.unlocked_ioctl	= i2cdev_ioctl,
	.open		= i2cdev_open,
	.release	= i2cdev_release,
};
/* ------------------------------------------------------------------------- */
static struct class *i2c_dev_class;
/* Create the char-device bookkeeping and i2c-%d device node for a newly
 * added adapter; non-adapter devices are ignored. */
static int i2cdev_attach_adapter(struct device *dev, void *dummy)
{
	struct i2c_adapter *adap;
	struct i2c_dev *i2c_dev;
	int res;

	if (dev->type != &i2c_adapter_type)
		return 0;
	adap = to_i2c_adapter(dev);

	i2c_dev = get_free_i2c_dev(adap);
	if (IS_ERR(i2c_dev))
		return PTR_ERR(i2c_dev);

	/* register this i2c device with the driver core */
	i2c_dev->dev = device_create(i2c_dev_class, &adap->dev,
				     MKDEV(I2C_MAJOR, adap->nr), NULL,
				     "i2c-%d", adap->nr);
	if (IS_ERR(i2c_dev->dev)) {
		res = PTR_ERR(i2c_dev->dev);
		goto error;
	}

	pr_debug("i2c-dev: adapter [%s] registered as minor %d\n",
		 adap->name, adap->nr);
	return 0;
error:
	/* Roll back the list entry allocated above */
	return_i2c_dev(i2c_dev);
	return res;
}
/* Undo i2cdev_attach_adapter() when an adapter leaves the bus */
static int i2cdev_detach_adapter(struct device *dev, void *dummy)
{
	struct i2c_adapter *adap;
	struct i2c_dev *i2c_dev;

	if (dev->type != &i2c_adapter_type)
		return 0;
	adap = to_i2c_adapter(dev);

	i2c_dev = i2c_dev_get_by_minor(adap->nr);
	if (!i2c_dev) /* attach_adapter must have failed */
		return 0;

	return_i2c_dev(i2c_dev);
	device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));

	pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
	return 0;
}
/* Bus notifier: mirror adapter add/remove events into char-device
 * creation and teardown. */
static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	struct device *dev = data;
	int rc = 0;

	if (action == BUS_NOTIFY_ADD_DEVICE)
		rc = i2cdev_attach_adapter(dev, NULL);
	else if (action == BUS_NOTIFY_DEL_DEVICE)
		rc = i2cdev_detach_adapter(dev, NULL);

	return rc;
}
/* Registered on i2c_bus_type to track adapter hotplug */
static struct notifier_block i2cdev_notifier = {
	.notifier_call = i2cdev_notifier_call,
};
/* ------------------------------------------------------------------------- */
/*
* module load/unload record keeping
*/
/*
 * Module init: claim the I2C char major, create the device class,
 * subscribe to adapter hotplug, and attach to adapters that already
 * exist.  Unwinds in reverse order on failure.
 */
static int __init i2c_dev_init(void)
{
	int res;

	printk(KERN_INFO "i2c /dev entries driver\n");

	res = register_chrdev(I2C_MAJOR, "i2c", &i2cdev_fops);
	if (res)
		goto out;

	i2c_dev_class = class_create(THIS_MODULE, "i2c-dev");
	if (IS_ERR(i2c_dev_class)) {
		res = PTR_ERR(i2c_dev_class);
		goto out_unreg_chrdev;
	}
	i2c_dev_class->dev_groups = i2c_groups;

	/* Keep track of adapters which will be added or removed later */
	res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
	if (res)
		goto out_unreg_class;

	/* Bind to already existing adapters right away */
	i2c_for_each_dev(NULL, i2cdev_attach_adapter);

	return 0;

out_unreg_class:
	class_destroy(i2c_dev_class);
out_unreg_chrdev:
	unregister_chrdev(I2C_MAJOR, "i2c");
out:
	printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__);
	return res;
}
/* Module exit: teardown in reverse order of init — stop notifications
 * first so no new nodes appear, detach the remaining adapters, then
 * drop the class and the char major. */
static void __exit i2c_dev_exit(void)
{
	bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
	i2c_for_each_dev(NULL, i2cdev_detach_adapter);
	class_destroy(i2c_dev_class);
	unregister_chrdev(I2C_MAJOR, "i2c");
}
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
"Simon G. Vogl <simon@tk.uni-linz.ac.at>");
MODULE_DESCRIPTION("I2C /dev entries driver");
MODULE_LICENSE("GPL");
module_init(i2c_dev_init);
module_exit(i2c_dev_exit);
| gpl-2.0 |
Josemurillo/Core-Wow | dep/acelite/ace/TTY_IO.cpp | 536 | 18515 | // $Id: TTY_IO.cpp 91286 2010-08-05 09:04:31Z johnnyw $
#include "ace/TTY_IO.h"
#include "ace/OS_NS_errno.h"
#include "ace/OS_NS_string.h"
#include "ace/OS_NS_strings.h"
#if defined (ACE_HAS_TERMIOS)
# include "ace/os_include/os_termios.h"
#elif defined (ACE_HAS_TERMIO)
# include <termio.h>
#endif
// Parity-mode name strings matched (case-insensitively) against
// Serial_Params::paritymode in ACE_TTY_IO::control().
namespace
{
  const char ACE_TTY_IO_NONE[] = "none";
#if defined (ACE_HAS_TERMIOS) || defined (ACE_HAS_TERMIO) || defined (ACE_WIN32)
  const char ACE_TTY_IO_ODD[] = "odd";
  const char ACE_TTY_IO_EVEN[] = "even";
#endif
#if defined (ACE_WIN32)
  // Mark/space parity is only handled by the Win32 DCB code path.
  const char ACE_TTY_IO_MARK[] = "mark";
  const char ACE_TTY_IO_SPACE[] = "space";
#endif /* ACE_WIN32 */
}
ACE_BEGIN_VERSIONED_NAMESPACE_DECL
// Default-construct serial parameters: 9600 baud, 8 data bits, no
// parity, 1 stop bit, receiver on, no flow control, 10 s read timeout.
ACE_TTY_IO::Serial_Params::Serial_Params (void)
{
  baudrate = 9600;
  xonlim = 0;              // XON limit (negative would keep driver default on Win32)
  xofflim = 0;             // XOFF limit (same convention as xonlim)
  readmincharacters = 0;   // minimum chars per read (see control() VMIN handling)
  readtimeoutmsec = 10000; // 10 second read timeout
  paritymode = ACE_TTY_IO_NONE;
  ctsenb = false;          // no CTS output flow control
  rtsenb = 0;              // RTS disabled; int because Win32 supports modes 0..3
  xinenb = false;          // no XON/XOFF on input
  xoutenb = false;         // no XON/XOFF on output
  modem = false;           // treat as a local (non-modem) device
  rcvenb = true;           // receiver enabled
  dsrenb = false;          // no DSR flow control
  dtrdisable = false;      // keep DTR asserted
  databits = 8;
  stopbits = 1;
}
// Interface for reading/writing serial device parameters.
//
// cmd is SETPARAMS to push *arg to the device, or GETPARAMS to read the
// current settings back into *arg (GETPARAMS is only implemented on the
// Win32 branch).  Returns 0 on success, -1 on failure (unsupported
// platform, unsupported value, or a failed OS call).
int ACE_TTY_IO::control (Control_Mode cmd, Serial_Params *arg) const
{
#if defined (ACE_HAS_TERMIOS) || defined (ACE_HAS_TERMIO)
  // Fetch the current device parameters.  Note: the lone "return -1"
  // after the #endif is the body of whichever if() was compiled in, or
  // is unconditional in the ENOSYS fallback case.
#if defined (ACE_HAS_TERMIOS)
  struct termios devpar;
  speed_t newbaudrate = 0;
  if (tcgetattr (get_handle () , &devpar) == -1)
#elif defined (TCGETS)
  struct termios devpar;
  unsigned int newbaudrate = 0;
  if (this->ACE_IO_SAP::control (TCGETS, static_cast<void*>(&devpar)) == -1)
#elif defined (TCGETA)
  struct termio devpar;
  unsigned int newbaudrate = 0;
  if (this->ACE_IO_SAP::control (TCGETA, static_cast<void*>(&devpar)) == -1)
#else
  errno = ENOSYS;
#endif /* ACE_HAS_TERMIOS */
    return -1;

  switch (cmd)
    {
    case SETPARAMS:
      // Map the numeric baud rate onto the platform's Bxxx constant;
      // rates the platform does not define fall through to failure.
      switch (arg->baudrate)
        {
#if defined (B0)
        case 0: newbaudrate = B0; break;
#endif /* B0 */
#if defined (B50)
        case 50: newbaudrate = B50; break;
#endif /* B50 */
#if defined (B75)
        case 75: newbaudrate = B75; break;
#endif /* B75 */
#if defined (B110)
        case 110: newbaudrate = B110; break;
#endif /* B110 */
#if defined (B134)
        case 134: newbaudrate = B134; break;
#endif /* B134 */
#if defined (B150)
        case 150: newbaudrate = B150; break;
#endif /* B150 */
#if defined (B200)
        case 200: newbaudrate = B200; break;
#endif /* B200 */
#if defined (B300)
        case 300: newbaudrate = B300; break;
#endif /* B300 */
#if defined (B600)
        case 600: newbaudrate = B600; break;
#endif /* B600 */
#if defined (B1200)
        case 1200: newbaudrate = B1200; break;
#endif /* B1200 */
#if defined (B1800)
        case 1800: newbaudrate = B1800; break;
#endif /* B1800 */
#if defined (B2400)
        case 2400: newbaudrate = B2400; break;
#endif /* B2400 */
#if defined (B4800)
        case 4800: newbaudrate = B4800; break;
#endif /* B4800 */
#if defined (B9600)
        case 9600: newbaudrate = B9600; break;
#endif /* B9600 */
#if defined (B19200)
        case 19200: newbaudrate = B19200; break;
#endif /* B19200 */
#if defined (B38400)
        case 38400: newbaudrate = B38400; break;
#endif /* B38400 */
#if defined (B56000)
        case 56000: newbaudrate = B56000; break;
#endif /* B56000 */
#if defined (B57600)
        case 57600: newbaudrate = B57600; break;
#endif /* B57600 */
#if defined (B76800)
        case 76800: newbaudrate = B76800; break;
#endif /* B76800 */
#if defined (B115200)
        case 115200: newbaudrate = B115200; break;
#endif /* B115200 */
#if defined (B128000)
        case 128000: newbaudrate = B128000; break;
#endif /* B128000 */
#if defined (B153600)
        case 153600: newbaudrate = B153600; break;
#endif /* B153600 */
#if defined (B230400)
        case 230400: newbaudrate = B230400; break;
#endif /* B230400 */
#if defined (B307200)
        case 307200: newbaudrate = B307200; break;
#endif /* B307200 */
#if defined (B256000)
        case 256000: newbaudrate = B256000; break;
#endif /* B256000 */
#if defined (B460800)
        case 460800: newbaudrate = B460800; break;
#endif /* B460800 */
#if defined (B500000)
        case 500000: newbaudrate = B500000; break;
#endif /* B500000 */
#if defined (B576000)
        case 576000: newbaudrate = B576000; break;
#endif /* B576000 */
#if defined (B921600)
        case 921600: newbaudrate = B921600; break;
#endif /* B921600 */
#if defined (B1000000)
        case 1000000: newbaudrate = B1000000; break;
#endif /* B1000000 */
#if defined (B1152000)
        case 1152000: newbaudrate = B1152000; break;
#endif /* B1152000 */
#if defined (B1500000)
        case 1500000: newbaudrate = B1500000; break;
#endif /* B1500000 */
#if defined (B2000000)
        case 2000000: newbaudrate = B2000000; break;
#endif /* B2000000 */
#if defined (B2500000)
        case 2500000: newbaudrate = B2500000; break;
#endif /* B2500000 */
#if defined (B3000000)
        case 3000000: newbaudrate = B3000000; break;
#endif /* B3000000 */
#if defined (B3500000)
        case 3500000: newbaudrate = B3500000; break;
#endif /* B3500000 */
#if defined (B4000000)
        case 4000000: newbaudrate = B4000000; break;
#endif /* B4000000 */
        default:
          return -1;
        }

#if defined (ACE_HAS_TERMIOS)
      // Can you really have different input and output baud rates?!
      if (cfsetospeed (&devpar, newbaudrate) == -1)
        return -1;
      if (cfsetispeed (&devpar, newbaudrate) == -1)
        return -1;
#else
      // Legacy termio: baud rate is encoded directly in c_cflag.
      devpar.c_cflag &= ~CBAUD;
# if defined (CBAUDEX)
      devpar.c_cflag &= ~CBAUDEX;
# endif /* CBAUDEX */
      devpar.c_cflag |= newbaudrate;
#endif /* ACE_HAS_TERMIOS */

      // Character size.
      devpar.c_cflag &= ~CSIZE;
      switch (arg->databits)
        {
        case 5:
          devpar.c_cflag |= CS5;
          break;
        case 6:
          devpar.c_cflag |= CS6;
          break;
        case 7:
          devpar.c_cflag |= CS7;
          break;
        case 8:
          devpar.c_cflag |= CS8;
          break;
        default:
          return -1;
        }

      // Stop bits.
      switch (arg->stopbits)
        {
        case 1:
          devpar.c_cflag &= ~CSTOPB;
          break;
        case 2:
          devpar.c_cflag |= CSTOPB;
          break;
        default:
          return -1;
        }

      // Parity: "odd", "even", "none"; a null pointer means none.
      if (arg->paritymode)
        {
          if (ACE_OS::strcasecmp (arg->paritymode, ACE_TTY_IO_ODD) == 0)
            {
              devpar.c_cflag |= PARENB;
              devpar.c_cflag |= PARODD;
            }
          else if (ACE_OS::strcasecmp (arg->paritymode, ACE_TTY_IO_EVEN) == 0)
            {
              devpar.c_cflag |= PARENB;
              devpar.c_cflag &= ~PARODD;
            }
          else if (ACE_OS::strcasecmp (arg->paritymode, ACE_TTY_IO_NONE) == 0)
            devpar.c_cflag &= ~PARENB;
          else
            return -1;
        }
      else
        {
          devpar.c_cflag &= ~PARENB;
        }

      // Hardware (RTS/CTS) flow control, whichever flag the platform has.
#if defined (CNEW_RTSCTS)
      if ((arg->ctsenb) || (arg->rtsenb)) // Enable RTS/CTS protocol
        devpar.c_cflag |= CNEW_RTSCTS;
      else
        devpar.c_cflag &= ~CNEW_RTSCTS;
#elif defined (CRTSCTS)
      if ((arg->ctsenb) || (arg->rtsenb)) // Enable RTS/CTS protocol
        devpar.c_cflag |= CRTSCTS;
      else
        devpar.c_cflag &= ~CRTSCTS;
#endif /* NEW_RTSCTS || CRTSCTS */

#if defined (CREAD)
      // Enable/disable receiver
      if (arg->rcvenb)
        devpar.c_cflag |= CREAD;
      else
        devpar.c_cflag &= ~CREAD;
#endif /* CREAD */

#if defined (HUPCL)
      // Cause DTR to drop after port close.
      devpar.c_cflag |= HUPCL;
#endif /* HUPCL */

#if defined (CLOCAL)
      // If device is not a modem set to local device.
      if (arg->modem)
        devpar.c_cflag &= ~CLOCAL;
      else
        devpar.c_cflag |= CLOCAL;
#endif /* CLOCAL */

      // Input modes: ignore bytes with parity errors, enable parity
      // checking; strip the 8th bit when using fewer than 8 data bits.
      devpar.c_iflag = IGNPAR | INPCK;
      if (arg->databits < 8)
        devpar.c_iflag |= ISTRIP;

#if defined (IGNBRK)
      // If device is not a modem set to ignore break points
      if(arg->modem)
        devpar.c_iflag &= ~IGNBRK;
      else
        devpar.c_iflag |= IGNBRK;
#endif /* IGNBRK */

#if defined (IXOFF)
      // Enable/disable software flow control on input
      if (arg->xinenb)
        devpar.c_iflag |= IXOFF;
      else
        devpar.c_iflag &= ~IXOFF;
#endif /* IXOFF */

#if defined (IXON)
      // Enable/disable software flow control on output
      if (arg->xoutenb)
        devpar.c_iflag |= IXON;
      else
        devpar.c_iflag &= ~IXON;
#endif /* IXON */

#if defined (ICANON)
      // Enable noncanonical input processing mode
      devpar.c_lflag &= ~ICANON;
#endif /* ICANON */

#if defined (ECHO)
      // Disable echoing of input characters
      devpar.c_lflag &= ~ECHO;
#endif /* ECHO */

#if defined (ECHOE)
      // Disable echoing erase character as BS-SP-BS
      devpar.c_lflag &= ~ECHOE;
#endif /* ECHOE */

#if defined (ISIG)
      // Disable SIGINTR, SIGSUSP, SIGDSUSP and SIGQUIT signals
      devpar.c_lflag &= ~ISIG;
#endif /* ISIG */

#if defined (OPOST)
      // Disable post-processing of output data
      devpar.c_oflag &= ~OPOST;
#endif /* OPOST */

      // Read timeouts: VTIME is in tenths of a second, VMIN is the
      // minimum number of characters before read() may return.
      if (arg->readtimeoutmsec < 0)
        {
          // Settings for infinite timeout.
          devpar.c_cc[VTIME] = 0;
          // In case of infinite timeout [VMIN] must be at least 1.
          if (arg->readmincharacters > UCHAR_MAX)
            devpar.c_cc[VMIN] = UCHAR_MAX;
          else if (arg->readmincharacters < 1)
            devpar.c_cc[VMIN] = 1;
          else
            devpar.c_cc[VMIN] = static_cast<unsigned char>(arg->readmincharacters);
        }
      else
        {
          devpar.c_cc[VTIME] = static_cast<unsigned char>(arg->readtimeoutmsec / 100);
          if (arg->readmincharacters > UCHAR_MAX)
            devpar.c_cc[VMIN] = UCHAR_MAX;
          else if (arg->readmincharacters < 1)
            devpar.c_cc[VMIN] = 0;
          else
            devpar.c_cc[VMIN] = static_cast<unsigned char>(arg->readmincharacters);
        }

#if defined (TIOCMGET)
      // Raise or drop DTR via the modem-control lines.
      int status;
      this->ACE_IO_SAP::control (TIOCMGET, &status);

      if (arg->dtrdisable)
        status &= ~TIOCM_DTR;
      else
        status |= TIOCM_DTR;

      this->ACE_IO_SAP::control (TIOCMSET, &status);
#endif /* defined (TIOCMGET) */

      // Commit the new parameters to the device immediately.
#if defined (ACE_HAS_TERMIOS)
      return tcsetattr (get_handle (), TCSANOW, &devpar);
#elif defined (TCSETS)
      return this->ACE_IO_SAP::control (TCSETS, static_cast<void*>(&devpar));
#elif defined (TCSETA)
      return this->ACE_IO_SAP::control (TCSETA, static_cast<void*>(&devpar));
#else
      errno = ENOSYS;
      return -1;
#endif /* ACE_HAS_TERMIOS */

    case GETPARAMS:
      return -1; // Not yet implemented.
    default:
      return -1; // Wrong cmd.
    }
#elif defined (ACE_WIN32)
  // Win32 branch: work with a DCB plus COMMTIMEOUTS.
  DCB dcb;
  dcb.DCBlength = sizeof dcb;
  if (!::GetCommState (this->get_handle (), &dcb))
    {
      ACE_OS::set_errno_to_last_error ();
      return -1;
    }

  COMMTIMEOUTS timeouts;
  if (!::GetCommTimeouts (this->get_handle(), &timeouts))
    {
      ACE_OS::set_errno_to_last_error ();
      return -1;
    }

  switch (cmd)
    {
    case SETPARAMS:
      dcb.BaudRate = arg->baudrate;

      switch (arg->databits)
        {
        case 4:
        case 5:
        case 6:
        case 7:
        case 8:
          dcb.ByteSize = arg->databits;
          break;
        default:
          return -1;
        }

      switch (arg->stopbits)
        {
        case 1:
          dcb.StopBits = ONESTOPBIT;
          break;
        case 2:
          dcb.StopBits = TWOSTOPBITS;
          break;
        default:
          return -1;
        }

      // Win32 additionally supports mark and space parity.
      if (arg->paritymode)
        {
          dcb.fParity = TRUE;
          if (ACE_OS::strcasecmp (arg->paritymode, ACE_TTY_IO_ODD) == 0)
            dcb.Parity = ODDPARITY;
          else if (ACE_OS::strcasecmp (arg->paritymode, ACE_TTY_IO_EVEN) == 0)
            dcb.Parity = EVENPARITY;
          else if (ACE_OS::strcasecmp (arg->paritymode, ACE_TTY_IO_NONE) == 0)
            dcb.Parity = NOPARITY;
          else if (ACE_OS::strcasecmp (arg->paritymode, ACE_TTY_IO_MARK) == 0)
            dcb.Parity = MARKPARITY;
          else if (ACE_OS::strcasecmp (arg->paritymode, ACE_TTY_IO_SPACE) == 0)
            dcb.Parity = SPACEPARITY;
          else
            return -1;
        }
      else
        {
          dcb.fParity = FALSE;
          dcb.Parity = NOPARITY;
        }

      // Enable/disable RTS protocol.
      switch (arg->rtsenb)
        {
        case 1:
          dcb.fRtsControl = RTS_CONTROL_ENABLE;
          break;
        case 2:
          dcb.fRtsControl = RTS_CONTROL_HANDSHAKE;
          break;
        case 3:
          dcb.fRtsControl = RTS_CONTROL_TOGGLE;
          break;
        default:
          dcb.fRtsControl = RTS_CONTROL_DISABLE;
        }

      // Enable/disable CTS protocol.
      if (arg->ctsenb)
        dcb.fOutxCtsFlow = TRUE;
      else
        dcb.fOutxCtsFlow = FALSE;

      // Enable/disable DSR protocol.
      if (arg->dsrenb)
        dcb.fOutxDsrFlow = TRUE;
      else
        dcb.fOutxDsrFlow = FALSE;

      // Disable/enable DTR protocol
      if (arg->dtrdisable)
        dcb.fDtrControl = DTR_CONTROL_DISABLE;
      else
        dcb.fDtrControl = DTR_CONTROL_ENABLE;

      // Enable/disable software flow control on input
      if (arg->xinenb)
        dcb.fInX = TRUE;
      else
        dcb.fInX = FALSE;

      // Enable/disable software flow control on output
      if (arg->xoutenb)
        dcb.fOutX = TRUE;
      else
        dcb.fOutX = FALSE;

      // Always set limits unless set to negative to use default.
      if (arg->xonlim >= 0)
        dcb.XonLim = static_cast<WORD>(arg->xonlim);
      if (arg->xofflim >= 0)
        dcb.XoffLim = static_cast<WORD>(arg->xofflim);

      dcb.fAbortOnError = FALSE;
      dcb.fErrorChar = FALSE;
      dcb.fNull = FALSE;
      dcb.fBinary = TRUE;

      if (!::SetCommState (this->get_handle (), &dcb))
        {
          ACE_OS::set_errno_to_last_error ();
          return -1;
        }

      // Map readtimeoutmsec onto the Win32 COMMTIMEOUTS model.
      if (arg->readtimeoutmsec < 0)
        {
          // Settings for infinite timeout.
          timeouts.ReadIntervalTimeout = 0;
          timeouts.ReadTotalTimeoutMultiplier = 0;
          timeouts.ReadTotalTimeoutConstant = 0;
        }
      else if (arg->readtimeoutmsec == 0)
        {
          // Return immediately if no data in the input buffer.
          timeouts.ReadIntervalTimeout = MAXDWORD;
          timeouts.ReadTotalTimeoutMultiplier = 0;
          timeouts.ReadTotalTimeoutConstant = 0;
        }
      else
        {
          // Wait for specified timeout for char to arrive before returning.
          timeouts.ReadIntervalTimeout = MAXDWORD;
          timeouts.ReadTotalTimeoutMultiplier = MAXDWORD;
          timeouts.ReadTotalTimeoutConstant = arg->readtimeoutmsec;
        }

      if (!::SetCommTimeouts (this->get_handle (), &timeouts))
        {
          ACE_OS::set_errno_to_last_error ();
          return -1;
        }

      return 0;

    case GETPARAMS:
      // Translate the current DCB/COMMTIMEOUTS back into *arg.
      arg->baudrate = dcb.BaudRate;

      switch (dcb.ByteSize)
        {
        case 4:
        case 5:
        case 6:
        case 7:
        case 8:
          arg->databits = dcb.ByteSize;
          break;
        default:
          return -1;
        }

      switch (dcb.StopBits)
        {
        case ONESTOPBIT:
          arg->stopbits = 1;
          break;
        case TWOSTOPBITS:
          arg->stopbits = 2;
          break;
        default:
          return -1;
        }

      if (!dcb.fParity)
        {
          arg->paritymode = ACE_TTY_IO_NONE;
        }
      else
        {
          switch (dcb.Parity)
            {
            case ODDPARITY:
              arg->paritymode = ACE_TTY_IO_ODD;
              break;
            case EVENPARITY:
              arg->paritymode = ACE_TTY_IO_EVEN;
              break;
            case NOPARITY:
              arg->paritymode = ACE_TTY_IO_NONE;
              break;
            case MARKPARITY:
              arg->paritymode = ACE_TTY_IO_MARK;
              break;
            case SPACEPARITY:
              arg->paritymode = ACE_TTY_IO_SPACE;
              break;
            default:
              return -1;
            }
        }

      // Enable/disable RTS protocol.
      switch (dcb.fRtsControl)
        {
        case RTS_CONTROL_ENABLE:
          arg->rtsenb = 1;
          break;
        case RTS_CONTROL_HANDSHAKE:
          arg->rtsenb = 2;
          break;
        case RTS_CONTROL_TOGGLE:
          arg->rtsenb = 3;
          break;
        case RTS_CONTROL_DISABLE:
          arg->rtsenb = 0;
          break;
        default:
          return -1;
        }

      // Enable/disable CTS protocol.
      if (dcb.fOutxCtsFlow)
        arg->ctsenb = true;
      else
        arg->ctsenb = false;

      // Enable/disable DSR protocol.
      if (dcb.fOutxDsrFlow)
        arg->dsrenb = true;
      else
        arg->dsrenb = false;

      // Disable/enable DTR protocol
      // Attention: DTR_CONTROL_HANDSHAKE is not supported.
      switch (dcb.fDtrControl)
        {
        case DTR_CONTROL_DISABLE:
          arg->dtrdisable = true;
          break;
        case DTR_CONTROL_ENABLE:
          arg->dtrdisable = false;
          break;
        default:
          return -1;
        }

      // Enable/disable software flow control on input
      if (dcb.fInX)
        arg->xinenb = true;
      else
        arg->xinenb = false;

      // Enable/disable software flow control on output
      if (dcb.fOutX)
        arg->xoutenb = true;
      else
        arg->xoutenb = false;

      arg->xonlim = static_cast<int>(dcb.XonLim);
      arg->xofflim = static_cast<int>(dcb.XoffLim);

      // All-zero read timeouts were written by the "infinite" case above.
      if (timeouts.ReadIntervalTimeout == 0 &&
          timeouts.ReadTotalTimeoutMultiplier == 0 &&
          timeouts.ReadTotalTimeoutConstant == 0)
        arg->readtimeoutmsec = -1;
      else
        arg->readtimeoutmsec = timeouts.ReadTotalTimeoutConstant;

      return 0;

    default:
      return -1; // Wrong cmd.

    } // arg switch
#else
  ACE_UNUSED_ARG (cmd);
  ACE_UNUSED_ARG (arg);
  ACE_NOTSUP_RETURN (-1);
#endif /* ACE_HAS_TERMIOS || ACE_HAS_TERMIO */
}
#if defined (ACE_NEEDS_DEV_IO_CONVERSION)
// Explicit conversion to the ACE_DEV_IO base, for platforms whose
// compilers need help with the derived-to-base conversion.
ACE_TTY_IO::operator ACE_DEV_IO &()
{
  return static_cast<ACE_DEV_IO &>(*this);
}
#endif /* ACE_NEEDS_DEV_IO_CONVERSION */
ACE_END_VERSIONED_NAMESPACE_DECL
| gpl-2.0 |
chrisch1974/htc7x30-2.6-flyer | arch/ia64/sn/kernel/irq.c | 1048 | 13753 | /*
* Platform dependent support for SGI SN
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000-2008 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_feature_sets.h>
/* Forward declarations for the lost-interrupt workaround helpers. */
static void force_interrupt(int irq);
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);

/* Non-zero: sn_end_irq() re-forces interrupts that may have been lost. */
int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited;
/* Per-irq lists of sn_irq_info entries; allocated in sn_irq_lh_init(). */
struct list_head **sn_irq_lh;
static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
/*
 * Ask the PROM (via SAL) to allocate an interrupt targeted at the
 * requested nasid/slice.  Returns the SAL status (0 on success).
 */
u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
		  struct sn_irq_info *sn_irq_info,
		  int req_irq, nasid_t req_nasid,
		  int req_slice)
{
	struct ia64_sal_retval rv;

	rv.status = 0;
	rv.v0 = 0;

	SAL_CALL_NOLOCK(rv, (u64) SN_SAL_IOIF_INTERRUPT,
			(u64) SAL_INTR_ALLOC, (u64) local_nasid,
			(u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
			(u64) req_nasid, (u64) req_slice);

	return rv.status;
}
/* Tell the PROM (via SAL) to release a previously allocated interrupt. */
void sn_intr_free(nasid_t local_nasid, int local_widget,
		  struct sn_irq_info *sn_irq_info)
{
	struct ia64_sal_retval rv;

	rv.status = 0;
	rv.v0 = 0;

	SAL_CALL_NOLOCK(rv, (u64) SN_SAL_IOIF_INTERRUPT,
			(u64) SAL_INTR_FREE, (u64) local_nasid,
			(u64) local_widget, (u64) sn_irq_info->irq_irq,
			(u64) sn_irq_info->irq_cookie, 0, 0);
}
/*
 * Ask the PROM (via SAL) to redirect an existing interrupt to a new
 * nasid/slice.  Returns the SAL status (0 on success).
 */
u64 sn_intr_redirect(nasid_t local_nasid, int local_widget,
		     struct sn_irq_info *sn_irq_info,
		     nasid_t req_nasid, int req_slice)
{
	struct ia64_sal_retval rv;

	rv.status = 0;
	rv.v0 = 0;

	SAL_CALL_NOLOCK(rv, (u64) SN_SAL_IOIF_INTERRUPT,
			(u64) SAL_INTR_REDIRECT, (u64) local_nasid,
			(u64) local_widget, __pa(sn_irq_info),
			(u64) req_nasid, (u64) req_slice, 0);

	return rv.status;
}
/* No startup action needed for SN hub irqs; required irq_chip hook. */
static unsigned int sn_startup_irq(unsigned int irq)
{
	return 0;
}
/* No shutdown action needed for SN hub irqs; required irq_chip hook. */
static void sn_shutdown_irq(unsigned int irq)
{
}
extern void ia64_mca_register_cpev(int);

/*
 * Disabling the CPE (corrected platform error) vector's irq
 * deregisters the CPE vector; other irqs need no disable action.
 */
static void sn_disable_irq(unsigned int irq)
{
	if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
		ia64_mca_register_cpev(0);
}
/* Re-register the CPE vector when its irq is enabled; no-op otherwise. */
static void sn_enable_irq(unsigned int irq)
{
	if (irq == local_vector_to_irq(IA64_CPE_VECTOR))
		ia64_mca_register_cpev(irq);
}
/*
 * Acknowledge an interrupt: clear the asserted bits in the SHub
 * EVENT_OCCURRED register via its write-to-clear ALIAS, and mark the
 * vector as in-service for the lost-interrupt check in sn_check_intr().
 */
static void sn_ack_irq(unsigned int irq)
{
	u64 event_occurred, mask;

	irq = irq & 0xff;	/* irq number is the vector number on sn2 */
	event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED));
	mask = event_occurred & SH_ALL_INT_MASK;
	HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask);
	__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);

	move_native_irq(irq);
}
/*
 * End-of-interrupt handling: clear the in-service bit and, when the
 * workaround flag is set, re-force any interrupt that may have been
 * lost while this one was being handled.
 */
static void sn_end_irq(unsigned int irq)
{
	int ivec;
	u64 event_occurred;

	ivec = irq & 0xff;
	if (ivec == SGI_UART_VECTOR) {
		event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED));
		/* If the UART bit is set here, we may have received an
		 * interrupt from the UART that the driver missed.  To
		 * make sure, we IPI ourselves to force us to look again.
		 */
		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
			platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
					  IA64_IPI_DM_INT, 0);
		}
	}
	__clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
	if (sn_force_interrupt_flag)
		force_interrupt(irq);
}
static void sn_irq_info_free(struct rcu_head *head);

/*
 * Retarget an interrupt to a new nasid/slice.  Preferably done through
 * the PROM's SAL_INTR_REDIRECT service; PROMs without it fall back to
 * freeing and re-allocating the interrupt, replacing the sn_irq_info
 * on its list under RCU.  Returns the (possibly new) sn_irq_info, or
 * NULL on failure or when the irq is not a device interrupt.
 */
struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
				       nasid_t nasid, int slice)
{
	int vector;
	int cpuid;
#ifdef CONFIG_SMP
	int cpuphys;
#endif
	int64_t bridge;
	int local_widget, status;
	nasid_t local_nasid;
	struct sn_irq_info *new_irq_info;
	struct sn_pcibus_provider *pci_provider;

	bridge = (u64) sn_irq_info->irq_bridge;
	if (!bridge) {
		return NULL; /* irq is not a device interrupt */
	}

	local_nasid = NASID_GET(bridge);

	/* Odd nasids use the TIO widget numbering scheme. */
	if (local_nasid & 1)
		local_widget = TIO_SWIN_WIDGETNUM(bridge);
	else
		local_widget = SWIN_WIDGETNUM(bridge);
	vector = sn_irq_info->irq_irq;

	/* Make use of SAL_INTR_REDIRECT if PROM supports it */
	status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info, nasid, slice);
	if (!status) {
		new_irq_info = sn_irq_info;
		goto finish_up;
	}

	/*
	 * PROM does not support SAL_INTR_REDIRECT, or it failed.
	 * Revert to old method.
	 */
	new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
	if (new_irq_info == NULL)
		return NULL;

	memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));

	/* Free the old PROM new_irq_info structure */
	sn_intr_free(local_nasid, local_widget, new_irq_info);
	unregister_intr_pda(new_irq_info);

	/* allocate a new PROM new_irq_info struct */
	status = sn_intr_alloc(local_nasid, local_widget,
			       new_irq_info, vector,
			       nasid, slice);

	/* SAL call failed */
	if (status) {
		kfree(new_irq_info);
		return NULL;
	}

	/* Swap the list entry under the lock; readers see old or new. */
	register_intr_pda(new_irq_info);
	spin_lock(&sn_irq_info_lock);
	list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
	spin_unlock(&sn_irq_info_lock);
	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);

finish_up:
	/* Update kernels new_irq_info with new target info */
	cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid,
				     new_irq_info->irq_slice);
	new_irq_info->irq_cpuid = cpuid;

	pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];

	/*
	 * If this represents a line interrupt, target it.  If it's
	 * an msi (irq_int_bit < 0), it's already targeted.
	 */
	if (new_irq_info->irq_int_bit >= 0 &&
	    pci_provider && pci_provider->target_interrupt)
		(pci_provider->target_interrupt)(new_irq_info);

#ifdef CONFIG_SMP
	cpuphys = cpu_physical_id(cpuid);
	set_irq_affinity_info((vector & 0xff), cpuphys, 0);
#endif

	return new_irq_info;
}
/*
 * irq_chip set_affinity hook: retarget every vector hanging off this
 * irq to the first cpu in the requested mask.  Uses the _safe list
 * walker because sn_retarget_vector() may replace entries in place.
 */
static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
	struct sn_irq_info *info, *next;
	unsigned int cpu = cpumask_first(mask);
	nasid_t nasid = cpuid_to_nasid(cpu);
	int slice = cpuid_to_slice(cpu);

	list_for_each_entry_safe(info, next, sn_irq_lh[irq], list)
		(void)sn_retarget_vector(info, nasid, slice);

	return 0;
}
#ifdef CONFIG_SMP
void sn_set_err_irq_affinity(unsigned int irq)
{
	/*
	 * On systems which support CPU disabling (SHub2), all error interrupts
	 * are targeted at the boot CPU.
	 */
	if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
		set_irq_affinity_info(irq, cpu_physical_id(0), 0);
}
#else
void sn_set_err_irq_affinity(unsigned int irq) { }
#endif
/* Mask/unmask are no-ops for SN hub irqs; required irq_chip hooks. */
static void
sn_mask_irq(unsigned int irq)
{
}

static void
sn_unmask_irq(unsigned int irq)
{
}
/* irq_chip implementation for interrupts routed through the SN hub. */
struct irq_chip irq_type_sn = {
	.name		= "SN hub",
	.startup	= sn_startup_irq,
	.shutdown	= sn_shutdown_irq,
	.enable		= sn_enable_irq,
	.disable	= sn_disable_irq,
	.ack		= sn_ack_irq,
	.end		= sn_end_irq,
	.mask		= sn_mask_irq,
	.unmask		= sn_unmask_irq,
	.set_affinity	= sn_set_affinity_irq
};
ia64_vector sn_irq_to_vector(int irq)
{
if (irq >= IA64_NUM_VECTORS)
return 0;
return (ia64_vector)irq;
}
/* Map a CPU-local vector number to its Linux irq number. */
unsigned int sn_local_vector_to_irq(u8 vector)
{
	return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
}
/*
 * Set the sn2 device-vector window and claim every irq descriptor that
 * no other irq_chip has taken yet for the SN hub chip.
 */
void sn_irq_init(void)
{
	int i;

	ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
	ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;

	for (i = 0; i < NR_IRQS; i++)
		if (irq_desc[i].chip == &no_irq_chip)
			irq_desc[i].chip = &irq_type_sn;
}
/*
 * Widen the per-cpu [sn_first_irq, sn_last_irq] range so it covers the
 * irq this sn_irq_info targets (0 means "no first irq recorded yet").
 */
static void register_intr_pda(struct sn_irq_info *sn_irq_info)
{
	int vec = sn_irq_info->irq_irq;
	int cpunum = sn_irq_info->irq_cpuid;

	if (pdacpu(cpunum)->sn_last_irq < vec)
		pdacpu(cpunum)->sn_last_irq = vec;

	if (pdacpu(cpunum)->sn_first_irq == 0 ||
	    pdacpu(cpunum)->sn_first_irq > vec)
		pdacpu(cpunum)->sn_first_irq = vec;
}
/*
 * Shrink the per-cpu [sn_first_irq, sn_last_irq] range when the irq at
 * either boundary goes away: scan inward/outward for the nearest irq
 * that still has an entry targeting this cpu.
 */
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
{
	int irq = sn_irq_info->irq_irq;
	int cpu = sn_irq_info->irq_cpuid;
	struct sn_irq_info *tmp_irq_info;
	int i, foundmatch;

	rcu_read_lock();
	if (pdacpu(cpu)->sn_last_irq == irq) {
		foundmatch = 0;
		/* Walk down from the old maximum looking for this cpu. */
		for (i = pdacpu(cpu)->sn_last_irq - 1;
		     i && !foundmatch; i--) {
			list_for_each_entry_rcu(tmp_irq_info,
						sn_irq_lh[i],
						list) {
				if (tmp_irq_info->irq_cpuid == cpu) {
					foundmatch = 1;
					break;
				}
			}
		}
		pdacpu(cpu)->sn_last_irq = i;
	}

	if (pdacpu(cpu)->sn_first_irq == irq) {
		foundmatch = 0;
		/* Walk up from the old minimum looking for this cpu. */
		for (i = pdacpu(cpu)->sn_first_irq + 1;
		     i < NR_IRQS && !foundmatch; i++) {
			list_for_each_entry_rcu(tmp_irq_info,
						sn_irq_lh[i],
						list) {
				if (tmp_irq_info->irq_cpuid == cpu) {
					foundmatch = 1;
					break;
				}
			}
		}
		/* No remaining irq on this cpu: reset the minimum to 0. */
		pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
	}
	rcu_read_unlock();
}
/* RCU callback: reclaim the sn_irq_info that embeds this rcu_head. */
static void sn_irq_info_free(struct rcu_head *head)
{
	kfree(container_of(head, struct sn_irq_info, rcu));
}
/*
 * Attach an sn_irq_info to its PCI device: record the target cpu, link
 * the entry on the per-irq list, reserve the vector, and update the
 * per-cpu irq bookkeeping.  Takes a reference on the pci_dev that is
 * dropped in sn_irq_unfixup().
 */
void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
	nasid_t nasid = sn_irq_info->irq_nasid;
	int slice = sn_irq_info->irq_slice;
	int cpu = nasid_slice_to_cpuid(nasid, slice);
#ifdef CONFIG_SMP
	int cpuphys;
	struct irq_desc *desc;
#endif

	pci_dev_get(pci_dev);
	sn_irq_info->irq_cpuid = cpu;
	sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);

	/* link it into the sn_irq[irq] list */
	spin_lock(&sn_irq_info_lock);
	list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
	reserve_irq_vector(sn_irq_info->irq_irq);
	spin_unlock(&sn_irq_info_lock);

	register_intr_pda(sn_irq_info);
#ifdef CONFIG_SMP
	cpuphys = cpu_physical_id(cpu);
	set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0);
	desc = irq_to_desc(sn_irq_info->irq_irq);
	/*
	 * Affinity was set by the PROM, prevent it from
	 * being reset by the request_irq() path.
	 */
	desc->status |= IRQ_AFFINITY_SET;
#endif
}
/*
 * Undo sn_irq_fixup(): unlink the device's sn_irq_info from the
 * per-irq list (freeing the vector if the list is now empty), free the
 * entry after an RCU grace period, and drop the pci_dev reference.
 */
void sn_irq_unfixup(struct pci_dev *pci_dev)
{
	struct sn_irq_info *sn_irq_info;

	/* Only cleanup IRQ stuff if this device has a host bus context */
	if (!SN_PCIDEV_BUSSOFT(pci_dev))
		return;

	sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info;
	if (!sn_irq_info)
		return;

	if (!sn_irq_info->irq_irq) {
		/* Never linked/registered; just free it directly. */
		kfree(sn_irq_info);
		return;
	}

	unregister_intr_pda(sn_irq_info);
	spin_lock(&sn_irq_info_lock);
	list_del_rcu(&sn_irq_info->list);
	spin_unlock(&sn_irq_info_lock);
	if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
		free_irq_vector(sn_irq_info->irq_irq);
	call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
	pci_dev_put(pci_dev);
}
static inline void
sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info)
{
struct sn_pcibus_provider *pci_provider;
pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type];
/* Don't force an interrupt if the irq has been disabled */
if (!(irq_desc[sn_irq_info->irq_irq].status & IRQ_DISABLED) &&
pci_provider && pci_provider->force_interrupt)
(*pci_provider->force_interrupt)(sn_irq_info);
}
/*
 * Replay every interrupt registered on this irq; used by the
 * lost-interrupt workaround in sn_end_irq().
 */
static void force_interrupt(int irq)
{
	struct sn_irq_info *sn_irq_info;

	if (!sn_ioif_inited)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list)
		sn_call_force_intr_provider(sn_irq_info);
	rcu_read_unlock();
}
/*
* Check for lost interrupts. If the PIC int_status reg. says that
* an interrupt has been sent, but not handled, and the interrupt
* is not pending in either the cpu irr regs or in the soft irr regs,
* and the interrupt is not in service, then the interrupt may have
* been lost. Force an interrupt on that pin. It is possible that
* the interrupt is in flight, so we may generate a spurious interrupt,
* but we should never miss a real lost interrupt.
*/
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
	u64 regval;
	struct pcidev_info *pcidev_info;
	struct pcibus_info *pcibus_info;

	/*
	 * Bridge types attached to TIO (anything but PIC) do not need this WAR
	 * since they do not target Shub II interrupt registers. If that
	 * ever changes, this check needs to accommodate.
	 */
	if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC)
		return;

	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	if (!pcidev_info)
		return;

	pcibus_info =
	    (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
	    pdi_pcibus_info;
	regval = pcireg_intr_status_get(pcibus_info);

	/* Lost iff: not pending in the cpu IRR, not in service, but the
	 * bridge says it asserted it in both the previous and this scan. */
	if (!ia64_get_irr(irq_to_vector(irq))) {
		if (!test_bit(irq, pda->sn_in_service_ivecs)) {
			regval &= 0xff;
			if (sn_irq_info->irq_int_bit & regval &
			    sn_irq_info->irq_last_intr) {
				regval &= ~(sn_irq_info->irq_int_bit & regval);
				sn_call_force_intr_provider(sn_irq_info);
			}
		}
	}
	/* Remember this scan's status for the next pass. */
	sn_irq_info->irq_last_intr = regval;
}
/*
 * Periodic sweep over this cpu's registered irq range, running the
 * lost-interrupt check (sn_check_intr) on every sn_irq_info entry.
 */
void sn_lb_int_war_check(void)
{
	struct sn_irq_info *sn_irq_info;
	int i;

	if (!sn_ioif_inited || pda->sn_first_irq == 0)
		return;

	rcu_read_lock();
	for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
		list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) {
			sn_check_intr(i, sn_irq_info);
		}
	}
	rcu_read_unlock();
}
/*
 * Allocate the per-irq list-head table used to track sn_irq_info
 * entries (one list per irq).  Uses kcalloc() for the pointer array so
 * the NR_IRQS * sizeof() multiplication is overflow-checked rather
 * than open-coded with kmalloc().  Boot-critical: panics on failure.
 */
void __init sn_irq_lh_init(void)
{
	int i;

	sn_irq_lh = kcalloc(NR_IRQS, sizeof(struct list_head *), GFP_KERNEL);
	if (!sn_irq_lh)
		panic("SN PCI INIT: Failed to allocate memory for PCI init\n");

	for (i = 0; i < NR_IRQS; i++) {
		sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL);
		if (!sn_irq_lh[i])
			panic("SN PCI INIT: Failed IRQ memory allocation\n");

		INIT_LIST_HEAD(sn_irq_lh[i]);
	}
}
| gpl-2.0 |
lujiefeng/gzsd210_Android4.0.4_kernel | sound/soc/s6000/s6000-pcm.c | 1816 | 14270 | /*
* ALSA PCM interface for the Stetch s6000 family
*
* Author: Daniel Gloeckner, <dg@emlix.com>
* Copyright: (C) 2009 emlix GmbH <info@emlix.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <asm/dma.h>
#include <variant/dmac.h>
#include "s6000-pcm.h"
#define S6_PCM_PREALLOCATE_SIZE (96 * 1024)
#define S6_PCM_PREALLOCATE_MAX (2048 * 1024)
/* Capabilities advertised to ALSA for the s6000 DMA-backed PCM engine. */
static struct snd_pcm_hardware s6000_pcm_hardware = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_JOINT_DUPLEX),
	.formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE),
	.rates = (SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_5512 | \
		  SNDRV_PCM_RATE_8000_192000),
	.rate_min = 0,
	.rate_max = 1562500,
	.channels_min = 2,
	.channels_max = 8,
	.buffer_bytes_max = 0x7ffffff0,
	.period_bytes_min = 16,		/* DMA transfer granularity */
	.period_bytes_max = 0xfffff0,
	.periods_min = 2,		/* two periods are pre-queued in start */
	.periods_max = 1024, /* no limit */
	.fifo_size = 0,
};
/* Per-substream runtime state. */
struct s6000_runtime_data {
	spinlock_t lock;	/* serializes 'period' updates and FIFO access */
	int period;		/* current DMA period */
};
/*
 * Queue the next period of the ring buffer into the DMA controller's
 * FIFO and advance the period index (wrapping at runtime->periods).
 * Callers hold prtd->lock.
 */
static void s6000_pcm_enqueue_dma(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct s6000_runtime_data *prtd = runtime->private_data;
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par;
	int channel;
	unsigned int period_size;
	unsigned int dma_offset;
	dma_addr_t dma_pos;
	dma_addr_t src, dst;

	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);

	period_size = snd_pcm_lib_period_bytes(substream);
	dma_offset = prtd->period * period_size;
	dma_pos = runtime->dma_addr + dma_offset;

	/* Playback: memory -> serial interface; capture: the reverse. */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		src = dma_pos;
		dst = par->sif_out;
		channel = par->dma_out;
	} else {
		src = par->sif_in;
		dst = dma_pos;
		channel = par->dma_in;
	}

	if (!s6dmac_channel_enabled(DMA_MASK_DMAC(channel),
				    DMA_INDEX_CHNL(channel)))
		return;

	if (s6dmac_fifo_full(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel))) {
		printk(KERN_ERR "s6000-pcm: fifo full\n");
		return;
	}

	/* Hardware requires 16-byte aligned transfer sizes. */
	BUG_ON(period_size & 15);
	s6dmac_put_fifo(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel),
			src, dst, period_size);

	prtd->period++;
	if (unlikely(prtd->period >= runtime->periods))
		prtd->period = 0;
}
/*
 * DMA interrupt handler shared by playback and capture: reports xruns,
 * handles period-elapsed events (queueing the next period), and logs
 * DMA error conditions.
 *
 * Fix vs. original: the direction test compared the substream *pointer*
 * against SNDRV_PCM_STREAM_PLAYBACK (which is 0), so the playback
 * direction always selected the capture channel; compare the stream
 * index instead.  Also skip directions with no substream rather than
 * passing NULL into snd_soc_dai_get_dma_data().
 */
static irqreturn_t s6000_pcm_irq(int irq, void *data)
{
	struct snd_pcm *pcm = data;
	struct snd_soc_pcm_runtime *runtime = pcm->private_data;
	struct s6000_runtime_data *prtd;
	unsigned int has_xrun;
	int i, ret = IRQ_NONE;

	for (i = 0; i < 2; ++i) {
		struct snd_pcm_substream *substream = pcm->streams[i].substream;
		struct s6000_pcm_dma_params *params;
		u32 channel;
		unsigned int pending;

		/* A direction may not be wired up at all. */
		if (!substream)
			continue;

		params = snd_soc_dai_get_dma_data(runtime->cpu_dai, substream);

		/* Select the DMA channel matching this stream direction. */
		if (i == SNDRV_PCM_STREAM_PLAYBACK)
			channel = params->dma_out;
		else
			channel = params->dma_in;

		has_xrun = params->check_xrun(runtime->cpu_dai);

		if (!channel)
			continue;

		if (unlikely(has_xrun & (1 << i)) &&
		    substream->runtime &&
		    snd_pcm_running(substream)) {
			dev_dbg(pcm->dev, "xrun\n");
			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
			ret = IRQ_HANDLED;
		}

		pending = s6dmac_int_sources(DMA_MASK_DMAC(channel),
					     DMA_INDEX_CHNL(channel));

		if (pending & 1) {
			/* Descriptor finished: one period has completed. */
			ret = IRQ_HANDLED;
			if (likely(substream->runtime &&
				   snd_pcm_running(substream))) {
				snd_pcm_period_elapsed(substream);
				dev_dbg(pcm->dev, "period elapsed %x %x\n",
					s6dmac_cur_src(DMA_MASK_DMAC(channel),
						       DMA_INDEX_CHNL(channel)),
					s6dmac_cur_dst(DMA_MASK_DMAC(channel),
						       DMA_INDEX_CHNL(channel)));
				prtd = substream->runtime->private_data;
				spin_lock(&prtd->lock);
				s6000_pcm_enqueue_dma(substream);
				spin_unlock(&prtd->lock);
			}
		}

		/* Any other bits indicate DMA error conditions. */
		if (unlikely(pending & ~7)) {
			if (pending & (1 << 3))
				printk(KERN_WARNING
				       "s6000-pcm: DMA %x Underflow\n",
				       channel);
			if (pending & (1 << 4))
				printk(KERN_WARNING
				       "s6000-pcm: DMA %x Overflow\n",
				       channel);
			if (pending & 0x1e0)
				printk(KERN_WARNING
				       "s6000-pcm: DMA %x Master Error "
				       "(mask %x)\n",
				       channel, pending >> 5);
		}
	}

	return ret;
}
/*
 * Program and enable the DMA channel for this substream, then prime the
 * controller with the first two periods.  Runs under prtd->lock to
 * serialize against the interrupt handler's enqueue calls.
 */
static int s6000_pcm_start(struct snd_pcm_substream *substream)
{
	struct s6000_runtime_data *prtd = substream->runtime->private_data;
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par;
	unsigned long flags;
	int srcinc;
	u32 dma;

	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);

	spin_lock_irqsave(&prtd->lock, flags);
	/*
	 * Playback walks the source (memory) address; capture walks the
	 * destination address.  Exactly one side increments.
	 */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		srcinc = 1;
		dma = par->dma_out;
	} else {
		srcinc = 0;
		dma = par->dma_in;
	}
	s6dmac_enable_chan(DMA_MASK_DMAC(dma), DMA_INDEX_CHNL(dma),
			   1 /* priority 1 (0 is max) */,
			   0 /* peripheral requests w/o xfer length mode */,
			   srcinc /* source address increment */,
			   srcinc^1 /* destination address increment */,
			   0 /* chunksize 0 (skip impossible on this dma) */,
			   0 /* source skip after chunk (impossible) */,
			   0 /* destination skip after chunk (impossible) */,
			   4 /* 16 byte burst size */,
			   -1 /* don't conserve bandwidth */,
			   0 /* low watermark irq descriptor threshold */,
			   0 /* disable hardware timestamps */,
			   1 /* enable channel */);

	/* Queue two periods so the FIFO never runs dry between IRQs. */
	s6000_pcm_enqueue_dma(substream);
	s6000_pcm_enqueue_dma(substream);
	spin_unlock_irqrestore(&prtd->lock, flags);

	return 0;
}
/*
 * Stop the DMA channel driving this substream.  The terminal count is
 * cleared first so the controller stops queueing, then the channel is
 * disabled under the stream lock.
 */
static int s6000_pcm_stop(struct snd_pcm_substream *substream)
{
	struct s6000_runtime_data *rt_data = substream->runtime->private_data;
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *dma_params;
	unsigned long irqflags;
	u32 chan;

	dma_params = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);

	chan = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
		dma_params->dma_out : dma_params->dma_in;

	s6dmac_set_terminal_count(DMA_MASK_DMAC(chan),
				  DMA_INDEX_CHNL(chan), 0);

	spin_lock_irqsave(&rt_data->lock, irqflags);
	s6dmac_disable_chan(DMA_MASK_DMAC(chan), DMA_INDEX_CHNL(chan));
	spin_unlock_irqrestore(&rt_data->lock, irqflags);

	return 0;
}
/*
 * PCM trigger: notify the DAI before and after starting or stopping the
 * DMA channel for this substream.
 */
static int s6000_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *params;
	int rc;

	params = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);

	/* Pre-trigger hook: give the DAI a chance to veto or prepare. */
	rc = params->trigger(substream, cmd, 0);
	if (rc < 0)
		return rc;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		rc = s6000_pcm_start(substream);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		rc = s6000_pcm_stop(substream);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc < 0)
		return rc;

	/* Post-trigger hook. */
	return params->trigger(substream, cmd, 1);
}
/* Prepare: restart the period ring from the beginning of the buffer. */
static int s6000_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct s6000_runtime_data *rt_data = substream->runtime->private_data;

	rt_data->period = 0;

	return 0;
}
/*
 * Report the current position within the DMA buffer, in frames.  Reads
 * the controller's live source address (playback) or destination
 * address (capture) and converts its offset from the buffer start.
 */
static snd_pcm_uframes_t s6000_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct s6000_runtime_data *prtd = runtime->private_data;
	unsigned long flags;
	unsigned int offset;
	dma_addr_t count;

	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);

	spin_lock_irqsave(&prtd->lock, flags);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		count = s6dmac_cur_src(DMA_MASK_DMAC(par->dma_out),
				       DMA_INDEX_CHNL(par->dma_out));
	else
		count = s6dmac_cur_dst(DMA_MASK_DMAC(par->dma_in),
				       DMA_INDEX_CHNL(par->dma_in));
	/* Byte offset from the start of the DMA buffer. */
	count -= runtime->dma_addr;
	spin_unlock_irqrestore(&prtd->lock, flags);

	offset = bytes_to_frames(runtime, count);
	/* The hardware may momentarily point one past the end; wrap to 0. */
	if (unlikely(offset >= runtime->buffer_size))
		offset = 0;

	return offset;
}
/*
 * Open: constrain hardware parameters to what the DMA engine handles
 * (16-byte granularity, whole periods), optionally lock the sample rate
 * to the sibling stream when the DAI shares one clock, and allocate the
 * per-stream state.
 */
static int s6000_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct s6000_runtime_data *prtd;
	int ret;

	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
	snd_soc_set_runtime_hwparams(substream, &s6000_pcm_hardware);

	/* DMA bursts are 16 bytes; periods and buffers must be multiples. */
	ret = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 16);
	if (ret < 0)
		return ret;
	ret = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
	if (ret < 0)
		return ret;
	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;

	if (par->same_rate) {
		int rate;

		spin_lock(&par->lock); /* needed? */
		rate = par->rate;
		spin_unlock(&par->lock);
		if (rate != -1) {
			/* Sibling stream already fixed the rate; match it. */
			ret = snd_pcm_hw_constraint_minmax(runtime,
						SNDRV_PCM_HW_PARAM_RATE,
						rate, rate);
			if (ret < 0)
				return ret;
		}
	}

	prtd = kzalloc(sizeof(struct s6000_runtime_data), GFP_KERNEL);
	if (prtd == NULL)
		return -ENOMEM;

	spin_lock_init(&prtd->lock);

	runtime->private_data = prtd;

	return 0;
}
/* Close: release the per-stream state allocated in s6000_pcm_open(). */
static int s6000_pcm_close(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *rt = substream->runtime;

	kfree(rt->private_data);

	return 0;
}
/*
 * hw_params: allocate the DMA buffer and, for DAIs whose two directions
 * share one clock (par->same_rate), enforce a common sample rate.  The
 * first stream to configure claims the rate; a second stream asking for
 * a different one gets -EBUSY.
 */
static int s6000_pcm_hw_params(struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *hw_params)
{
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *par;
	int ret;
	ret = snd_pcm_lib_malloc_pages(substream,
				       params_buffer_bytes(hw_params));
	if (ret < 0) {
		printk(KERN_WARNING "s6000-pcm: allocation of memory failed\n");
		return ret;
	}

	par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);

	if (par->same_rate) {
		spin_lock(&par->lock);
		/* No rate claimed yet, or we are the only user: claim it. */
		if (par->rate == -1 ||
		    !(par->in_use & ~(1 << substream->stream))) {
			par->rate = params_rate(hw_params);
			par->in_use |= 1 << substream->stream;
		} else if (params_rate(hw_params) != par->rate) {
			/* Conflicts with the sibling stream's rate. */
			snd_pcm_lib_free_pages(substream);
			par->in_use &= ~(1 << substream->stream);
			ret = -EBUSY;
		}
		spin_unlock(&par->lock);
	}
	return ret;
}
/*
 * hw_free: drop this stream's claim on the shared rate (resetting it to
 * "unset" once no stream uses it) and free the DMA buffer.
 */
static int s6000_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
	struct s6000_pcm_dma_params *dma_params;

	dma_params = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);

	spin_lock(&dma_params->lock);
	dma_params->in_use &= ~(1 << substream->stream);
	if (!dma_params->in_use)
		dma_params->rate = -1;
	spin_unlock(&dma_params->lock);

	return snd_pcm_lib_free_pages(substream);
}
/* PCM operations handed to the ASoC core for this platform. */
static struct snd_pcm_ops s6000_pcm_ops = {
	.open = s6000_pcm_open,
	.close = s6000_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = s6000_pcm_hw_params,
	.hw_free = s6000_pcm_hw_free,
	.trigger = s6000_pcm_trigger,
	.prepare = s6000_pcm_prepare,
	.pointer = s6000_pcm_pointer,
};

/*
 * pcm_free: release the shared interrupt and the preallocated buffers.
 * The dma_params are looked up via stream 0 because the IRQ is common
 * to both directions.
 */
static void s6000_pcm_free(struct snd_pcm *pcm)
{
	struct snd_soc_pcm_runtime *runtime = pcm->private_data;
	struct s6000_pcm_dma_params *params =
		snd_soc_dai_get_dma_data(runtime->cpu_dai,
					 pcm->streams[0].substream);

	free_irq(params->irq, pcm);
	snd_pcm_lib_preallocate_free_for_all(pcm);
}
static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32);
static int s6000_pcm_new(struct snd_card *card,
struct snd_soc_dai *dai, struct snd_pcm *pcm)
{
struct snd_soc_pcm_runtime *runtime = pcm->private_data;
struct s6000_pcm_dma_params *params;
int res;
params = snd_soc_dai_get_dma_data(runtime->cpu_dai,
pcm->streams[0].substream);
if (!card->dev->dma_mask)
card->dev->dma_mask = &s6000_pcm_dmamask;
if (!card->dev->coherent_dma_mask)
card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
if (params->dma_in) {
s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in),
DMA_INDEX_CHNL(params->dma_in));
s6dmac_int_sources(DMA_MASK_DMAC(params->dma_in),
DMA_INDEX_CHNL(params->dma_in));
}
if (params->dma_out) {
s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_out),
DMA_INDEX_CHNL(params->dma_out));
s6dmac_int_sources(DMA_MASK_DMAC(params->dma_out),
DMA_INDEX_CHNL(params->dma_out));
}
res = request_irq(params->irq, s6000_pcm_irq, IRQF_SHARED,
"s6000-audio", pcm);
if (res) {
printk(KERN_ERR "s6000-pcm couldn't get IRQ\n");
return res;
}
res = snd_pcm_lib_preallocate_pages_for_all(pcm,
SNDRV_DMA_TYPE_DEV,
card->dev,
S6_PCM_PREALLOCATE_SIZE,
S6_PCM_PREALLOCATE_MAX);
if (res)
printk(KERN_WARNING "s6000-pcm: preallocation failed\n");
spin_lock_init(¶ms->lock);
params->in_use = 0;
params->rate = -1;
return 0;
}
/* ASoC platform driver glue: hooks the PCM ops into the core. */
static struct snd_soc_platform_driver s6000_soc_platform = {
	.ops = &s6000_pcm_ops,
	.pcm_new = s6000_pcm_new,
	.pcm_free = s6000_pcm_free,
};

/* Register/unregister this DMA platform with the ASoC core. */
static int __devinit s6000_soc_platform_probe(struct platform_device *pdev)
{
	return snd_soc_register_platform(&pdev->dev, &s6000_soc_platform);
}

static int __devexit s6000_soc_platform_remove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
	return 0;
}

static struct platform_driver s6000_pcm_driver = {
	.driver = {
		.name = "s6000-pcm-audio",
		.owner = THIS_MODULE,
	},
	.probe = s6000_soc_platform_probe,
	.remove = __devexit_p(s6000_soc_platform_remove),
};
/* Module entry/exit: register the PCM platform driver. */
static int __init snd_s6000_pcm_init(void)
{
	return platform_driver_register(&s6000_pcm_driver);
}
module_init(snd_s6000_pcm_init);

static void __exit snd_s6000_pcm_exit(void)
{
	platform_driver_unregister(&s6000_pcm_driver);
}
module_exit(snd_s6000_pcm_exit);

MODULE_AUTHOR("Daniel Gloeckner");
MODULE_DESCRIPTION("Stretch s6000 family PCM DMA module");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Biktorgj/Gear_1_Kernel | drivers/video/omap2/dss/core.c | 2072 | 13199 | /*
* linux/drivers/video/omap2/dss/core.c
*
* Copyright (C) 2009 Nokia Corporation
* Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
*
* Some code and ideas taken from drivers/video/omap/ driver
* by Imre Deak.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define DSS_SUBSYS_NAME "CORE"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/regulator/consumer.h>
#include <video/omapdss.h>
#include "dss.h"
#include "dss_features.h"
/* Global driver state: the omapdss platform device and cached regulators. */
static struct {
	struct platform_device *pdev;  /* set in omap_dss_probe() */
	struct regulator *vdds_dsi_reg; /* cached by dss_get_vdds_dsi() */
	struct regulator *vdds_sdi_reg; /* cached by dss_get_vdds_sdi() */
} core;

/* "def_disp=<name>" module parameter selects the default display. */
static char *def_disp_name;
module_param_named(def_disp, def_disp_name, charp, 0);
MODULE_PARM_DESC(def_disp, "default display name");

#ifdef DEBUG
unsigned int dss_debug;
/*
 * NOTE(review): parameter type is "bool" but the variable is unsigned
 * int; newer kernels require a matching bool variable here - confirm
 * against the target kernel version.
 */
module_param_named(debug, dss_debug, bool, 0644);
#endif

static int omap_dss_register_device(struct omap_dss_device *);
static void omap_dss_unregister_device(struct omap_dss_device *);

/* REGULATORS */
/*
 * Look up (and cache) the VDDS_DSI regulator.  Returns an ERR_PTR from
 * regulator_get() on failure; the cache is only filled on success.
 */
struct regulator *dss_get_vdds_dsi(void)
{
	struct regulator *reg = core.vdds_dsi_reg;

	/* Hand out the cached handle if we already looked it up. */
	if (reg)
		return reg;

	reg = regulator_get(&core.pdev->dev, "vdds_dsi");
	if (!IS_ERR(reg))
		core.vdds_dsi_reg = reg;

	return reg;
}
/*
 * Look up (and cache) the VDDS_SDI regulator.  Mirrors
 * dss_get_vdds_dsi(); the cache is only filled on success.
 */
struct regulator *dss_get_vdds_sdi(void)
{
	struct regulator *reg = core.vdds_sdi_reg;

	if (reg)
		return reg;

	reg = regulator_get(&core.pdev->dev, "vdds_sdi");
	if (!IS_ERR(reg))
		core.vdds_sdi_reg = reg;

	return reg;
}
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
/*
 * Each omapdss debugfs file stores its dump callback in i_private;
 * show() simply invokes it with the seq_file.
 */
static int dss_debug_show(struct seq_file *s, void *unused)
{
	void (*func)(struct seq_file *) = s->private;
	func(s);
	return 0;
}

static int dss_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, dss_debug_show, inode->i_private);
}

static const struct file_operations dss_debug_fops = {
	.open = dss_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static struct dentry *dss_debugfs_dir;

/*
 * Create the "omapdss" debugfs directory with one register/clock dump
 * file per sub-IP (plus optional RFBI/DSI/VENC/IRQ-stats files).
 * NOTE(review): only IS_ERR() is checked; debugfs_create_dir() may also
 * return NULL on failure in some kernel versions - confirm.
 */
static int dss_initialize_debugfs(void)
{
	dss_debugfs_dir = debugfs_create_dir("omapdss", NULL);
	if (IS_ERR(dss_debugfs_dir)) {
		int err = PTR_ERR(dss_debugfs_dir);
		dss_debugfs_dir = NULL;
		return err;
	}

	debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
			&dss_debug_dump_clocks, &dss_debug_fops);

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	debugfs_create_file("dispc_irq", S_IRUGO, dss_debugfs_dir,
			&dispc_dump_irqs, &dss_debug_fops);
#endif

#if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS)
	dsi_create_debugfs_files_irq(dss_debugfs_dir, &dss_debug_fops);
#endif

	debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
			&dss_dump_regs, &dss_debug_fops);
	debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir,
			&dispc_dump_regs, &dss_debug_fops);
#ifdef CONFIG_OMAP2_DSS_RFBI
	debugfs_create_file("rfbi", S_IRUGO, dss_debugfs_dir,
			&rfbi_dump_regs, &dss_debug_fops);
#endif
#ifdef CONFIG_OMAP2_DSS_DSI
	dsi_create_debugfs_files_reg(dss_debugfs_dir, &dss_debug_fops);
#endif
#ifdef CONFIG_OMAP2_DSS_VENC
	debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
			&venc_dump_regs, &dss_debug_fops);
#endif

	return 0;
}
/*
 * Tear down the omapdss debugfs tree.  debugfs_remove_recursive() is
 * documented to be a no-op for a NULL dentry, so the previous explicit
 * NULL guard was redundant (dss_debugfs_dir is NULL whenever
 * initialization failed or never ran).
 */
static void dss_uninitialize_debugfs(void)
{
	debugfs_remove_recursive(dss_debugfs_dir);
}
#else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
/* Stubs used when debugfs support is compiled out. */
static inline int dss_initialize_debugfs(void)
{
	return 0;
}
static inline void dss_uninitialize_debugfs(void)
{
}
#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
/* PLATFORM DEVICE */
/*
 * Probe the omapdss top-level platform device: register the sub-IP
 * platform drivers (DSS, RFBI, DISPC, VENC, DSI, HDMI), create debugfs
 * entries, and register every display device supplied in the board
 * data.  Errors unwind in reverse order through the goto ladder.
 */
static int omap_dss_probe(struct platform_device *pdev)
{
	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
	int r;
	int i;

	core.pdev = pdev;

	dss_features_init();

	dss_init_overlay_managers(pdev);
	dss_init_overlays(pdev);

	r = dss_init_platform_driver();
	if (r) {
		DSSERR("Failed to initialize DSS platform driver\n");
		goto err_dss;
	}

	/* keep clocks enabled to prevent context saves/restores during init */
	dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK);

	r = rfbi_init_platform_driver();
	if (r) {
		DSSERR("Failed to initialize rfbi platform driver\n");
		goto err_rfbi;
	}

	r = dispc_init_platform_driver();
	if (r) {
		DSSERR("Failed to initialize dispc platform driver\n");
		goto err_dispc;
	}

	r = venc_init_platform_driver();
	if (r) {
		DSSERR("Failed to initialize venc platform driver\n");
		goto err_venc;
	}

	r = dsi_init_platform_driver();
	if (r) {
		DSSERR("Failed to initialize DSI platform driver\n");
		goto err_dsi;
	}

	r = hdmi_init_platform_driver();
	if (r) {
		DSSERR("Failed to initialize hdmi\n");
		goto err_hdmi;
	}

	r = dss_initialize_debugfs();
	if (r)
		goto err_debugfs;

	/* Register each board-supplied display; unwind on failure. */
	for (i = 0; i < pdata->num_devices; ++i) {
		struct omap_dss_device *dssdev = pdata->devices[i];

		r = omap_dss_register_device(dssdev);
		if (r) {
			DSSERR("device %d %s register failed %d\n", i,
				dssdev->name ?: "unnamed", r);

			while (--i >= 0)
				omap_dss_unregister_device(pdata->devices[i]);

			goto err_register;
		}

		/* Honour the "def_disp=" module/kernel parameter. */
		if (def_disp_name && strcmp(def_disp_name, dssdev->name) == 0)
			pdata->default_device = dssdev;
	}

	dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK);

	return 0;

	/*
	 * NOTE(review): the error paths below do not call dss_clk_disable()
	 * for the clocks enabled above - confirm whether that is intended.
	 */
err_register:
	dss_uninitialize_debugfs();
err_debugfs:
	hdmi_uninit_platform_driver();
err_hdmi:
	dsi_uninit_platform_driver();
err_dsi:
	venc_uninit_platform_driver();
err_venc:
	dispc_uninit_platform_driver();
err_dispc:
	rfbi_uninit_platform_driver();
err_rfbi:
	dss_uninit_platform_driver();
err_dss:
	return r;
}
/*
 * Remove: tear everything down in roughly reverse probe order.  Display
 * devices are unregistered last, after the sub-IP drivers are gone.
 */
static int omap_dss_remove(struct platform_device *pdev)
{
	struct omap_dss_board_info *pdata = pdev->dev.platform_data;
	int i;

	dss_uninitialize_debugfs();

	venc_uninit_platform_driver();
	dispc_uninit_platform_driver();
	rfbi_uninit_platform_driver();
	dsi_uninit_platform_driver();
	hdmi_uninit_platform_driver();
	dss_uninit_platform_driver();

	dss_uninit_overlays(pdev);
	dss_uninit_overlay_managers(pdev);

	for (i = 0; i < pdata->num_devices; ++i)
		omap_dss_unregister_device(pdata->devices[i]);

	return 0;
}
/* Power-management hooks: fan out to all registered display devices. */
static void omap_dss_shutdown(struct platform_device *pdev)
{
	DSSDBG("shutdown\n");
	dss_disable_all_devices();
}

static int omap_dss_suspend(struct platform_device *pdev, pm_message_t state)
{
	DSSDBG("suspend %d\n", state.event);

	return dss_suspend_all_devices();
}

static int omap_dss_resume(struct platform_device *pdev)
{
	DSSDBG("resume\n");

	return dss_resume_all_devices();
}
/* Top-level "omapdss" platform driver. */
static struct platform_driver omap_dss_driver = {
	.probe = omap_dss_probe,
	.remove = omap_dss_remove,
	.shutdown = omap_dss_shutdown,
	.suspend = omap_dss_suspend,
	.resume = omap_dss_resume,
	.driver = {
		.name = "omapdss",
		.owner = THIS_MODULE,
	},
};
/* BUS */
/*
 * Bus match: a panel driver binds to a display device when the device's
 * requested driver name equals the driver's name.
 */
static int dss_bus_match(struct device *dev, struct device_driver *driver)
{
	struct omap_dss_device *dssdev = to_dss_device(dev);
	int matched;

	DSSDBG("bus_match. dev %s/%s, drv %s\n",
		dev_name(dev), dssdev->driver_name, driver->name);

	matched = (strcmp(dssdev->driver_name, driver->name) == 0);

	return matched;
}
/* sysfs "name" attribute for devices on the omapdss bus. */
static ssize_t device_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct omap_dss_device *dssdev = to_dss_device(dev);
	return snprintf(buf, PAGE_SIZE, "%s\n",
			dssdev->name ?
			dssdev->name : "");
}

static struct device_attribute default_dev_attrs[] = {
	__ATTR(name, S_IRUGO, device_name_show, NULL),
	__ATTR_NULL,
};

/* sysfs "name" attribute for drivers on the omapdss bus. */
static ssize_t driver_name_show(struct device_driver *drv, char *buf)
{
	struct omap_dss_driver *dssdrv = to_dss_driver(drv);
	return snprintf(buf, PAGE_SIZE, "%s\n",
			dssdrv->driver.name ?
			dssdrv->driver.name : "");
}

static struct driver_attribute default_drv_attrs[] = {
	__ATTR(name, S_IRUGO, driver_name_show, NULL),
	__ATTR_NULL,
};

/* The virtual "omapdss" bus that panel drivers bind to. */
static struct bus_type dss_bus_type = {
	.name = "omapdss",
	.match = dss_bus_match,
	.dev_attrs = default_dev_attrs,
	.drv_attrs = default_drv_attrs,
};

/* Release for the bus root device; it is static, nothing to free. */
static void dss_bus_release(struct device *dev)
{
	DSSDBG("bus_release\n");
}

/* Parent device for all omapdss display devices. */
static struct device dss_bus = {
	.release = dss_bus_release,
};

struct bus_type *dss_get_bus(void)
{
	return &dss_bus_type;
}
/* DRIVER */
/*
 * Bus probe: runs when a panel driver matches a display device.  Sets
 * up the core side of the device (force-enabling connections for the
 * board's default display), then calls the panel driver's own probe.
 */
static int dss_driver_probe(struct device *dev)
{
	int r;
	struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
	struct omap_dss_device *dssdev = to_dss_device(dev);
	struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
	bool force;

	DSSDBG("driver_probe: dev %s/%s, drv %s\n",
			dev_name(dev), dssdev->driver_name,
			dssdrv->driver.name);

	dss_init_device(core.pdev, dssdev);

	force = pdata->default_device == dssdev;
	dss_recheck_connections(dssdev, force);

	r = dssdrv->probe(dssdev);

	if (r) {
		DSSERR("driver probe failed: %d\n", r);
		/* Undo dss_init_device() on panel probe failure. */
		dss_uninit_device(core.pdev, dssdev);
		return r;
	}

	DSSDBG("probe done for device %s\n", dev_name(dev));

	/* Bind the driver to the device only after a successful probe. */
	dssdev->driver = dssdrv;

	return 0;
}
/*
 * Bus remove: let the panel driver tear down first, then undo the core
 * device init and drop the driver binding.
 */
static int dss_driver_remove(struct device *dev)
{
	struct omap_dss_device *dssdev = to_dss_device(dev);
	struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);

	DSSDBG("driver_remove: dev %s/%s\n", dev_name(dev),
			dssdev->driver_name);

	dssdrv->remove(dssdev);
	dss_uninit_device(core.pdev, dssdev);
	dssdev->driver = NULL;

	return 0;
}
/*
 * Register a panel driver on the omapdss bus, wiring in the common
 * probe/remove and filling optional callbacks with defaults.
 */
int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
{
	dssdriver->driver.bus = &dss_bus_type;
	dssdriver->driver.probe = dss_driver_probe;
	dssdriver->driver.remove = dss_driver_remove;

	if (dssdriver->get_resolution == NULL)
		dssdriver->get_resolution = omapdss_default_get_resolution;
	if (dssdriver->get_recommended_bpp == NULL)
		dssdriver->get_recommended_bpp =
			omapdss_default_get_recommended_bpp;

	return driver_register(&dssdriver->driver);
}
EXPORT_SYMBOL(omap_dss_register_driver);

void omap_dss_unregister_driver(struct omap_dss_driver *dssdriver)
{
	driver_unregister(&dssdriver->driver);
}
EXPORT_SYMBOL(omap_dss_unregister_driver);
/* DEVICE */
/*
 * Zero a struct device so it can be passed to device_register() again,
 * preserving only platform_data.  With check set, warn if any other
 * field was non-zero (those settings are about to be discarded).
 */
static void reset_device(struct device *dev, int check)
{
	u8 *dev_p = (u8 *)dev;
	u8 *dev_end = dev_p + sizeof(*dev);
	void *saved_pdata;

	saved_pdata = dev->platform_data;
	if (check) {
		/*
		 * Check if there is any other setting than platform_data
		 * in struct device; warn that these will be reset by our
		 * init.
		 */
		dev->platform_data = NULL;
		/* Scan the raw bytes for anything non-zero. */
		while (dev_p < dev_end) {
			if (*dev_p) {
				WARN("%s: struct device fields will be "
						"discarded\n",
						__func__);
				break;
			}
			dev_p++;
		}
	}
	memset(dev, 0, sizeof(*dev));
	dev->platform_data = saved_pdata;
}
static void omap_dss_dev_release(struct device *dev)
{
	/* Scrub the struct device so it could be registered again. */
	reset_device(dev, 0);
}

/* Register one display device on the omapdss bus as "display<N>". */
static int omap_dss_register_device(struct omap_dss_device *dssdev)
{
	static int dev_num;  /* monotonically increasing display index */

	WARN_ON(!dssdev->driver_name);

	reset_device(&dssdev->dev, 1);
	dssdev->dev.bus = &dss_bus_type;
	dssdev->dev.parent = &dss_bus;
	dssdev->dev.release = omap_dss_dev_release;
	dev_set_name(&dssdev->dev, "display%d", dev_num++);
	return device_register(&dssdev->dev);
}

static void omap_dss_unregister_device(struct omap_dss_device *dssdev)
{
	device_unregister(&dssdev->dev);
}
/* BUS */
/* Register the omapdss bus type and its root ("omapdss") device. */
static int omap_dss_bus_register(void)
{
	int r;

	r = bus_register(&dss_bus_type);
	if (r) {
		DSSERR("bus register failed\n");
		return r;
	}

	dev_set_name(&dss_bus, "omapdss");
	r = device_register(&dss_bus);
	if (r) {
		DSSERR("bus driver register failed\n");
		/* Roll back the bus registration on root-device failure. */
		bus_unregister(&dss_bus_type);
		return r;
	}

	return 0;
}
/* INIT */
#ifdef CONFIG_OMAP2_DSS_MODULE
static void omap_dss_bus_unregister(void)
{
	device_unregister(&dss_bus);

	bus_unregister(&dss_bus_type);
}

/* Modular build: register the bus first, then the platform driver. */
static int __init omap_dss_init(void)
{
	int r;

	r = omap_dss_bus_register();
	if (r)
		return r;

	r = platform_driver_register(&omap_dss_driver);
	if (r) {
		omap_dss_bus_unregister();
		return r;
	}

	return 0;
}

static void __exit omap_dss_exit(void)
{
	/* Drop the regulator references cached by dss_get_vdds_*(). */
	if (core.vdds_dsi_reg != NULL) {
		regulator_put(core.vdds_dsi_reg);
		core.vdds_dsi_reg = NULL;
	}
	if (core.vdds_sdi_reg != NULL) {
		regulator_put(core.vdds_sdi_reg);
		core.vdds_sdi_reg = NULL;
	}

	platform_driver_unregister(&omap_dss_driver);

	omap_dss_bus_unregister();
}

module_init(omap_dss_init);
module_exit(omap_dss_exit);
#else
/*
 * Built-in: the bus must exist very early (core_initcall) so panel
 * drivers can register; the platform driver follows at device_initcall.
 */
static int __init omap_dss_init(void)
{
	return omap_dss_bus_register();
}

static int __init omap_dss_init2(void)
{
	return platform_driver_register(&omap_dss_driver);
}

core_initcall(omap_dss_init);
device_initcall(omap_dss_init2);
#endif
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
MODULE_DESCRIPTION("OMAP2/3 Display Subsystem");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
SlimRoms/kernel_asus_grouper | arch/tile/kernel/single_step.c | 2584 | 21763 | /*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* A code-rewriter that enables instruction single-stepping.
* Derived from iLib's single-stepping code.
*/
#ifndef __tilegx__ /* Hardware support for single step unavailable. */
/* These functions are only used on the TILE platform */
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <asm/cacheflush.h>
#include <asm/opcode-tile.h>
#include <asm/opcode_constants.h>
#include <arch/abi.h>
#define signExtend17(val) sign_extend((val), 17)
#define TILE_X1_MASK (0xffffffffULL << 31)
/* Nonzero: log every unaligned-access fixup (see boot parameter below). */
int unaligned_printk;

/* Parse the "unaligned_printk=" boot parameter (numeric on/off value). */
static int __init setup_unaligned_printk(char *str)
{
	long val;
	/* Reject the option (return 0) when the argument is not numeric. */
	if (strict_strtol(str, 0, &val) != 0)
		return 0;
	unaligned_printk = val;
	pr_info("Printk for each unaligned data accesses is %s\n",
		unaligned_printk ? "enabled" : "disabled");
	return 1;
}
__setup("unaligned_printk=", setup_unaligned_printk);
/* Running count of unaligned accesses fixed up by the kernel. */
unsigned int unaligned_fixup_count;

/* Classification of the memory operation in a bundle being stepped. */
enum mem_op {
	MEMOP_NONE,
	MEMOP_LOAD,
	MEMOP_STORE,
	MEMOP_LOAD_POSTINCR,
	MEMOP_STORE_POSTINCR
};
/* Replace the X1 branch-offset field of a bundle with a new offset. */
static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
{
	/* create_BrOff_X1(-1) yields a mask covering the whole field. */
	const tile_bundle_bits off_mask = create_BrOff_X1(-1);

	return (n & ~off_mask) | create_BrOff_X1(offset);
}
/*
 * Rewrite slot X1 of a bundle as "or dest, src, zero", i.e. a register
 * move, leaving the other slots untouched.
 */
static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
{
	tile_bundle_bits insn = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
		create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
		create_Dest_X1(dest) |
		create_SrcB_X1(TREG_ZERO) |
		create_SrcA_X1(src);

	return (n & ~TILE_X1_MASK) | insn;
}
/* Turn slot X1 into a no-op: a move from the zero register to itself. */
static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
{
	return move_X1(n, TREG_ZERO, TREG_ZERO);
}
/* Rewrite slot X1 of a bundle as "addi dest, src, imm". */
static inline tile_bundle_bits addi_X1(
	tile_bundle_bits n, int dest, int src, int imm)
{
	tile_bundle_bits insn = create_SrcA_X1(src) |
		create_Dest_X1(dest) |
		create_Imm8_X1(imm) |
		create_S_X1(0) |
		create_Opcode_X1(IMM_0_OPCODE_X1) |
		create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1);

	return (n & ~TILE_X1_MASK) | insn;
}
/*
 * Handle an unaligned load or store discovered while single-stepping.
 *
 * Performs the access by hand with copy_{from,to}_user() and rewrites
 * the bundle so re-executing it in the step buffer cannot fault: the
 * memory op becomes a prefetch (Y2 encodings), an addi (post-increment
 * forms, preserving the address update), or a nop.  Returns the
 * rewritten bundle, or 0 after raising SIGSEGV (bad address) or SIGBUS
 * (fixups disabled via unaligned_fixup).
 */
static tile_bundle_bits rewrite_load_store_unaligned(
	struct single_step_state *state,
	tile_bundle_bits bundle,
	struct pt_regs *regs,
	enum mem_op mem_op,
	int size, int sign_ext)
{
	unsigned char __user *addr;
	int val_reg, addr_reg, err, val;

	/* Get address and value registers */
	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
		addr_reg = get_SrcA_Y2(bundle);
		val_reg = get_SrcBDest_Y2(bundle);
	} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_Dest_X1(bundle);
	} else {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_SrcB_X1(bundle);
	}

	/*
	 * If registers are not GPRs, don't try to handle it.
	 *
	 * FIXME: we could handle non-GPR loads by getting the real value
	 * from memory, writing it to the single step buffer, using a
	 * temp_reg to hold a pointer to that memory, then executing that
	 * instruction and resetting temp_reg.  For non-GPR stores, it's a
	 * little trickier; we could use the single step buffer for that
	 * too, but we'd have to add some more state bits so that we could
	 * call back in here to copy that value to the real target.  For
	 * now, we just handle the simple case.
	 */
	if ((val_reg >= PTREGS_NR_GPRS &&
	    (val_reg != TREG_ZERO ||
	     mem_op == MEMOP_LOAD ||
	     mem_op == MEMOP_LOAD_POSTINCR)) ||
	    addr_reg >= PTREGS_NR_GPRS)
		return bundle;

	/* If it's aligned, don't handle it specially */
	addr = (void __user *)regs->regs[addr_reg];
	if (((unsigned long)addr % size) == 0)
		return bundle;

#ifndef __LITTLE_ENDIAN
# error We assume little-endian representation with copy_xx_user size 2 here
#endif
	/* Handle unaligned load/store */
	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		unsigned short val_16;
		switch (size) {
		case 2:
			err = copy_from_user(&val_16, addr, sizeof(val_16));
			val = sign_ext ? ((short)val_16) : val_16;
			break;
		case 4:
			err = copy_from_user(&val, addr, sizeof(val));
			break;
		default:
			BUG();
		}
		if (err == 0) {
			/*
			 * Defer the register write: the supervisor applies
			 * update_reg/update_value after the step completes.
			 */
			state->update_reg = val_reg;
			state->update_value = val;
			state->update = 1;
		}
	} else {
		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
		err = copy_to_user(addr, &val, size);
	}

	if (err) {
		siginfo_t info = {
			.si_signo = SIGSEGV,
			.si_code = SEGV_MAPERR,
			.si_addr = addr
		};
		trace_unhandled_signal("segfault", regs,
				       (unsigned long)addr, SIGSEGV);
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_fixup == 0) {
		/* Fixups disabled: deliver a bus error instead. */
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		trace_unhandled_signal("unaligned trap", regs,
				       (unsigned long)addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_printk || unaligned_fixup_count == 0) {
		pr_info("Process %d/%s: PC %#lx: Fixup of"
			" unaligned %s at %#lx.\n",
			current->pid, current->comm, regs->pc,
			(mem_op == MEMOP_LOAD ||
			 mem_op == MEMOP_LOAD_POSTINCR) ?
			"load" : "store",
			(unsigned long)addr);
		if (!unaligned_printk) {
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
		}
	}
	++unaligned_fixup_count;

	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
		/* Convert the Y2 instruction to a prefetch. */
		bundle &= ~(create_SrcBDest_Y2(-1) |
			    create_Opcode_Y2(-1));
		bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
			   create_Opcode_Y2(LW_OPCODE_Y2));
	/* Replace the load postincr with an addi */
	} else if (mem_op == MEMOP_LOAD_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Imm8_X1(bundle));
	/* Replace the store postincr with an addi */
	} else if (mem_op == MEMOP_STORE_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Dest_Imm8_X1(bundle));
	} else {
		/* Convert the X1 instruction to a nop. */
		bundle &= ~(create_Opcode_X1(-1) |
			    create_UnShOpcodeExtension_X1(-1) |
			    create_UnOpcodeExtension_X1(-1));
		bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
			   create_UnShOpcodeExtension_X1(
				   UN_0_SHUN_0_OPCODE_X1) |
			   create_UnOpcodeExtension_X1(
				   NOP_UN_0_SHUN_0_OPCODE_X1));
	}

	return bundle;
}
/*
 * Called after execve() has started the new image.  This allows us
 * to reset the info state.  Note that the mmap'ed step buffer, if
 * there was any, has already been unmapped by the exec.
 */
void single_step_execve(void)
{
	struct thread_info *ti = current_thread_info();
	/* Drop the stale per-thread single-step state. */
	kfree(ti->step_state);
	ti->step_state = NULL;
}
/**
* single_step_once() - entry point when single stepping has been triggered.
* @regs: The machine register state
*
* When we arrive at this routine via a trampoline, the single step
* engine copies the executing bundle to the single step buffer.
* If the instruction is a condition branch, then the target is
* reset to one past the next instruction. If the instruction
* sets the lr, then that is noted. If the instruction is a jump
* or call, then the new target pc is preserved and the current
* bundle instruction set to null.
*
* The necessary post-single-step rewriting information is stored in
* single_step_state-> We use data segment values because the
* stack will be rewound when we run the rewritten single-stepped
* instruction.
*/
void single_step_once(struct pt_regs *regs)
{
extern tile_bundle_bits __single_step_ill_insn;
extern tile_bundle_bits __single_step_j_insn;
extern tile_bundle_bits __single_step_addli_insn;
extern tile_bundle_bits __single_step_auli_insn;
struct thread_info *info = (void *)current_thread_info();
struct single_step_state *state = info->step_state;
int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
tile_bundle_bits __user *buffer, *pc;
tile_bundle_bits bundle;
int temp_reg;
int target_reg = TREG_LR;
int err;
enum mem_op mem_op = MEMOP_NONE;
int size = 0, sign_ext = 0; /* happy compiler */
asm(
" .pushsection .rodata.single_step\n"
" .align 8\n"
" .globl __single_step_ill_insn\n"
"__single_step_ill_insn:\n"
" ill\n"
" .globl __single_step_addli_insn\n"
"__single_step_addli_insn:\n"
" { nop; addli r0, zero, 0 }\n"
" .globl __single_step_auli_insn\n"
"__single_step_auli_insn:\n"
" { nop; auli r0, r0, 0 }\n"
" .globl __single_step_j_insn\n"
"__single_step_j_insn:\n"
" j .\n"
" .popsection\n"
);
/*
* Enable interrupts here to allow touching userspace and the like.
* The callers expect this: do_trap() already has interrupts
* enabled, and do_work_pending() handles functions that enable
* interrupts internally.
*/
local_irq_enable();
if (state == NULL) {
/* allocate a page of writable, executable memory */
state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
if (state == NULL) {
pr_err("Out of kernel memory trying to single-step\n");
return;
}
/* allocate a cache line of writable, executable memory */
down_write(¤t->mm->mmap_sem);
buffer = (void __user *) do_mmap(NULL, 0, 64,
PROT_EXEC | PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS,
0);
up_write(¤t->mm->mmap_sem);
if (IS_ERR((void __force *)buffer)) {
kfree(state);
pr_err("Out of kernel pages trying to single-step\n");
return;
}
state->buffer = buffer;
state->is_enabled = 0;
info->step_state = state;
/* Validate our stored instruction patterns */
BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
ADDLI_OPCODE_X1);
BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
AULI_OPCODE_X1);
BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
}
/*
* If we are returning from a syscall, we still haven't hit the
* "ill" for the swint1 instruction. So back the PC up to be
* pointing at the swint1, but we'll actually return directly
* back to the "ill" so we come back in via SIGILL as if we
* had "executed" the swint1 without ever being in kernel space.
*/
if (regs->faultnum == INT_SWINT_1)
regs->pc -= 8;
pc = (tile_bundle_bits __user *)(regs->pc);
if (get_user(bundle, pc) != 0) {
pr_err("Couldn't read instruction at %p trying to step\n", pc);
return;
}
/* We'll follow the instruction with 2 ill op bundles */
state->orig_pc = (unsigned long)pc;
state->next_pc = (unsigned long)(pc + 1);
state->branch_next_pc = 0;
state->update = 0;
if (!(bundle & TILE_BUNDLE_Y_ENCODING_MASK)) {
/* two wide, check for control flow */
int opcode = get_Opcode_X1(bundle);
switch (opcode) {
/* branches */
case BRANCH_OPCODE_X1:
{
s32 offset = signExtend17(get_BrOff_X1(bundle));
/*
* For branches, we use a rewriting trick to let the
* hardware evaluate whether the branch is taken or
* untaken. We record the target offset and then
* rewrite the branch instruction to target 1 insn
* ahead if the branch is taken. We then follow the
* rewritten branch with two bundles, each containing
* an "ill" instruction. The supervisor examines the
* pc after the single step code is executed, and if
* the pc is the first ill instruction, then the
* branch (if any) was not taken. If the pc is the
* second ill instruction, then the branch was
* taken. The new pc is computed for these cases, and
* inserted into the registers for the thread. If
* the pc is the start of the single step code, then
* an exception or interrupt was taken before the
* code started processing, and the same "original"
* pc is restored. This change, different from the
* original implementation, has the advantage of
* executing a single user instruction.
*/
state->branch_next_pc = (unsigned long)(pc + offset);
/* rewrite branch offset to go forward one bundle */
bundle = set_BrOff_X1(bundle, 2);
}
break;
/* jumps */
case JALB_OPCODE_X1:
case JALF_OPCODE_X1:
state->update = 1;
state->next_pc =
(unsigned long) (pc + get_JOffLong_X1(bundle));
break;
case JB_OPCODE_X1:
case JF_OPCODE_X1:
state->next_pc =
(unsigned long) (pc + get_JOffLong_X1(bundle));
bundle = nop_X1(bundle);
break;
case SPECIAL_0_OPCODE_X1:
switch (get_RRROpcodeExtension_X1(bundle)) {
/* jump-register */
case JALRP_SPECIAL_0_OPCODE_X1:
case JALR_SPECIAL_0_OPCODE_X1:
state->update = 1;
state->next_pc =
regs->regs[get_SrcA_X1(bundle)];
break;
case JRP_SPECIAL_0_OPCODE_X1:
case JR_SPECIAL_0_OPCODE_X1:
state->next_pc =
regs->regs[get_SrcA_X1(bundle)];
bundle = nop_X1(bundle);
break;
case LNK_SPECIAL_0_OPCODE_X1:
state->update = 1;
target_reg = get_Dest_X1(bundle);
break;
/* stores */
case SH_SPECIAL_0_OPCODE_X1:
mem_op = MEMOP_STORE;
size = 2;
break;
case SW_SPECIAL_0_OPCODE_X1:
mem_op = MEMOP_STORE;
size = 4;
break;
}
break;
/* loads and iret */
case SHUN_0_OPCODE_X1:
if (get_UnShOpcodeExtension_X1(bundle) ==
UN_0_SHUN_0_OPCODE_X1) {
switch (get_UnOpcodeExtension_X1(bundle)) {
case LH_UN_0_SHUN_0_OPCODE_X1:
mem_op = MEMOP_LOAD;
size = 2;
sign_ext = 1;
break;
case LH_U_UN_0_SHUN_0_OPCODE_X1:
mem_op = MEMOP_LOAD;
size = 2;
sign_ext = 0;
break;
case LW_UN_0_SHUN_0_OPCODE_X1:
mem_op = MEMOP_LOAD;
size = 4;
break;
case IRET_UN_0_SHUN_0_OPCODE_X1:
{
unsigned long ex0_0 = __insn_mfspr(
SPR_EX_CONTEXT_0_0);
unsigned long ex0_1 = __insn_mfspr(
SPR_EX_CONTEXT_0_1);
/*
* Special-case it if we're iret'ing
* to PL0 again. Otherwise just let
* it run and it will generate SIGILL.
*/
if (EX1_PL(ex0_1) == USER_PL) {
state->next_pc = ex0_0;
regs->ex1 = ex0_1;
bundle = nop_X1(bundle);
}
}
}
}
break;
#if CHIP_HAS_WH64()
/* postincrement operations */
case IMM_0_OPCODE_X1:
switch (get_ImmOpcodeExtension_X1(bundle)) {
case LWADD_IMM_0_OPCODE_X1:
mem_op = MEMOP_LOAD_POSTINCR;
size = 4;
break;
case LHADD_IMM_0_OPCODE_X1:
mem_op = MEMOP_LOAD_POSTINCR;
size = 2;
sign_ext = 1;
break;
case LHADD_U_IMM_0_OPCODE_X1:
mem_op = MEMOP_LOAD_POSTINCR;
size = 2;
sign_ext = 0;
break;
case SWADD_IMM_0_OPCODE_X1:
mem_op = MEMOP_STORE_POSTINCR;
size = 4;
break;
case SHADD_IMM_0_OPCODE_X1:
mem_op = MEMOP_STORE_POSTINCR;
size = 2;
break;
default:
break;
}
break;
#endif /* CHIP_HAS_WH64() */
}
if (state->update) {
/*
* Get an available register. We start with a
* bitmask with 1's for available registers.
* We truncate to the low 32 registers since
* we are guaranteed to have set bits in the
* low 32 bits, then use ctz to pick the first.
*/
u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
(1ULL << get_SrcA_X0(bundle)) |
(1ULL << get_SrcB_X0(bundle)) |
(1ULL << target_reg));
temp_reg = __builtin_ctz(mask);
state->update_reg = temp_reg;
state->update_value = regs->regs[temp_reg];
regs->regs[temp_reg] = (unsigned long) (pc+1);
regs->flags |= PT_FLAGS_RESTORE_REGS;
bundle = move_X1(bundle, target_reg, temp_reg);
}
} else {
int opcode = get_Opcode_Y2(bundle);
switch (opcode) {
/* loads */
case LH_OPCODE_Y2:
mem_op = MEMOP_LOAD;
size = 2;
sign_ext = 1;
break;
case LH_U_OPCODE_Y2:
mem_op = MEMOP_LOAD;
size = 2;
sign_ext = 0;
break;
case LW_OPCODE_Y2:
mem_op = MEMOP_LOAD;
size = 4;
break;
/* stores */
case SH_OPCODE_Y2:
mem_op = MEMOP_STORE;
size = 2;
break;
case SW_OPCODE_Y2:
mem_op = MEMOP_STORE;
size = 4;
break;
}
}
/*
* Check if we need to rewrite an unaligned load/store.
* Returning zero is a special value meaning we need to SIGSEGV.
*/
if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
bundle = rewrite_load_store_unaligned(state, bundle, regs,
mem_op, size, sign_ext);
if (bundle == 0)
return;
}
/* write the bundle to our execution area */
buffer = state->buffer;
err = __put_user(bundle, buffer++);
/*
* If we're really single-stepping, we take an INT_ILL after.
* If we're just handling an unaligned access, we can just
* jump directly back to where we were in user code.
*/
if (is_single_step) {
err |= __put_user(__single_step_ill_insn, buffer++);
err |= __put_user(__single_step_ill_insn, buffer++);
} else {
long delta;
if (state->update) {
/* We have some state to update; do it inline */
int ha16;
bundle = __single_step_addli_insn;
bundle |= create_Dest_X1(state->update_reg);
bundle |= create_Imm16_X1(state->update_value);
err |= __put_user(bundle, buffer++);
bundle = __single_step_auli_insn;
bundle |= create_Dest_X1(state->update_reg);
bundle |= create_SrcA_X1(state->update_reg);
ha16 = (state->update_value + 0x8000) >> 16;
bundle |= create_Imm16_X1(ha16);
err |= __put_user(bundle, buffer++);
state->update = 0;
}
/* End with a jump back to the next instruction */
delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
(unsigned long)buffer) >>
TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
bundle = __single_step_j_insn;
bundle |= create_JOffLong_X1(delta);
err |= __put_user(bundle, buffer++);
}
if (err) {
pr_err("Fault when writing to single-step buffer\n");
return;
}
/*
* Flush the buffer.
* We do a local flush only, since this is a thread-specific buffer.
*/
__flush_icache_range((unsigned long)state->buffer,
(unsigned long)buffer);
/* Indicate enabled */
state->is_enabled = is_single_step;
regs->pc = (unsigned long)state->buffer;
/* Fault immediately if we are coming back from a syscall. */
if (regs->faultnum == INT_SWINT_1)
regs->pc += 8;
}
#else
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <arch/spr_def.h>
static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
/*
* Called directly on the occasion of an interrupt.
*
* If the process doesn't have single step set, then we use this as an
* opportunity to turn single step off.
*
* It has been mentioned that we could conditionally turn off single stepping
* on each entry into the kernel and rely on single_step_once to turn it
* on for the processes that matter (as we already do), but this
* implementation is somewhat more efficient in that we muck with registers
* once on a bum interrupt rather than on every entry into the kernel.
*
* If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
* so we have to run through this process again before we can say that an
* instruction has executed.
*
* swint will set CANCELED, but it's a legitimate instruction. Fortunately
* it changes the PC. If it hasn't changed, then we know that the interrupt
* wasn't generated by swint and we'll need to run this process again before
* we can say an instruction has executed.
*
* If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
* on with our lives.
*/
void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
	/* PC recorded by single_step_once() when the step was armed. */
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	struct thread_info *info = (void *)current_thread_info();
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	if (is_single_step == 0) {
		/* Not stepping this task: turn hardware single-step off. */
		__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
	} else if ((*ss_pc != regs->pc) ||
		   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
		/*
		 * Either the PC moved or the step was not canceled by an
		 * interrupt, so a real user instruction has executed:
		 * report SIGTRAP and re-arm (inhibit + canceled) so the
		 * next step starts from a clean state.
		 */
		ptrace_notify(SIGTRAP);
		control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
		control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
		__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	}
}
/*
* Called from need_singlestep. Set up the control registers and the enable
* register, then return back.
*/
void single_step_once(struct pt_regs *regs)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	/* Remember where we started so the handler can detect progress. */
	*ss_pc = regs->pc;
	control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
	control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
	__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	/* Enable hardware single-step for user (PL0) mode only. */
	__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}
/*
 * No per-process single-step state needs to be torn down on exec
 * for this (hardware-stepping) implementation.
 */
void single_step_execve(void)
{
	/* Nothing */
}
#endif /* !__tilegx__ */
| gpl-2.0 |
nicon8/Sched_deadline_rt | arch/powerpc/kernel/kgdb.c | 2840 | 14844 | /*
* PowerPC backend to the KGDB stub.
*
* 1998 (c) Michael AK Tesch (tesch@cs.wisc.edu)
* Copyright (C) 2003 Timesys Corporation.
* Copyright (C) 2004-2006 MontaVista Software, Inc.
* PPC64 Mods (C) 2005 Frank Rowand (frowand@mvista.com)
* PPC32 support restored by Vitaly Wool <vwool@ru.mvista.com> and
* Sergei Shtylyov <sshtylyov@ru.mvista.com>
* Copyright (C) 2007-2008 Wind River Systems, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program as licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/kdebug.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/machdep.h>
/*
* This table contains the mapping between PowerPC hardware trap types, and
* signals, which are primarily what GDB understands. GDB and the kernel
* don't always agree on values, so we use constants taken from gdb-6.2.
*/
static struct hard_trap_info
{
	unsigned int tt;	/* Trap type code for powerpc */
	unsigned char signo;	/* Signal that we map this trap into */
} hard_trap_info[] = {
	{ 0x0100, 0x02 /* SIGINT */  },		/* system reset */
	{ 0x0200, 0x0b /* SIGSEGV */ },		/* machine check */
	{ 0x0300, 0x0b /* SIGSEGV */ },		/* data access */
	{ 0x0400, 0x0b /* SIGSEGV */ },		/* instruction access */
	{ 0x0500, 0x02 /* SIGINT */  },		/* external interrupt */
	{ 0x0600, 0x0a /* SIGBUS */  },		/* alignment */
	{ 0x0700, 0x05 /* SIGTRAP */ },		/* program check */
	{ 0x0800, 0x08 /* SIGFPE */  },		/* fp unavailable */
	{ 0x0900, 0x0e /* SIGALRM */ },		/* decrementer */
	{ 0x0c00, 0x14 /* SIGCHLD */ },		/* system call */
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	{ 0x2002, 0x05 /* SIGTRAP */ },		/* debug */
#if defined(CONFIG_FSL_BOOKE)
	{ 0x2010, 0x08 /* SIGFPE */  },		/* spe unavailable */
	{ 0x2020, 0x08 /* SIGFPE */  },		/* spe unavailable */
	{ 0x2030, 0x08 /* SIGFPE */  },		/* spe fp data */
	{ 0x2040, 0x08 /* SIGFPE */  },		/* spe fp data */
	{ 0x2050, 0x08 /* SIGFPE */  },		/* spe fp round */
	/*
	 * NOTE(review): 0x0e is SIGALRM in this numbering, but the original
	 * comment said SIGILL (0x04) — verify the intended signal.
	 */
	{ 0x2060, 0x0e /* SIGILL */  },		/* performance monitor */
	{ 0x2900, 0x08 /* SIGFPE */  },		/* apu unavailable */
	{ 0x3100, 0x0e /* SIGALRM */ },		/* fixed interval timer */
	{ 0x3200, 0x02 /* SIGINT */  }, 	/* watchdog */
#else /* ! CONFIG_FSL_BOOKE */
	{ 0x1000, 0x0e /* SIGALRM */ },		/* prog interval timer */
	{ 0x1010, 0x0e /* SIGALRM */ },		/* fixed interval timer */
	{ 0x1020, 0x02 /* SIGINT */  }, 	/* watchdog */
	{ 0x2010, 0x08 /* SIGFPE */  },		/* fp unavailable */
	{ 0x2020, 0x08 /* SIGFPE */  },		/* ap unavailable */
#endif
#else /* ! (defined(CONFIG_40x) || defined(CONFIG_BOOKE)) */
	{ 0x0d00, 0x05 /* SIGTRAP */ },		/* single-step */
#if defined(CONFIG_8xx)
	{ 0x1000, 0x04 /* SIGILL */  },		/* software emulation */
#else /* ! CONFIG_8xx */
	{ 0x0f00, 0x04 /* SIGILL */  },		/* performance monitor */
	{ 0x0f20, 0x08 /* SIGFPE */  },		/* altivec unavailable */
	{ 0x1300, 0x05 /* SIGTRAP */ }, 	/* instruction address break */
#if defined(CONFIG_PPC64)
	/*
	 * NOTE(review): 0x05 is SIGTRAP in this numbering, but the original
	 * comment said SIGILL — verify the intended signal.
	 */
	{ 0x1200, 0x05 /* SIGILL */  },		/* system error */
	{ 0x1500, 0x04 /* SIGILL */  },		/* soft patch */
	{ 0x1600, 0x04 /* SIGILL */  },		/* maintenance */
	{ 0x1700, 0x08 /* SIGFPE */  },		/* altivec assist */
	{ 0x1800, 0x04 /* SIGILL */  },		/* thermal */
#else /* ! CONFIG_PPC64 */
	{ 0x1400, 0x02 /* SIGINT */  },		/* SMI */
	{ 0x1600, 0x08 /* SIGFPE */  },		/* altivec assist */
	{ 0x1700, 0x04 /* SIGILL */  },		/* TAU */
	{ 0x2000, 0x05 /* SIGTRAP */ },		/* run mode */
#endif
#endif
#endif
	{ 0x0000, 0x00 }			/* Must be last */
};
static int computeSignal(unsigned int tt)
{
struct hard_trap_info *ht;
for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
if (ht->tt == tt)
return ht->signo;
return SIGHUP; /* default for things we don't know about */
}
/* IPI callback: report this CPU to the KGDB core during a roundup. */
static int kgdb_call_nmi_hook(struct pt_regs *regs)
{
	kgdb_nmicallback(raw_smp_processor_id(), regs);
	return 0;
}
#ifdef CONFIG_SMP
/* Ask all other CPUs to enter the debugger (they land in the NMI hook). */
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_send_debugger_break();
}
#endif
/* KGDB functions to use existing PowerPC64 hooks. */
/*
 * Generic __debugger hook: hand the exception to KGDB with a signal
 * derived from the trap type.  Returns nonzero when KGDB consumed it.
 */
static int kgdb_debugger(struct pt_regs *regs)
{
	return !kgdb_handle_exception(1, computeSignal(TRAP(regs)),
				      DIE_OOPS, regs);
}
/*
 * Breakpoint (__debugger_bpt) hook.  Only kernel-mode breakpoints are
 * ours; if the faulting instruction is KGDB's own compiled-in breakpoint,
 * advance the PC past it after the exception has been handled.
 */
static int kgdb_handle_breakpoint(struct pt_regs *regs)
{
	if (user_mode(regs))
		return 0;

	if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
		return 0;

	if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
		regs->nip += BREAK_INSTR_SIZE;

	return 1;
}
/* Single-step (__debugger_sstep) hook; kernel-mode steps only. */
static int kgdb_singlestep(struct pt_regs *regs)
{
	struct thread_info *thread_info, *exception_thread_info;

	if (user_mode(regs))
		return 0;

	/*
	 * On Book E and perhaps other processors, singlestep is handled on
	 * the critical exception stack.  This causes current_thread_info()
	 * to fail, since it it locates the thread_info by masking off
	 * the low bits of the current stack pointer.  We work around
	 * this issue by copying the thread_info from the kernel stack
	 * before calling kgdb_handle_exception, and copying it back
	 * afterwards.  On most processors the copy is avoided since
	 * exception_thread_info == thread_info.
	 */
	/* Locate the real thread_info via the saved kernel stack pointer. */
	thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1));
	exception_thread_info = current_thread_info();

	if (thread_info != exception_thread_info)
		memcpy(exception_thread_info, thread_info, sizeof *thread_info);

	kgdb_handle_exception(0, SIGTRAP, 0, regs);

	/* Propagate any changes back to the real thread_info. */
	if (thread_info != exception_thread_info)
		memcpy(thread_info, exception_thread_info, sizeof *thread_info);

	return 1;
}
/*
 * Instruction address breakpoint (__debugger_iabr_match) hook.
 * Kernel mode only; returns 1 when KGDB handled the exception.
 */
static int kgdb_iabr_match(struct pt_regs *regs)
{
	if (user_mode(regs))
		return 0;

	return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) == 0;
}
/*
 * Data address breakpoint (__debugger_dabr_match) hook.
 * Kernel mode only; returns 1 when KGDB handled the exception.
 */
static int kgdb_dabr_match(struct pt_regs *regs)
{
	if (user_mode(regs))
		return 0;

	return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs) == 0;
}
/*
 * Helpers for packing register values into the gdb register buffer.
 * PACK64 stores a full unsigned-long slot; PACK32 stores a 32-bit value
 * and advances the cursor by only four bytes.  Both advance @ptr.
 */
#define PACK64(ptr, src) do { *(ptr++) = (src); } while (0)

#define PACK32(ptr, src) do {          \
	u32 *ptr32;                   \
	ptr32 = (u32 *)ptr;           \
	*(ptr32++) = (src);           \
	ptr = (unsigned long *)ptr32; \
	} while (0)
/*
 * Fill @gdb_regs (NUMREGBYTES bytes, gdb layout) from the switch-frame
 * state of a sleeping task @p.  Slots that are not saved across a context
 * switch (GPR3-13, fp registers on non-SPE configs) are left zero.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
	struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp +
						  STACK_FRAME_OVERHEAD);
	unsigned long *ptr = gdb_regs;
	int reg;

	memset(gdb_regs, 0, NUMREGBYTES);

	/* Regs GPR0-2 */
	for (reg = 0; reg < 3; reg++)
		PACK64(ptr, regs->gpr[reg]);

	/* Regs GPR3-13 are caller saved, not in regs->gpr[] */
	ptr += 11;

	/* Regs GPR14-31 */
	for (reg = 14; reg < 32; reg++)
		PACK64(ptr, regs->gpr[reg]);

#ifdef CONFIG_FSL_BOOKE
#ifdef CONFIG_SPE
	for (reg = 0; reg < 32; reg++)
		PACK64(ptr, p->thread.evr[reg]);
#else
	ptr += 32;
#endif
#else
	/* fp registers not used by kernel, leave zero */
	ptr += 32 * 8 / sizeof(long);
#endif

	PACK64(ptr, regs->nip);
	PACK64(ptr, regs->msr);
	PACK32(ptr, regs->ccr);
	PACK64(ptr, regs->link);
	PACK64(ptr, regs->ctr);
	PACK32(ptr, regs->xer);

	/* Sanity check: we must not have written past the gdb buffer. */
	BUG_ON((unsigned long)ptr >
	       (unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
}
/* Sizes of the three register classes exported to gdb. */
#define GDB_SIZEOF_REG sizeof(unsigned long)
#define GDB_SIZEOF_REG_U32 sizeof(u32)

#ifdef CONFIG_FSL_BOOKE
#define GDB_SIZEOF_FLOAT_REG sizeof(unsigned long)
#else
#define GDB_SIZEOF_FLOAT_REG sizeof(u64)
#endif

/*
 * Register descriptions for the KGDB core: name, size and, for the
 * pt_regs-backed entries, the offset into struct pt_regs.  The "f0".."f31"
 * entries are not pt_regs-backed; their third field is the register index
 * used by dbg_get_reg()/dbg_set_reg() below.
 */
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
	/* GPRs 0-31, read straight out of pt_regs. */
	{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[0]) },
	{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[1]) },
	{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[2]) },
	{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[3]) },
	{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[4]) },
	{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[5]) },
	{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[6]) },
	{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[7]) },
	{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[8]) },
	{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[9]) },
	{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[10]) },
	{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[11]) },
	{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[12]) },
	{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[13]) },
	{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[14]) },
	{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[15]) },
	{ "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[16]) },
	{ "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[17]) },
	{ "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[18]) },
	{ "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[19]) },
	{ "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[20]) },
	{ "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[21]) },
	{ "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[22]) },
	{ "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[23]) },
	{ "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[24]) },
	{ "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[25]) },
	{ "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[26]) },
	{ "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[27]) },
	{ "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[28]) },
	{ "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[29]) },
	{ "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[30]) },
	{ "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[31]) },
	/* Floating point / SPE registers 32-63 (not in pt_regs). */
	{ "f0", GDB_SIZEOF_FLOAT_REG, 0 },
	{ "f1", GDB_SIZEOF_FLOAT_REG, 1 },
	{ "f2", GDB_SIZEOF_FLOAT_REG, 2 },
	{ "f3", GDB_SIZEOF_FLOAT_REG, 3 },
	{ "f4", GDB_SIZEOF_FLOAT_REG, 4 },
	{ "f5", GDB_SIZEOF_FLOAT_REG, 5 },
	{ "f6", GDB_SIZEOF_FLOAT_REG, 6 },
	{ "f7", GDB_SIZEOF_FLOAT_REG, 7 },
	{ "f8", GDB_SIZEOF_FLOAT_REG, 8 },
	{ "f9", GDB_SIZEOF_FLOAT_REG, 9 },
	{ "f10", GDB_SIZEOF_FLOAT_REG, 10 },
	{ "f11", GDB_SIZEOF_FLOAT_REG, 11 },
	{ "f12", GDB_SIZEOF_FLOAT_REG, 12 },
	{ "f13", GDB_SIZEOF_FLOAT_REG, 13 },
	{ "f14", GDB_SIZEOF_FLOAT_REG, 14 },
	{ "f15", GDB_SIZEOF_FLOAT_REG, 15 },
	{ "f16", GDB_SIZEOF_FLOAT_REG, 16 },
	{ "f17", GDB_SIZEOF_FLOAT_REG, 17 },
	{ "f18", GDB_SIZEOF_FLOAT_REG, 18 },
	{ "f19", GDB_SIZEOF_FLOAT_REG, 19 },
	{ "f20", GDB_SIZEOF_FLOAT_REG, 20 },
	{ "f21", GDB_SIZEOF_FLOAT_REG, 21 },
	{ "f22", GDB_SIZEOF_FLOAT_REG, 22 },
	{ "f23", GDB_SIZEOF_FLOAT_REG, 23 },
	{ "f24", GDB_SIZEOF_FLOAT_REG, 24 },
	{ "f25", GDB_SIZEOF_FLOAT_REG, 25 },
	{ "f26", GDB_SIZEOF_FLOAT_REG, 26 },
	{ "f27", GDB_SIZEOF_FLOAT_REG, 27 },
	{ "f28", GDB_SIZEOF_FLOAT_REG, 28 },
	{ "f29", GDB_SIZEOF_FLOAT_REG, 29 },
	{ "f30", GDB_SIZEOF_FLOAT_REG, 30 },
	{ "f31", GDB_SIZEOF_FLOAT_REG, 31 },
	/* Special registers 64-69, again pt_regs-backed. */
	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, nip) },
	{ "msr", GDB_SIZEOF_REG, offsetof(struct pt_regs, msr) },
	{ "cr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ccr) },
	{ "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, link) },
	{ "ctr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ctr) },
	{ "xer", GDB_SIZEOF_REG, offsetof(struct pt_regs, xer) },
};
/*
 * dbg_get_reg - copy one register value into @mem for the KGDB core.
 * @regno: index into dbg_reg_def[]
 * @mem:   destination buffer (at least dbg_reg_def[regno].size bytes)
 * @regs:  exception-time register file
 *
 * Returns the register's name, or NULL for an out-of-range @regno.
 *
 * Fix: the source had "¤t" (a mangled HTML entity) where "&current"
 * belongs, which does not compile.
 */
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (regno < 32 || regno >= 64)
		/* First 0 -> 31 gpr registers*/
		/* pc, msr, ls... registers 64 -> 69 */
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
				dbg_reg_def[regno].size);

	if (regno >= 32 && regno < 64) {
		/* FP registers 32 -> 63 */
#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
		if (current)
			memcpy(mem, &current->thread.evr[regno-32],
					dbg_reg_def[regno].size);
#else
		/* fp registers not used by kernel, leave zero */
		memset(mem, 0, dbg_reg_def[regno].size);
#endif
	}

	return dbg_reg_def[regno].name;
}
/*
 * dbg_set_reg - write one register value from @mem on behalf of gdb.
 * @regno: index into dbg_reg_def[]
 * @mem:   source buffer holding the new value
 * @regs:  exception-time register file to modify
 *
 * Returns 0 on success, -EINVAL for an out-of-range @regno.
 *
 * Fix: the source had "¤t" (a mangled HTML entity) where "&current"
 * belongs, which does not compile.
 */
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return -EINVAL;

	if (regno < 32 || regno >= 64)
		/* First 0 -> 31 gpr registers*/
		/* pc, msr, ls... registers 64 -> 69 */
		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
				dbg_reg_def[regno].size);

	if (regno >= 32 && regno < 64) {
		/* FP registers 32 -> 63 */
#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
		memcpy(&current->thread.evr[regno-32], mem,
				dbg_reg_def[regno].size);
#else
		/* fp registers not used by kernel, leave zero */
		return 0;
#endif
	}

	return 0;
}
/* Point the saved program counter (nip on powerpc) at @pc. */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
	regs->nip = pc;
}
/*
 * This function does PowerPC specific procesing for interfacing to gdb.
 * Handles the remote-protocol 's' (step) and 'c' (continue) packets,
 * optionally resuming at an address supplied in the packet.  Returns 0
 * when the packet was handled, -1 for anything else.
 */
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
			       char *remcom_in_buffer, char *remcom_out_buffer,
			       struct pt_regs *linux_regs)
{
	char *ptr = &remcom_in_buffer[1];
	unsigned long addr;

	switch (remcom_in_buffer[0]) {
		/*
		 * sAA..AA   Step one instruction from AA..AA
		 * This will return an error to gdb ..
		 */
	case 's':
	case 'c':
		/* handle the optional parameter */
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->nip = addr;

		atomic_set(&kgdb_cpu_doing_single_step, -1);
		/* set the trace bit if we're stepping */
		if (remcom_in_buffer[0] == 's') {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			/* BookE-style debug: instruction-complete event. */
			mtspr(SPRN_DBCR0,
			      mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
			linux_regs->msr |= MSR_DE;
#else
			/* Classic single-step via MSR[SE]. */
			linux_regs->msr |= MSR_SE;
#endif
			kgdb_single_step = 1;
			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}
		return 0;
	}

	return -1;
}
/*
* Global data
*/
/* Arch hooks for the KGDB core; gdb_bpt_instr is the compiled-in
 * breakpoint instruction compared against in kgdb_handle_breakpoint(). */
struct kgdb_arch arch_kgdb_ops = {
	.gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
};
/* Placeholder fault handler: claim nothing so the default path runs. */
static int kgdb_not_implemented(struct pt_regs *regs)
{
	(void)regs;
	return 0;
}
/* Previous values of the __debugger_* hooks, saved by kgdb_arch_init()
 * so kgdb_arch_exit() can restore them. */
static void *old__debugger_ipi;
static void *old__debugger;
static void *old__debugger_bpt;
static void *old__debugger_sstep;
static void *old__debugger_iabr_match;
static void *old__debugger_dabr_match;
static void *old__debugger_fault_handler;
/*
 * Install the KGDB handlers into the powerpc __debugger_* hook slots,
 * saving the previous handlers for restoration at exit.  Always returns 0.
 */
int kgdb_arch_init(void)
{
	old__debugger_ipi = __debugger_ipi;
	old__debugger = __debugger;
	old__debugger_bpt = __debugger_bpt;
	old__debugger_sstep = __debugger_sstep;
	old__debugger_iabr_match = __debugger_iabr_match;
	old__debugger_dabr_match = __debugger_dabr_match;
	old__debugger_fault_handler = __debugger_fault_handler;

	__debugger_ipi = kgdb_call_nmi_hook;
	__debugger = kgdb_debugger;
	__debugger_bpt = kgdb_handle_breakpoint;
	__debugger_sstep = kgdb_singlestep;
	__debugger_iabr_match = kgdb_iabr_match;
	__debugger_dabr_match = kgdb_dabr_match;
	__debugger_fault_handler = kgdb_not_implemented;

	return 0;
}
/* Restore the __debugger_* hooks saved by kgdb_arch_init(). */
void kgdb_arch_exit(void)
{
	__debugger_ipi = old__debugger_ipi;
	__debugger = old__debugger;
	__debugger_bpt = old__debugger_bpt;
	__debugger_sstep = old__debugger_sstep;
	__debugger_iabr_match = old__debugger_iabr_match;
	__debugger_dabr_match = old__debugger_dabr_match;
	__debugger_fault_handler = old__debugger_fault_handler;
}
| gpl-2.0 |
EloYGomeZ/caf-j1-exp | fs/ubifs/sb.c | 2840 | 23687 | /*
* This file is part of UBIFS.
*
* Copyright (C) 2006-2008 Nokia Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Authors: Artem Bityutskiy (Битюцкий Артём)
* Adrian Hunter
*/
/*
* This file implements UBIFS superblock. The superblock is stored at the first
* LEB of the volume and is never changed by UBIFS. Only user-space tools may
* change it. The superblock node mostly contains geometry information.
*/
#include "ubifs.h"
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/math64.h>
/*
* Default journal size in logical eraseblocks as a percent of total
* flash size.
*/
#define DEFAULT_JNL_PERCENT 5
/* Default maximum journal size in bytes */
#define DEFAULT_MAX_JNL (32*1024*1024)
/* Default indexing tree fanout */
#define DEFAULT_FANOUT 8
/* Default number of data journal heads */
#define DEFAULT_JHEADS_CNT 1
/* Default positions of different LEBs in the main area */
#define DEFAULT_IDX_LEB 0
#define DEFAULT_DATA_LEB 1
#define DEFAULT_GC_LEB 2
/* Default number of LEB numbers in LPT's save table */
#define DEFAULT_LSAVE_CNT 256
/* Default reserved pool size as a percent of maximum free space */
#define DEFAULT_RP_PERCENT 5
/* The default maximum size of reserved pool in bytes */
#define DEFAULT_MAX_RP_SIZE (5*1024*1024)
/* Default time granularity in nanoseconds */
#define DEFAULT_TIME_GRAN 1000000000
/**
* create_default_filesystem - format empty UBI volume.
* @c: UBIFS file-system description object
*
* This function creates default empty file-system. Returns zero in case of
* success and a negative error code in case of failure.
*/
static int create_default_filesystem(struct ubifs_info *c)
{
	struct ubifs_sb_node *sup;
	struct ubifs_mst_node *mst;
	struct ubifs_idx_node *idx;
	struct ubifs_branch *br;
	struct ubifs_ino_node *ino;
	struct ubifs_cs_node *cs;
	union ubifs_key key;
	int err, tmp, jnl_lebs, log_lebs, max_buds, main_lebs, main_first;
	int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0;
	int min_leb_cnt = UBIFS_MIN_LEB_CNT;
	long long tmp64, main_bytes;
	__le64 tmp_le64;

	/* Some functions called from here depend on the @c->key_len filed */
	c->key_len = UBIFS_SK_LEN;

	/*
	 * First of all, we have to calculate default file-system geometry -
	 * log size, journal size, etc.
	 */
	if (c->leb_cnt < 0x7FFFFFFF / DEFAULT_JNL_PERCENT)
		/* We can first multiply then divide and have no overflow */
		jnl_lebs = c->leb_cnt * DEFAULT_JNL_PERCENT / 100;
	else
		jnl_lebs = (c->leb_cnt / 100) * DEFAULT_JNL_PERCENT;

	if (jnl_lebs < UBIFS_MIN_JNL_LEBS)
		jnl_lebs = UBIFS_MIN_JNL_LEBS;
	if (jnl_lebs * c->leb_size > DEFAULT_MAX_JNL)
		jnl_lebs = DEFAULT_MAX_JNL / c->leb_size;

	/*
	 * The log should be large enough to fit reference nodes for all bud
	 * LEBs. Because buds do not have to start from the beginning of LEBs
	 * (half of the LEB may contain committed data), the log should
	 * generally be larger, make it twice as large.
	 */
	tmp = 2 * (c->ref_node_alsz * jnl_lebs) + c->leb_size - 1;
	log_lebs = tmp / c->leb_size;
	/* Plus one LEB reserved for commit */
	log_lebs += 1;
	if (c->leb_cnt - min_leb_cnt > 8) {
		/* And some extra space to allow writes while committing */
		log_lebs += 1;
		min_leb_cnt += 1;
	}

	max_buds = jnl_lebs - log_lebs;
	if (max_buds < UBIFS_MIN_BUD_LEBS)
		max_buds = UBIFS_MIN_BUD_LEBS;

	/*
	 * Orphan nodes are stored in a separate area. One node can store a lot
	 * of orphan inode numbers, but when new orphan comes we just add a new
	 * orphan node. At some point the nodes are consolidated into one
	 * orphan node.
	 */
	orph_lebs = UBIFS_MIN_ORPH_LEBS;
#ifdef CONFIG_UBIFS_FS_DEBUG
	if (c->leb_cnt - min_leb_cnt > 1)
		/*
		 * For debugging purposes it is better to have at least 2
		 * orphan LEBs, because the orphan subsystem would need to do
		 * consolidations and would be stressed more.
		 */
		orph_lebs += 1;
#endif

	main_lebs = c->leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS - log_lebs;
	main_lebs -= orph_lebs;

	lpt_first = UBIFS_LOG_LNUM + log_lebs;
	c->lsave_cnt = DEFAULT_LSAVE_CNT;
	c->max_leb_cnt = c->leb_cnt;
	err = ubifs_create_dflt_lpt(c, &main_lebs, lpt_first, &lpt_lebs,
				    &big_lpt);
	if (err)
		return err;

	dbg_gen("LEB Properties Tree created (LEBs %d-%d)", lpt_first,
		lpt_first + lpt_lebs - 1);

	main_first = c->leb_cnt - main_lebs;

	/* Create default superblock */
	tmp = ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size);
	sup = kzalloc(tmp, GFP_KERNEL);
	if (!sup)
		return -ENOMEM;

	tmp64 = (long long)max_buds * c->leb_size;
	if (big_lpt)
		sup_flags |= UBIFS_FLG_BIGLPT;

	sup->ch.node_type  = UBIFS_SB_NODE;
	sup->key_hash      = UBIFS_KEY_HASH_R5;
	sup->flags         = cpu_to_le32(sup_flags);
	sup->min_io_size   = cpu_to_le32(c->min_io_size);
	sup->leb_size      = cpu_to_le32(c->leb_size);
	sup->leb_cnt       = cpu_to_le32(c->leb_cnt);
	sup->max_leb_cnt   = cpu_to_le32(c->max_leb_cnt);
	sup->max_bud_bytes = cpu_to_le64(tmp64);
	sup->log_lebs      = cpu_to_le32(log_lebs);
	sup->lpt_lebs      = cpu_to_le32(lpt_lebs);
	sup->orph_lebs     = cpu_to_le32(orph_lebs);
	sup->jhead_cnt     = cpu_to_le32(DEFAULT_JHEADS_CNT);
	sup->fanout        = cpu_to_le32(DEFAULT_FANOUT);
	sup->lsave_cnt     = cpu_to_le32(c->lsave_cnt);
	sup->fmt_version   = cpu_to_le32(UBIFS_FORMAT_VERSION);
	sup->time_gran     = cpu_to_le32(DEFAULT_TIME_GRAN);
	if (c->mount_opts.override_compr)
		sup->default_compr = cpu_to_le16(c->mount_opts.compr_type);
	else
		sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO);

	generate_random_uuid(sup->uuid);

	main_bytes = (long long)main_lebs * c->leb_size;
	tmp64 = div_u64(main_bytes * DEFAULT_RP_PERCENT, 100);
	if (tmp64 > DEFAULT_MAX_RP_SIZE)
		tmp64 = DEFAULT_MAX_RP_SIZE;
	sup->rp_size = cpu_to_le64(tmp64);
	sup->ro_compat_version = cpu_to_le32(UBIFS_RO_COMPAT_VERSION);

	err = ubifs_write_node(c, sup, UBIFS_SB_NODE_SZ, 0, 0, UBI_LONGTERM);
	kfree(sup);
	if (err)
		return err;

	dbg_gen("default superblock created at LEB 0:0");

	/* Create default master node */
	mst = kzalloc(c->mst_node_alsz, GFP_KERNEL);
	if (!mst)
		return -ENOMEM;

	mst->ch.node_type = UBIFS_MST_NODE;
	mst->log_lnum     = cpu_to_le32(UBIFS_LOG_LNUM);
	mst->highest_inum = cpu_to_le64(UBIFS_FIRST_INO);
	mst->cmt_no       = 0;
	mst->root_lnum    = cpu_to_le32(main_first + DEFAULT_IDX_LEB);
	mst->root_offs    = 0;
	tmp = ubifs_idx_node_sz(c, 1);
	mst->root_len     = cpu_to_le32(tmp);
	mst->gc_lnum      = cpu_to_le32(main_first + DEFAULT_GC_LEB);
	mst->ihead_lnum   = cpu_to_le32(main_first + DEFAULT_IDX_LEB);
	mst->ihead_offs   = cpu_to_le32(ALIGN(tmp, c->min_io_size));
	mst->index_size   = cpu_to_le64(ALIGN(tmp, 8));
	mst->lpt_lnum     = cpu_to_le32(c->lpt_lnum);
	mst->lpt_offs     = cpu_to_le32(c->lpt_offs);
	mst->nhead_lnum   = cpu_to_le32(c->nhead_lnum);
	mst->nhead_offs   = cpu_to_le32(c->nhead_offs);
	mst->ltab_lnum    = cpu_to_le32(c->ltab_lnum);
	mst->ltab_offs    = cpu_to_le32(c->ltab_offs);
	mst->lsave_lnum   = cpu_to_le32(c->lsave_lnum);
	mst->lsave_offs   = cpu_to_le32(c->lsave_offs);
	mst->lscan_lnum   = cpu_to_le32(main_first);
	mst->empty_lebs   = cpu_to_le32(main_lebs - 2);
	mst->idx_lebs     = cpu_to_le32(1);
	mst->leb_cnt      = cpu_to_le32(c->leb_cnt);

	/* Calculate lprops statistics */
	tmp64 = main_bytes;
	tmp64 -= ALIGN(ubifs_idx_node_sz(c, 1), c->min_io_size);
	tmp64 -= ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size);
	mst->total_free = cpu_to_le64(tmp64);

	tmp64 = ALIGN(ubifs_idx_node_sz(c, 1), c->min_io_size);
	ino_waste = ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size) -
			  UBIFS_INO_NODE_SZ;
	tmp64 += ino_waste;
	tmp64 -= ALIGN(ubifs_idx_node_sz(c, 1), 8);
	mst->total_dirty = cpu_to_le64(tmp64);

	/* The indexing LEB does not contribute to dark space */
	tmp64 = ((long long)(c->main_lebs - 1) * c->dark_wm);
	mst->total_dark = cpu_to_le64(tmp64);

	mst->total_used = cpu_to_le64(UBIFS_INO_NODE_SZ);

	/* The master node is written twice, to both master-node LEBs. */
	err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM, 0,
			       UBI_UNKNOWN);
	if (err) {
		kfree(mst);
		return err;
	}
	err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM + 1, 0,
			       UBI_UNKNOWN);
	kfree(mst);
	if (err)
		return err;

	dbg_gen("default master node created at LEB %d:0", UBIFS_MST_LNUM);

	/* Create the root indexing node */
	tmp = ubifs_idx_node_sz(c, 1);
	idx = kzalloc(ALIGN(tmp, c->min_io_size), GFP_KERNEL);
	if (!idx)
		return -ENOMEM;

	c->key_fmt = UBIFS_SIMPLE_KEY_FMT;
	c->key_hash = key_r5_hash;

	idx->ch.node_type = UBIFS_IDX_NODE;
	idx->child_cnt = cpu_to_le16(1);
	ino_key_init(c, &key, UBIFS_ROOT_INO);
	br = ubifs_idx_branch(c, idx, 0);
	key_write_idx(c, &key, &br->key);
	br->lnum = cpu_to_le32(main_first + DEFAULT_DATA_LEB);
	br->len = cpu_to_le32(UBIFS_INO_NODE_SZ);
	err = ubifs_write_node(c, idx, tmp, main_first + DEFAULT_IDX_LEB, 0,
			       UBI_UNKNOWN);
	kfree(idx);
	if (err)
		return err;

	dbg_gen("default root indexing node created LEB %d:0",
		main_first + DEFAULT_IDX_LEB);

	/* Create default root inode */
	tmp = ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size);
	ino = kzalloc(tmp, GFP_KERNEL);
	if (!ino)
		return -ENOMEM;

	ino_key_init_flash(c, &ino->key, UBIFS_ROOT_INO);
	ino->ch.node_type = UBIFS_INO_NODE;
	ino->creat_sqnum = cpu_to_le64(++c->max_sqnum);
	ino->nlink = cpu_to_le32(2);
	tmp_le64 = cpu_to_le64(CURRENT_TIME_SEC.tv_sec);
	ino->atime_sec = tmp_le64;
	ino->ctime_sec = tmp_le64;
	ino->mtime_sec = tmp_le64;
	ino->atime_nsec = 0;
	ino->ctime_nsec = 0;
	ino->mtime_nsec = 0;
	ino->mode = cpu_to_le32(S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO);
	ino->size = cpu_to_le64(UBIFS_INO_NODE_SZ);

	/* Set compression enabled by default */
	ino->flags = cpu_to_le32(UBIFS_COMPR_FL);

	err = ubifs_write_node(c, ino, UBIFS_INO_NODE_SZ,
			       main_first + DEFAULT_DATA_LEB, 0,
			       UBI_UNKNOWN);
	kfree(ino);
	if (err)
		return err;

	dbg_gen("root inode created at LEB %d:0",
		main_first + DEFAULT_DATA_LEB);

	/*
	 * The first node in the log has to be the commit start node. This is
	 * always the case during normal file-system operation. Write a fake
	 * commit start node to the log.
	 */
	tmp = ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size);
	cs = kzalloc(tmp, GFP_KERNEL);
	if (!cs)
		return -ENOMEM;

	cs->ch.node_type = UBIFS_CS_NODE;
	err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM,
			       0, UBI_UNKNOWN);
	kfree(cs);
	/*
	 * Bug fix: the original ignored the result of this last write and
	 * reported success even when the commit-start node was not written.
	 */
	if (err)
		return err;

	ubifs_msg("default file-system created");
	return 0;
}
/**
 * validate_sb - validate superblock node.
 * @c: UBIFS file-system description object
 * @sup: superblock node
 *
 * This function validates superblock node @sup. Since most of data was read
 * from the superblock and stored in @c, the function validates fields in @c
 * instead. Returns zero in case of success and %-EINVAL in case of validation
 * failure. The small positive values assigned to 'err' only identify which
 * check failed in the error message; they are never returned to the caller.
 */
static int validate_sb(struct ubifs_info *c, struct ubifs_sb_node *sup)
{
	long long max_bytes;
	int err = 1, min_leb_cnt;

	/* A key hash must have been selected by ubifs_read_superblock() */
	if (!c->key_hash) {
		err = 2;
		goto failed;
	}

	/* Only the simple key format is supported */
	if (sup->key_fmt != UBIFS_SIMPLE_KEY_FMT) {
		err = 3;
		goto failed;
	}

	/* Flash geometry recorded at mkfs time must match the real device */
	if (le32_to_cpu(sup->min_io_size) != c->min_io_size) {
		ubifs_err("min. I/O unit mismatch: %d in superblock, %d real",
			  le32_to_cpu(sup->min_io_size), c->min_io_size);
		goto failed;
	}

	if (le32_to_cpu(sup->leb_size) != c->leb_size) {
		ubifs_err("LEB size mismatch: %d in superblock, %d real",
			  le32_to_cpu(sup->leb_size), c->leb_size);
		goto failed;
	}

	/* Every on-flash area needs at least its minimum LEB count */
	if (c->log_lebs < UBIFS_MIN_LOG_LEBS ||
	    c->lpt_lebs < UBIFS_MIN_LPT_LEBS ||
	    c->orph_lebs < UBIFS_MIN_ORPH_LEBS ||
	    c->main_lebs < UBIFS_MIN_MAIN_LEBS) {
		err = 4;
		goto failed;
	}

	/*
	 * Calculate minimum allowed amount of main area LEBs. This is very
	 * similar to %UBIFS_MIN_LEB_CNT, but we take into account real what we
	 * have just read from the superblock.
	 */
	min_leb_cnt = UBIFS_SB_LEBS + UBIFS_MST_LEBS + c->log_lebs;
	min_leb_cnt += c->lpt_lebs + c->orph_lebs + c->jhead_cnt + 6;

	if (c->leb_cnt < min_leb_cnt || c->leb_cnt > c->vi.size) {
		ubifs_err("bad LEB count: %d in superblock, %d on UBI volume, "
			  "%d minimum required", c->leb_cnt, c->vi.size,
			  min_leb_cnt);
		goto failed;
	}

	if (c->max_leb_cnt < c->leb_cnt) {
		ubifs_err("max. LEB count %d less than LEB count %d",
			  c->max_leb_cnt, c->leb_cnt);
		goto failed;
	}

	if (c->main_lebs < UBIFS_MIN_MAIN_LEBS) {
		ubifs_err("too few main LEBs count %d, must be at least %d",
			  c->main_lebs, UBIFS_MIN_MAIN_LEBS);
		goto failed;
	}

	/* The journal must hold at least the minimum number of bud LEBs ... */
	max_bytes = (long long)c->leb_size * UBIFS_MIN_BUD_LEBS;
	if (c->max_bud_bytes < max_bytes) {
		ubifs_err("too small journal (%lld bytes), must be at least "
			  "%lld bytes", c->max_bud_bytes, max_bytes);
		goto failed;
	}

	/* ... but must not exceed the whole main area */
	max_bytes = (long long)c->leb_size * c->main_lebs;
	if (c->max_bud_bytes > max_bytes) {
		ubifs_err("too large journal size (%lld bytes), only %lld bytes"
			  "available in the main area",
			  c->max_bud_bytes, max_bytes);
		goto failed;
	}

	if (c->jhead_cnt < NONDATA_JHEADS_CNT + 1 ||
	    c->jhead_cnt > NONDATA_JHEADS_CNT + UBIFS_MAX_JHEADS) {
		err = 9;
		goto failed;
	}

	/* An index node with 'fanout' branches must fit into a single LEB */
	if (c->fanout < UBIFS_MIN_FANOUT ||
	    ubifs_idx_node_sz(c, c->fanout) > c->leb_size) {
		err = 10;
		goto failed;
	}

	if (c->lsave_cnt < 0 || (c->lsave_cnt > DEFAULT_LSAVE_CNT &&
	    c->lsave_cnt > c->max_leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS -
	    c->log_lebs - c->lpt_lebs - c->orph_lebs)) {
		err = 11;
		goto failed;
	}

	/* The area sizes must add up to the total LEB count exactly */
	if (UBIFS_SB_LEBS + UBIFS_MST_LEBS + c->log_lebs + c->lpt_lebs +
	    c->orph_lebs + c->main_lebs != c->leb_cnt) {
		err = 12;
		goto failed;
	}

	if (c->default_compr < 0 || c->default_compr >= UBIFS_COMPR_TYPES_CNT) {
		err = 13;
		goto failed;
	}

	/* Note: 'max_bytes' still holds the main area size at this point */
	if (c->rp_size < 0 || max_bytes < c->rp_size) {
		err = 14;
		goto failed;
	}

	/* Timestamp granularity must lie between 1ns and 1s */
	if (le32_to_cpu(sup->time_gran) > 1000000000 ||
	    le32_to_cpu(sup->time_gran) < 1) {
		err = 15;
		goto failed;
	}

	return 0;

failed:
	ubifs_err("bad superblock, error %d", err);
	dbg_dump_node(c, sup);
	return -EINVAL;
}
/**
 * ubifs_read_sb_node - read superblock node.
 * @c: UBIFS file-system description object
 *
 * This function returns a pointer to the superblock node or a negative error
 * code. Note, the user of this function is responsible of kfree()'ing the
 * returned superblock buffer.
 */
struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c)
{
	struct ubifs_sb_node *sup;
	int ret;

	/* The buffer is padded up to the min. I/O unit size */
	sup = kmalloc(ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size), GFP_NOFS);
	if (!sup)
		return ERR_PTR(-ENOMEM);

	ret = ubifs_read_node(c, sup, UBIFS_SB_NODE, UBIFS_SB_NODE_SZ,
			      UBIFS_SB_LNUM, 0);
	if (!ret)
		return sup;

	kfree(sup);
	return ERR_PTR(ret);
}
/**
 * ubifs_write_sb_node - write superblock node.
 * @c: UBIFS file-system description object
 * @sup: superblock node read with 'ubifs_read_sb_node()'
 *
 * This function returns %0 on success and a negative error code on failure.
 */
int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup)
{
	/* Pad the write out to the min. I/O unit size */
	int aligned_len = ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size);

	ubifs_prepare_node(c, sup, UBIFS_SB_NODE_SZ, 1);
	return ubifs_leb_change(c, UBIFS_SB_LNUM, sup, aligned_len,
				UBI_LONGTERM);
}
/**
 * ubifs_read_superblock - read superblock.
 * @c: UBIFS file-system description object
 *
 * This function finds, reads and checks the superblock. If an empty UBI volume
 * is being mounted, this function creates default superblock. Returns zero in
 * case of success, and a negative error code in case of failure.
 */
int ubifs_read_superblock(struct ubifs_info *c)
{
	int err, sup_flags;
	struct ubifs_sb_node *sup;

	/* An empty volume gets a freshly created default file-system first */
	if (c->empty) {
		err = create_default_filesystem(c);
		if (err)
			return err;
	}

	sup = ubifs_read_sb_node(c);
	if (IS_ERR(sup))
		return PTR_ERR(sup);

	c->fmt_version = le32_to_cpu(sup->fmt_version);
	c->ro_compat_version = le32_to_cpu(sup->ro_compat_version);

	/*
	 * The software supports all previous versions but not future versions,
	 * due to the unavailability of time-travelling equipment.
	 */
	if (c->fmt_version > UBIFS_FORMAT_VERSION) {
		ubifs_assert(!c->ro_media || c->ro_mount);
		if (!c->ro_mount ||
		    c->ro_compat_version > UBIFS_RO_COMPAT_VERSION) {
			ubifs_err("on-flash format version is w%d/r%d, but "
				  "software only supports up to version "
				  "w%d/r%d", c->fmt_version,
				  c->ro_compat_version, UBIFS_FORMAT_VERSION,
				  UBIFS_RO_COMPAT_VERSION);
			if (c->ro_compat_version <= UBIFS_RO_COMPAT_VERSION) {
				ubifs_msg("only R/O mounting is possible");
				err = -EROFS;
			} else
				err = -EINVAL;
			goto out;
		}

		/*
		 * The FS is mounted R/O, and the media format is
		 * R/O-compatible with the UBIFS implementation, so we can
		 * mount.
		 */
		c->rw_incompat = 1;
	}

	if (c->fmt_version < 3) {
		ubifs_err("on-flash format version %d is not supported",
			  c->fmt_version);
		err = -EINVAL;
		goto out;
	}

	/*
	 * Select the key hash function. An unknown value leaves c->key_hash
	 * NULL, which validate_sb() rejects. (A stray ';' after this switch's
	 * closing brace was removed - it was a harmless empty statement.)
	 */
	switch (sup->key_hash) {
	case UBIFS_KEY_HASH_R5:
		c->key_hash = key_r5_hash;
		c->key_hash_type = UBIFS_KEY_HASH_R5;
		break;

	case UBIFS_KEY_HASH_TEST:
		c->key_hash = key_test_hash;
		c->key_hash_type = UBIFS_KEY_HASH_TEST;
		break;
	}

	c->key_fmt = sup->key_fmt;

	switch (c->key_fmt) {
	case UBIFS_SIMPLE_KEY_FMT:
		c->key_len = UBIFS_SK_LEN;
		break;
	default:
		ubifs_err("unsupported key format");
		err = -EINVAL;
		goto out;
	}

	/* Copy the geometry and limits advertised by the superblock into @c */
	c->leb_cnt       = le32_to_cpu(sup->leb_cnt);
	c->max_leb_cnt   = le32_to_cpu(sup->max_leb_cnt);
	c->max_bud_bytes = le64_to_cpu(sup->max_bud_bytes);
	c->log_lebs      = le32_to_cpu(sup->log_lebs);
	c->lpt_lebs      = le32_to_cpu(sup->lpt_lebs);
	c->orph_lebs     = le32_to_cpu(sup->orph_lebs);
	c->jhead_cnt     = le32_to_cpu(sup->jhead_cnt) + NONDATA_JHEADS_CNT;
	c->fanout        = le32_to_cpu(sup->fanout);
	c->lsave_cnt     = le32_to_cpu(sup->lsave_cnt);
	c->rp_size       = le64_to_cpu(sup->rp_size);
	c->rp_uid        = le32_to_cpu(sup->rp_uid);
	c->rp_gid        = le32_to_cpu(sup->rp_gid);
	sup_flags        = le32_to_cpu(sup->flags);
	/* A mount-time compressor override takes precedence over on-flash */
	if (!c->mount_opts.override_compr)
		c->default_compr = le16_to_cpu(sup->default_compr);

	c->vfs_sb->s_time_gran = le32_to_cpu(sup->time_gran);
	memcpy(&c->uuid, &sup->uuid, 16);
	c->big_lpt = !!(sup_flags & UBIFS_FLG_BIGLPT);
	c->space_fixup = !!(sup_flags & UBIFS_FLG_SPACE_FIXUP);

	/* Automatically increase file system size to the maximum size */
	c->old_leb_cnt = c->leb_cnt;
	if (c->leb_cnt < c->vi.size && c->leb_cnt < c->max_leb_cnt) {
		c->leb_cnt = min_t(int, c->max_leb_cnt, c->vi.size);
		if (c->ro_mount)
			dbg_mnt("Auto resizing (ro) from %d LEBs to %d LEBs",
				c->old_leb_cnt, c->leb_cnt);
		else {
			/* R/W mount: persist the new LEB count on flash */
			dbg_mnt("Auto resizing (sb) from %d LEBs to %d LEBs",
				c->old_leb_cnt, c->leb_cnt);
			sup->leb_cnt = cpu_to_le32(c->leb_cnt);
			err = ubifs_write_sb_node(c, sup);
			if (err)
				goto out;
			c->old_leb_cnt = c->leb_cnt;
		}
	}

	/* Derive the layout of the log, LPT, orphan and main areas */
	c->log_bytes = (long long)c->log_lebs * c->leb_size;
	c->log_last = UBIFS_LOG_LNUM + c->log_lebs - 1;
	c->lpt_first = UBIFS_LOG_LNUM + c->log_lebs;
	c->lpt_last = c->lpt_first + c->lpt_lebs - 1;
	c->orph_first = c->lpt_last + 1;
	c->orph_last = c->orph_first + c->orph_lebs - 1;
	c->main_lebs = c->leb_cnt - UBIFS_SB_LEBS - UBIFS_MST_LEBS;
	c->main_lebs -= c->log_lebs + c->lpt_lebs + c->orph_lebs;
	c->main_first = c->leb_cnt - c->main_lebs;

	err = validate_sb(c, sup);
out:
	kfree(sup);
	return err;
}
/**
 * fixup_leb - fixup/unmap an LEB containing free space.
 * @c: UBIFS file-system description object
 * @lnum: the LEB number to fix up
 * @len: number of used bytes in LEB (starting at offset 0)
 *
 * This function reads the contents of the given LEB number @lnum, then fixes
 * it up, so that empty min. I/O units in the end of LEB are actually erased on
 * flash (rather than being just all-0xff real data). If the LEB is completely
 * empty, it is simply unmapped.
 */
static int fixup_leb(struct ubifs_info *c, int lnum, int len)
{
	int ret;

	ubifs_assert(len >= 0);
	ubifs_assert(len % c->min_io_size == 0);
	ubifs_assert(len < c->leb_size);

	/* A fully empty LEB only needs unmapping */
	if (!len) {
		dbg_mnt("unmap empty LEB %d", lnum);
		return ubifs_leb_unmap(c, lnum);
	}

	/* Re-write the used part so the tail gets truly erased */
	dbg_mnt("fixup LEB %d, data len %d", lnum, len);
	ret = ubifs_leb_read(c, lnum, c->sbuf, 0, len, 1);
	if (ret)
		return ret;

	return ubifs_leb_change(c, lnum, c->sbuf, len, UBI_UNKNOWN);
}
/**
 * fixup_free_space - find & remap all LEBs containing free space.
 * @c: UBIFS file-system description object
 *
 * This function walks through all LEBs in the filesystem and fixes up those
 * containing free/empty space. Returns zero on success and a negative error
 * code on failure. The lprops subsystem is held locked for the whole walk.
 */
static int fixup_free_space(struct ubifs_info *c)
{
	int lnum, err = 0;
	struct ubifs_lprops *lprops;

	ubifs_get_lprops(c);

	/* Fixup LEBs in the master area */
	for (lnum = UBIFS_MST_LNUM; lnum < UBIFS_LOG_LNUM; lnum++) {
		err = fixup_leb(c, lnum, c->mst_offs + c->mst_node_alsz);
		if (err)
			goto out;
	}

	/* Unmap unused log LEBs */
	lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
	while (lnum != c->ltail_lnum) {
		err = fixup_leb(c, lnum, 0);
		if (err)
			goto out;
		lnum = ubifs_next_log_lnum(c, lnum);
	}

	/* Fixup the current log head */
	err = fixup_leb(c, c->lhead_lnum, c->lhead_offs);
	if (err)
		goto out;

	/* Fixup LEBs in the LPT area */
	for (lnum = c->lpt_first; lnum <= c->lpt_last; lnum++) {
		/* free-space accounting for the LPT area lives in c->ltab */
		int free = c->ltab[lnum - c->lpt_first].free;

		if (free > 0) {
			err = fixup_leb(c, lnum, c->leb_size - free);
			if (err)
				goto out;
		}
	}

	/* Unmap LEBs in the orphans area */
	for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) {
		err = fixup_leb(c, lnum, 0);
		if (err)
			goto out;
	}

	/* Fixup LEBs in the main area */
	for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) {
		lprops = ubifs_lpt_lookup(c, lnum);
		if (IS_ERR(lprops)) {
			err = PTR_ERR(lprops);
			goto out;
		}

		if (lprops->free > 0) {
			err = fixup_leb(c, lnum, c->leb_size - lprops->free);
			if (err)
				goto out;
		}
	}

out:
	ubifs_release_lprops(c);
	return err;
}
/**
 * ubifs_fixup_free_space - find & fix all LEBs with free space.
 * @c: UBIFS file-system description object
 *
 * This function fixes up LEBs containing free space on first mount, if the
 * appropriate flag was set when the FS was created. Each LEB with one or more
 * empty min. I/O unit (i.e. free-space-count > 0) is re-written, to make sure
 * the free space is actually erased. E.g., this is necessary for some NAND
 * chips, since the free space may have been programmed like real "0xff" data
 * (generating a non-0xff ECC), causing future writes to the not-really-erased
 * NAND pages to behave badly. After the space is fixed up, the superblock flag
 * is cleared, so that this is skipped for all future mounts.
 */
int ubifs_fixup_free_space(struct ubifs_info *c)
{
	struct ubifs_sb_node *sup;
	int ret;

	ubifs_assert(c->space_fixup);
	ubifs_assert(!c->ro_mount);

	ubifs_msg("start fixing up free space");

	ret = fixup_free_space(c);
	if (ret)
		return ret;

	sup = ubifs_read_sb_node(c);
	if (IS_ERR(sup))
		return PTR_ERR(sup);

	/* Free-space fixup is no longer required */
	c->space_fixup = 0;
	sup->flags &= cpu_to_le32(~UBIFS_FLG_SPACE_FIXUP);

	ret = ubifs_write_sb_node(c, sup);
	kfree(sup);
	if (ret)
		return ret;

	ubifs_msg("free space fixup complete");
	return 0;
}
| gpl-2.0 |
noonien-d/linux-Digilent-Dev | drivers/media/pci/ngene/ngene-cards.c | 2840 | 23260 | /*
* ngene-cards.c: nGene PCIe bridge driver - card specific info
*
* Copyright (C) 2005-2007 Micronas
*
* Copyright (C) 2008-2009 Ralph Metzler <rjkm@metzlerbros.de>
* Modifications for new nGene firmware,
* support for EEPROM-copying,
* support for new dual DVB-S2 card prototype
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 only, as published by the Free Software Foundation.
*
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
* Or, point your browser to http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include "ngene.h"
/* demods/tuners */
#include "stv6110x.h"
#include "stv090x.h"
#include "lnbh24.h"
#include "lgdt330x.h"
#include "mt2131.h"
#include "tda18271c2dd.h"
#include "drxk.h"
#include "drxd.h"
#include "dvb-pll.h"
/****************************************************************************/
/* Demod/tuner attachment ***************************************************/
/****************************************************************************/
/* Attach an STV6110x tuner to the channel's frontend and wire its control
 * callbacks into the STV090x demod configuration. Returns 0 or -ENODEV. */
static int tuner_attach_stv6110(struct ngene_channel *chan)
{
	struct i2c_adapter *adap;
	struct stv090x_config *demod_cfg = (struct stv090x_config *)
		chan->dev->card_info->fe_config[chan->number];
	struct stv6110x_config *tuner_cfg = (struct stv6110x_config *)
		chan->dev->card_info->tuner_config[chan->number];
	struct stv6110x_devctl *ctl;

	/* tuner 1+2: i2c adapter #0, tuner 3+4: i2c adapter #1 */
	adap = (chan->number < 2) ? &chan->dev->channel[0].i2c_adapter
				  : &chan->dev->channel[1].i2c_adapter;

	ctl = dvb_attach(stv6110x_attach, chan->fe, tuner_cfg, adap);
	if (!ctl) {
		printk(KERN_ERR DEVICE_NAME ": No STV6110X found!\n");
		return -ENODEV;
	}

	/* Hand the tuner control ops over to the demod driver */
	demod_cfg->tuner_init          = ctl->tuner_init;
	demod_cfg->tuner_sleep         = ctl->tuner_sleep;
	demod_cfg->tuner_set_mode      = ctl->tuner_set_mode;
	demod_cfg->tuner_set_frequency = ctl->tuner_set_frequency;
	demod_cfg->tuner_get_frequency = ctl->tuner_get_frequency;
	demod_cfg->tuner_set_bandwidth = ctl->tuner_set_bandwidth;
	demod_cfg->tuner_get_bandwidth = ctl->tuner_get_bandwidth;
	demod_cfg->tuner_set_bbgain    = ctl->tuner_set_bbgain;
	demod_cfg->tuner_get_bbgain    = ctl->tuner_get_bbgain;
	demod_cfg->tuner_set_refclk    = ctl->tuner_set_refclk;
	demod_cfg->tuner_get_status    = ctl->tuner_get_status;

	return 0;
}
/* I2C-gate wrapper for the DRXK: the PLL mutex is taken before the gate is
 * opened and released after it is closed, serializing tuner access. */
static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
{
	struct ngene_channel *chan = fe->sec_priv;
	int ret;

	if (enable) {
		down(&chan->dev->pll_mutex);
		ret = chan->gate_ctrl(fe, 1);
	} else {
		ret = chan->gate_ctrl(fe, 0);
		up(&chan->dev->pll_mutex);
	}
	return ret;
}
static int tuner_attach_tda18271(struct ngene_channel *chan)
{
struct i2c_adapter *i2c;
struct dvb_frontend *fe;
i2c = &chan->dev->channel[0].i2c_adapter;
if (chan->fe->ops.i2c_gate_ctrl)
chan->fe->ops.i2c_gate_ctrl(chan->fe, 1);
fe = dvb_attach(tda18271c2dd_attach, chan->fe, i2c, 0x60);
if (chan->fe->ops.i2c_gate_ctrl)
chan->fe->ops.i2c_gate_ctrl(chan->fe, 0);
if (!fe) {
printk(KERN_ERR "No TDA18271 found!\n");
return -ENODEV;
}
return 0;
}
/* Attach the tuner matching the demod type detected by cineS2_probe(). */
static int tuner_attach_probe(struct ngene_channel *chan)
{
	switch (chan->demod_type) {
	case 0:
		return tuner_attach_stv6110(chan);
	case 1:
		return tuner_attach_tda18271(chan);
	default:
		return -EINVAL;
	}
}
/* Attach one path of an STV0900 dual demod plus its LNBH24 LNB controller.
 * On LNB failure the frontend is detached again. Returns 0 or -ENODEV. */
static int demod_attach_stv0900(struct ngene_channel *chan)
{
	struct i2c_adapter *adap;
	struct stv090x_config *demod_cfg = (struct stv090x_config *)
		chan->dev->card_info->fe_config[chan->number];

	/* tuner 1+2: i2c adapter #0, tuner 3+4: i2c adapter #1 */
	/* Note: Both adapters share the same i2c bus, but the demod */
	/* driver requires that each demod has its own i2c adapter */
	adap = (chan->number < 2) ? &chan->dev->channel[0].i2c_adapter
				  : &chan->dev->channel[1].i2c_adapter;

	chan->fe = dvb_attach(stv090x_attach, demod_cfg, adap,
			      (chan->number & 1) ? STV090x_DEMODULATOR_1
						 : STV090x_DEMODULATOR_0);
	if (!chan->fe) {
		printk(KERN_ERR DEVICE_NAME ": No STV0900 found!\n");
		return -ENODEV;
	}

	/* store channel info */
	if (demod_cfg->tuner_i2c_lock)
		chan->fe->analog_demod_priv = chan;

	if (!dvb_attach(lnbh24_attach, chan->fe, adap, 0,
			0, chan->dev->card_info->lnb[chan->number])) {
		printk(KERN_ERR DEVICE_NAME ": No LNBH24 found!\n");
		dvb_frontend_detach(chan->fe);
		chan->fe = NULL;
		return -ENODEV;
	}

	return 0;
}
/* Tuner I2C lock callback handed to the STV090x driver: serializes tuner
 * access across both demod paths via the device-wide PLL mutex. */
static void cineS2_tuner_i2c_lock(struct dvb_frontend *fe, int lock)
{
	struct ngene_channel *chan = fe->analog_demod_priv;

	if (!lock) {
		up(&chan->dev->pll_mutex);
		return;
	}
	down(&chan->dev->pll_mutex);
}
/* Read a single byte from I2C address 'adr'. Returns 0 on success, -1 on
 * any transfer failure. */
static int i2c_read(struct i2c_adapter *adapter, u8 adr, u8 *val)
{
	struct i2c_msg msg = { .addr = adr, .flags = I2C_M_RD,
			       .buf = val, .len = 1 };

	if (i2c_transfer(adapter, &msg, 1) != 1)
		return -1;
	return 0;
}
/* Read one byte from 16-bit register 'reg' of the device at 'adr': write the
 * register address (big endian), then read a single data byte. 0 or -1. */
static int i2c_read_reg16(struct i2c_adapter *adapter, u8 adr,
			  u16 reg, u8 *val)
{
	u8 regbuf[2] = { reg >> 8, reg & 0xff };
	struct i2c_msg msgs[2] = { { .addr = adr, .flags = 0,
				     .buf = regbuf, .len = 2 },
				   { .addr = adr, .flags = I2C_M_RD,
				     .buf = val, .len = 1 } };

	if (i2c_transfer(adapter, msgs, 2) != 2)
		return -1;
	return 0;
}
/* Probe for an STV0900 on the given port: the chip is considered present
 * iff register 0xf100 acknowledges a read. Returns 1 if found, else 0. */
static int port_has_stv0900(struct i2c_adapter *i2c, int port)
{
	u8 dummy;

	return i2c_read_reg16(i2c, 0x68 + port / 2, 0xf100, &dummy) >= 0;
}
/* Probe for a DRXK on the given port by attempting a single-byte read from
 * its I2C address. Returns 1 if the device acks, else 0. */
static int port_has_drxk(struct i2c_adapter *i2c, int port)
{
	u8 dummy;

	return i2c_read(i2c, 0x29 + port, &dummy) >= 0;
}
static int demod_attach_drxk(struct ngene_channel *chan,
struct i2c_adapter *i2c)
{
struct drxk_config config;
memset(&config, 0, sizeof(config));
config.microcode_name = "drxk_a3.mc";
config.qam_demod_parameter_count = 4;
config.adr = 0x29 + (chan->number ^ 2);
chan->fe = dvb_attach(drxk_attach, &config, i2c);
if (!chan->fe) {
printk(KERN_ERR "No DRXK found!\n");
return -ENODEV;
}
chan->fe->sec_priv = chan;
chan->gate_ctrl = chan->fe->ops.i2c_gate_ctrl;
chan->fe->ops.i2c_gate_ctrl = drxk_gate_ctrl;
return 0;
}
/* Detect which demod (STV0900 or DRXK) sits on the channel's port and attach
 * it. For demods 2/3 of a dual STV0900, the DPN1/DPN2 outputs are also
 * reprogrammed. Returns 0 on success or a negative error code.
 *
 * Fix: the return value of demod_attach_drxk() was previously ignored, so a
 * failed DRXK attach was still reported as probe success. */
static int cineS2_probe(struct ngene_channel *chan)
{
	struct i2c_adapter *i2c;
	struct stv090x_config *fe_conf;
	u8 buf[3];
	struct i2c_msg i2c_msg = { .flags = 0, .buf = buf };
	int rc;

	/* tuner 1+2: i2c adapter #0, tuner 3+4: i2c adapter #1 */
	if (chan->number < 2)
		i2c = &chan->dev->channel[0].i2c_adapter;
	else
		i2c = &chan->dev->channel[1].i2c_adapter;

	if (port_has_stv0900(i2c, chan->number)) {
		chan->demod_type = 0;
		fe_conf = chan->dev->card_info->fe_config[chan->number];
		/* demod found, attach it */
		rc = demod_attach_stv0900(chan);
		if (rc < 0 || chan->number < 2)
			return rc;

		/* demod #2: reprogram outputs DPN1 & DPN2 */
		i2c_msg.addr = fe_conf->address;
		i2c_msg.len = 3;
		buf[0] = 0xf1;
		switch (chan->number) {
		case 2:
			buf[1] = 0x5c;
			buf[2] = 0xc2;
			break;
		case 3:
			buf[1] = 0x61;
			buf[2] = 0xcc;
			break;
		default:
			return -ENODEV;
		}
		rc = i2c_transfer(i2c, &i2c_msg, 1);
		if (rc != 1) {
			printk(KERN_ERR DEVICE_NAME ": could not setup DPNx\n");
			return -EIO;
		}
	} else if (port_has_drxk(i2c, chan->number^2)) {
		chan->demod_type = 1;
		/* propagate attach errors instead of silently ignoring them */
		rc = demod_attach_drxk(chan, i2c);
		if (rc < 0)
			return rc;
	} else {
		printk(KERN_ERR "No demod found on chan %d\n", chan->number);
		return -ENODEV;
	}
	return 0;
}
/* LGDT3303 ATSC/QAM-B demod configuration for the AVerMedia M780 */
static struct lgdt330x_config aver_m780 = {
	.demod_address = 0xb2 >> 1,
	.demod_chip = LGDT3303,
	.serial_mpeg = 0x00, /* PARALLEL */
	.clock_polarity_flip = 1,
};

/* MT2131 tuner config: only field is the I2C address (0xc0 >> 1 = 0x60) */
static struct mt2131_config m780_tunerconfig = {
	0xc0 >> 1
};
/* A single func to attach the demo and tuner, rather than
 * use two sep funcs like the current design mandates.
 */
static int demod_attach_lg330x(struct ngene_channel *chan)
{
	chan->fe = dvb_attach(lgdt330x_attach, &aver_m780, &chan->i2c_adapter);
	if (!chan->fe) {
		printk(KERN_ERR DEVICE_NAME ": No LGDT330x found!\n");
		return -ENODEV;
	}

	/* NOTE(review): the MT2131 attach result is not checked, so a missing
	 * tuner still reports success - confirm whether that is intended. */
	dvb_attach(mt2131_attach, chan->fe, &chan->i2c_adapter,
		   &m780_tunerconfig, 0);

	return 0;
}
static int demod_attach_drxd(struct ngene_channel *chan)
{
struct drxd_config *feconf;
feconf = chan->dev->card_info->fe_config[chan->number];
chan->fe = dvb_attach(drxd_attach, feconf, chan,
&chan->i2c_adapter, &chan->dev->pci_dev->dev);
if (!chan->fe) {
pr_err("No DRXD found!\n");
return -ENODEV;
}
return 0;
}
static int tuner_attach_dtt7520x(struct ngene_channel *chan)
{
struct drxd_config *feconf;
feconf = chan->dev->card_info->fe_config[chan->number];
if (!dvb_attach(dvb_pll_attach, chan->fe, feconf->pll_address,
&chan->i2c_adapter,
feconf->pll_type)) {
pr_err("No pll(%d) found!\n", feconf->pll_type);
return -ENODEV;
}
return 0;
}
/****************************************************************************/
/* EEPROM TAGS **************************************************************/
/****************************************************************************/
/* Directory entries in the EEPROM are laid out as: 2-byte big-endian tag id,
 * 1-byte payload length, then the payload (see ReadEEProm/WriteEEProm). */
#define MICNG_EE_START 0x0100
#define MICNG_EE_END 0x0FF0

#define MICNG_EETAG_END0 0x0000
#define MICNG_EETAG_END1 0xFFFF
/* 0x0001 - 0x000F reserved for housekeeping */
/* 0xFFFF - 0xFFFE reserved for housekeeping */

/* Micronas assigned tags
   EEProm tags for hardware support */

#define MICNG_EETAG_DRXD1_OSCDEVIATION 0x1000 /* 2 Bytes data */
#define MICNG_EETAG_DRXD2_OSCDEVIATION 0x1001 /* 2 Bytes data */

#define MICNG_EETAG_MT2060_1_1STIF 0x1100 /* 2 Bytes data */
#define MICNG_EETAG_MT2060_2_1STIF 0x1101 /* 2 Bytes data */

/* Tag range for OEMs */
#define MICNG_EETAG_OEM_FIRST 0xC000
#define MICNG_EETAG_OEM_LAST 0xFFEF
/* Write one data byte to 16-bit EEPROM offset 'reg' at I2C address 'adr'.
 * Returns 0 on success or -EIO. */
static int i2c_write_eeprom(struct i2c_adapter *adapter,
			    u8 adr, u16 reg, u8 data)
{
	u8 buf[3] = { reg >> 8, reg & 0xff, data };
	struct i2c_msg msg = { .addr = adr, .flags = 0, .buf = buf,
			       .len = sizeof(buf) };

	if (i2c_transfer(adapter, &msg, 1) != 1) {
		pr_err(DEVICE_NAME ": Error writing EEPROM!\n");
		return -EIO;
	}
	return 0;
}
/* Read 'len' bytes from 16-bit EEPROM offset 'reg' at I2C address 'adr'.
 * Returns 0 on success or -EIO. */
static int i2c_read_eeprom(struct i2c_adapter *adapter,
			   u8 adr, u16 reg, u8 *data, int len)
{
	u8 regbuf[2] = { reg >> 8, reg & 0xff };
	struct i2c_msg msgs[2] = { { .addr = adr, .flags = 0,
				     .buf = regbuf, .len = 2 },
				   { .addr = adr, .flags = I2C_M_RD,
				     .buf = data, .len = len } };

	if (i2c_transfer(adapter, msgs, 2) != 2) {
		pr_err(DEVICE_NAME ": Error reading EEPROM\n");
		return -EIO;
	}
	return 0;
}
/*
 * ReadEEProm - look up 'Tag' in the EEPROM tag directory and copy its payload
 * (at most 'MaxLen' bytes) into 'data'. '*pLength' receives the full stored
 * length, which may exceed the number of bytes copied. Returns 0 on success,
 * -1 if the tag is not found or an I2C transfer fails.
 */
static int ReadEEProm(struct i2c_adapter *adapter,
		      u16 Tag, u32 MaxLen, u8 *data, u32 *pLength)
{
	int status = 0;
	u16 Addr = MICNG_EE_START, Length, tag = 0;
	u8 EETag[3];

	/* Walk the directory until the tag, an end marker, or EOEE is hit */
	while (Addr + sizeof(u16) + 1 < MICNG_EE_END) {
		if (i2c_read_eeprom(adapter, 0x50, Addr, EETag, sizeof(EETag)))
			return -1;
		tag = (EETag[0] << 8) | EETag[1];
		if (tag == MICNG_EETAG_END0 || tag == MICNG_EETAG_END1)
			return -1;
		if (tag == Tag)
			break;
		Addr += sizeof(u16) + 1 + EETag[2];
	}
	if (Addr + sizeof(u16) + 1 + EETag[2] > MICNG_EE_END) {
		pr_err(DEVICE_NAME
		       ": Reached EOEE @ Tag = %04x Length = %3d\n",
		       tag, EETag[2]);
		return -1;
	}
	/* Copy at most MaxLen bytes but still report the full stored length */
	Length = EETag[2];
	if (Length > MaxLen)
		Length = (u16) MaxLen;
	if (Length > 0) {
		Addr += sizeof(u16) + 1;
		status = i2c_read_eeprom(adapter, 0x50, Addr, data, Length);
		if (!status) {
			*pLength = EETag[2];
#if 0
			if (Length < EETag[2])
				status = STATUS_BUFFER_OVERFLOW;
#endif
		}
	}
	return status;
}
/*
 * WriteEEProm - overwrite the payload of an existing EEPROM tag entry with
 * 'Length' bytes from 'data'. The entry must already exist and be at least
 * 'Length' bytes long (entries cannot be grown in place). Returns 0 on
 * success, -1/-EINVAL/-EIO on failure.
 *
 * Fixes: the read-back poll loop never terminated early on a successful
 * verify, so every byte always burned all 10 * 50 ms retries, and a byte
 * that never verified was still reported as success. The error message was
 * also missing the ": " separator after DEVICE_NAME.
 */
static int WriteEEProm(struct i2c_adapter *adapter,
		       u16 Tag, u32 Length, u8 *data)
{
	int status = 0;
	u16 Addr = MICNG_EE_START;
	u8 EETag[3];
	u16 tag = 0;
	int retry, i;

	/* Find the existing directory entry for 'Tag' */
	while (Addr + sizeof(u16) + 1 < MICNG_EE_END) {
		if (i2c_read_eeprom(adapter, 0x50, Addr, EETag, sizeof(EETag)))
			return -1;
		tag = (EETag[0] << 8) | EETag[1];
		if (tag == MICNG_EETAG_END0 || tag == MICNG_EETAG_END1)
			return -1;
		if (tag == Tag)
			break;
		Addr += sizeof(u16) + 1 + EETag[2];
	}
	if (Addr + sizeof(u16) + 1 + EETag[2] > MICNG_EE_END) {
		pr_err(DEVICE_NAME
		       ": Reached EOEE @ Tag = %04x Length = %3d\n",
		       tag, EETag[2]);
		return -1;
	}
	/* An entry cannot be grown in place */
	if (Length > EETag[2])
		return -EINVAL;
	/* Note: We write the data one byte at a time to avoid
	   issues with page sizes. (which are different for
	   each manufacture and eeprom size)
	 */
	Addr += sizeof(u16) + 1;
	for (i = 0; i < Length; i++, Addr++) {
		status = i2c_write_eeprom(adapter, 0x50, Addr, data[i]);
		if (status)
			break;
		/* Poll for finishing write cycle: stop as soon as the byte
		 * reads back correctly instead of always consuming all
		 * retries. */
		retry = 10;
		while (retry) {
			u8 Tmp;

			msleep(50);
			status = i2c_read_eeprom(adapter, 0x50, Addr, &Tmp, 1);
			if (status)
				break;
			if (Tmp == data[i])
				break;
			retry -= 1;
		}
		if (!status && !retry) {
			/* byte never verified - report the failure */
			pr_err(DEVICE_NAME ": eeprom write error\n");
			status = -EIO;
			break;
		}
		if (status) {
			pr_err(DEVICE_NAME
			       ": Timeout polling eeprom\n");
			break;
		}
	}
	return status;
}
/* Read a 2-byte big-endian value stored under 'tag'. Returns 0 on success,
 * a ReadEEProm error code, or -EINVAL if the entry is not exactly 2 bytes. */
static int eeprom_read_ushort(struct i2c_adapter *adapter, u16 tag, u16 *data)
{
	u8 buf[2];
	u32 len = 0;
	int stat = ReadEEProm(adapter, tag, sizeof(buf), buf, &len);

	if (stat)
		return stat;
	if (len != sizeof(buf))
		return -EINVAL;

	*data = (buf[0] << 8) | buf[1];
	return 0;
}
/* Store a 2-byte big-endian value under 'tag'. Returns 0 or a WriteEEProm
 * error code. */
static int eeprom_write_ushort(struct i2c_adapter *adapter, u16 tag, u16 data)
{
	u8 buf[2] = { data >> 8, data & 0xff };

	return WriteEEProm(adapter, tag, sizeof(buf), buf);
}
/* DRXD callback: persist (flag != 0) or fetch (flag == 0) the oscillator
 * deviation for this channel in EEPROM tag 0x1000 + channel number. */
static s16 osc_deviation(void *priv, s16 deviation, int flag)
{
	struct ngene_channel *chan = priv;
	struct i2c_adapter *adap = &chan->i2c_adapter;
	u16 tag = 0x1000 + chan->number;
	u16 data = 0;

	if (flag) {
		data = (u16) deviation;
		pr_info(DEVICE_NAME ": write deviation %d\n",
			deviation);
		eeprom_write_ushort(adap, tag, data);
	} else {
		/* read failures fall back to a deviation of 0 */
		if (eeprom_read_ushort(adap, tag, &data))
			data = 0;
		pr_info(DEVICE_NAME ": read deviation %d\n",
			(s16) data);
	}

	return (s16) data;
}
/****************************************************************************/
/* Switch control (I2C gates, etc.) *****************************************/
/****************************************************************************/

/* STV0900 dual demod #1 (I2C address 0x68), used by channels 0 and 1 */
static struct stv090x_config fe_cineS2 = {
	.device = STV0900,
	.demod_mode = STV090x_DUAL,
	.clk_mode = STV090x_CLK_EXT,
	.xtal = 27000000,
	.address = 0x68,
	.ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
	.ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
	.repeater_level = STV090x_RPTLEVEL_16,
	.adc1_range = STV090x_ADC_1Vpp,
	.adc2_range = STV090x_ADC_1Vpp,
	.diseqc_envelope_mode = true,
	.tuner_i2c_lock = cineS2_tuner_i2c_lock,
};

/* STV0900 dual demod #2 (I2C address 0x69), used by channels 2 and 3 */
static struct stv090x_config fe_cineS2_2 = {
	.device = STV0900,
	.demod_mode = STV090x_DUAL,
	.clk_mode = STV090x_CLK_EXT,
	.xtal = 27000000,
	.address = 0x69,
	.ts1_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
	.ts2_mode = STV090x_TSMODE_SERIAL_PUNCTURED,
	.repeater_level = STV090x_RPTLEVEL_16,
	.adc1_range = STV090x_ADC_1Vpp,
	.adc2_range = STV090x_ADC_1Vpp,
	.diseqc_envelope_mode = true,
	.tuner_i2c_lock = cineS2_tuner_i2c_lock,
};

/* STV6110x tuner configs: 0x60 for even channels, 0x63 for odd channels */
static struct stv6110x_config tuner_cineS2_0 = {
	.addr = 0x60,
	.refclk = 27000000,
	.clk_div = 1,
};

static struct stv6110x_config tuner_cineS2_1 = {
	.addr = 0x63,
	.refclk = 27000000,
	.clk_div = 1,
};
/* cineS2 twin tuner: two DVB-S2 inputs on the first STV0900 */
static struct ngene_info ngene_info_cineS2 = {
	.type = NGENE_SIDEWINDER,
	.name = "Linux4Media cineS2 DVB-S2 Twin Tuner",
	.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
	.demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
	.tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
	.fe_config = {&fe_cineS2, &fe_cineS2},
	.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
	.lnb = {0x0b, 0x08},
	.tsf = {3, 3},
	.fw_version = 18,
	.msi_supported = true,
};

/* SaTiX-S2 Dual: same attach/config layout as the cineS2 twin */
static struct ngene_info ngene_info_satixS2 = {
	.type = NGENE_SIDEWINDER,
	.name = "Mystique SaTiX-S2 Dual",
	.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
	.demod_attach = {demod_attach_stv0900, demod_attach_stv0900},
	.tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110},
	.fe_config = {&fe_cineS2, &fe_cineS2},
	.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1},
	.lnb = {0x0b, 0x08},
	.tsf = {3, 3},
	.fw_version = 18,
	.msi_supported = true,
};

/* SaTiX-S2 Dual v2: four inputs; ports 2/3 are probed via cineS2_probe() */
static struct ngene_info ngene_info_satixS2v2 = {
	.type = NGENE_SIDEWINDER,
	.name = "Mystique SaTiX-S2 Dual (v2)",
	.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
		    NGENE_IO_TSOUT},
	.demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe},
	.tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_probe, tuner_attach_probe},
	.fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
	.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
	.lnb = {0x0a, 0x08, 0x0b, 0x09},
	.tsf = {3, 3},
	.fw_version = 18,
	.msi_supported = true,
};

/* cineS2 v5: same four-port layout as the SaTiX-S2 v2 */
static struct ngene_info ngene_info_cineS2v5 = {
	.type = NGENE_SIDEWINDER,
	.name = "Linux4Media cineS2 DVB-S2 Twin Tuner (v5)",
	.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
		    NGENE_IO_TSOUT},
	.demod_attach = {demod_attach_stv0900, demod_attach_stv0900, cineS2_probe, cineS2_probe},
	.tuner_attach = {tuner_attach_stv6110, tuner_attach_stv6110, tuner_attach_probe, tuner_attach_probe},
	.fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
	.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
	.lnb = {0x0a, 0x08, 0x0b, 0x09},
	.tsf = {3, 3},
	.fw_version = 18,
	.msi_supported = true,
};

/* DuoFlex: all four ports runtime-probed (STV0900 or DRXK) by cineS2_probe */
static struct ngene_info ngene_info_duoFlex = {
	.type = NGENE_SIDEWINDER,
	.name = "Digital Devices DuoFlex PCIe or miniPCIe",
	.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN, NGENE_IO_TSIN,
		    NGENE_IO_TSOUT},
	.demod_attach = {cineS2_probe, cineS2_probe, cineS2_probe, cineS2_probe},
	.tuner_attach = {tuner_attach_probe, tuner_attach_probe, tuner_attach_probe, tuner_attach_probe},
	.fe_config = {&fe_cineS2, &fe_cineS2, &fe_cineS2_2, &fe_cineS2_2},
	.tuner_config = {&tuner_cineS2_0, &tuner_cineS2_1, &tuner_cineS2_0, &tuner_cineS2_1},
	.lnb = {0x0a, 0x08, 0x0b, 0x09},
	.tsf = {3, 3},
	.fw_version = 18,
	.msi_supported = true,
};
/* AVerMedia M780: analog on channel 0 (unsupported), ATSC/QAM-B on 1 */
static struct ngene_info ngene_info_m780 = {
	.type = NGENE_APP,
	.name = "Aver M780 ATSC/QAM-B",

	/* Channel 0 is analog, which is currently unsupported */
	.io_type = { NGENE_IO_NONE, NGENE_IO_TSIN },
	.demod_attach = { NULL, demod_attach_lg330x },

	/* Ensure these are NULL else the frame will call them (as funcs) */
	.tuner_attach = { 0, 0, 0, 0 },
	.fe_config = { NULL, &aver_m780 },
	.avf = { 0 },

	/* A custom electrical interface config for the demod to bridge */
	.tsf = { 4, 4 },
	.fw_version = 15,
};

/* DRXD DVB-T demod configs for the two Terratec channels (0x70 / 0x71) */
static struct drxd_config fe_terratec_dvbt_0 = {
	.index = 0,
	.demod_address = 0x70,
	.demod_revision = 0xa2,
	.demoda_address = 0x00,
	.pll_address = 0x60,
	.pll_type = DVB_PLL_THOMSON_DTT7520X,
	.clock = 20000,
	.osc_deviation = osc_deviation,
};

static struct drxd_config fe_terratec_dvbt_1 = {
	.index = 1,
	.demod_address = 0x71,
	.demod_revision = 0xa2,
	.demoda_address = 0x00,
	.pll_address = 0x60,
	.pll_type = DVB_PLL_THOMSON_DTT7520X,
	.clock = 20000,
	.osc_deviation = osc_deviation,
};

/* Terratec Integra/Cinergy2400i: dual DVB-T via DRXD + DTT7520x PLL */
static struct ngene_info ngene_info_terratec = {
	.type = NGENE_TERRATEC,
	.name = "Terratec Integra/Cinergy2400i Dual DVB-T",
	.io_type = {NGENE_IO_TSIN, NGENE_IO_TSIN},
	.demod_attach = {demod_attach_drxd, demod_attach_drxd},
	.tuner_attach = {tuner_attach_dtt7520x, tuner_attach_dtt7520x},
	.fe_config = {&fe_terratec_dvbt_0, &fe_terratec_dvbt_1},
	.i2c_access = 1,
};
/****************************************************************************/
/****************************************************************************/
/* PCI Subsystem ID *********************************************************/
/****************************************************************************/

/* All nGene boards share the same PCI vendor/device ID pair and are told
 * apart by subsystem IDs; driver_data points at the matching board profile. */
#define NGENE_ID(_subvend, _subdev, _driverdata) { \
	.vendor = NGENE_VID, .device = NGENE_PID, \
	.subvendor = _subvend, .subdevice = _subdev, \
	.driver_data = (unsigned long) &_driverdata }

/****************************************************************************/

static const struct pci_device_id ngene_id_tbl[] = {
	NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
	NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
	NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
	NGENE_ID(0x18c3, 0xdb02, ngene_info_satixS2v2),
	NGENE_ID(0x18c3, 0xdd00, ngene_info_cineS2v5),
	NGENE_ID(0x18c3, 0xdd10, ngene_info_duoFlex),
	NGENE_ID(0x18c3, 0xdd20, ngene_info_duoFlex),
	NGENE_ID(0x1461, 0x062e, ngene_info_m780),
	NGENE_ID(0x153b, 0x1167, ngene_info_terratec),
	{0}
};
MODULE_DEVICE_TABLE(pci, ngene_id_tbl);
/****************************************************************************/
/* Init/Exit ****************************************************************/
/****************************************************************************/
/* PCI error-recovery callback: map the channel state to a recovery action. */
static pci_ers_result_t ngene_error_detected(struct pci_dev *dev,
					     enum pci_channel_state state)
{
	printk(KERN_ERR DEVICE_NAME ": PCI error\n");

	switch (state) {
	case pci_channel_io_perm_failure:
		return PCI_ERS_RESULT_DISCONNECT;
	case pci_channel_io_frozen:
		return PCI_ERS_RESULT_NEED_RESET;
	default:
		return PCI_ERS_RESULT_CAN_RECOVER;
	}
}
static pci_ers_result_t ngene_link_reset(struct pci_dev *dev)
{
printk(KERN_INFO DEVICE_NAME ": link reset\n");
return 0;
}
static pci_ers_result_t ngene_slot_reset(struct pci_dev *dev)
{
printk(KERN_INFO DEVICE_NAME ": slot reset\n");
return 0;
}
/* PCI error recovery: normal operation resumes after successful recovery.
 * Log only; no device state is restored here. */
static void ngene_resume(struct pci_dev *dev)
{
	printk(KERN_INFO DEVICE_NAME ": resume\n");
}
/* AER (PCI error recovery) callbacks wired into the PCI driver below. */
static const struct pci_error_handlers ngene_errors = {
	.error_detected = ngene_error_detected,
	.link_reset = ngene_link_reset,
	.slot_reset = ngene_slot_reset,
	.resume = ngene_resume,
};
/* Top-level PCI driver: probe/remove/shutdown live elsewhere in this file. */
static struct pci_driver ngene_pci_driver = {
	.name = "ngene",
	.id_table = ngene_id_tbl,
	.probe = ngene_probe,
	.remove = ngene_remove,
	.err_handler = &ngene_errors,
	.shutdown = ngene_shutdown,
};
/* Module entry point: announce the driver and register with the PCI core. */
static __init int module_init_ngene(void)
{
	printk(KERN_INFO
	       "nGene PCIE bridge driver, Copyright (C) 2005-2007 Micronas\n");
	return pci_register_driver(&ngene_pci_driver);
}
/* Module exit point: detach from the PCI core. */
static __exit void module_exit_ngene(void)
{
	pci_unregister_driver(&ngene_pci_driver);
}

module_init(module_init_ngene);
module_exit(module_exit_ngene);

MODULE_DESCRIPTION("nGene");
MODULE_AUTHOR("Micronas, Ralph Metzler, Manfred Voelkel");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Droid-Concepts/kernel_samsung_jf | drivers/input/keyboard/omap-keypad.c | 4888 | 12312 | /*
* linux/drivers/input/keyboard/omap-keypad.c
*
* OMAP Keypad Driver
*
* Copyright (C) 2003 Nokia Corporation
* Written by Timo Teräs <ext-timo.teras@nokia.com>
*
* Added support for H2 & H3 Keypad
* Copyright (C) 2004 Texas Instruments
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <asm/gpio.h>
#include <plat/keypad.h>
#include <plat/menelaus.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <asm/io.h>
#include <plat/mux.h>
#undef NEW_BOARD_LEARNING_MODE

static void omap_kp_tasklet(unsigned long);
static void omap_kp_timer(unsigned long);

/* Last reported matrix state: one byte (row bitmask) per column. */
static unsigned char keypad_state[8];
/* Serializes sysfs writers toggling kp_enable below. */
static DEFINE_MUTEX(kp_enable_mutex);
/* Non-zero while the keypad interrupt is enabled via the sysfs knob. */
static int kp_enable = 1;
/* Currently latched key group, or -1 when none is active. */
static int kp_cur_group = -1;
/* Per-device state for the OMAP matrix keypad driver. */
struct omap_kp {
	struct input_dev *input;	/* input device keys are reported through */
	struct timer_list timer;	/* poll timer used while a key is held */
	int irq;			/* MPUIO keyboard IRQ (OMAP1 path only) */
	unsigned int rows;		/* matrix dimensions from platform data */
	unsigned int cols;
	unsigned long delay;		/* udelay between column strobes (OMAP1) */
	unsigned int debounce;		/* NOTE(review): never written in this file -- confirm use */
};
/* Created disabled; probe enables it once driver state is initialized. */
static DECLARE_TASKLET_DISABLED(kp_tasklet, omap_kp_tasklet, 0);

/* Row/column GPIO tables supplied via platform data (OMAP2 boards). */
static unsigned int *row_gpios;
static unsigned int *col_gpios;
#ifdef CONFIG_ARCH_OMAP2
/* Drive every column GPIO from the corresponding bit of 'value'
 * (OMAP2 boards scan the matrix with dedicated GPIOs instead of the
 * OMAP1 MPUIO keyboard block). */
static void set_col_gpio_val(struct omap_kp *omap_kp, u8 value)
{
	int col;

	for (col = 0; col < omap_kp->cols; col++)
		gpio_set_value(col_gpios[col], value & (1 << col));
}

/* Sample all row GPIOs into a bitmask, one bit per row. */
static u8 get_row_gpio_val(struct omap_kp *omap_kp)
{
	int row;
	u8 value = 0;

	for (row = 0; row < omap_kp->rows; row++) {
		if (gpio_get_value(row_gpios[row]))
			value |= (1 << row);
	}
	return value;
}
#else
/* OMAP1 uses the MPUIO registers directly; GPIO helpers become no-ops. */
#define set_col_gpio_val(x, y) do {} while (0)
#define get_row_gpio_val(x) 0
#endif
/*
 * Keypad IRQ handler: mask further keypad interrupts and defer the
 * actual matrix scan to the tasklet (scanning needs udelay()s).
 */
static irqreturn_t omap_kp_interrupt(int irq, void *dev_id)
{
	struct omap_kp *omap_kp = dev_id;

	/* disable keyboard interrupt and schedule for handling */
	if (cpu_is_omap24xx()) {
		int i;

		for (i = 0; i < omap_kp->rows; i++) {
			int gpio_irq = gpio_to_irq(row_gpios[i]);
			/*
			 * The interrupt which we're currently handling should
			 * be disabled _nosync() to avoid deadlocks waiting
			 * for this handler to complete. All others should
			 * be disabled the regular way for SMP safety.
			 */
			if (gpio_irq == irq)
				disable_irq_nosync(gpio_irq);
			else
				disable_irq(gpio_irq);
		}
	} else
		/* disable keyboard interrupt and schedule for handling */
		omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);

	tasklet_schedule(&kp_tasklet);

	return IRQ_HANDLED;
}
/* Poll-timer handler: defer the rescan to the tasklet. */
static void omap_kp_timer(unsigned long data)
{
	tasklet_schedule(&kp_tasklet);
}
/*
 * Read the current matrix state into state[]: one byte per column.
 * Rows read back active-low, so results are inverted into a
 * "pressed bits set" mask.
 */
static void omap_kp_scan_keypad(struct omap_kp *omap_kp, unsigned char *state)
{
	int col = 0;

	/* read the keypad status */
	if (cpu_is_omap24xx()) {
		/* strobe one column low at a time via GPIOs */
		for (col = 0; col < omap_kp->cols; col++) {
			set_col_gpio_val(omap_kp, ~(1 << col));
			state[col] = ~(get_row_gpio_val(omap_kp)) & 0xff;
		}
		set_col_gpio_val(omap_kp, 0);
	} else {
		/* disable keyboard interrupt and schedule for handling */
		omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);

		/* read the keypad status via the MPUIO column register */
		omap_writew(0xff, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBC);
		for (col = 0; col < omap_kp->cols; col++) {
			omap_writew(~(1 << col) & 0xff,
				    OMAP1_MPUIO_BASE + OMAP_MPUIO_KBC);

			/* let the lines settle before latching the rows */
			udelay(omap_kp->delay);

			state[col] = ~omap_readw(OMAP1_MPUIO_BASE +
						 OMAP_MPUIO_KBR_LATCH) & 0xff;
		}
		omap_writew(0x00, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBC);
		udelay(2);
	}
}
/*
 * Tasklet: rescan the whole matrix, report key transitions to the input
 * layer, then either re-arm the poll timer (while any key is held) or
 * re-enable the keypad interrupt(s).
 */
static void omap_kp_tasklet(unsigned long data)
{
	struct omap_kp *omap_kp_data = (struct omap_kp *) data;
	unsigned short *keycodes = omap_kp_data->input->keycode;
	unsigned int row_shift = get_count_order(omap_kp_data->cols);
	unsigned char new_state[8], changed, key_down = 0;
	int col, row;
	int spurious = 0;

	/* check for any changes */
	omap_kp_scan_keypad(omap_kp_data, new_state);

	/* check for changes and print those */
	for (col = 0; col < omap_kp_data->cols; col++) {
		changed = new_state[col] ^ keypad_state[col];
		key_down |= new_state[col];
		if (changed == 0)
			continue;

		for (row = 0; row < omap_kp_data->rows; row++) {
			int key;
			if (!(changed & (1 << row)))
				continue;
#ifdef NEW_BOARD_LEARNING_MODE
			printk(KERN_INFO "omap-keypad: key %d-%d %s\n", col,
			       row, (new_state[col] & (1 << row)) ?
			       "pressed" : "released");
#else
			key = keycodes[MATRIX_SCAN_CODE(row, col, row_shift)];
			/* NOTE(review): keycodes[] is unsigned short, so key
			 * can never be negative here; this spurious-key check
			 * looks dead -- confirm intended keymap encoding. */
			if (key < 0) {
				printk(KERN_WARNING
				      "omap-keypad: Spurious key event %d-%d\n",
				       col, row);
				/* We scan again after a couple of seconds */
				spurious = 1;
				continue;
			}

			if (!(kp_cur_group == (key & GROUP_MASK) ||
			      kp_cur_group == -1))
				continue;

			kp_cur_group = key & GROUP_MASK;
			input_report_key(omap_kp_data->input, key & ~GROUP_MASK,
					 new_state[col] & (1 << row));
#endif
		}
	}
	input_sync(omap_kp_data->input);
	memcpy(keypad_state, new_state, sizeof(keypad_state));

	if (key_down) {
		int delay = HZ / 20;
		/* some key is pressed - keep irq disabled and use timer
		 * to poll the keypad */
		if (spurious)
			delay = 2 * HZ;
		mod_timer(&omap_kp_data->timer, jiffies + delay);
	} else {
		/* enable interrupts */
		if (cpu_is_omap24xx()) {
			int i;
			for (i = 0; i < omap_kp_data->rows; i++)
				enable_irq(gpio_to_irq(row_gpios[i]));
		} else {
			omap_writew(0, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
			kp_cur_group = -1;
		}
	}
}
/* sysfs 'enable' show: report whether keypad scanning is enabled (0/1). */
static ssize_t omap_kp_enable_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	ssize_t len;

	len = sprintf(buf, "%u\n", kp_enable);
	return len;
}
/*
 * sysfs 'enable' store: accept "0" or "1" and gate the keypad interrupt
 * accordingly.  Returns the consumed input length, or -EINVAL on
 * malformed input.
 */
static ssize_t omap_kp_enable_store(struct device *dev, struct device_attribute *attr,
				    const char *buf, size_t count)
{
	int state;

	/* NOTE(review): "%u" parses into a signed int; harmless here since
	 * only 0/1 pass the check below, but "%d" would match the type. */
	if (sscanf(buf, "%u", &state) != 1)
		return -EINVAL;

	if ((state != 1) && (state != 0))
		return -EINVAL;

	mutex_lock(&kp_enable_mutex);
	if (state != kp_enable) {
		if (state)
			enable_irq(INT_KEYBOARD);
		else
			disable_irq(INT_KEYBOARD);
		kp_enable = state;
	}
	mutex_unlock(&kp_enable_mutex);

	return strnlen(buf, count);
}

/* /sys/.../enable: world-readable, owner-writable. */
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, omap_kp_enable_show, omap_kp_enable_store);
#ifdef CONFIG_PM
/* Suspend hook: nothing to quiesce yet. */
static int omap_kp_suspend(struct platform_device *dev, pm_message_t state)
{
	/* Nothing yet */

	return 0;
}

/* Resume hook: nothing to restore yet. */
static int omap_kp_resume(struct platform_device *dev)
{
	/* Nothing yet */

	return 0;
}
#else
#define omap_kp_suspend NULL
#define omap_kp_resume NULL
#endif
/*
 * Probe: allocate driver state plus the keycode table, configure the
 * matrix (MPUIO registers on OMAP1, row/col GPIOs on OMAP2), register
 * the input device and hook up the keypad interrupt(s).
 *
 * Fixes vs. the original:
 *  - guard against a NULL platform_data pointer before dereferencing it;
 *  - the err5 unwind path passed the raw GPIO number to free_irq()
 *    although the IRQs were requested via gpio_to_irq().
 */
static int __devinit omap_kp_probe(struct platform_device *pdev)
{
	struct omap_kp *omap_kp;
	struct input_dev *input_dev;
	struct omap_kp_platform_data *pdata = pdev->dev.platform_data;
	int i, col_idx, row_idx, irq_idx, ret;
	unsigned int row_shift, keycodemax;

	if (!pdata || !pdata->rows || !pdata->cols || !pdata->keymap_data) {
		printk(KERN_ERR "No rows, cols or keymap_data from pdata\n");
		return -EINVAL;
	}

	row_shift = get_count_order(pdata->cols);
	keycodemax = pdata->rows << row_shift;

	/* The keycode table is allocated in the same block, right after
	 * the omap_kp struct (see input_dev->keycode below). */
	omap_kp = kzalloc(sizeof(struct omap_kp) +
			keycodemax * sizeof(unsigned short), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!omap_kp || !input_dev) {
		kfree(omap_kp);
		input_free_device(input_dev);
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, omap_kp);

	omap_kp->input = input_dev;

	/* Disable the interrupt for the MPUIO keyboard */
	if (!cpu_is_omap24xx())
		omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);

	input_dev->keycode = &omap_kp[1];
	input_dev->keycodesize = sizeof(unsigned short);
	input_dev->keycodemax = keycodemax;

	if (pdata->rep)
		__set_bit(EV_REP, input_dev->evbit);

	if (pdata->delay)
		omap_kp->delay = pdata->delay;

	if (pdata->row_gpios && pdata->col_gpios) {
		row_gpios = pdata->row_gpios;
		col_gpios = pdata->col_gpios;
	}

	omap_kp->rows = pdata->rows;
	omap_kp->cols = pdata->cols;

	if (cpu_is_omap24xx()) {
		/* Cols: outputs */
		for (col_idx = 0; col_idx < omap_kp->cols; col_idx++) {
			if (gpio_request(col_gpios[col_idx], "omap_kp_col") < 0) {
				printk(KERN_ERR "Failed to request"
				       "GPIO%d for keypad\n",
				       col_gpios[col_idx]);
				goto err1;
			}
			gpio_direction_output(col_gpios[col_idx], 0);
		}
		/* Rows: inputs */
		for (row_idx = 0; row_idx < omap_kp->rows; row_idx++) {
			if (gpio_request(row_gpios[row_idx], "omap_kp_row") < 0) {
				printk(KERN_ERR "Failed to request"
				       "GPIO%d for keypad\n",
				       row_gpios[row_idx]);
				goto err2;
			}
			gpio_direction_input(row_gpios[row_idx]);
		}
	} else {
		col_idx = 0;
		row_idx = 0;
	}

	setup_timer(&omap_kp->timer, omap_kp_timer, (unsigned long)omap_kp);

	/* get the irq and init timer*/
	tasklet_enable(&kp_tasklet);
	kp_tasklet.data = (unsigned long) omap_kp;

	ret = device_create_file(&pdev->dev, &dev_attr_enable);
	if (ret < 0)
		goto err2;

	/* setup input device */
	__set_bit(EV_KEY, input_dev->evbit);
	matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
				   input_dev->keycode, input_dev->keybit);
	input_dev->name = "omap-keypad";
	input_dev->phys = "omap-keypad/input0";
	input_dev->dev.parent = &pdev->dev;

	input_dev->id.bustype = BUS_HOST;
	input_dev->id.vendor = 0x0001;
	input_dev->id.product = 0x0001;
	input_dev->id.version = 0x0100;

	ret = input_register_device(omap_kp->input);
	if (ret < 0) {
		printk(KERN_ERR "Unable to register omap-keypad input device\n");
		goto err3;
	}

	if (pdata->dbounce)
		omap_writew(0xff, OMAP1_MPUIO_BASE + OMAP_MPUIO_GPIO_DEBOUNCING);

	/* scan current status and enable interrupt */
	omap_kp_scan_keypad(omap_kp, keypad_state);
	if (!cpu_is_omap24xx()) {
		omap_kp->irq = platform_get_irq(pdev, 0);
		if (omap_kp->irq >= 0) {
			if (request_irq(omap_kp->irq, omap_kp_interrupt, 0,
					"omap-keypad", omap_kp) < 0)
				goto err4;
		}
		omap_writew(0, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
	} else {
		for (irq_idx = 0; irq_idx < omap_kp->rows; irq_idx++) {
			if (request_irq(gpio_to_irq(row_gpios[irq_idx]),
					omap_kp_interrupt,
					IRQF_TRIGGER_FALLING,
					"omap-keypad", omap_kp) < 0)
				goto err5;
		}
	}
	return 0;

err5:
	/* IRQs were requested with gpio_to_irq(); free them the same way. */
	for (i = irq_idx - 1; i >= 0; i--)
		free_irq(gpio_to_irq(row_gpios[i]), omap_kp);
err4:
	input_unregister_device(omap_kp->input);
	input_dev = NULL;
err3:
	device_remove_file(&pdev->dev, &dev_attr_enable);
err2:
	for (i = row_idx - 1; i >= 0; i--)
		gpio_free(row_gpios[i]);
err1:
	for (i = col_idx - 1; i >= 0; i--)
		gpio_free(col_gpios[i]);

	kfree(omap_kp);
	input_free_device(input_dev);

	return -EINVAL;
}
/*
 * Remove: undo probe in reverse order.
 *
 * Fixes vs. the original:
 *  - on OMAP2, free each row IRQ *before* releasing the GPIO that backs
 *    it (the old order freed the GPIO while its interrupt was still
 *    registered);
 *  - remove the sysfs 'enable' attribute created in probe.
 */
static int __devexit omap_kp_remove(struct platform_device *pdev)
{
	struct omap_kp *omap_kp = platform_get_drvdata(pdev);

	/* disable keypad interrupt handling */
	tasklet_disable(&kp_tasklet);
	if (cpu_is_omap24xx()) {
		int i;

		for (i = 0; i < omap_kp->cols; i++)
			gpio_free(col_gpios[i]);
		for (i = 0; i < omap_kp->rows; i++) {
			free_irq(gpio_to_irq(row_gpios[i]), omap_kp);
			gpio_free(row_gpios[i]);
		}
	} else {
		omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT);
		free_irq(omap_kp->irq, omap_kp);
	}

	del_timer_sync(&omap_kp->timer);
	tasklet_kill(&kp_tasklet);

	device_remove_file(&pdev->dev, &dev_attr_enable);

	/* unregister everything */
	input_unregister_device(omap_kp->input);

	kfree(omap_kp);

	return 0;
}
/* Platform driver glue; matched against the "omap-keypad" device name. */
static struct platform_driver omap_kp_driver = {
	.probe		= omap_kp_probe,
	.remove		= __devexit_p(omap_kp_remove),
	.suspend	= omap_kp_suspend,
	.resume		= omap_kp_resume,
	.driver		= {
		.name	= "omap-keypad",
		.owner	= THIS_MODULE,
	},
};
/* Standard module init/exit boilerplate generated by this macro. */
module_platform_driver(omap_kp_driver);

MODULE_AUTHOR("Timo Teräs");
MODULE_DESCRIPTION("OMAP Keypad Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:omap-keypad");
| gpl-2.0 |
muftiarfan/DWI_xm | fs/ext2/xattr.c | 4888 | 28615 | /*
* linux/fs/ext2/xattr.c
*
* Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
*
* Fix by Harrison Xing <harrison@mountainviewdata.com>.
* Extended attributes for symlinks and special files added per
* suggestion of Luka Renko <luka.renko@hermes.si>.
* xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
* Red Hat Inc.
*
*/
/*
* Extended attributes are stored on disk blocks allocated outside of
* any inode. The i_file_acl field is then made to point to this allocated
* block. If all extended attributes of an inode are identical, these
* inodes may share the same extended attribute block. Such situations
* are automatically detected by keeping a cache of recent attribute block
* numbers and hashes over the block's contents in memory.
*
*
* Extended attribute block layout:
*
* +------------------+
* | header |
* | entry 1 | |
* | entry 2 | | growing downwards
* | entry 3 | v
* | four null bytes |
* | . . . |
* | value 1 | ^
* | value 3 | | growing upwards
* | value 2 | |
* +------------------+
*
* The block header is followed by multiple entry descriptors. These entry
* descriptors are variable in size, and aligned to EXT2_XATTR_PAD
* byte boundaries. The entry descriptors are sorted by attribute name,
* so that two extended attribute blocks can be compared efficiently.
*
* Attribute values are aligned to the end of the block, stored in
* no specific order. They are also padded to EXT2_XATTR_PAD byte
* boundaries. No additional gaps are left between them.
*
* Locking strategy
* ----------------
* EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
* EA blocks are only changed if they are exclusive to an inode, so
* holding xattr_sem also means that nothing but the EA block's reference
* count will change. Multiple writers to an EA block are synchronized
* by the bh lock. No more than a single bh lock is held at any time
* to avoid deadlocks.
*/
#include <linux/buffer_head.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include <linux/security.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
/* Convenience casts over a buffer_head holding an xattr block. */
#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
/* The entry list is terminated by four zero bytes. */
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)

#ifdef EXT2_XATTR_DEBUG
/* Debug tracing keyed by inode or by disk block, compiled out otherwise. */
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%ld: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		char b[BDEVNAME_SIZE]; \
		printk(KERN_DEBUG "block %s:%lu: ", \
			bdevname(bh->b_bdev, b), \
			(unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(f...)
# define ea_bdebug(f...)
#endif
/* Forward declarations: second half of set(), plus the mbcache helpers
 * that enable sharing of identical xattr blocks between inodes. */
static int ext2_xattr_set2(struct inode *, struct buffer_head *,
			   struct ext2_xattr_header *);
static int ext2_xattr_cache_insert(struct buffer_head *);
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
						 struct ext2_xattr_header *);
static void ext2_xattr_rehash(struct ext2_xattr_header *,
			      struct ext2_xattr_entry *);

/* Cache of recently seen xattr blocks, keyed by content hash. */
static struct mb_cache *ext2_xattr_cache;
/* Dispatch table indexed by the on-disk e_name_index of each entry. */
static const struct xattr_handler *ext2_xattr_handler_map[] = {
	[EXT2_XATTR_INDEX_USER]		     = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS]  = &ext2_xattr_acl_access_handler,
	[EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext2_xattr_acl_default_handler,
#endif
	[EXT2_XATTR_INDEX_TRUSTED]	     = &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
	[EXT2_XATTR_INDEX_SECURITY]	     = &ext2_xattr_security_handler,
#endif
};
/* NULL-terminated handler list exported to the VFS (sb->s_xattr). */
const struct xattr_handler *ext2_xattr_handlers[] = {
	&ext2_xattr_user_handler,
	&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	&ext2_xattr_acl_access_handler,
	&ext2_xattr_acl_default_handler,
#endif
#ifdef CONFIG_EXT2_FS_SECURITY
	&ext2_xattr_security_handler,
#endif
	NULL
};
/*
 * Map an on-disk name index to its handler, or NULL when the index is
 * out of range or has no handler configured in this build.
 */
static inline const struct xattr_handler *
ext2_xattr_handler(int name_index)
{
	if (name_index <= 0 || name_index >= ARRAY_SIZE(ext2_xattr_handler_map))
		return NULL;
	return ext2_xattr_handler_map[name_index];
}
/*
 * ext2_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext2_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	size_t name_len, size;
	char *end;
	int error;

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255)
		return -ERANGE;

	down_read(&EXT2_I(inode)->xattr_sem);
	error = -ENODATA;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	/* Validate the on-disk header before trusting any offsets in it. */
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:	ext2_error(inode->i_sb, "ext2_xattr_get",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}
	/* find named attribute */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		struct ext2_xattr_entry *next =
			EXT2_XATTR_NEXT(entry);
		if ((char *)next >= end)
			goto bad_block;
		if (name_index == entry->e_name_index &&
		    name_len == entry->e_name_len &&
		    memcmp(name, entry->e_name, name_len) == 0)
			goto found;
		entry = next;
	}
	/* Cache even on a miss: the block itself is valid and shareable. */
	if (ext2_xattr_cache_insert(bh))
		ea_idebug(inode, "cache insert failed");
	error = -ENODATA;
	goto cleanup;
found:
	/* check the buffer size */
	if (entry->e_value_block != 0)
		goto bad_block;
	size = le32_to_cpu(entry->e_value_size);
	if (size > inode->i_sb->s_blocksize ||
	    le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
		goto bad_block;

	if (ext2_xattr_cache_insert(bh))
		ea_idebug(inode, "cache insert failed");
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		/* return value of attribute */
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
			size);
	}
	error = size;

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}
/*
 * ext2_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
static int
ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = dentry->d_inode;
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	char *end;
	size_t rest = buffer_size;
	int error;

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	down_read(&EXT2_I(inode)->xattr_sem);
	error = 0;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
bad_block:	ext2_error(inode->i_sb, "ext2_xattr_list",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* check the on-disk data structure */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(entry);

		if ((char *)next >= end)
			goto bad_block;
		entry = next;
	}
	if (ext2_xattr_cache_insert(bh))
		ea_idebug(inode, "cache insert failed");

	/* list the attribute names; a NULL buffer only measures the size */
	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
	     entry = EXT2_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext2_xattr_handler(entry->e_name_index);

		if (handler) {
			size_t size = handler->list(dentry, buffer, rest,
						    entry->e_name,
						    entry->e_name_len,
						    handler->flags);
			if (buffer) {
				if (size > rest) {
					error = -ERANGE;
					goto cleanup;
				}
				buffer += size;
			}
			rest -= size;
		}
	}
	error = buffer_size - rest;  /* total size */

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}
/*
 * Inode operation listxattr()
 *
 * dentry->d_inode->i_mutex: don't care
 */
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	/* Thin wrapper; all work happens in ext2_xattr_list(). */
	return ext2_xattr_list(dentry, buffer, size);
}
/*
 * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext2_xattr_update_super_block(struct super_block *sb)
{
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
		return;

	/* s_lock protects concurrent superblock feature updates. */
	spin_lock(&EXT2_SB(sb)->s_lock);
	EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
	spin_unlock(&EXT2_SB(sb)->s_lock);
	sb->s_dirt = 1;
	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}
/*
 * ext2_xattr_set()
 *
 * Create, replace or remove an extended attribute for this inode.  Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext2_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh = NULL;
	struct ext2_xattr_header *header = NULL;
	struct ext2_xattr_entry *here, *last;
	size_t name_len, free, min_offs = sb->s_blocksize;
	int not_found = 1, error;
	char *end;

	/*
	 * header -- Points either into bh, or to a temporarily
	 *           allocated buffer.
	 * here -- The named entry found, or the place for inserting, within
	 *         the block pointed to by header.
	 * last -- Points right after the last named entry within the block
	 *         pointed to by header.
	 * min_offs -- The offset of the first value (values are aligned
	 *             towards the end of the block).
	 * end -- Points right after the block pointed to by header.
	 */

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  name_index, name, value, (long)value_len);

	if (value == NULL)
		value_len = 0;
	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255 || value_len > sb->s_blocksize)
		return -ERANGE;
	/* Serialize against other xattr modifications on this inode. */
	down_write(&EXT2_I(inode)->xattr_sem);
	if (EXT2_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		ea_bdebug(bh, "b_count=%d, refcount=%d",
			atomic_read(&(bh->b_count)),
			le32_to_cpu(HDR(bh)->h_refcount));
		header = HDR(bh);
		end = bh->b_data + bh->b_size;
		if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
		    header->h_blocks != cpu_to_le32(1)) {
bad_block:		ext2_error(sb, "ext2_xattr_set",
				"inode %ld: bad block %d", inode->i_ino,
				EXT2_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/* Find the named attribute. */
		here = FIRST_ENTRY(bh);
		while (!IS_LAST_ENTRY(here)) {
			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(here);
			if ((char *)next >= end)
				goto bad_block;
			if (!here->e_value_block && here->e_value_size) {
				size_t offs = le16_to_cpu(here->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			/* Entries are sorted; stop at first match or overshoot. */
			not_found = name_index - here->e_name_index;
			if (!not_found)
				not_found = name_len - here->e_name_len;
			if (!not_found)
				not_found = memcmp(name, here->e_name,name_len);
			if (not_found <= 0)
				break;
			here = next;
		}
		last = here;
		/* We still need to compute min_offs and last. */
		while (!IS_LAST_ENTRY(last)) {
			struct ext2_xattr_entry *next = EXT2_XATTR_NEXT(last);
			if ((char *)next >= end)
				goto bad_block;
			if (!last->e_value_block && last->e_value_size) {
				size_t offs = le16_to_cpu(last->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			last = next;
		}

		/* Check whether we have enough space left. */
		free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
	} else {
		/* We will use a new extended attribute block. */
		free = sb->s_blocksize -
			sizeof(struct ext2_xattr_header) - sizeof(__u32);
		here = last = NULL;  /* avoid gcc uninitialized warning. */
	}

	if (not_found) {
		/* Request to remove a nonexistent attribute? */
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (value == NULL)
			goto cleanup;
	} else {
		/* Request to create an existing attribute? */
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
		if (!here->e_value_block && here->e_value_size) {
			size_t size = le32_to_cpu(here->e_value_size);

			if (le16_to_cpu(here->e_value_offs) + size >
			    sb->s_blocksize || size > sb->s_blocksize)
				goto bad_block;
			/* replacing: the old name+value space becomes free */
			free += EXT2_XATTR_SIZE(size);
		}
		free += EXT2_XATTR_LEN(name_len);
	}
	error = -ENOSPC;
	if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
		goto cleanup;

	/* Here we know that we can set the new attribute. */
	if (header) {
		struct mb_cache_entry *ce;

		/* assert(header == HDR(bh)); */
		ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev,
					bh->b_blocknr);
		lock_buffer(bh);
		if (header->h_refcount == cpu_to_le32(1)) {
			/* Sole owner of the block: safe to edit in place. */
			ea_bdebug(bh, "modifying in-place");
			if (ce)
				mb_cache_entry_free(ce);
			/* keep the buffer locked while modifying it. */
		} else {
			int offset;

			/* Block is shared: copy-out before modifying. */
			if (ce)
				mb_cache_entry_release(ce);
			unlock_buffer(bh);
			ea_bdebug(bh, "cloning");
			header = kmalloc(bh->b_size, GFP_KERNEL);
			error = -ENOMEM;
			if (header == NULL)
				goto cleanup;
			memcpy(header, HDR(bh), bh->b_size);
			header->h_refcount = cpu_to_le32(1);

			/* Re-point here/last into the cloned buffer. */
			offset = (char *)here - bh->b_data;
			here = ENTRY((char *)header + offset);
			offset = (char *)last - bh->b_data;
			last = ENTRY((char *)header + offset);
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		header = kzalloc(sb->s_blocksize, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		end = (char *)header + sb->s_blocksize;
		header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
		header->h_blocks = header->h_refcount = cpu_to_le32(1);
		last = here = ENTRY(header+1);
	}

	/* Iff we are modifying the block in-place, bh is locked here. */

	if (not_found) {
		/* Insert the new name. */
		size_t size = EXT2_XATTR_LEN(name_len);
		size_t rest = (char *)last - (char *)here;
		memmove((char *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, name, name_len);
	} else {
		if (!here->e_value_block && here->e_value_size) {
			char *first_val = (char *)header + min_offs;
			size_t offs = le16_to_cpu(here->e_value_offs);
			char *val = (char *)header + offs;
			size_t size = EXT2_XATTR_SIZE(
				le32_to_cpu(here->e_value_size));

			if (size == EXT2_XATTR_SIZE(value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				here->e_value_size = cpu_to_le32(value_len);
				memset(val + size - EXT2_XATTR_PAD, 0,
				       EXT2_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, value, value_len);
				goto skip_replace;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			here->e_value_offs = 0;
			min_offs += size;

			/* Adjust all value offsets. */
			last = ENTRY(header+1);
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (!last->e_value_block && o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT2_XATTR_NEXT(last);
			}
		}
		if (value == NULL) {
			/* Remove the old name. */
			size_t size = EXT2_XATTR_LEN(name_len);
			last = ENTRY((char *)last - size);
			memmove(here, (char*)here + size,
				(char*)last - (char*)here);
			memset(last, 0, size);
		}
	}

	if (value != NULL) {
		/* Insert the new value. */
		here->e_value_size = cpu_to_le32(value_len);
		if (value_len) {
			size_t size = EXT2_XATTR_SIZE(value_len);
			char *val = (char *)header + min_offs - size;
			here->e_value_offs =
				cpu_to_le16((char *)val - (char *)header);
			memset(val + size - EXT2_XATTR_PAD, 0,
			       EXT2_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, value, value_len);
		}
	}

skip_replace:
	if (IS_LAST_ENTRY(ENTRY(header+1))) {
		/* This block is now empty. */
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, NULL);
	} else {
		ext2_xattr_rehash(header, here);
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, header);
	}

cleanup:
	brelse(bh);
	if (!(bh && header == HDR(bh)))
		kfree(header);
	up_write(&EXT2_I(inode)->xattr_sem);

	return error;
}
/*
 * Second half of ext2_xattr_set(): Update the file system.
 * 'header' is the finished in-memory block (or NULL when the block has
 * become empty); 'old_bh' is the block the inode previously pointed at.
 */
static int
ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
		struct ext2_xattr_header *header)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	int error;

	if (header) {
		new_bh = ext2_xattr_cache_find(inode, header);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == old_bh) {
				ea_bdebug(new_bh, "keeping this block");
			} else {
				/* The old block is released after updating
				   the inode.  */
				ea_bdebug(new_bh, "reusing block");

				error = dquot_alloc_block(inode, 1);
				if (error) {
					unlock_buffer(new_bh);
					goto cleanup;
				}
				le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
				ea_bdebug(new_bh, "refcount now=%d",
					le32_to_cpu(HDR(new_bh)->h_refcount));
			}
			unlock_buffer(new_bh);
		} else if (old_bh && header == HDR(old_bh)) {
			/* Keep this block. No need to lock the block as we
			   don't need to change the reference count. */
			new_bh = old_bh;
			get_bh(new_bh);
			ext2_xattr_cache_insert(new_bh);
		} else {
			/* We need to allocate a new block */
			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
						EXT2_I(inode)->i_block_group);
			int block = ext2_new_block(inode, goal, &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %d", block);

			new_bh = sb_getblk(sb, block);
			if (!new_bh) {
				/* undo the allocation on getblk failure */
				ext2_free_blocks(inode, block, 1);
				mark_inode_dirty(inode);
				error = -EIO;
				goto cleanup;
			}
			lock_buffer(new_bh);
			memcpy(new_bh->b_data, header, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext2_xattr_cache_insert(new_bh);

			ext2_xattr_update_super_block(sb);
		}
		mark_buffer_dirty(new_bh);
		if (IS_SYNC(inode)) {
			sync_dirty_buffer(new_bh);
			error = -EIO;
			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
	inode->i_ctime = CURRENT_TIME_SEC;
	if (IS_SYNC(inode)) {
		error = sync_inode_metadata(inode, 1);
		/* In case sync failed due to ENOSPC the inode was actually
		 * written (only some dirty data were not) so we just proceed
		 * as if nothing happened and cleanup the unused block */
		if (error && error != -ENOSPC) {
			if (new_bh && new_bh != old_bh) {
				dquot_free_block_nodirty(inode, 1);
				mark_inode_dirty(inode);
			}
			goto cleanup;
		}
	} else
		mark_inode_dirty(inode);

	error = 0;
	if (old_bh && old_bh != new_bh) {
		struct mb_cache_entry *ce;

		/*
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 */
		ce = mb_cache_entry_get(ext2_xattr_cache, old_bh->b_bdev,
					old_bh->b_blocknr);
		lock_buffer(old_bh);
		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
			/* Free the old block. */
			if (ce)
				mb_cache_entry_free(ce);
			ea_bdebug(old_bh, "freeing");
			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
			mark_inode_dirty(inode);
			/* We let our caller release old_bh, so we
			 * need to duplicate the buffer before. */
			get_bh(old_bh);
			bforget(old_bh);
		} else {
			/* Decrement the refcount only. */
			le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
			if (ce)
				mb_cache_entry_release(ce);
			dquot_free_block_nodirty(inode, 1);
			mark_inode_dirty(inode);
			mark_buffer_dirty(old_bh);
			ea_bdebug(old_bh, "refcount now=%d",
				le32_to_cpu(HDR(old_bh)->h_refcount));
		}
		unlock_buffer(old_bh);
	}

cleanup:
	brelse(new_bh);

	return error;
}
/*
* ext2_xattr_delete_inode()
*
* Free extended attribute resources associated with this inode. This
* is called immediately before an inode is freed.
*/
void
ext2_xattr_delete_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct mb_cache_entry *ce;

	/* Exclude any concurrent xattr access on this inode. */
	down_write(&EXT2_I(inode)->xattr_sem);
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;	/* inode has no xattr block */
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	if (!bh) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: block %d read error", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
	/* Sanity-check the on-disk header before modifying the block. */
	if (HDR(bh)->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    HDR(bh)->h_blocks != cpu_to_le32(1)) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	/* Grab the mbcache entry (if any) so it can be dropped along
	 * with the block. */
	ce = mb_cache_entry_get(ext2_xattr_cache, bh->b_bdev, bh->b_blocknr);
	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		/* We are the last user: free the block itself. */
		if (ce)
			mb_cache_entry_free(ce);
		ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
		/* bforget() consumes a reference; take an extra one so the
		 * final brelse() below stays balanced. */
		get_bh(bh);
		bforget(bh);
		unlock_buffer(bh);
	} else {
		/* Block is shared: just drop our reference count. */
		le32_add_cpu(&HDR(bh)->h_refcount, -1);
		if (ce)
			mb_cache_entry_release(ce);
		ea_bdebug(bh, "refcount now=%d",
			le32_to_cpu(HDR(bh)->h_refcount));
		unlock_buffer(bh);
		mark_buffer_dirty(bh);
		if (IS_SYNC(inode))
			sync_dirty_buffer(bh);
		dquot_free_block_nodirty(inode, 1);
	}
	EXT2_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);
}
/*
* ext2_xattr_put_super()
*
* This is called when a file system is unmounted.
*/
void
ext2_xattr_put_super(struct super_block *sb)
{
	/* Drop cached xattr-block entries for this device at unmount. */
	mb_cache_shrink(sb->s_bdev);
}
/*
* ext2_xattr_cache_insert()
*
* Create a new entry in the extended attribute cache, and insert
* it unless such an entry is already in the cache.
*
* Returns 0, or a negative error number on failure.
*/
static int
ext2_xattr_cache_insert(struct buffer_head *bh)
{
__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
struct mb_cache_entry *ce;
int error;
ce = mb_cache_entry_alloc(ext2_xattr_cache, GFP_NOFS);
if (!ce)
return -ENOMEM;
error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
if (error) {
mb_cache_entry_free(ce);
if (error == -EBUSY) {
ea_bdebug(bh, "already in cache (%d cache entries)",
atomic_read(&ext2_xattr_cache->c_entry_count));
error = 0;
}
} else {
ea_bdebug(bh, "inserting [%x] (%d cache entries)", (int)hash,
atomic_read(&ext2_xattr_cache->c_entry_count));
mb_cache_entry_release(ce);
}
return error;
}
/*
* ext2_xattr_cmp()
*
* Compare two extended attribute blocks for equality.
*
* Returns 0 if the blocks are equal, 1 if they differ, and
* a negative error number on errors.
*/
/*
 * Compare two extended attribute blocks entry by entry, including the
 * value bytes stored inside each block.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and a negative
 * error number on errors (-EIO for an unsupported external value block).
 */
static int
ext2_xattr_cmp(struct ext2_xattr_header *header1,
	       struct ext2_xattr_header *header2)
{
	struct ext2_xattr_entry *e1 = ENTRY(header1+1);
	struct ext2_xattr_entry *e2 = ENTRY(header2+1);

	for (; !IS_LAST_ENTRY(e1);
	     e1 = EXT2_XATTR_NEXT(e1), e2 = EXT2_XATTR_NEXT(e2)) {
		/* Second block ran out of entries first: not equal. */
		if (IS_LAST_ENTRY(e2))
			return 1;
		if (e1->e_hash != e2->e_hash ||
		    e1->e_name_index != e2->e_name_index ||
		    e1->e_name_len != e2->e_name_len ||
		    e1->e_value_size != e2->e_value_size ||
		    memcmp(e1->e_name, e2->e_name, e1->e_name_len))
			return 1;
		/* Values stored in a separate block are not supported. */
		if (e1->e_value_block != 0 || e2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(e1->e_value_offs),
			   (char *)header2 + le16_to_cpu(e2->e_value_offs),
			   le32_to_cpu(e1->e_value_size)))
			return 1;
	}
	/* Equal only if the second block has no extra entries left. */
	return IS_LAST_ENTRY(e2) ? 0 : 1;
}
/*
* ext2_xattr_cache_find()
*
* Find an identical extended attribute block.
*
* Returns a locked buffer head to the block found, or NULL if such
* a block was not found or an error occurred.
*/
static struct buffer_head *
ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;

	/* A zero hash marks the block as unshareable. */
	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ext2_xattr_cache, inode->i_sb->s_bdev,
				       hash);
	while (ce) {
		struct buffer_head *bh;
		if (IS_ERR(ce)) {
			/* -EAGAIN: the cache changed underneath us; retry
			 * the whole lookup from the start. */
			if (PTR_ERR(ce) == -EAGAIN)
				goto again;
			break;
		}
		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
				"inode %ld: block %ld read error",
				inode->i_ino, (unsigned long) ce->e_block);
		} else {
			lock_buffer(bh);
			if (le32_to_cpu(HDR(bh)->h_refcount) >
				   EXT2_XATTR_REFCOUNT_MAX) {
				/* Too many sharers already; keep looking. */
				ea_idebug(inode, "block %ld refcount %d>%d",
					  (unsigned long) ce->e_block,
					  le32_to_cpu(HDR(bh)->h_refcount),
					  EXT2_XATTR_REFCOUNT_MAX);
			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
				/* Identical block found. Return it LOCKED
				 * and with b_count elevated; the caller is
				 * responsible for unlock + brelse. */
				ea_bdebug(bh, "b_count=%d",
					  atomic_read(&(bh->b_count)));
				mb_cache_entry_release(ce);
				return bh;
			}
			unlock_buffer(bh);
			brelse(bh);
		}
		ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
	}
	return NULL;
}
#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16
/*
* ext2_xattr_hash_entry()
*
* Compute the hash of an extended attribute.
*/
/*
 * Compute and store the hash of one extended attribute entry, folding
 * in the attribute name byte by byte and (for in-block values) the
 * value contents one 32-bit word at a time.
 */
static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
					 struct ext2_xattr_entry *entry)
{
	__u32 hash = 0;
	char *p = entry->e_name;
	int i;

	for (i = 0; i < entry->e_name_len; i++)
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *p++;

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *word = (__le32 *)((char *)header +
					  le16_to_cpu(entry->e_value_offs));
		/* Number of padded 32-bit words occupied by the value. */
		int nwords = (le32_to_cpu(entry->e_value_size) +
			      EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS;

		while (nwords--)
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*word++);
	}
	entry->e_hash = cpu_to_le32(hash);
}
#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT
#define BLOCK_HASH_SHIFT 16
/*
* ext2_xattr_rehash()
*
* Re-compute the extended attribute hash value after an entry has changed.
*/
/*
 * Re-hash @entry, then recompute the whole-block hash by folding the
 * per-entry hashes together. Any entry with a zero hash forces the
 * block hash to zero, marking the block as unshareable.
 */
static void ext2_xattr_rehash(struct ext2_xattr_header *header,
			      struct ext2_xattr_entry *entry)
{
	struct ext2_xattr_entry *e;
	__u32 hash = 0;

	ext2_xattr_hash_entry(header, entry);
	for (e = ENTRY(header+1); !IS_LAST_ENTRY(e); e = EXT2_XATTR_NEXT(e)) {
		if (!e->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(e->e_hash);
	}
	header->h_hash = cpu_to_le32(hash);
}
#undef BLOCK_HASH_SHIFT
/* Create the module-global mbcache used to share xattr blocks. */
int __init
init_ext2_xattr(void)
{
	ext2_xattr_cache = mb_cache_create("ext2_xattr", 6);
	return ext2_xattr_cache ? 0 : -ENOMEM;
}
void
exit_ext2_xattr(void)
{
	/* Tear down the module-global xattr block cache. */
	mb_cache_destroy(ext2_xattr_cache);
}
| gpl-2.0 |
AOSParadox/android_kernel_oneplus_msm8974 | arch/arm/mach-omap1/mailbox.c | 5144 | 4446 | /*
* Mailbox reservation modules for OMAP1
*
* Copyright (C) 2006-2009 Nokia Corporation
* Written by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <plat/mailbox.h>
#define MAILBOX_ARM2DSP1 0x00
#define MAILBOX_ARM2DSP1b 0x04
#define MAILBOX_DSP2ARM1 0x08
#define MAILBOX_DSP2ARM1b 0x0c
#define MAILBOX_DSP2ARM2 0x10
#define MAILBOX_DSP2ARM2b 0x14
#define MAILBOX_ARM2DSP1_Flag 0x18
#define MAILBOX_DSP2ARM1_Flag 0x1c
#define MAILBOX_DSP2ARM2_Flag 0x20
static void __iomem *mbox_base;
struct omap_mbox1_fifo {
unsigned long cmd;
unsigned long data;
unsigned long flag;
};
struct omap_mbox1_priv {
struct omap_mbox1_fifo tx_fifo;
struct omap_mbox1_fifo rx_fifo;
};
/* Read a 16-bit mailbox register at byte offset @ofs from mbox_base. */
static inline int mbox_read_reg(size_t ofs)
{
	return __raw_readw(mbox_base + ofs);
}
/* Write the low 16 bits of @val to the mailbox register at offset @ofs. */
static inline void mbox_write_reg(u32 val, size_t ofs)
{
	__raw_writew(val, mbox_base + ofs);
}
/* msg */
/*
 * Assemble a 32-bit message from the RX FIFO: low 16 bits from the
 * data register, high 16 bits from the cmd register.
 * NOTE(review): data is read before cmd; keep this order — the
 * hardware handshake may depend on it (confirm against the OMAP1 TRM).
 */
static mbox_msg_t omap1_mbox_fifo_read(struct omap_mbox *mbox)
{
	struct omap_mbox1_fifo *fifo =
		&((struct omap_mbox1_priv *)mbox->priv)->rx_fifo;
	mbox_msg_t msg;

	msg = mbox_read_reg(fifo->data);
	msg |= ((mbox_msg_t) mbox_read_reg(fifo->cmd)) << 16;

	return msg;
}
/*
 * Split a 32-bit message across the TX FIFO: low half to the data
 * register, high half to the cmd register.
 * NOTE(review): data is written before cmd; the cmd write presumably
 * triggers delivery to the DSP — confirm against the OMAP1 TRM.
 */
static void
omap1_mbox_fifo_write(struct omap_mbox *mbox, mbox_msg_t msg)
{
	struct omap_mbox1_fifo *fifo =
		&((struct omap_mbox1_priv *)mbox->priv)->tx_fifo;

	mbox_write_reg(msg & 0xffff, fifo->data);
	mbox_write_reg(msg >> 16, fifo->cmd);
}
/*
 * Always returns 0 — no FIFO-empty status is read on OMAP1.
 * NOTE(review): confirm how the generic mailbox layer interprets
 * this constant result.
 */
static int omap1_mbox_fifo_empty(struct omap_mbox *mbox)
{
	return 0;
}
/*
 * Report the FIFO flag register; nonzero means full.
 * NOTE(review): this reads the rx_fifo flag even though the op is the
 * TX-full hook — matches the original code, but worth confirming
 * against the OMAP1 TRM.
 */
static int omap1_mbox_fifo_full(struct omap_mbox *mbox)
{
	struct omap_mbox1_fifo *fifo =
		&((struct omap_mbox1_priv *)mbox->priv)->rx_fifo;

	return mbox_read_reg(fifo->flag);
}
/* irq */
/* Enable the mailbox interrupt line, but only for the RX direction. */
static void
omap1_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_type_t irq)
{
	if (irq == IRQ_RX)
		enable_irq(mbox->irq);
}
/* Disable the mailbox interrupt line, but only for the RX direction. */
static void
omap1_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_type_t irq)
{
	if (irq == IRQ_RX)
		disable_irq(mbox->irq);
}
/*
 * Report whether @irq is pending. On OMAP1 a TX interrupt is never
 * reported; everything else is treated as pending.
 */
static int
omap1_mbox_is_irq(struct omap_mbox *mbox, omap_mbox_type_t irq)
{
	return irq != IRQ_TX;
}
static struct omap_mbox_ops omap1_mbox_ops = {
.type = OMAP_MBOX_TYPE1,
.fifo_read = omap1_mbox_fifo_read,
.fifo_write = omap1_mbox_fifo_write,
.fifo_empty = omap1_mbox_fifo_empty,
.fifo_full = omap1_mbox_fifo_full,
.enable_irq = omap1_mbox_enable_irq,
.disable_irq = omap1_mbox_disable_irq,
.is_irq = omap1_mbox_is_irq,
};
/* FIXME: the following struct should be created automatically by the user id */
/* DSP */
static struct omap_mbox1_priv omap1_mbox_dsp_priv = {
.tx_fifo = {
.cmd = MAILBOX_ARM2DSP1b,
.data = MAILBOX_ARM2DSP1,
.flag = MAILBOX_ARM2DSP1_Flag,
},
.rx_fifo = {
.cmd = MAILBOX_DSP2ARM1b,
.data = MAILBOX_DSP2ARM1,
.flag = MAILBOX_DSP2ARM1_Flag,
},
};
static struct omap_mbox mbox_dsp_info = {
.name = "dsp",
.ops = &omap1_mbox_ops,
.priv = &omap1_mbox_dsp_priv,
};
static struct omap_mbox *omap1_mboxes[] = { &mbox_dsp_info, NULL };
/*
 * Probe the OMAP1 mailbox device: look up the RX IRQ, map the register
 * window and register the mailbox list with the generic layer.
 *
 * Fix: platform_get_resource() can return NULL; the original code
 * dereferenced mem->start unconditionally, which would oops on a
 * device without a MEM resource. Fail cleanly with -ENOENT instead.
 */
static int __devinit omap1_mbox_probe(struct platform_device *pdev)
{
	struct resource *mem;
	int ret;
	struct omap_mbox **list;

	list = omap1_mboxes;
	list[0]->irq = platform_get_irq_byname(pdev, "dsp");

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENOENT;

	mbox_base = ioremap(mem->start, resource_size(mem));
	if (!mbox_base)
		return -ENOMEM;

	ret = omap_mbox_register(&pdev->dev, list);
	if (ret) {
		iounmap(mbox_base);
		return ret;
	}
	return 0;
}
/* Unregister the mailboxes and release the register mapping. */
static int __devexit omap1_mbox_remove(struct platform_device *pdev)
{
	omap_mbox_unregister();
	iounmap(mbox_base);
	return 0;
}
static struct platform_driver omap1_mbox_driver = {
.probe = omap1_mbox_probe,
.remove = __devexit_p(omap1_mbox_remove),
.driver = {
.name = "omap-mailbox",
},
};
/* Module entry point: register the platform driver. */
static int __init omap1_mbox_init(void)
{
	return platform_driver_register(&omap1_mbox_driver);
}
/* Module exit point: unregister the platform driver. */
static void __exit omap1_mbox_exit(void)
{
	platform_driver_unregister(&omap1_mbox_driver);
}
module_init(omap1_mbox_init);
module_exit(omap1_mbox_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("omap mailbox: omap1 architecture specific functions");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_ALIAS("platform:omap1-mailbox");
| gpl-2.0 |
davidmueller13/davidskernel_lt03lte_tw_5.1.1 | drivers/gpu/drm/radeon/radeon_fb.c | 5400 | 10815 | /*
* Copyright © 2007 David Airlie
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* David Airlie
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "drm_fb_helper.h"
#include <linux/vga_switcheroo.h>
/* object hierarchy -
this contains a helper + a radeon fb
the helper contains a pointer to radeon framebuffer baseclass.
*/
struct radeon_fbdev {
struct drm_fb_helper helper;
struct radeon_framebuffer rfb;
struct list_head fbdev_list;
struct radeon_device *rdev;
};
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_debug_enter = drm_fb_helper_debug_enter,
.fb_debug_leave = drm_fb_helper_debug_leave,
};
/*
 * Round a scanout width up to the pitch alignment the CRTC requires.
 * AVIVO parts and tiled surfaces need coarser alignment; the mask
 * depends on the bytes per pixel (bpp / 8). Returns the aligned width.
 */
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
{
	int large = (ASIC_IS_AVIVO(rdev)) || tiled;
	int mask;

	switch (bpp / 8) {
	case 1:
		mask = large ? 255 : 127;
		break;
	case 2:
		mask = large ? 127 : 31;
		break;
	case 3:
	case 4:
		mask = large ? 63 : 15;
		break;
	default:
		mask = 0;
		break;
	}
	return (width + mask) & ~mask;
}
/*
 * Unmap, unpin and drop the GEM object backing the fbdev framebuffer.
 * If reserving the BO fails, the kunmap/unpin is skipped but the GEM
 * reference is still released.
 */
static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(gobj);
	int ret;

	ret = radeon_bo_reserve(rbo, false);
	if (likely(ret == 0)) {
		radeon_bo_kunmap(rbo);
		radeon_bo_unpin(rbo);
		radeon_bo_unreserve(rbo);
	}
	drm_gem_object_unreference_unlocked(gobj);
}
/*
 * Allocate a VRAM GEM object sized for the requested mode, set tiling,
 * pin it into VRAM and kmap it. On success *gobj_p holds the pinned
 * object (ownership passes to the caller); on failure it is NULL.
 * Also fills in mode_cmd->pitches[0] with the CRTC-aligned pitch.
 */
static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
					 struct drm_mode_fb_cmd2 *mode_cmd,
					 struct drm_gem_object **gobj_p)
{
	struct radeon_device *rdev = rfbdev->rdev;
	struct drm_gem_object *gobj = NULL;
	struct radeon_bo *rbo = NULL;
	bool fb_tiled = false; /* useful for testing */
	u32 tiling_flags = 0;
	int ret;
	int aligned_size, size;
	int height = mode_cmd->height;
	u32 bpp, depth;

	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);

	/* need to align pitch with crtc limits */
	mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
						  fb_tiled) * ((bpp + 1) / 8);

	/* R600 and newer need the height rounded up to a multiple of 8. */
	if (rdev->family >= CHIP_R600)
		height = ALIGN(mode_cmd->height, 8);
	size = mode_cmd->pitches[0] * height;
	aligned_size = ALIGN(size, PAGE_SIZE);
	ret = radeon_gem_object_create(rdev, aligned_size, 0,
				       RADEON_GEM_DOMAIN_VRAM,
				       false, true,
				       &gobj);
	if (ret) {
		printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
		       aligned_size);
		return -ENOMEM;
	}
	rbo = gem_to_radeon_bo(gobj);

	if (fb_tiled)
		tiling_flags = RADEON_TILING_MACRO;

#ifdef __BIG_ENDIAN
	/* Big-endian hosts need byte-swapped scanout for 16/32 bpp. */
	switch (bpp) {
	case 32:
		tiling_flags |= RADEON_TILING_SWAP_32BIT;
		break;
	case 16:
		tiling_flags |= RADEON_TILING_SWAP_16BIT;
	default:
		break;
	}
#endif

	if (tiling_flags) {
		ret = radeon_bo_set_tiling_flags(rbo,
						 tiling_flags | RADEON_TILING_SURFACE,
						 mode_cmd->pitches[0]);
		if (ret)
			dev_err(rdev->dev, "FB failed to set tiling flags\n");
	}

	ret = radeon_bo_reserve(rbo, false);
	if (unlikely(ret != 0))
		goto out_unref;
	/* Only 27 bit offset for legacy CRTC */
	ret = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
				       NULL);
	if (ret) {
		radeon_bo_unreserve(rbo);
		goto out_unref;
	}
	if (fb_tiled)
		radeon_bo_check_tiling(rbo, 0, 0);
	ret = radeon_bo_kmap(rbo, NULL);
	radeon_bo_unreserve(rbo);
	if (ret) {
		goto out_unref;
	}

	*gobj_p = gobj;
	return 0;

out_unref:
	/* Undo any pin/kmap done so far and drop the GEM reference. */
	radeonfb_destroy_pinned_object(gobj);
	*gobj_p = NULL;
	return ret;
}
/*
 * Build the fbdev console: allocate and pin the scanout BO, wrap it in
 * a DRM framebuffer and publish an fb_info describing it.
 */
static int radeonfb_create(struct radeon_fbdev *rfbdev,
			   struct drm_fb_helper_surface_size *sizes)
{
	struct radeon_device *rdev = rfbdev->rdev;
	struct fb_info *info;
	struct drm_framebuffer *fb = NULL;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct radeon_bo *rbo = NULL;
	struct device *device = &rdev->pdev->dev;
	int ret;
	unsigned long tmp;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	/* avivo can't scanout real 24bpp */
	if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
		sizes->surface_bpp = 32;

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
	if (ret) {
		DRM_ERROR("failed to create fbcon object %d\n", ret);
		return ret;
	}
	rbo = gem_to_radeon_bo(gobj);

	/* okay we have an object now allocate the framebuffer */
	info = framebuffer_alloc(0, device);
	if (info == NULL) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->par = rfbdev;

	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
	if (ret) {
		DRM_ERROR("failed to initalise framebuffer %d\n", ret);
		goto out_unref;
	}

	fb = &rfbdev->rfb.base;

	/* setup helper */
	rfbdev->helper.fb = fb;
	rfbdev->helper.fbdev = info;

	/* Clear the whole scanout buffer so the console starts black. */
	memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));

	strcpy(info->fix.id, "radeondrmfb");

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &radeonfb_ops;

	/* Physical address of the scanout within the VRAM aperture. */
	tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
	info->fix.smem_start = rdev->mc.aper_base + tmp;
	info->fix.smem_len = radeon_bo_size(rbo);
	info->screen_base = rbo->kptr;
	info->screen_size = radeon_bo_size(rbo);

	drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unref;
	}
	info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
	info->apertures->ranges[0].size = rdev->mc.aper_size;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	if (info->screen_base == NULL) {
		ret = -ENOSPC;
		goto out_unref;
	}

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unref;
	}

	DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
	DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
	DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
	DRM_INFO("fb depth is %d\n", fb->depth);
	DRM_INFO("   pitch is %d\n", fb->pitches[0]);

	vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
	return 0;

out_unref:
	/* NOTE(review): this empty branch looks like a leftover — the
	 * pinned object is apparently never released when we fail before
	 * radeon_framebuffer_init() succeeds (fb still NULL). Possible
	 * BO leak on these error paths; confirm against later upstream
	 * fixes before changing. */
	if (rbo) {
	}
	if (fb && ret) {
		drm_gem_object_unreference(gobj);
		drm_framebuffer_cleanup(fb);
		kfree(fb);
	}
	return ret;
}
/*
 * fb_probe hook: create the fbdev framebuffer on first use.
 * Returns 1 if a new framebuffer was created, 0 if one already
 * existed, or a negative error number.
 */
static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
					   struct drm_fb_helper_surface_size *sizes)
{
	struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
	int ret;

	/* Framebuffer already set up; nothing new to report. */
	if (helper->fb)
		return 0;

	ret = radeonfb_create(rfbdev, sizes);
	if (ret)
		return ret;
	return 1;
}
static char *mode_option;

/*
 * Parse a comma-separated option string; the last non-empty token is
 * remembered as the video mode option. Always returns 0. The input
 * string is modified in place (strsep inserts NUL terminators).
 */
int radeon_parse_options(char *options)
{
	char *opt;

	if (options == NULL || *options == '\0')
		return 0;

	while ((opt = strsep(&options, ",")) != NULL) {
		if (*opt == '\0')
			continue;
		mode_option = opt;
	}
	return 0;
}
/* Forward an output-poll change to the fbdev helper as a hotplug event. */
void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
	drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
}
/*
 * Tear down the fbdev emulation: unregister the fb_info, release the
 * pinned scanout object and clean up the helper and framebuffer.
 */
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
{
	struct fb_info *info;
	struct radeon_framebuffer *rfb = &rfbdev->rfb;

	if (rfbdev->helper.fbdev) {
		info = rfbdev->helper.fbdev;

		unregister_framebuffer(info);
		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info);
	}

	if (rfb->obj) {
		radeonfb_destroy_pinned_object(rfb->obj);
		rfb->obj = NULL;
	}
	drm_fb_helper_fini(&rfbdev->helper);
	drm_framebuffer_cleanup(&rfb->base);

	return 0;
}
static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
.gamma_set = radeon_crtc_fb_gamma_set,
.gamma_get = radeon_crtc_fb_gamma_get,
.fb_probe = radeon_fb_find_or_create_single,
};
/*
 * Set up the fbdev emulation layer: allocate the radeon_fbdev wrapper,
 * register it with the DRM fb helper and build the initial config.
 */
int radeon_fbdev_init(struct radeon_device *rdev)
{
	struct radeon_fbdev *rfbdev;
	int bpp_sel = 32;
	int ret;

	/* select 8 bpp console on RN50 or cards with 32MB of VRAM or less
	 * (the original comment said 16MB, but the code checks 32MB) */
	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
		bpp_sel = 8;

	rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL);
	if (!rfbdev)
		return -ENOMEM;

	rfbdev->rdev = rdev;
	rdev->mode_info.rfbdev = rfbdev;
	rfbdev->helper.funcs = &radeon_fb_helper_funcs;

	ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
				 rdev->num_crtc,
				 RADEONFB_CONN_LIMIT);
	if (ret) {
		kfree(rfbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
	drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
	return 0;
}
/* Destroy the fbdev emulation layer if it was initialized. */
void radeon_fbdev_fini(struct radeon_device *rdev)
{
	if (!rdev->mode_info.rfbdev)
		return;

	radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
	kfree(rdev->mode_info.rfbdev);
	rdev->mode_info.rfbdev = NULL;
}
/* Forward suspend/resume state to the fbdev core for the console fb. */
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
{
	fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
}
/* Return the size in bytes of the BO backing the fbdev framebuffer. */
int radeon_fbdev_total_size(struct radeon_device *rdev)
{
	struct radeon_bo *robj =
		gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj);

	return radeon_bo_size(robj);
}
/* True if @robj is the BO backing the fbdev framebuffer. */
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
	return robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj);
}
| gpl-2.0 |
rutvik95/android_kernel_ms013g | drivers/mtd/chips/jedec_probe.c | 7448 | 57819 | /*
Common Flash Interface probe code.
(C) 2000 Red Hat. GPL'd.
See JEDEC (http://www.jedec.org/) standard JESD21C (section 3.5)
for the standard this probe goes back to.
Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/gen_probe.h>
/* AMD */
#define AM29DL800BB 0x22CB
#define AM29DL800BT 0x224A
#define AM29F800BB 0x2258
#define AM29F800BT 0x22D6
#define AM29LV400BB 0x22BA
#define AM29LV400BT 0x22B9
#define AM29LV800BB 0x225B
#define AM29LV800BT 0x22DA
#define AM29LV160DT 0x22C4
#define AM29LV160DB 0x2249
#define AM29F017D 0x003D
#define AM29F016D 0x00AD
#define AM29F080 0x00D5
#define AM29F040 0x00A4
#define AM29LV040B 0x004F
#define AM29F032B 0x0041
#define AM29F002T 0x00B0
#define AM29SL800DB 0x226B
#define AM29SL800DT 0x22EA
/* Atmel */
#define AT49BV512 0x0003
#define AT29LV512 0x003d
#define AT49BV16X 0x00C0
#define AT49BV16XT 0x00C2
#define AT49BV32X 0x00C8
#define AT49BV32XT 0x00C9
/* Eon */
#define EN29SL800BB 0x226B
#define EN29SL800BT 0x22EA
/* Fujitsu */
#define MBM29F040C 0x00A4
#define MBM29F800BA 0x2258
#define MBM29LV650UE 0x22D7
#define MBM29LV320TE 0x22F6
#define MBM29LV320BE 0x22F9
#define MBM29LV160TE 0x22C4
#define MBM29LV160BE 0x2249
#define MBM29LV800BA 0x225B
#define MBM29LV800TA 0x22DA
#define MBM29LV400TC 0x22B9
#define MBM29LV400BC 0x22BA
/* Hyundai */
#define HY29F002T 0x00B0
/* Intel */
#define I28F004B3T 0x00d4
#define I28F004B3B 0x00d5
#define I28F400B3T 0x8894
#define I28F400B3B 0x8895
#define I28F008S5 0x00a6
#define I28F016S5 0x00a0
#define I28F008SA 0x00a2
#define I28F008B3T 0x00d2
#define I28F008B3B 0x00d3
#define I28F800B3T 0x8892
#define I28F800B3B 0x8893
#define I28F016S3 0x00aa
#define I28F016B3T 0x00d0
#define I28F016B3B 0x00d1
#define I28F160B3T 0x8890
#define I28F160B3B 0x8891
#define I28F320B3T 0x8896
#define I28F320B3B 0x8897
#define I28F640B3T 0x8898
#define I28F640B3B 0x8899
#define I28F640C3B 0x88CD
#define I28F160F3T 0x88F3
#define I28F160F3B 0x88F4
#define I28F160C3T 0x88C2
#define I28F160C3B 0x88C3
#define I82802AB 0x00ad
#define I82802AC 0x00ac
/* Macronix */
#define MX29LV040C 0x004F
#define MX29LV160T 0x22C4
#define MX29LV160B 0x2249
#define MX29F040 0x00A4
#define MX29F016 0x00AD
#define MX29F002T 0x00B0
#define MX29F004T 0x0045
#define MX29F004B 0x0046
/* NEC */
#define UPD29F064115 0x221C
/* PMC */
#define PM49FL002 0x006D
#define PM49FL004 0x006E
#define PM49FL008 0x006A
/* Sharp */
#define LH28F640BF 0x00b0
/* ST - www.st.com */
#define M29F800AB 0x0058
#define M29W800DT 0x22D7
#define M29W800DB 0x225B
#define M29W400DT 0x00EE
#define M29W400DB 0x00EF
#define M29W160DT 0x22C4
#define M29W160DB 0x2249
#define M29W040B 0x00E3
#define M50FW040 0x002C
#define M50FW080 0x002D
#define M50FW016 0x002E
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
#define M50FLW080B 0x0081
#define PSD4256G6V 0x00e9
/* SST */
#define SST29EE020 0x0010
#define SST29LE020 0x0012
#define SST29EE512 0x005d
#define SST29LE512 0x003d
#define SST39LF800 0x2781
#define SST39LF160 0x2782
#define SST39VF1601 0x234b
#define SST39VF3201 0x235b
#define SST39WF1601 0x274b
#define SST39WF1602 0x274a
#define SST39LF512 0x00D4
#define SST39LF010 0x00D5
#define SST39LF020 0x00D6
#define SST39LF040 0x00D7
#define SST39SF010A 0x00B5
#define SST39SF020A 0x00B6
#define SST39SF040 0x00B7
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
#define SST49LF030A 0x001C
#define SST49LF040A 0x0051
#define SST49LF080A 0x005B
#define SST36VF3203 0x7354
/* Toshiba */
#define TC58FVT160 0x00C2
#define TC58FVB160 0x0043
#define TC58FVT321 0x009A
#define TC58FVB321 0x009C
#define TC58FVT641 0x0093
#define TC58FVB641 0x0095
/* Winbond */
#define W49V002A 0x00b0
/*
* Unlock address sets for AMD command sets.
* Intel command sets use the MTD_UADDR_UNNECESSARY.
* Each identifier, except MTD_UADDR_UNNECESSARY, and
* MTD_UADDR_NO_SUPPORT must be defined below in unlock_addrs[].
* MTD_UADDR_NOT_SUPPORTED must be 0 so that structure
* initialization need not require initializing all of the
* unlock addresses for all bit widths.
*/
enum uaddr {
MTD_UADDR_NOT_SUPPORTED = 0, /* data width not supported */
MTD_UADDR_0x0555_0x02AA,
MTD_UADDR_0x0555_0x0AAA,
MTD_UADDR_0x5555_0x2AAA,
MTD_UADDR_0x0AAA_0x0554,
MTD_UADDR_0x0AAA_0x0555,
MTD_UADDR_0xAAAA_0x5555,
MTD_UADDR_DONT_CARE, /* Requires an arbitrary address */
MTD_UADDR_UNNECESSARY, /* Does not require any address */
};
struct unlock_addr {
uint32_t addr1;
uint32_t addr2;
};
/*
* I don't like the fact that the first entry in unlock_addrs[]
* exists, but is for MTD_UADDR_NOT_SUPPORTED - and, therefore,
* should not be used. The problem is that structures with
* initializers have extra fields initialized to 0. It is _very_
* desirable to have the unlock address entries for unsupported
* data widths automatically initialized - that means that
* MTD_UADDR_NOT_SUPPORTED must be 0 and the first entry here
* must go unused.
*/
static const struct unlock_addr unlock_addrs[] = {
[MTD_UADDR_NOT_SUPPORTED] = {
.addr1 = 0xffff,
.addr2 = 0xffff
},
[MTD_UADDR_0x0555_0x02AA] = {
.addr1 = 0x0555,
.addr2 = 0x02aa
},
[MTD_UADDR_0x0555_0x0AAA] = {
.addr1 = 0x0555,
.addr2 = 0x0aaa
},
[MTD_UADDR_0x5555_0x2AAA] = {
.addr1 = 0x5555,
.addr2 = 0x2aaa
},
[MTD_UADDR_0x0AAA_0x0554] = {
.addr1 = 0x0AAA,
.addr2 = 0x0554
},
[MTD_UADDR_0x0AAA_0x0555] = {
.addr1 = 0x0AAA,
.addr2 = 0x0555
},
[MTD_UADDR_0xAAAA_0x5555] = {
.addr1 = 0xaaaa,
.addr2 = 0x5555
},
[MTD_UADDR_DONT_CARE] = {
.addr1 = 0x0000, /* Doesn't matter which address */
.addr2 = 0x0000 /* is used - must be last entry */
},
[MTD_UADDR_UNNECESSARY] = {
.addr1 = 0x0000,
.addr2 = 0x0000
}
};
/*
 * Static description of one JEDEC flash chip, matched against the
 * manufacturer/device IDs read back during probing.
 */
struct amd_flash_info {
	const char *name;	/* human-readable chip name */
	const uint16_t mfr_id;	/* JEDEC manufacturer ID */
	const uint16_t dev_id;	/* JEDEC device ID */
	const uint8_t dev_size;	/* log2 of device size (see SIZE_* defines) */
	const uint8_t nr_regions;	/* valid entries in regions[] */
	const uint16_t cmd_set;	/* CFI primary command-set ID (P_ID_*) */
	const uint32_t regions[6];	/* ERASEINFO()-encoded erase regions */
	const uint8_t devtypes;	/* Bitmask for x8, x16 etc. */
	const uint8_t uaddr;	/* unlock addrs for 8, 16, 32, 64 */
};
#define ERASEINFO(size,blocks) (size<<8)|(blocks-1)
#define SIZE_64KiB 16
#define SIZE_128KiB 17
#define SIZE_256KiB 18
#define SIZE_512KiB 19
#define SIZE_1MiB 20
#define SIZE_2MiB 21
#define SIZE_4MiB 22
#define SIZE_8MiB 23
/*
* Please keep this list ordered by manufacturer!
* Fortunately, the list isn't searched often and so a
* slow, linear search isn't so bad.
*/
static const struct amd_flash_info jedec_table[] = {
{
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F032B,
.name = "AMD AM29F032B",
.uaddr = MTD_UADDR_0x0555_0x02AA,
.devtypes = CFI_DEVICETYPE_X8,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,64)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV160DT,
.name = "AMD AM29LV160DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV160DB,
.name = "AMD AM29LV160DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,31)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV400BB,
.name = "AMD AM29LV400BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,7)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV400BT,
.name = "AMD AM29LV400BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,7),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV800BB,
.name = "AMD AM29LV800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15),
}
}, {
/* add DL */
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29DL800BB,
.name = "AMD AM29DL800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 6,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,4),
ERASEINFO(0x08000,1),
ERASEINFO(0x04000,1),
ERASEINFO(0x10000,14)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29DL800BT,
.name = "AMD AM29DL800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 6,
.regions = {
ERASEINFO(0x10000,14),
ERASEINFO(0x04000,1),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,4),
ERASEINFO(0x08000,1),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F800BB,
.name = "AMD AM29F800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV800BT,
.name = "AMD AM29LV800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F800BT,
.name = "AMD AM29F800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F017D,
.name = "AMD AM29F017D",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_DONT_CARE,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F016D,
.name = "AMD AM29F016D",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F080,
.name = "AMD AM29F080",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F040,
.name = "AMD AM29F040",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29LV040B,
.name = "AMD AM29LV040B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29F002T,
.name = "AMD AM29F002T",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29SL800DT,
.name = "AMD AM29SL800DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1),
}
}, {
.mfr_id = CFI_MFR_AMD,
.dev_id = AM29SL800DB,
.name = "AMD AM29SL800DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15),
}
}, {
.mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV512,
.name = "Atmel AT49BV512",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_64KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,1)
}
}, {
.mfr_id = CFI_MFR_ATMEL,
.dev_id = AT29LV512,
.name = "Atmel AT29LV512",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_64KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x80,256),
ERASEINFO(0x80,256)
}
}, {
.mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV16X,
.name = "Atmel AT49BV16X",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,31)
}
}, {
.mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV16XT,
.name = "Atmel AT49BV16XT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x02000,8)
}
}, {
.mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV32X,
.name = "Atmel AT49BV32X",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,63)
}
}, {
.mfr_id = CFI_MFR_ATMEL,
.dev_id = AT49BV32XT,
.name = "Atmel AT49BV32XT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000,63),
ERASEINFO(0x02000,8)
}
}, {
.mfr_id = CFI_MFR_EON,
.dev_id = EN29SL800BT,
.name = "Eon EN29SL800BT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1),
}
}, {
.mfr_id = CFI_MFR_EON,
.dev_id = EN29SL800BB,
.name = "Eon EN29SL800BB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15),
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29F040C,
.name = "Fujitsu MBM29F040C",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29F800BA,
.name = "Fujitsu MBM29F800BA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15),
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV650UE,
.name = "Fujitsu MBM29LV650UE",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_DONT_CARE,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,128)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV320TE,
.name = "Fujitsu MBM29LV320TE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000,63),
ERASEINFO(0x02000,8)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV320BE,
.name = "Fujitsu MBM29LV320BE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,63)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV160TE,
.name = "Fujitsu MBM29LV160TE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV160BE,
.name = "Fujitsu MBM29LV160BE",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,31)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV800BA,
.name = "Fujitsu MBM29LV800BA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV800TA,
.name = "Fujitsu MBM29LV800TA",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV400BC,
.name = "Fujitsu MBM29LV400BC",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,7)
}
}, {
.mfr_id = CFI_MFR_FUJITSU,
.dev_id = MBM29LV400TC,
.name = "Fujitsu MBM29LV400TC",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,7),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_HYUNDAI,
.dev_id = HY29F002T,
.name = "Hyundai HY29F002T",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F004B3B,
.name = "Intel 28F004B3B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 7),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F004B3T,
.name = "Intel 28F004B3T",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 7),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F400B3B,
.name = "Intel 28F400B3B",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 7),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F400B3T,
.name = "Intel 28F400B3T",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 7),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008B3B,
.name = "Intel 28F008B3B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 15),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008B3T,
.name = "Intel 28F008B3T",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 15),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008S5,
.name = "Intel 28F008S5",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016S5,
.name = "Intel 28F016S5",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F008SA,
.name = "Intel 28F008SA",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000, 16),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F800B3B,
.name = "Intel 28F800B3B",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 15),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F800B3T,
.name = "Intel 28F800B3T",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 15),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016B3B,
.name = "Intel 28F016B3B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 31),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016S3,
.name = "Intel I28F016S3",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000, 32),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F016B3T,
.name = "Intel 28F016B3T",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 31),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F160B3B,
.name = "Intel 28F160B3B",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 31),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F160B3T,
.name = "Intel 28F160B3T",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 31),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F320B3B,
.name = "Intel 28F320B3B",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 63),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F320B3T,
.name = "Intel 28F320B3T",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 63),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640B3B,
.name = "Intel 28F640B3B",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 127),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640B3T,
.name = "Intel 28F640B3T",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 127),
ERASEINFO(0x02000, 8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I28F640C3B,
.name = "Intel 28F640C3B",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 127),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I82802AB,
.name = "Intel 82802AB",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = CFI_MFR_INTEL,
.dev_id = I82802AC,
.name = "Intel 82802AC",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV040C,
.name = "Macronix MX29LV040C",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV160T,
.name = "MXIC MX29LV160T",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_NEC,
.dev_id = UPD29F064115,
.name = "NEC uPD29F064115",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 3,
.regions = {
ERASEINFO(0x2000,8),
ERASEINFO(0x10000,126),
ERASEINFO(0x2000,8),
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29LV160B,
.name = "MXIC MX29LV160B",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,31)
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F040,
.name = "Macronix MX29F040",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F016,
.name = "Macronix MX29F016",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F004T,
.name = "Macronix MX29F004T",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,7),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1),
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F004B,
.name = "Macronix MX29F004B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,7),
}
}, {
.mfr_id = CFI_MFR_MACRONIX,
.dev_id = MX29F002T,
.name = "Macronix MX29F002T",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1),
}
}, {
.mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL002,
.name = "PMC Pm49FL002",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO( 0x01000, 64 )
}
}, {
.mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL004,
.name = "PMC Pm49FL004",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO( 0x01000, 128 )
}
}, {
.mfr_id = CFI_MFR_PMC,
.dev_id = PM49FL008,
.name = "PMC Pm49FL008",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO( 0x01000, 256 )
}
}, {
.mfr_id = CFI_MFR_SHARP,
.dev_id = LH28F640BF,
.name = "LH28F640BF",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_INTEL_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x40000,16),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST39LF512,
.name = "SST 39LF512",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_64KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,16),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST39LF010,
.name = "SST 39LF010",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_128KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,32),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST29EE020,
.name = "SST 29EE020",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_SST_PAGE,
.nr_regions = 1,
.regions = {ERASEINFO(0x01000,64),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST29LE020,
.name = "SST 29LE020",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_SST_PAGE,
.nr_regions = 1,
.regions = {ERASEINFO(0x01000,64),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST39LF020,
.name = "SST 39LF020",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,64),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST39LF040,
.name = "SST 39LF040",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST39SF010A,
.name = "SST 39SF010A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_128KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,32),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST39SF020A,
.name = "SST 39SF020A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,64),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST39SF040,
.name = "SST 39SF040",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST49LF040B,
.name = "SST 49LF040B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST49LF004B,
.name = "SST 49LF004B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST49LF008A,
.name = "SST 49LF008A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,256),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST49LF030A,
.name = "SST 49LF030A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,96),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST49LF040A,
.name = "SST 49LF040A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST49LF080A,
.name = "SST 49LF080A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x01000,256),
}
}, {
.mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39LF160,
.name = "SST 39LF160",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x1000,256),
ERASEINFO(0x1000,256)
}
}, {
.mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39VF1601,
.name = "SST 39VF1601",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x1000,256),
ERASEINFO(0x1000,256)
}
}, {
/* CFI is broken: reports AMD_STD, but needs custom uaddr */
.mfr_id = CFI_MFR_SST,
.dev_id = SST39WF1601,
.name = "SST 39WF1601",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x1000,256),
ERASEINFO(0x1000,256)
}
}, {
/* CFI is broken: reports AMD_STD, but needs custom uaddr */
.mfr_id = CFI_MFR_SST,
.dev_id = SST39WF1602,
.name = "SST 39WF1602",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x1000,256),
ERASEINFO(0x1000,256)
}
}, {
.mfr_id = CFI_MFR_SST, /* should be CFI */
.dev_id = SST39VF3201,
.name = "SST 39VF3201",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_0xAAAA_0x5555,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x1000,256),
ERASEINFO(0x1000,256),
ERASEINFO(0x1000,256),
ERASEINFO(0x1000,256)
}
}, {
.mfr_id = CFI_MFR_SST,
.dev_id = SST36VF3203,
.name = "SST 36VF3203",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,64),
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M29F800AB,
.name = "ST M29F800AB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15),
}
}, {
.mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W800DT,
.name = "ST M29W800DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W800DB,
.name = "ST M29W800DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,15)
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M29W400DT,
.name = "ST M29W400DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,7),
ERASEINFO(0x02000,1),
ERASEINFO(0x08000,2),
ERASEINFO(0x10000,1)
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M29W400DB,
.name = "ST M29W400DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,7)
}
}, {
.mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W160DT,
.name = "ST M29W160DT",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_ST, /* FIXME - CFI device? */
.dev_id = M29W160DB,
.name = "ST M29W160DB",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,31)
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M29W040B,
.name = "ST M29W040B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0555_0x02AA,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M50FW040,
.name = "ST M50FW040",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_512KiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M50FW080,
.name = "ST M50FW080",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M50FW016,
.name = "ST M50FW016",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M50LPW080,
.name = "ST M50LPW080",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
},
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M50FLW080A,
.name = "ST M50FLW080A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 4,
.regions = {
ERASEINFO(0x1000,16),
ERASEINFO(0x10000,13),
ERASEINFO(0x1000,16),
ERASEINFO(0x1000,16),
}
}, {
.mfr_id = CFI_MFR_ST,
.dev_id = M50FLW080B,
.name = "ST M50FLW080B",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_UNNECESSARY,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_INTEL_EXT,
.nr_regions = 4,
.regions = {
ERASEINFO(0x1000,16),
ERASEINFO(0x1000,16),
ERASEINFO(0x10000,13),
ERASEINFO(0x1000,16),
}
}, {
.mfr_id = 0xff00 | CFI_MFR_ST,
.dev_id = 0xff00 | PSD4256G6V,
.name = "ST PSD4256G6V",
.devtypes = CFI_DEVICETYPE_X16,
.uaddr = MTD_UADDR_0x0AAA_0x0554,
.dev_size = SIZE_1MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
}, {
.mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT160,
.name = "Toshiba TC58FVT160",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x04000,1)
}
}, {
.mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB160,
.name = "Toshiba TC58FVB160",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_2MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
ERASEINFO(0x08000,1),
ERASEINFO(0x10000,31)
}
}, {
.mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB321,
.name = "Toshiba TC58FVB321",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,63)
}
}, {
.mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT321,
.name = "Toshiba TC58FVT321",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_4MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000,63),
ERASEINFO(0x02000,8)
}
}, {
.mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVB641,
.name = "Toshiba TC58FVB641",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,127)
}
}, {
.mfr_id = CFI_MFR_TOSHIBA,
.dev_id = TC58FVT641,
.name = "Toshiba TC58FVT641",
.devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x0AAA_0x0555,
.dev_size = SIZE_8MiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 2,
.regions = {
ERASEINFO(0x10000,127),
ERASEINFO(0x02000,8)
}
}, {
.mfr_id = CFI_MFR_WINBOND,
.dev_id = W49V002A,
.name = "Winbond W49V002A",
.devtypes = CFI_DEVICETYPE_X8,
.uaddr = MTD_UADDR_0x5555_0x2AAA,
.dev_size = SIZE_256KiB,
.cmd_set = P_ID_AMD_STD,
.nr_regions = 4,
.regions = {
ERASEINFO(0x10000, 3),
ERASEINFO(0x08000, 1),
ERASEINFO(0x02000, 2),
ERASEINFO(0x04000, 1),
}
}
};
/*
 * Read the JEDEC manufacturer ID from a chip sitting in autoselect mode.
 *
 * Per JEDEC "Standard Manufacturer's Identification Code" (JEP106), one
 * or more leading ID banks may hold the continuation code 0x7f rather
 * than a real manufacturer ID; step through successive banks (0x100
 * device-words apart) until a non-continuation code turns up.
 */
static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
	struct cfi_private *cfi)
{
	unsigned long id_mask = (1 << (cfi->device_type * 8)) - 1;
	map_word word;
	int bank;

	for (bank = 0; ; bank++) {
		uint32_t ofs = cfi_build_cmd_addr(bank << 8, map, cfi);

		word = map_read(map, base + ofs);
		if ((word.x[0] & id_mask) != CFI_MFR_CONTINUATION)
			break;
	}

	return word.x[0] & id_mask;
}
/*
 * Read the JEDEC device ID (offset 1 in autoselect mode), masked down to
 * the width of a single device on the bus.
 */
static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
	struct cfi_private *cfi)
{
	unsigned long id_mask = (1 << (cfi->device_type * 8)) - 1;
	map_word word;

	word = map_read(map, base + cfi_build_cmd_addr(1, map, cfi));
	return word.x[0] & id_mask;
}
/*
 * jedec_reset - return the chip at @base to read-array mode.
 *
 * Issues the JEDEC unlock sequence (when the current probe attempt has
 * unlock addresses), the 0xF0 reset, and finally the Intel-style 0xFF
 * read-array command as a fallback.  Only sends commands; no readback.
 */
static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
{
	/* Reset */

	/* after checking the datasheets for SST, MACRONIX and ATMEL
	 * (oh and incidentaly the jedec spec - 3.5.3.3) the reset
	 * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at
	 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips
	 * as they will ignore the writes and don't care what address
	 * the F0 is written to */
	if (cfi->addr_unlock1) {
		pr_debug( "reset unlock called %x %x \n",
		       cfi->addr_unlock1,cfi->addr_unlock2);
		cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
	}

	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
	/* Some misdesigned Intel chips do not respond for 0xF0 for a reset,
	 * so ensure we're in read mode.  Send both the Intel and the AMD command
	 * for this.  Intel uses 0xff for this, AMD uses 0xff for NOP, so
	 * this should be safe.
	 */
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	/* FIXME - should have reset delay before continuing */
}
/*
 * cfi_jedec_setup - synthesize a CFI identification structure from the
 * jedec_table[] entry at @index for a chip that answered a JEDEC probe.
 *
 * Returns 1 on success, 0 when the table entry is incompatible with the
 * bus width in use or when memory allocation fails.  On success
 * cfi->cfiq, cfi->mfr/id, the erase-region list, and the unlock
 * addresses are all populated as if read from a real CFI query.
 */
static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index)
{
	int i, num_erase_regions;
	uint8_t uaddr;

	if (!(jedec_table[index].devtypes & cfi->device_type)) {
		pr_debug("Rejecting potential %s with incompatible %d-bit device type\n",
			 jedec_table[index].name, 4 * (1<<cfi->device_type));
		return 0;
	}

	printk(KERN_INFO "Found: %s\n",jedec_table[index].name);

	num_erase_regions = jedec_table[index].nr_regions;

	/*
	 * One allocation covers the fixed cfi_ident plus the trailing
	 * erase-region array (4 bytes per region).  Use kzalloc instead
	 * of kmalloc + a partial memset of only sizeof(struct cfi_ident):
	 * the whole buffer is now zeroed, so there is never a window in
	 * which the region slots hold uninitialized heap data.
	 */
	cfi->cfiq = kzalloc(sizeof(struct cfi_ident) + num_erase_regions * 4,
			    GFP_KERNEL);
	if (!cfi->cfiq)
		return 0;

	cfi->cfiq->P_ID = jedec_table[index].cmd_set;
	cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
	cfi->cfiq->DevSize = jedec_table[index].dev_size;
	cfi->cfi_mode = CFI_MODE_JEDEC;
	cfi->sector_erase_cmd = CMD(0x30);

	for (i = 0; i < num_erase_regions; i++)
		cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];

	cfi->cmdset_priv = NULL;

	/* This may be redundant for some cases, but it doesn't hurt */
	cfi->mfr = jedec_table[index].mfr_id;
	cfi->id = jedec_table[index].dev_id;

	uaddr = jedec_table[index].uaddr;

	/* The table has unlock addresses in _bytes_, and we try not to let
	   our brains explode when we see the datasheets talking about address
	   lines numbered from A-1 to A18.  The CFI table has unlock addresses
	   in device-words according to the mode the device is connected in */
	cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / cfi->device_type;
	cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / cfi->device_type;

	return 1;	/* ok */
}
/*
 * There is a BIG problem properly ID'ing the JEDEC device and guaranteeing
 * the mapped address, unlock addresses, and proper chip ID.  This function
 * attempts to minimize errors.  It is doubtfull that this probe will ever
 * be perfect - consequently there should be some module parameters that
 * could be manually specified to force the chip info.
 *
 * Returns 1 when @finfo is a plausible match for the IDs already read
 * into @cfi (and leaves the chip in autoselect/ID mode), 0 otherwise.
 */
static inline int jedec_match( uint32_t base,
			       struct map_info *map,
			       struct cfi_private *cfi,
			       const struct amd_flash_info *finfo )
{
	int rc = 0;           /* failure until all tests pass */
	u32 mfr, id;
	uint8_t uaddr;

	/*
	 * The IDs must match.  For X16 and X32 devices operating in
	 * a lower width ( X8 or X16 ), the device ID's are usually just
	 * the lower byte(s) of the larger device ID for wider mode.  If
	 * a part is found that doesn't fit this assumption (device id for
	 * smaller width mode is completely unrealated to full-width mode)
	 * then the jedec_table[] will have to be augmented with the IDs
	 * for different widths.
	 */
	switch (cfi->device_type) {
	case CFI_DEVICETYPE_X8:
		mfr = (uint8_t)finfo->mfr_id;
		id = (uint8_t)finfo->dev_id;

		/* bjd: it seems that if we do this, we can end up
		 * detecting 16bit flashes as an 8bit device, even though
		 * there aren't.
		 */
		if (finfo->dev_id > 0xff) {
			pr_debug("%s(): ID is not 8bit\n",
			       __func__);
			goto match_done;
		}
		break;
	case CFI_DEVICETYPE_X16:
		mfr = (uint16_t)finfo->mfr_id;
		id = (uint16_t)finfo->dev_id;
		break;
	case CFI_DEVICETYPE_X32:
		mfr = (uint16_t)finfo->mfr_id;
		id = (uint32_t)finfo->dev_id;
		break;
	default:
		printk(KERN_WARNING
		       "MTD %s(): Unsupported device type %d\n",
		       __func__, cfi->device_type);
		goto match_done;
	}
	if ( cfi->mfr != mfr || cfi->id != id ) {
		goto match_done;
	}

	/* the part size must fit in the memory window */
	pr_debug("MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
	       __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) );
	if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
		pr_debug("MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
		       __func__, finfo->mfr_id, finfo->dev_id,
		       1 << finfo->dev_size );
		goto match_done;
	}

	/* device must support the bus width currently being probed */
	if (! (finfo->devtypes & cfi->device_type))
		goto match_done;

	/* the unlock addresses used to get here must agree with the table
	 * entry (unless the entry says any/no unlock scheme will do) */
	uaddr = finfo->uaddr;

	pr_debug("MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
	       __func__, cfi->addr_unlock1, cfi->addr_unlock2 );
	if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
	     && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 ||
		  unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) {
		pr_debug("MTD %s(): 0x%.4x 0x%.4x did not match\n",
			__func__,
			unlock_addrs[uaddr].addr1,
			unlock_addrs[uaddr].addr2);
		goto match_done;
	}

	/*
	 * Make sure the ID's disappear when the device is taken out of
	 * ID mode.  The only time this should fail when it should succeed
	 * is when the ID's are written as data to the same
	 * addresses.  For this rare and unfortunate case the chip
	 * cannot be probed correctly.
	 * FIXME - write a driver that takes all of the chip info as
	 * module parameters, doesn't probe but forces a load.
	 */
	pr_debug("MTD %s(): check ID's disappear when not in ID mode\n",
	       __func__ );
	jedec_reset( base, map, cfi );
	mfr = jedec_read_mfr( map, base, cfi );
	id = jedec_read_id( map, base, cfi );
	if ( mfr == cfi->mfr && id == cfi->id ) {
		pr_debug("MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
		       "You might need to manually specify JEDEC parameters.\n",
			__func__, cfi->mfr, cfi->id );
		goto match_done;
	}

	/* all tests passed - mark as success */
	rc = 1;

	/*
	 * Put the device back in ID mode - only need to do this if we
	 * were truly frobbing a real device.
	 */
	pr_debug("MTD %s(): return to ID mode\n", __func__ );
	if (cfi->addr_unlock1) {
		cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
	}
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
	/* FIXME - should have a delay before continuing */

 match_done:
	return rc;
}
/*
 * jedec_probe_chip - probe for a JEDEC chip at @base within @map.
 *
 * For the very first chip (cfi->numchips == 0) this cycles through every
 * unlock-address scheme in unlock_addrs[] (the "retry" loop), puts the
 * candidate chip into autoselect mode, reads its IDs, and tries each
 * jedec_table[] entry via jedec_match().  For subsequent chips it only
 * verifies the same mfr/id pair and then runs an alias check against all
 * previously found chip locations.  Sets the corresponding bit in
 * @chip_map and bumps cfi->numchips on success.  Returns 1 when a (new,
 * non-alias) chip was found, 0 otherwise.
 */
static int jedec_probe_chip(struct map_info *map, __u32 base,
			    unsigned long *chip_map, struct cfi_private *cfi)
{
	int i;
	enum uaddr uaddr_idx = MTD_UADDR_NOT_SUPPORTED;
	u32 probe_offset1, probe_offset2;

 retry:
	if (!cfi->numchips) {
		/* first chip: advance to the next unlock scheme to try */
		uaddr_idx++;

		if (MTD_UADDR_UNNECESSARY == uaddr_idx)
			return 0;	/* all schemes exhausted */

		cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1 / cfi->device_type;
		cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2 / cfi->device_type;
	}

	/* Make certain we aren't probing past the end of map */
	if (base >= map->size) {
		printk(KERN_NOTICE
		       "Probe at base(0x%08x) past the end of the map(0x%08lx)\n",
		       base, map->size -1);
		return 0;
	}
	/* Ensure the unlock addresses we try stay inside the map */
	probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, map, cfi);
	probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, map, cfi);
	if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
	     ((base + probe_offset2 + map_bankwidth(map)) >= map->size))
		goto retry;

	/* Reset */
	jedec_reset(base, map, cfi);

	/* Autoselect Mode */
	if(cfi->addr_unlock1) {
		cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
		cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
	}
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
	/* FIXME - should have a delay before continuing */

	if (!cfi->numchips) {
		/* This is the first time we're called.  Set up the CFI
		   stuff accordingly and return */

		cfi->mfr = jedec_read_mfr(map, base, cfi);
		cfi->id = jedec_read_id(map, base, cfi);
		pr_debug("Search for id:(%02x %02x) interleave(%d) type(%d)\n",
			cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
		for (i = 0; i < ARRAY_SIZE(jedec_table); i++) {
			if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
				pr_debug("MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
				       __func__, cfi->mfr, cfi->id,
				       cfi->addr_unlock1, cfi->addr_unlock2 );
				if (!cfi_jedec_setup(map, cfi, i))
					return 0;
				goto ok_out;
			}
		}
		/* no table entry matched under this unlock scheme */
		goto retry;
	} else {
		uint16_t mfr;
		uint16_t id;

		/* Make sure it is a chip of the same manufacturer and id */
		mfr = jedec_read_mfr(map, base, cfi);
		id = jedec_read_id(map, base, cfi);

		if ((mfr != cfi->mfr) || (id != cfi->id)) {
			printk(KERN_DEBUG "%s: Found different chip or no chip at all (mfr 0x%x, id 0x%x) at 0x%x\n",
			       map->name, mfr, id, base);
			jedec_reset(base, map, cfi);
			return 0;
		}
	}

	/* Check each previous chip locations to see if it's an alias */
	for (i=0; i < (base >> cfi->chipshift); i++) {
		unsigned long start;
		if(!test_bit(i, chip_map)) {
			continue;	/* Skip location; no valid chip at this address */
		}
		start = i << cfi->chipshift;
		if (jedec_read_mfr(map, start, cfi) == cfi->mfr &&
		    jedec_read_id(map, start, cfi) == cfi->id) {
			/* Eep.  This chip also looks like it's in autoselect mode.
			   Is it an alias for the new one? */
			jedec_reset(start, map, cfi);

			/* If the device IDs go away, it's an alias */
			if (jedec_read_mfr(map, base, cfi) != cfi->mfr ||
			    jedec_read_id(map, base, cfi) != cfi->id) {
				printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
				       map->name, base, start);
				return 0;
			}

			/* Yes, it's actually got the device IDs as data.  Most
			 * unfortunate.  Stick the new chip in read mode
			 * too and if it's the same, assume it's an alias. */
			/* FIXME: Use other modes to do a proper check */
			jedec_reset(base, map, cfi);
			if (jedec_read_mfr(map, base, cfi) == cfi->mfr &&
			    jedec_read_id(map, base, cfi) == cfi->id) {
				printk(KERN_DEBUG "%s: Found an alias at 0x%x for the chip at 0x%lx\n",
				       map->name, base, start);
				return 0;
			}
		}
	}

	/* OK, if we got to here, then none of the previous chips appear to
	   be aliases for the current one. */
	set_bit((base >> cfi->chipshift), chip_map);  /* Update chip map */
	cfi->numchips++;

ok_out:
	/* Put it back into Read Mode */
	jedec_reset(base, map, cfi);

	printk(KERN_INFO "%s: Found %d x%d devices at 0x%x in %d-bit bank\n",
	       map->name, cfi_interleave(cfi), cfi->device_type*8, base,
	       map->bankwidth*8);

	return 1;
}
/*
 * Probe ops handed to the generic chip-probe engine, which walks the map
 * and invokes jedec_probe_chip() for each potential chip location.
 */
static struct chip_probe jedec_chip_probe = {
	.name = "JEDEC",
	.probe_chip = jedec_probe_chip
};
/*
 * jedec_probe - map-driver probe entry point.
 *
 * Just use the generic probe stuff to call our CFI-specific
 * chip_probe routine in all the possible permutations, etc.
 */
static struct mtd_info *jedec_probe(struct map_info *map)
{
	return mtd_do_chip_probe(map, &jedec_chip_probe);
}
/* Registration record tying the "jedec_probe" name to our probe routine. */
static struct mtd_chip_driver jedec_chipdrv = {
	.probe = jedec_probe,
	.name = "jedec_probe",
	.module = THIS_MODULE
};
/* Module init: register the JEDEC probe driver with the MTD core. */
static int __init jedec_probe_init(void)
{
	register_mtd_chip_driver(&jedec_chipdrv);
	return 0;
}
/* Module exit: unregister the JEDEC probe driver. */
static void __exit jedec_probe_exit(void)
{
	unregister_mtd_chip_driver(&jedec_chipdrv);
}
module_init(jedec_probe_init);
module_exit(jedec_probe_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Erwin Authried <eauth@softsys.co.at> et al.");
MODULE_DESCRIPTION("Probe code for JEDEC-compliant flash chips");
| gpl-2.0 |
Twisted-Kernel/Sick-Twisted-Unified | lib/halfmd4.c | 7960 | 2028 | #include <linux/kernel.h>
#include <linux/export.h>
#include <linux/cryptohash.h>
/* F, G and H are basic MD4 functions: selection, majority, parity.
 * Note: G uses '+' instead of the textbook OR, but the two addends
 * (x&y) and ((x^y)&z) can never both be 1 in the same bit position,
 * so no carries occur and the result is still the bitwise majority. */
#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
#define H(x, y, z) ((x) ^ (y) ^ (z))

/*
 * The generic round function.  The application is so specific that
 * we don't bother protecting all the arguments with parens, as is generally
 * good macro practice, in favor of extra legibility.
 * Rotation is separate from addition to prevent recomputation
 */
#define ROUND(f, a, b, c, d, x, s)	\
	(a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
#define K1 0
#define K2 013240474631UL		/* 0x5A827999: MD4 round-2 constant */
#define K3 015666365641UL		/* 0x6ED9EBA1: MD4 round-3 constant */
/*
 * half_md4_transform - basic cut-down MD4 compression step.
 * @buf: four-word running state, folded into in place
 * @in:  eight input words to mix in
 *
 * Runs the three MD4 rounds over the eight input words, adds the result
 * back into @buf, and returns only buf[1] (the "most hashed" word).
 */
__u32 half_md4_transform(__u32 buf[4], __u32 const in[8])
{
	__u32 w0 = buf[0], w1 = buf[1], w2 = buf[2], w3 = buf[3];

	/* Round 1: F over in[0..7] in order, shifts 3/7/11/19 */
	ROUND(F, w0, w1, w2, w3, in[0] + K1, 3);
	ROUND(F, w3, w0, w1, w2, in[1] + K1, 7);
	ROUND(F, w2, w3, w0, w1, in[2] + K1, 11);
	ROUND(F, w1, w2, w3, w0, in[3] + K1, 19);
	ROUND(F, w0, w1, w2, w3, in[4] + K1, 3);
	ROUND(F, w3, w0, w1, w2, in[5] + K1, 7);
	ROUND(F, w2, w3, w0, w1, in[6] + K1, 11);
	ROUND(F, w1, w2, w3, w0, in[7] + K1, 19);

	/* Round 2: G, odd-indexed words then even, shifts 3/5/9/13 */
	ROUND(G, w0, w1, w2, w3, in[1] + K2, 3);
	ROUND(G, w3, w0, w1, w2, in[3] + K2, 5);
	ROUND(G, w2, w3, w0, w1, in[5] + K2, 9);
	ROUND(G, w1, w2, w3, w0, in[7] + K2, 13);
	ROUND(G, w0, w1, w2, w3, in[0] + K2, 3);
	ROUND(G, w3, w0, w1, w2, in[2] + K2, 5);
	ROUND(G, w2, w3, w0, w1, in[4] + K2, 9);
	ROUND(G, w1, w2, w3, w0, in[6] + K2, 13);

	/* Round 3: H, fixed permutation of the words, shifts 3/9/11/15 */
	ROUND(H, w0, w1, w2, w3, in[3] + K3, 3);
	ROUND(H, w3, w0, w1, w2, in[7] + K3, 9);
	ROUND(H, w2, w3, w0, w1, in[2] + K3, 11);
	ROUND(H, w1, w2, w3, w0, in[6] + K3, 15);
	ROUND(H, w0, w1, w2, w3, in[1] + K3, 3);
	ROUND(H, w3, w0, w1, w2, in[5] + K3, 9);
	ROUND(H, w2, w3, w0, w1, in[0] + K3, 11);
	ROUND(H, w1, w2, w3, w0, in[4] + K3, 15);

	buf[0] += w0;
	buf[1] += w1;
	buf[2] += w2;
	buf[3] += w3;

	return buf[1];	/* "most hashed" word */
}
EXPORT_SYMBOL(half_md4_transform);
| gpl-2.0 |
RenderBroken/msm8974_OPO-CAF_render_kernel | arch/ia64/sn/pci/tioca_provider.c | 9240 | 18857 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioca_provider.h>
/* Number of CAs found with a working GART; consumed by the agp-sgi driver. */
u32 tioca_gart_found;
EXPORT_SYMBOL(tioca_gart_found);	/* used by agp-sgi */

/* All tioca_kernel instances in the system, linked via ca_list. */
LIST_HEAD(tioca_list);
EXPORT_SYMBOL(tioca_list);	/* used by agp-sgi */

static int tioca_gart_init(struct tioca_kernel *);
/**
 * tioca_gart_init - Initialize SGI TIOCA GART
 * @tioca_kern: ptr to kernel-private struct identifying the CA
 *
 * If the indicated tioca has devices present, initialize its associated
 * GART MMR's and kernel memory.  Returns 0 on success (or when there are
 * no devices to serve), -ENOMEM / -1 on allocation failure, -1 on an
 * unsupported aperture size.
 */
static int
tioca_gart_init(struct tioca_kernel *tioca_kern)
{
	u64 ap_reg;
	u64 offset;
	struct page *tmp;
	struct tioca_common *tioca_common;
	struct tioca __iomem *ca_base;

	tioca_common = tioca_kern->ca_common;
	ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;

	/* nothing behind this CA - no GART needed */
	if (list_empty(tioca_kern->ca_devices))
		return 0;

	ap_reg = 0;

	/*
	 * Validate aperature size (build-time constant, encoded into the
	 * aperture register's size field)
	 */
	switch (CA_APERATURE_SIZE >> 20) {
	case 4:
		ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT);	/* 4MB */
		break;
	case 8:
		ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT);	/* 8MB */
		break;
	case 16:
		ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT);	/* 16MB */
		break;
	case 32:
		ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT);	/* 32 MB */
		break;
	case 64:
		ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT);	/* 64 MB */
		break;
	case 128:
		ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT);	/* 128 MB */
		break;
	case 256:
		ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT);	/* 256 MB */
		break;
	case 512:
		ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT);	/* 512 MB */
		break;
	case 1024:
		ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT);	/* 1GB */
		break;
	case 2048:
		ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT);	/* 2GB */
		break;
	case 4096:
		ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT);	/* 4 GB */
		break;
	default:
		printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE "
		       "0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE);
		return -1;
	}

	/*
	 * Set up other aperature parameters.  CA supports 4K or 16K GART
	 * pages; pick the largest one not exceeding the kernel PAGE_SIZE.
	 */
	if (PAGE_SIZE >= 16384) {
		tioca_kern->ca_ap_pagesize = 16384;
		ap_reg |= CA_GART_PAGE_SIZE;
	} else {
		tioca_kern->ca_ap_pagesize = 4096;
	}

	tioca_kern->ca_ap_size = CA_APERATURE_SIZE;
	tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE;
	tioca_kern->ca_gart_entries =
	    tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize;

	ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI);
	ap_reg |= tioca_kern->ca_ap_bus_base;

	/*
	 * Allocate and set up the GART (one u64 entry per aperture page,
	 * zeroed, allocated close to the CA's node)
	 */
	tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64);
	tmp =
	    alloc_pages_node(tioca_kern->ca_closest_node,
			     GFP_KERNEL | __GFP_ZERO,
			     get_order(tioca_kern->ca_gart_size));
	if (!tmp) {
		printk(KERN_ERR "%s: Could not allocate "
		       "%llu bytes (order %d) for GART\n",
		       __func__,
		       tioca_kern->ca_gart_size,
		       get_order(tioca_kern->ca_gart_size));
		return -ENOMEM;
	}

	tioca_kern->ca_gart = page_address(tmp);
	tioca_kern->ca_gart_coretalk_addr =
	    PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart));

	/*
	 * Compute PCI/AGP convenience fields: the aperture is split into a
	 * PCI32 window and an AGP window, each a sub-range of the GART.
	 */
	offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE;
	tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE;
	tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE;
	tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize;
	tioca_kern->ca_pcigart_base =
	    tioca_kern->ca_gart_coretalk_addr + offset;
	tioca_kern->ca_pcigart =
	    &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start];
	tioca_kern->ca_pcigart_entries =
	    tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
	/* allocation bitmap for the PCI portion of the GART (1 bit/entry) */
	tioca_kern->ca_pcigart_pagemap =
	    kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
	if (!tioca_kern->ca_pcigart_pagemap) {
		free_pages((unsigned long)tioca_kern->ca_gart,
			   get_order(tioca_kern->ca_gart_size));
		return -1;
	}

	offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE;
	tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE;
	tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE;
	tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize;
	tioca_kern->ca_gfxgart_base =
	    tioca_kern->ca_gart_coretalk_addr + offset;
	tioca_kern->ca_gfxgart =
	    &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start];
	tioca_kern->ca_gfxgart_entries =
	    tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize;

	/*
	 * various control settings:
	 * use agp op-combining
	 * use GET semantics to fetch memory
	 * participate in coherency domain
	 * DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
	 */
	__sn_setq_relaxed(&ca_base->ca_control1,
			CA_AGPDMA_OP_ENB_COMBDELAY);	/* PV895469 ? */
	__sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
	__sn_setq_relaxed(&ca_base->ca_control2,
			(0x2ull << CA_GART_MEM_PARAM_SHFT));
	tioca_kern->ca_gart_iscoherent = 1;
	__sn_clrq_relaxed(&ca_base->ca_control2,
			(CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB));

	/*
	 * Unmask GART fetch error interrupts.  Clear residual errors first.
	 */
	writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias);
	writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias);
	__sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR);

	/*
	 * Program the aperature and gart registers in TIOCA
	 * (low bit of the pointer-table register is presumably a valid/
	 * enable bit -- TODO confirm against the CA MMR spec)
	 */
	writeq(ap_reg, &ca_base->ca_gart_aperature);
	writeq(tioca_kern->ca_gart_coretalk_addr|1, &ca_base->ca_gart_ptr_table);

	return 0;
}
/**
* tioca_fastwrite_enable - enable AGP FW for a tioca and its functions
* @tioca_kernel: structure representing the CA
*
* Given a CA, scan all attached functions making sure they all support
* FastWrite. If so, enable FastWrite for all functions and the CA itself.
*/
void
tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
{
int cap_ptr;
u32 reg;
struct tioca __iomem *tioca_base;
struct pci_dev *pdev;
struct tioca_common *common;
common = tioca_kern->ca_common;
/*
* Scan all vga controllers on this bus making sure they all
* support FW. If not, return.
*/
list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
continue;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
if (!cap_ptr)
return; /* no AGP CAP means no FW */
pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, ®);
if (!(reg & PCI_AGP_STATUS_FW))
return; /* function doesn't support FW */
}
/*
* Set fw for all vga fn's
*/
list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
continue;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, ®);
reg |= PCI_AGP_COMMAND_FW;
pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg);
}
/*
* Set ca's fw to match
*/
tioca_base = (struct tioca __iomem*)common->ca_common.bs_base;
__sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE);
}
EXPORT_SYMBOL(tioca_fastwrite_enable); /* used by agp-sgi */
/**
 * tioca_dma_d64 - create a DMA mapping using 64-bit direct mode
 * @paddr: system physical address
 *
 * Map @paddr into 64-bit CA bus space.  No device context is necessary.
 * Bits 53:0 come from the coretalk address.  We just need to mask in the
 * following optional bits of the 64-bit pci address:
 *
 *	63:60 - Coretalk Packet Type -  0x1 for Mem Get/Put (coherent)
 *				       0x2 for PIO (non-coherent)
 *				       We will always use 0x1
 *	55:55 - Swap bytes		Currently unused
 */
static u64
tioca_dma_d64(unsigned long paddr)
{
	dma_addr_t addr = PHYS_TO_TIODMA(paddr);

	BUG_ON(!addr);
	BUG_ON(addr >> 54);

	/* upper nibble 0x1: coherent Mem Get/Put packet type */
	return addr | (1UL << 60);
}
/**
 * tioca_dma_d48 - create a DMA mapping using 48-bit direct mode
 * @pdev: linux pci_dev representing the function
 * @paddr: system physical address
 *
 * Map @paddr into 64-bit bus space of the CA associated with @pcidev_info.
 *
 * The CA agp 48 bit direct address falls out as follows:
 *
 * When direct mapping AGP addresses, the 48 bit AGP address is
 * constructed as follows:
 *
 *	[47:40] - Low 8 bits of the page Node ID extracted from coretalk
 *		address [47:40].  The upper 8 node bits are fixed
 *		and come from the xxx register bits [5:0]
 *	[39:38] - Chiplet ID extracted from coretalk address [39:38]
 *	[37:00] - node offset extracted from coretalk address [37:00]
 *
 * Since the node id in general will be non-zero, and the chiplet id
 * will always be non-zero, it follows that the device must support
 * a dma mask of at least 0xffffffffff (40 bits) to target node 0
 * and in general should be 0xffffffffffff (48 bits) to target nodes
 * up to 255.  Nodes above 255 need the support of the xxx register,
 * and so a given CA can only directly target nodes in the range
 * xxx - xxx+255.
 *
 * Returns the 48-bit bus address, or 0 when the address cannot be
 * direct-mapped (untranslatable paddr, node id out of range, or node
 * bits not matching the CA's programmed extension register).
 */
static u64
tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
{
	struct tioca_common *tioca_common;
	struct tioca __iomem *ca_base;
	u64 ct_addr;
	dma_addr_t bus_addr;
	u32 node_upper;
	u64 agp_dma_extn;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);

	tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
	ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;

	ct_addr = PHYS_TO_TIODMA(paddr);
	if (!ct_addr)
		return 0;

	/* keep the low 48 bits; bits 48+ carry the upper node id */
	bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffffUL);
	node_upper = ct_addr >> 48;

	/* NOTE(review): limit of 64 looks inconsistent with the "up to
	 * 255 nodes" text above -- confirm against the CA spec */
	if (node_upper > 64) {
		printk(KERN_ERR "%s: coretalk addr 0x%p node id out "
		       "of range\n", __func__, (void *)ct_addr);
		return 0;
	}

	/* the upper node bits must match what the CA is programmed for */
	agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn);
	if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
		printk(KERN_ERR "%s: coretalk upper node (%u) "
		       "mismatch with ca_agp_dma_addr_extn (%llu)\n",
		       __func__,
		       node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
		return 0;
	}

	return bus_addr;
}
/**
 * tioca_dma_mapped - create a DMA mapping using a CA GART
 * @pdev: linux pci_dev representing the function
 * @paddr: host physical address to map
 * @req_size: len (bytes) to map
 *
 * Map @paddr into CA address space using the GART mechanism.  The mapped
 * dma_addr_t is guaranteed to be contiguous in CA bus space.  Returns 0
 * on failure (untranslatable address, allocation failure, or no free
 * run of GART entries large enough).
 */
static dma_addr_t
tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
	int ps, ps_shift, entry, entries, mapsize;
	u64 xio_addr, end_xio_addr;
	struct tioca_common *tioca_common;
	struct tioca_kernel *tioca_kern;
	dma_addr_t bus_addr = 0;
	struct tioca_dmamap *ca_dmamap;
	void *map;
	unsigned long flags;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);

	tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
	tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;

	xio_addr = PHYS_TO_TIODMA(paddr);
	if (!xio_addr)
		return 0;

	/* ca_lock protects the GART bitmap, entries, and the dmamap list */
	spin_lock_irqsave(&tioca_kern->ca_lock, flags);

	/*
	 * allocate a map struct (GFP_ATOMIC: we hold a spinlock here)
	 */
	ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC);
	if (!ca_dmamap)
		goto map_return;

	/*
	 * Locate free entries that can hold req_size.  Account for
	 * unaligned start/length when allocating.
	 */
	ps = tioca_kern->ca_ap_pagesize;	/* will be power of 2 */
	ps_shift = ffs(ps) - 1;
	end_xio_addr = xio_addr + req_size - 1;

	/* number of GART pages the [xio_addr, end_xio_addr] span touches */
	entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1;

	map = tioca_kern->ca_pcigart_pagemap;
	mapsize = tioca_kern->ca_pcigart_entries;

	/* find a contiguous run of free entries in the PCI GART bitmap */
	entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
	if (entry >= mapsize) {
		kfree(ca_dmamap);
		goto map_return;
	}

	bitmap_set(map, entry, entries);

	bus_addr = tioca_kern->ca_pciap_base + (entry * ps);

	/* record the mapping so tioca_dma_unmap() can find and undo it */
	ca_dmamap->cad_dma_addr = bus_addr;
	ca_dmamap->cad_gart_size = entries;
	ca_dmamap->cad_gart_entry = entry;
	list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps);

	/* first page may start mid-page: map it, then align xio_addr up */
	if (xio_addr % ps) {
		tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
		bus_addr += xio_addr & (ps - 1);
		xio_addr &= ~(ps - 1);
		xio_addr += ps;
		entry++;
	}

	/* fill the remaining GART entries, one aperture page at a time */
	while (xio_addr < end_xio_addr) {
		tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
		xio_addr += ps;
		entry++;
	}

	tioca_tlbflush(tioca_kern);

map_return:
	spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
	return bus_addr;
}
/**
 * tioca_dma_unmap - release CA mapping resources
 * @pdev: linux pci_dev representing the function
 * @bus_addr: bus address returned by an earlier tioca_dma_map
 * @dir: mapping direction (unused)
 *
 * Locate mapping resources associated with @bus_addr and release them.
 * For mappings created using the direct modes (64 or 48) there are no
 * resources to release.
 */
static void
tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
{
	int i, entry;
	struct tioca_common *tioca_common;
	struct tioca_kernel *tioca_kern;
	struct tioca_dmamap *map = NULL, *cur;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
	unsigned long flags;

	tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
	tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;

	/* return straight away if this isn't a GART-mapped address */
	if (bus_addr < tioca_kern->ca_pciap_base ||
	    bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size))
		return;

	spin_lock_irqsave(&tioca_kern->ca_lock, flags);

	/*
	 * Fix: the original did BUG_ON(map == NULL) on the iterator of
	 * list_for_each_entry(), which is never NULL -- an unmatched
	 * bus_addr would have silently fallen through with the iterator
	 * pointing at the list-head container and corrupted memory.
	 * Track the match explicitly so the BUG actually fires on a
	 * bogus unmap request.
	 */
	list_for_each_entry(cur, &tioca_kern->ca_dmamaps, cad_list)
		if (cur->cad_dma_addr == bus_addr) {
			map = cur;
			break;
		}

	BUG_ON(map == NULL);

	/* free the GART entries and invalidate them */
	entry = map->cad_gart_entry;
	for (i = 0; i < map->cad_gart_size; i++, entry++) {
		clear_bit(entry, tioca_kern->ca_pcigart_pagemap);
		tioca_kern->ca_pcigart[entry] = 0;
	}
	tioca_tlbflush(tioca_kern);

	list_del(&map->cad_list);
	spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
	kfree(map);
}
/**
 * tioca_dma_map - map pages for PCI DMA
 * @pdev: linux pci_dev representing the function
 * @paddr: host physical address to map
 * @byte_count: bytes to map
 *
 * This is the main wrapper for mapping host physical pages to CA PCI
 * space.  The mapping mode used is based on the device's dma_mask; the
 * GART mapped mode is the last resort.  MSI targets are not supported.
 */
static u64
tioca_dma_map(struct pci_dev *pdev, unsigned long paddr, size_t byte_count, int dma_flags)
{
	u64 bus_addr = 0;

	/*
	 * Not supported for now ...
	 */
	if (dma_flags & SN_DMA_MSI)
		return 0;

	/*
	 * If card is 64 or 48 bit addressable, use a direct mapping.  32
	 * bit direct is so restrictive w.r.t. where the memory resides that
	 * we don't use it even though CA has some support.
	 */
	if (pdev->dma_mask == ~0UL)
		bus_addr = tioca_dma_d64(paddr);
	else if (pdev->dma_mask == 0xffffffffffffUL)
		bus_addr = tioca_dma_d48(pdev, paddr);

	/* Last resort ... use PCI portion of CA GART */
	if (!bus_addr)
		bus_addr = tioca_dma_mapped(pdev, paddr, byte_count);

	return bus_addr;
}
/**
 * tioca_error_intr_handler - SGI TIO CA error interrupt handler
 * @irq: unused
 * @arg: pointer to tioca_common struct for the given CA
 *
 * Handle a CA error interrupt.  Simply a wrapper around a SAL call which
 * defers processing to the SGI prom.
 */
static irqreturn_t
tioca_error_intr_handler(int irq, void *arg)
{
	struct tioca_common *soft = arg;
	struct ia64_sal_retval ret_stuff;
	u64 seg = soft->ca_common.bs_persist_segment;
	u64 bus = soft->ca_common.bs_persist_busnum;

	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	SAL_CALL_NOLOCK(ret_stuff,
			(u64) SN_SAL_IOIF_ERROR_INTERRUPT,
			seg, bus, 0, 0, 0, 0, 0);

	return IRQ_HANDLED;
}
/**
* tioca_bus_fixup - perform final PCI fixup for a TIO CA bus
* @prom_bussoft: Common prom/kernel struct representing the bus
*
* Replicates the tioca_common pointed to by @prom_bussoft in kernel
* space. Allocates and initializes a kernel-only area for a given CA,
* and sets up an irq for handling CA error interrupts.
*
* On successful setup, returns the kernel version of tioca_common back to
* the caller.
*/
static void *
tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
struct pci_bus *bus;
/* sanity check prom rev */
if (is_shub1() && sn_sal_rev() < 0x0406) {
printk
(KERN_ERR "%s: SGI prom rev 4.06 or greater required "
"for tioca support\n", __func__);
return NULL;
}
/*
* Allocate kernel bus soft and copy from prom.
*/
tioca_common = kmemdup(prom_bussoft, sizeof(struct tioca_common),
GFP_KERNEL);
if (!tioca_common)
return NULL;
tioca_common->ca_common.bs_base = (unsigned long)
ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base),
sizeof(struct tioca_common));
/* init kernel-private area */
tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL);
if (!tioca_kern) {
kfree(tioca_common);
return NULL;
}
tioca_kern->ca_common = tioca_common;
spin_lock_init(&tioca_kern->ca_lock);
INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
tioca_kern->ca_closest_node =
nasid_to_cnodeid(tioca_common->ca_closest_nasid);
tioca_common->ca_kernel_private = (u64) tioca_kern;
bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment,
tioca_common->ca_common.bs_persist_busnum);
BUG_ON(!bus);
tioca_kern->ca_devices = &bus->devices;
/* init GART */
if (tioca_gart_init(tioca_kern) < 0) {
kfree(tioca_kern);
kfree(tioca_common);
return NULL;
}
tioca_gart_found++;
list_add(&tioca_kern->ca_list, &tioca_list);
if (request_irq(SGI_TIOCA_ERROR,
tioca_error_intr_handler,
IRQF_SHARED, "TIOCA error", (void *)tioca_common))
printk(KERN_WARNING
"%s: Unable to get irq %d. "
"Error interrupts won't be routed for TIOCA bus %d\n",
__func__, SGI_TIOCA_ERROR,
(int)tioca_common->ca_common.bs_persist_busnum);
irq_set_handler(SGI_TIOCA_ERROR, handle_level_irq);
sn_set_err_irq_affinity(SGI_TIOCA_ERROR);
/* Setup locality information */
controller->node = tioca_kern->ca_closest_node;
return tioca_common;
}
/*
 * SN PCI provider operations for the TIO CA ASIC.  Note that
 * .dma_map_consistent deliberately points at the same tioca_dma_map
 * routine as .dma_map; forced/targeted interrupts are not implemented
 * for this ASIC, hence the NULL hooks.
 */
static struct sn_pcibus_provider tioca_pci_interfaces = {
.dma_map = tioca_dma_map,
.dma_map_consistent = tioca_dma_map,
.dma_unmap = tioca_dma_unmap,
.bus_fixup = tioca_bus_fixup,
.force_interrupt = NULL,
.target_interrupt = NULL
};
/**
 * tioca_init_provider - init SN PCI provider ops for TIO CA
 *
 * Registers the TIO CA operation table in the global sn_pci_provider[]
 * array so the SN PCI core uses it for PCIIO_ASIC_TYPE_TIOCA busses.
 *
 * Return: always 0.
 */
int
tioca_init_provider(void)
{
sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces;
return 0;
}
| gpl-2.0 |
n3ocort3x/Kernel_one_x_sense | arch/parisc/kernel/asm-offsets.c | 11800 | 16486 | /*
* Generate definitions needed by assembly language modules.
* This code generates raw asm output which is post-processed to extract
* and format the required data.
*
* Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
* Copyright (C) 2000 Sam Creasey <sammy@sammy.net>
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2001 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2001 Richard Hirst <rhirst at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/kbuild.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/pdc.h>
#include <asm/uaccess.h>
/* Size of a procedure call frame (used by align_frame below):
 * 128 bytes on 64-bit kernels, 64 bytes on 32-bit kernels. */
#ifdef CONFIG_64BIT
#define FRAME_SIZE 128
#else
#define FRAME_SIZE 64
#endif
/* Required stack frame alignment. */
#define FRAME_ALIGN 64
/* Add FRAME_SIZE to the size x and align it to y. All definitions
 * that use align_frame will include space for a frame.
 */
#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
/*
 * Emit assembler-visible constants (field offsets and structure sizes)
 * via the kbuild DEFINE()/BLANK() mechanism; the generated asm is
 * post-processed into a header consumed by the parisc assembly sources.
 */
int main(void)
{
/* frequently accessed struct task_struct fields */
DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_PERSONALITY, offsetof(struct task_struct, personality));
DEFINE(TASK_PID, offsetof(struct task_struct, pid));
BLANK();
/* saved register state (struct pt_regs) embedded at thread.regs */
DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[ 1]));
DEFINE(TASK_PT_GR2, offsetof(struct task_struct, thread.regs.gr[ 2]));
DEFINE(TASK_PT_GR3, offsetof(struct task_struct, thread.regs.gr[ 3]));
DEFINE(TASK_PT_GR4, offsetof(struct task_struct, thread.regs.gr[ 4]));
DEFINE(TASK_PT_GR5, offsetof(struct task_struct, thread.regs.gr[ 5]));
DEFINE(TASK_PT_GR6, offsetof(struct task_struct, thread.regs.gr[ 6]));
DEFINE(TASK_PT_GR7, offsetof(struct task_struct, thread.regs.gr[ 7]));
DEFINE(TASK_PT_GR8, offsetof(struct task_struct, thread.regs.gr[ 8]));
DEFINE(TASK_PT_GR9, offsetof(struct task_struct, thread.regs.gr[ 9]));
DEFINE(TASK_PT_GR10, offsetof(struct task_struct, thread.regs.gr[10]));
DEFINE(TASK_PT_GR11, offsetof(struct task_struct, thread.regs.gr[11]));
DEFINE(TASK_PT_GR12, offsetof(struct task_struct, thread.regs.gr[12]));
DEFINE(TASK_PT_GR13, offsetof(struct task_struct, thread.regs.gr[13]));
DEFINE(TASK_PT_GR14, offsetof(struct task_struct, thread.regs.gr[14]));
DEFINE(TASK_PT_GR15, offsetof(struct task_struct, thread.regs.gr[15]));
DEFINE(TASK_PT_GR16, offsetof(struct task_struct, thread.regs.gr[16]));
DEFINE(TASK_PT_GR17, offsetof(struct task_struct, thread.regs.gr[17]));
DEFINE(TASK_PT_GR18, offsetof(struct task_struct, thread.regs.gr[18]));
DEFINE(TASK_PT_GR19, offsetof(struct task_struct, thread.regs.gr[19]));
DEFINE(TASK_PT_GR20, offsetof(struct task_struct, thread.regs.gr[20]));
DEFINE(TASK_PT_GR21, offsetof(struct task_struct, thread.regs.gr[21]));
DEFINE(TASK_PT_GR22, offsetof(struct task_struct, thread.regs.gr[22]));
DEFINE(TASK_PT_GR23, offsetof(struct task_struct, thread.regs.gr[23]));
DEFINE(TASK_PT_GR24, offsetof(struct task_struct, thread.regs.gr[24]));
DEFINE(TASK_PT_GR25, offsetof(struct task_struct, thread.regs.gr[25]));
DEFINE(TASK_PT_GR26, offsetof(struct task_struct, thread.regs.gr[26]));
DEFINE(TASK_PT_GR27, offsetof(struct task_struct, thread.regs.gr[27]));
DEFINE(TASK_PT_GR28, offsetof(struct task_struct, thread.regs.gr[28]));
DEFINE(TASK_PT_GR29, offsetof(struct task_struct, thread.regs.gr[29]));
DEFINE(TASK_PT_GR30, offsetof(struct task_struct, thread.regs.gr[30]));
DEFINE(TASK_PT_GR31, offsetof(struct task_struct, thread.regs.gr[31]));
DEFINE(TASK_PT_FR0, offsetof(struct task_struct, thread.regs.fr[ 0]));
DEFINE(TASK_PT_FR1, offsetof(struct task_struct, thread.regs.fr[ 1]));
DEFINE(TASK_PT_FR2, offsetof(struct task_struct, thread.regs.fr[ 2]));
DEFINE(TASK_PT_FR3, offsetof(struct task_struct, thread.regs.fr[ 3]));
DEFINE(TASK_PT_FR4, offsetof(struct task_struct, thread.regs.fr[ 4]));
DEFINE(TASK_PT_FR5, offsetof(struct task_struct, thread.regs.fr[ 5]));
DEFINE(TASK_PT_FR6, offsetof(struct task_struct, thread.regs.fr[ 6]));
DEFINE(TASK_PT_FR7, offsetof(struct task_struct, thread.regs.fr[ 7]));
DEFINE(TASK_PT_FR8, offsetof(struct task_struct, thread.regs.fr[ 8]));
DEFINE(TASK_PT_FR9, offsetof(struct task_struct, thread.regs.fr[ 9]));
DEFINE(TASK_PT_FR10, offsetof(struct task_struct, thread.regs.fr[10]));
DEFINE(TASK_PT_FR11, offsetof(struct task_struct, thread.regs.fr[11]));
DEFINE(TASK_PT_FR12, offsetof(struct task_struct, thread.regs.fr[12]));
DEFINE(TASK_PT_FR13, offsetof(struct task_struct, thread.regs.fr[13]));
DEFINE(TASK_PT_FR14, offsetof(struct task_struct, thread.regs.fr[14]));
DEFINE(TASK_PT_FR15, offsetof(struct task_struct, thread.regs.fr[15]));
DEFINE(TASK_PT_FR16, offsetof(struct task_struct, thread.regs.fr[16]));
DEFINE(TASK_PT_FR17, offsetof(struct task_struct, thread.regs.fr[17]));
DEFINE(TASK_PT_FR18, offsetof(struct task_struct, thread.regs.fr[18]));
DEFINE(TASK_PT_FR19, offsetof(struct task_struct, thread.regs.fr[19]));
DEFINE(TASK_PT_FR20, offsetof(struct task_struct, thread.regs.fr[20]));
DEFINE(TASK_PT_FR21, offsetof(struct task_struct, thread.regs.fr[21]));
DEFINE(TASK_PT_FR22, offsetof(struct task_struct, thread.regs.fr[22]));
DEFINE(TASK_PT_FR23, offsetof(struct task_struct, thread.regs.fr[23]));
DEFINE(TASK_PT_FR24, offsetof(struct task_struct, thread.regs.fr[24]));
DEFINE(TASK_PT_FR25, offsetof(struct task_struct, thread.regs.fr[25]));
DEFINE(TASK_PT_FR26, offsetof(struct task_struct, thread.regs.fr[26]));
DEFINE(TASK_PT_FR27, offsetof(struct task_struct, thread.regs.fr[27]));
DEFINE(TASK_PT_FR28, offsetof(struct task_struct, thread.regs.fr[28]));
DEFINE(TASK_PT_FR29, offsetof(struct task_struct, thread.regs.fr[29]));
DEFINE(TASK_PT_FR30, offsetof(struct task_struct, thread.regs.fr[30]));
DEFINE(TASK_PT_FR31, offsetof(struct task_struct, thread.regs.fr[31]));
DEFINE(TASK_PT_SR0, offsetof(struct task_struct, thread.regs.sr[ 0]));
DEFINE(TASK_PT_SR1, offsetof(struct task_struct, thread.regs.sr[ 1]));
DEFINE(TASK_PT_SR2, offsetof(struct task_struct, thread.regs.sr[ 2]));
DEFINE(TASK_PT_SR3, offsetof(struct task_struct, thread.regs.sr[ 3]));
DEFINE(TASK_PT_SR4, offsetof(struct task_struct, thread.regs.sr[ 4]));
DEFINE(TASK_PT_SR5, offsetof(struct task_struct, thread.regs.sr[ 5]));
DEFINE(TASK_PT_SR6, offsetof(struct task_struct, thread.regs.sr[ 6]));
DEFINE(TASK_PT_SR7, offsetof(struct task_struct, thread.regs.sr[ 7]));
DEFINE(TASK_PT_IASQ0, offsetof(struct task_struct, thread.regs.iasq[0]));
DEFINE(TASK_PT_IASQ1, offsetof(struct task_struct, thread.regs.iasq[1]));
DEFINE(TASK_PT_IAOQ0, offsetof(struct task_struct, thread.regs.iaoq[0]));
DEFINE(TASK_PT_IAOQ1, offsetof(struct task_struct, thread.regs.iaoq[1]));
DEFINE(TASK_PT_CR27, offsetof(struct task_struct, thread.regs.cr27));
DEFINE(TASK_PT_ORIG_R28, offsetof(struct task_struct, thread.regs.orig_r28));
DEFINE(TASK_PT_KSP, offsetof(struct task_struct, thread.regs.ksp));
DEFINE(TASK_PT_KPC, offsetof(struct task_struct, thread.regs.kpc));
DEFINE(TASK_PT_SAR, offsetof(struct task_struct, thread.regs.sar));
DEFINE(TASK_PT_IIR, offsetof(struct task_struct, thread.regs.iir));
DEFINE(TASK_PT_ISR, offsetof(struct task_struct, thread.regs.isr));
DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
BLANK();
DEFINE(TASK_SZ, sizeof(struct task_struct));
/* TASK_SZ_ALGN includes space for a stack frame. */
DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
BLANK();
/* the same register layout, relative to a bare struct pt_regs */
DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
DEFINE(PT_GR2, offsetof(struct pt_regs, gr[ 2]));
DEFINE(PT_GR3, offsetof(struct pt_regs, gr[ 3]));
DEFINE(PT_GR4, offsetof(struct pt_regs, gr[ 4]));
DEFINE(PT_GR5, offsetof(struct pt_regs, gr[ 5]));
DEFINE(PT_GR6, offsetof(struct pt_regs, gr[ 6]));
DEFINE(PT_GR7, offsetof(struct pt_regs, gr[ 7]));
DEFINE(PT_GR8, offsetof(struct pt_regs, gr[ 8]));
DEFINE(PT_GR9, offsetof(struct pt_regs, gr[ 9]));
DEFINE(PT_GR10, offsetof(struct pt_regs, gr[10]));
DEFINE(PT_GR11, offsetof(struct pt_regs, gr[11]));
DEFINE(PT_GR12, offsetof(struct pt_regs, gr[12]));
DEFINE(PT_GR13, offsetof(struct pt_regs, gr[13]));
DEFINE(PT_GR14, offsetof(struct pt_regs, gr[14]));
DEFINE(PT_GR15, offsetof(struct pt_regs, gr[15]));
DEFINE(PT_GR16, offsetof(struct pt_regs, gr[16]));
DEFINE(PT_GR17, offsetof(struct pt_regs, gr[17]));
DEFINE(PT_GR18, offsetof(struct pt_regs, gr[18]));
DEFINE(PT_GR19, offsetof(struct pt_regs, gr[19]));
DEFINE(PT_GR20, offsetof(struct pt_regs, gr[20]));
DEFINE(PT_GR21, offsetof(struct pt_regs, gr[21]));
DEFINE(PT_GR22, offsetof(struct pt_regs, gr[22]));
DEFINE(PT_GR23, offsetof(struct pt_regs, gr[23]));
DEFINE(PT_GR24, offsetof(struct pt_regs, gr[24]));
DEFINE(PT_GR25, offsetof(struct pt_regs, gr[25]));
DEFINE(PT_GR26, offsetof(struct pt_regs, gr[26]));
DEFINE(PT_GR27, offsetof(struct pt_regs, gr[27]));
DEFINE(PT_GR28, offsetof(struct pt_regs, gr[28]));
DEFINE(PT_GR29, offsetof(struct pt_regs, gr[29]));
DEFINE(PT_GR30, offsetof(struct pt_regs, gr[30]));
DEFINE(PT_GR31, offsetof(struct pt_regs, gr[31]));
DEFINE(PT_FR0, offsetof(struct pt_regs, fr[ 0]));
DEFINE(PT_FR1, offsetof(struct pt_regs, fr[ 1]));
DEFINE(PT_FR2, offsetof(struct pt_regs, fr[ 2]));
DEFINE(PT_FR3, offsetof(struct pt_regs, fr[ 3]));
DEFINE(PT_FR4, offsetof(struct pt_regs, fr[ 4]));
DEFINE(PT_FR5, offsetof(struct pt_regs, fr[ 5]));
DEFINE(PT_FR6, offsetof(struct pt_regs, fr[ 6]));
DEFINE(PT_FR7, offsetof(struct pt_regs, fr[ 7]));
DEFINE(PT_FR8, offsetof(struct pt_regs, fr[ 8]));
DEFINE(PT_FR9, offsetof(struct pt_regs, fr[ 9]));
DEFINE(PT_FR10, offsetof(struct pt_regs, fr[10]));
DEFINE(PT_FR11, offsetof(struct pt_regs, fr[11]));
DEFINE(PT_FR12, offsetof(struct pt_regs, fr[12]));
DEFINE(PT_FR13, offsetof(struct pt_regs, fr[13]));
DEFINE(PT_FR14, offsetof(struct pt_regs, fr[14]));
DEFINE(PT_FR15, offsetof(struct pt_regs, fr[15]));
DEFINE(PT_FR16, offsetof(struct pt_regs, fr[16]));
DEFINE(PT_FR17, offsetof(struct pt_regs, fr[17]));
DEFINE(PT_FR18, offsetof(struct pt_regs, fr[18]));
DEFINE(PT_FR19, offsetof(struct pt_regs, fr[19]));
DEFINE(PT_FR20, offsetof(struct pt_regs, fr[20]));
DEFINE(PT_FR21, offsetof(struct pt_regs, fr[21]));
DEFINE(PT_FR22, offsetof(struct pt_regs, fr[22]));
DEFINE(PT_FR23, offsetof(struct pt_regs, fr[23]));
DEFINE(PT_FR24, offsetof(struct pt_regs, fr[24]));
DEFINE(PT_FR25, offsetof(struct pt_regs, fr[25]));
DEFINE(PT_FR26, offsetof(struct pt_regs, fr[26]));
DEFINE(PT_FR27, offsetof(struct pt_regs, fr[27]));
DEFINE(PT_FR28, offsetof(struct pt_regs, fr[28]));
DEFINE(PT_FR29, offsetof(struct pt_regs, fr[29]));
DEFINE(PT_FR30, offsetof(struct pt_regs, fr[30]));
DEFINE(PT_FR31, offsetof(struct pt_regs, fr[31]));
DEFINE(PT_SR0, offsetof(struct pt_regs, sr[ 0]));
DEFINE(PT_SR1, offsetof(struct pt_regs, sr[ 1]));
DEFINE(PT_SR2, offsetof(struct pt_regs, sr[ 2]));
DEFINE(PT_SR3, offsetof(struct pt_regs, sr[ 3]));
DEFINE(PT_SR4, offsetof(struct pt_regs, sr[ 4]));
DEFINE(PT_SR5, offsetof(struct pt_regs, sr[ 5]));
DEFINE(PT_SR6, offsetof(struct pt_regs, sr[ 6]));
DEFINE(PT_SR7, offsetof(struct pt_regs, sr[ 7]));
DEFINE(PT_IASQ0, offsetof(struct pt_regs, iasq[0]));
DEFINE(PT_IASQ1, offsetof(struct pt_regs, iasq[1]));
DEFINE(PT_IAOQ0, offsetof(struct pt_regs, iaoq[0]));
DEFINE(PT_IAOQ1, offsetof(struct pt_regs, iaoq[1]));
DEFINE(PT_CR27, offsetof(struct pt_regs, cr27));
DEFINE(PT_ORIG_R28, offsetof(struct pt_regs, orig_r28));
DEFINE(PT_KSP, offsetof(struct pt_regs, ksp));
DEFINE(PT_KPC, offsetof(struct pt_regs, kpc));
DEFINE(PT_SAR, offsetof(struct pt_regs, sar));
DEFINE(PT_IIR, offsetof(struct pt_regs, iir));
DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
DEFINE(PT_SIZE, sizeof(struct pt_regs));
/* PT_SZ_ALGN includes space for a stack frame. */
DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
BLANK();
/* struct thread_info fields */
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(THREAD_SZ, sizeof(struct thread_info));
/* THREAD_SZ_ALGN includes space for a stack frame. */
DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
BLANK();
/* PDC cache/TLB geometry (struct pdc_cache_info) */
DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
DEFINE(ICACHE_LOOP, offsetof(struct pdc_cache_info, ic_loop));
DEFINE(DCACHE_BASE, offsetof(struct pdc_cache_info, dc_base));
DEFINE(DCACHE_STRIDE, offsetof(struct pdc_cache_info, dc_stride));
DEFINE(DCACHE_COUNT, offsetof(struct pdc_cache_info, dc_count));
DEFINE(DCACHE_LOOP, offsetof(struct pdc_cache_info, dc_loop));
DEFINE(ITLB_SID_BASE, offsetof(struct pdc_cache_info, it_sp_base));
DEFINE(ITLB_SID_STRIDE, offsetof(struct pdc_cache_info, it_sp_stride));
DEFINE(ITLB_SID_COUNT, offsetof(struct pdc_cache_info, it_sp_count));
DEFINE(ITLB_OFF_BASE, offsetof(struct pdc_cache_info, it_off_base));
DEFINE(ITLB_OFF_STRIDE, offsetof(struct pdc_cache_info, it_off_stride));
DEFINE(ITLB_OFF_COUNT, offsetof(struct pdc_cache_info, it_off_count));
DEFINE(ITLB_LOOP, offsetof(struct pdc_cache_info, it_loop));
DEFINE(DTLB_SID_BASE, offsetof(struct pdc_cache_info, dt_sp_base));
DEFINE(DTLB_SID_STRIDE, offsetof(struct pdc_cache_info, dt_sp_stride));
DEFINE(DTLB_SID_COUNT, offsetof(struct pdc_cache_info, dt_sp_count));
DEFINE(DTLB_OFF_BASE, offsetof(struct pdc_cache_info, dt_off_base));
DEFINE(DTLB_OFF_STRIDE, offsetof(struct pdc_cache_info, dt_off_stride));
DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
BLANK();
/* thread flag bits translated via 31-x for assembly use */
DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
BLANK();
/* page-table geometry constants */
DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD);
DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD);
DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE);
DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER));
DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT));
DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT);
DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
DEFINE(ASM_PT_INITIAL, PT_INITIAL);
BLANK();
/* struct exception_data fault-record fields */
DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
BLANK();
DEFINE(ASM_PDC_RESULT_SIZE, NUM_PDC_RESULT * sizeof(unsigned long));
BLANK();
return 0;
}
| gpl-2.0 |
ChaOSChriS/android_kernel_google_msm | arch/parisc/kernel/asm-offsets.c | 11800 | 16486 | /*
* Generate definitions needed by assembly language modules.
* This code generates raw asm output which is post-processed to extract
* and format the required data.
*
* Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
* Copyright (C) 2000 Sam Creasey <sammy@sammy.net>
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2001 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2001 Richard Hirst <rhirst at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
* Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/kbuild.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/pdc.h>
#include <asm/uaccess.h>
/* Size of a procedure call frame (used by align_frame below):
 * 128 bytes on 64-bit kernels, 64 bytes on 32-bit kernels. */
#ifdef CONFIG_64BIT
#define FRAME_SIZE 128
#else
#define FRAME_SIZE 64
#endif
/* Required stack frame alignment. */
#define FRAME_ALIGN 64
/* Add FRAME_SIZE to the size x and align it to y. All definitions
 * that use align_frame will include space for a frame.
 */
#define align_frame(x,y) (((x)+FRAME_SIZE+(y)-1) - (((x)+(y)-1)%(y)))
/*
 * Emit assembler-visible constants (field offsets and structure sizes)
 * via the kbuild DEFINE()/BLANK() mechanism; the generated asm is
 * post-processed into a header consumed by the parisc assembly sources.
 */
int main(void)
{
/* frequently accessed struct task_struct fields */
DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_PERSONALITY, offsetof(struct task_struct, personality));
DEFINE(TASK_PID, offsetof(struct task_struct, pid));
BLANK();
/* saved register state (struct pt_regs) embedded at thread.regs */
DEFINE(TASK_REGS, offsetof(struct task_struct, thread.regs));
DEFINE(TASK_PT_PSW, offsetof(struct task_struct, thread.regs.gr[ 0]));
DEFINE(TASK_PT_GR1, offsetof(struct task_struct, thread.regs.gr[ 1]));
DEFINE(TASK_PT_GR2, offsetof(struct task_struct, thread.regs.gr[ 2]));
DEFINE(TASK_PT_GR3, offsetof(struct task_struct, thread.regs.gr[ 3]));
DEFINE(TASK_PT_GR4, offsetof(struct task_struct, thread.regs.gr[ 4]));
DEFINE(TASK_PT_GR5, offsetof(struct task_struct, thread.regs.gr[ 5]));
DEFINE(TASK_PT_GR6, offsetof(struct task_struct, thread.regs.gr[ 6]));
DEFINE(TASK_PT_GR7, offsetof(struct task_struct, thread.regs.gr[ 7]));
DEFINE(TASK_PT_GR8, offsetof(struct task_struct, thread.regs.gr[ 8]));
DEFINE(TASK_PT_GR9, offsetof(struct task_struct, thread.regs.gr[ 9]));
DEFINE(TASK_PT_GR10, offsetof(struct task_struct, thread.regs.gr[10]));
DEFINE(TASK_PT_GR11, offsetof(struct task_struct, thread.regs.gr[11]));
DEFINE(TASK_PT_GR12, offsetof(struct task_struct, thread.regs.gr[12]));
DEFINE(TASK_PT_GR13, offsetof(struct task_struct, thread.regs.gr[13]));
DEFINE(TASK_PT_GR14, offsetof(struct task_struct, thread.regs.gr[14]));
DEFINE(TASK_PT_GR15, offsetof(struct task_struct, thread.regs.gr[15]));
DEFINE(TASK_PT_GR16, offsetof(struct task_struct, thread.regs.gr[16]));
DEFINE(TASK_PT_GR17, offsetof(struct task_struct, thread.regs.gr[17]));
DEFINE(TASK_PT_GR18, offsetof(struct task_struct, thread.regs.gr[18]));
DEFINE(TASK_PT_GR19, offsetof(struct task_struct, thread.regs.gr[19]));
DEFINE(TASK_PT_GR20, offsetof(struct task_struct, thread.regs.gr[20]));
DEFINE(TASK_PT_GR21, offsetof(struct task_struct, thread.regs.gr[21]));
DEFINE(TASK_PT_GR22, offsetof(struct task_struct, thread.regs.gr[22]));
DEFINE(TASK_PT_GR23, offsetof(struct task_struct, thread.regs.gr[23]));
DEFINE(TASK_PT_GR24, offsetof(struct task_struct, thread.regs.gr[24]));
DEFINE(TASK_PT_GR25, offsetof(struct task_struct, thread.regs.gr[25]));
DEFINE(TASK_PT_GR26, offsetof(struct task_struct, thread.regs.gr[26]));
DEFINE(TASK_PT_GR27, offsetof(struct task_struct, thread.regs.gr[27]));
DEFINE(TASK_PT_GR28, offsetof(struct task_struct, thread.regs.gr[28]));
DEFINE(TASK_PT_GR29, offsetof(struct task_struct, thread.regs.gr[29]));
DEFINE(TASK_PT_GR30, offsetof(struct task_struct, thread.regs.gr[30]));
DEFINE(TASK_PT_GR31, offsetof(struct task_struct, thread.regs.gr[31]));
DEFINE(TASK_PT_FR0, offsetof(struct task_struct, thread.regs.fr[ 0]));
DEFINE(TASK_PT_FR1, offsetof(struct task_struct, thread.regs.fr[ 1]));
DEFINE(TASK_PT_FR2, offsetof(struct task_struct, thread.regs.fr[ 2]));
DEFINE(TASK_PT_FR3, offsetof(struct task_struct, thread.regs.fr[ 3]));
DEFINE(TASK_PT_FR4, offsetof(struct task_struct, thread.regs.fr[ 4]));
DEFINE(TASK_PT_FR5, offsetof(struct task_struct, thread.regs.fr[ 5]));
DEFINE(TASK_PT_FR6, offsetof(struct task_struct, thread.regs.fr[ 6]));
DEFINE(TASK_PT_FR7, offsetof(struct task_struct, thread.regs.fr[ 7]));
DEFINE(TASK_PT_FR8, offsetof(struct task_struct, thread.regs.fr[ 8]));
DEFINE(TASK_PT_FR9, offsetof(struct task_struct, thread.regs.fr[ 9]));
DEFINE(TASK_PT_FR10, offsetof(struct task_struct, thread.regs.fr[10]));
DEFINE(TASK_PT_FR11, offsetof(struct task_struct, thread.regs.fr[11]));
DEFINE(TASK_PT_FR12, offsetof(struct task_struct, thread.regs.fr[12]));
DEFINE(TASK_PT_FR13, offsetof(struct task_struct, thread.regs.fr[13]));
DEFINE(TASK_PT_FR14, offsetof(struct task_struct, thread.regs.fr[14]));
DEFINE(TASK_PT_FR15, offsetof(struct task_struct, thread.regs.fr[15]));
DEFINE(TASK_PT_FR16, offsetof(struct task_struct, thread.regs.fr[16]));
DEFINE(TASK_PT_FR17, offsetof(struct task_struct, thread.regs.fr[17]));
DEFINE(TASK_PT_FR18, offsetof(struct task_struct, thread.regs.fr[18]));
DEFINE(TASK_PT_FR19, offsetof(struct task_struct, thread.regs.fr[19]));
DEFINE(TASK_PT_FR20, offsetof(struct task_struct, thread.regs.fr[20]));
DEFINE(TASK_PT_FR21, offsetof(struct task_struct, thread.regs.fr[21]));
DEFINE(TASK_PT_FR22, offsetof(struct task_struct, thread.regs.fr[22]));
DEFINE(TASK_PT_FR23, offsetof(struct task_struct, thread.regs.fr[23]));
DEFINE(TASK_PT_FR24, offsetof(struct task_struct, thread.regs.fr[24]));
DEFINE(TASK_PT_FR25, offsetof(struct task_struct, thread.regs.fr[25]));
DEFINE(TASK_PT_FR26, offsetof(struct task_struct, thread.regs.fr[26]));
DEFINE(TASK_PT_FR27, offsetof(struct task_struct, thread.regs.fr[27]));
DEFINE(TASK_PT_FR28, offsetof(struct task_struct, thread.regs.fr[28]));
DEFINE(TASK_PT_FR29, offsetof(struct task_struct, thread.regs.fr[29]));
DEFINE(TASK_PT_FR30, offsetof(struct task_struct, thread.regs.fr[30]));
DEFINE(TASK_PT_FR31, offsetof(struct task_struct, thread.regs.fr[31]));
DEFINE(TASK_PT_SR0, offsetof(struct task_struct, thread.regs.sr[ 0]));
DEFINE(TASK_PT_SR1, offsetof(struct task_struct, thread.regs.sr[ 1]));
DEFINE(TASK_PT_SR2, offsetof(struct task_struct, thread.regs.sr[ 2]));
DEFINE(TASK_PT_SR3, offsetof(struct task_struct, thread.regs.sr[ 3]));
DEFINE(TASK_PT_SR4, offsetof(struct task_struct, thread.regs.sr[ 4]));
DEFINE(TASK_PT_SR5, offsetof(struct task_struct, thread.regs.sr[ 5]));
DEFINE(TASK_PT_SR6, offsetof(struct task_struct, thread.regs.sr[ 6]));
DEFINE(TASK_PT_SR7, offsetof(struct task_struct, thread.regs.sr[ 7]));
DEFINE(TASK_PT_IASQ0, offsetof(struct task_struct, thread.regs.iasq[0]));
DEFINE(TASK_PT_IASQ1, offsetof(struct task_struct, thread.regs.iasq[1]));
DEFINE(TASK_PT_IAOQ0, offsetof(struct task_struct, thread.regs.iaoq[0]));
DEFINE(TASK_PT_IAOQ1, offsetof(struct task_struct, thread.regs.iaoq[1]));
DEFINE(TASK_PT_CR27, offsetof(struct task_struct, thread.regs.cr27));
DEFINE(TASK_PT_ORIG_R28, offsetof(struct task_struct, thread.regs.orig_r28));
DEFINE(TASK_PT_KSP, offsetof(struct task_struct, thread.regs.ksp));
DEFINE(TASK_PT_KPC, offsetof(struct task_struct, thread.regs.kpc));
DEFINE(TASK_PT_SAR, offsetof(struct task_struct, thread.regs.sar));
DEFINE(TASK_PT_IIR, offsetof(struct task_struct, thread.regs.iir));
DEFINE(TASK_PT_ISR, offsetof(struct task_struct, thread.regs.isr));
DEFINE(TASK_PT_IOR, offsetof(struct task_struct, thread.regs.ior));
BLANK();
DEFINE(TASK_SZ, sizeof(struct task_struct));
/* TASK_SZ_ALGN includes space for a stack frame. */
DEFINE(TASK_SZ_ALGN, align_frame(sizeof(struct task_struct), FRAME_ALIGN));
BLANK();
/* the same register layout, relative to a bare struct pt_regs */
DEFINE(PT_PSW, offsetof(struct pt_regs, gr[ 0]));
DEFINE(PT_GR1, offsetof(struct pt_regs, gr[ 1]));
DEFINE(PT_GR2, offsetof(struct pt_regs, gr[ 2]));
DEFINE(PT_GR3, offsetof(struct pt_regs, gr[ 3]));
DEFINE(PT_GR4, offsetof(struct pt_regs, gr[ 4]));
DEFINE(PT_GR5, offsetof(struct pt_regs, gr[ 5]));
DEFINE(PT_GR6, offsetof(struct pt_regs, gr[ 6]));
DEFINE(PT_GR7, offsetof(struct pt_regs, gr[ 7]));
DEFINE(PT_GR8, offsetof(struct pt_regs, gr[ 8]));
DEFINE(PT_GR9, offsetof(struct pt_regs, gr[ 9]));
DEFINE(PT_GR10, offsetof(struct pt_regs, gr[10]));
DEFINE(PT_GR11, offsetof(struct pt_regs, gr[11]));
DEFINE(PT_GR12, offsetof(struct pt_regs, gr[12]));
DEFINE(PT_GR13, offsetof(struct pt_regs, gr[13]));
DEFINE(PT_GR14, offsetof(struct pt_regs, gr[14]));
DEFINE(PT_GR15, offsetof(struct pt_regs, gr[15]));
DEFINE(PT_GR16, offsetof(struct pt_regs, gr[16]));
DEFINE(PT_GR17, offsetof(struct pt_regs, gr[17]));
DEFINE(PT_GR18, offsetof(struct pt_regs, gr[18]));
DEFINE(PT_GR19, offsetof(struct pt_regs, gr[19]));
DEFINE(PT_GR20, offsetof(struct pt_regs, gr[20]));
DEFINE(PT_GR21, offsetof(struct pt_regs, gr[21]));
DEFINE(PT_GR22, offsetof(struct pt_regs, gr[22]));
DEFINE(PT_GR23, offsetof(struct pt_regs, gr[23]));
DEFINE(PT_GR24, offsetof(struct pt_regs, gr[24]));
DEFINE(PT_GR25, offsetof(struct pt_regs, gr[25]));
DEFINE(PT_GR26, offsetof(struct pt_regs, gr[26]));
DEFINE(PT_GR27, offsetof(struct pt_regs, gr[27]));
DEFINE(PT_GR28, offsetof(struct pt_regs, gr[28]));
DEFINE(PT_GR29, offsetof(struct pt_regs, gr[29]));
DEFINE(PT_GR30, offsetof(struct pt_regs, gr[30]));
DEFINE(PT_GR31, offsetof(struct pt_regs, gr[31]));
DEFINE(PT_FR0, offsetof(struct pt_regs, fr[ 0]));
DEFINE(PT_FR1, offsetof(struct pt_regs, fr[ 1]));
DEFINE(PT_FR2, offsetof(struct pt_regs, fr[ 2]));
DEFINE(PT_FR3, offsetof(struct pt_regs, fr[ 3]));
DEFINE(PT_FR4, offsetof(struct pt_regs, fr[ 4]));
DEFINE(PT_FR5, offsetof(struct pt_regs, fr[ 5]));
DEFINE(PT_FR6, offsetof(struct pt_regs, fr[ 6]));
DEFINE(PT_FR7, offsetof(struct pt_regs, fr[ 7]));
DEFINE(PT_FR8, offsetof(struct pt_regs, fr[ 8]));
DEFINE(PT_FR9, offsetof(struct pt_regs, fr[ 9]));
DEFINE(PT_FR10, offsetof(struct pt_regs, fr[10]));
DEFINE(PT_FR11, offsetof(struct pt_regs, fr[11]));
DEFINE(PT_FR12, offsetof(struct pt_regs, fr[12]));
DEFINE(PT_FR13, offsetof(struct pt_regs, fr[13]));
DEFINE(PT_FR14, offsetof(struct pt_regs, fr[14]));
DEFINE(PT_FR15, offsetof(struct pt_regs, fr[15]));
DEFINE(PT_FR16, offsetof(struct pt_regs, fr[16]));
DEFINE(PT_FR17, offsetof(struct pt_regs, fr[17]));
DEFINE(PT_FR18, offsetof(struct pt_regs, fr[18]));
DEFINE(PT_FR19, offsetof(struct pt_regs, fr[19]));
DEFINE(PT_FR20, offsetof(struct pt_regs, fr[20]));
DEFINE(PT_FR21, offsetof(struct pt_regs, fr[21]));
DEFINE(PT_FR22, offsetof(struct pt_regs, fr[22]));
DEFINE(PT_FR23, offsetof(struct pt_regs, fr[23]));
DEFINE(PT_FR24, offsetof(struct pt_regs, fr[24]));
DEFINE(PT_FR25, offsetof(struct pt_regs, fr[25]));
DEFINE(PT_FR26, offsetof(struct pt_regs, fr[26]));
DEFINE(PT_FR27, offsetof(struct pt_regs, fr[27]));
DEFINE(PT_FR28, offsetof(struct pt_regs, fr[28]));
DEFINE(PT_FR29, offsetof(struct pt_regs, fr[29]));
DEFINE(PT_FR30, offsetof(struct pt_regs, fr[30]));
DEFINE(PT_FR31, offsetof(struct pt_regs, fr[31]));
DEFINE(PT_SR0, offsetof(struct pt_regs, sr[ 0]));
DEFINE(PT_SR1, offsetof(struct pt_regs, sr[ 1]));
DEFINE(PT_SR2, offsetof(struct pt_regs, sr[ 2]));
DEFINE(PT_SR3, offsetof(struct pt_regs, sr[ 3]));
DEFINE(PT_SR4, offsetof(struct pt_regs, sr[ 4]));
DEFINE(PT_SR5, offsetof(struct pt_regs, sr[ 5]));
DEFINE(PT_SR6, offsetof(struct pt_regs, sr[ 6]));
DEFINE(PT_SR7, offsetof(struct pt_regs, sr[ 7]));
DEFINE(PT_IASQ0, offsetof(struct pt_regs, iasq[0]));
DEFINE(PT_IASQ1, offsetof(struct pt_regs, iasq[1]));
DEFINE(PT_IAOQ0, offsetof(struct pt_regs, iaoq[0]));
DEFINE(PT_IAOQ1, offsetof(struct pt_regs, iaoq[1]));
DEFINE(PT_CR27, offsetof(struct pt_regs, cr27));
DEFINE(PT_ORIG_R28, offsetof(struct pt_regs, orig_r28));
DEFINE(PT_KSP, offsetof(struct pt_regs, ksp));
DEFINE(PT_KPC, offsetof(struct pt_regs, kpc));
DEFINE(PT_SAR, offsetof(struct pt_regs, sar));
DEFINE(PT_IIR, offsetof(struct pt_regs, iir));
DEFINE(PT_ISR, offsetof(struct pt_regs, isr));
DEFINE(PT_IOR, offsetof(struct pt_regs, ior));
DEFINE(PT_SIZE, sizeof(struct pt_regs));
/* PT_SZ_ALGN includes space for a stack frame. */
DEFINE(PT_SZ_ALGN, align_frame(sizeof(struct pt_regs), FRAME_ALIGN));
BLANK();
/* struct thread_info fields */
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
DEFINE(THREAD_SZ, sizeof(struct thread_info));
/* THREAD_SZ_ALGN includes space for a stack frame. */
DEFINE(THREAD_SZ_ALGN, align_frame(sizeof(struct thread_info), FRAME_ALIGN));
BLANK();
/* PDC cache/TLB geometry (struct pdc_cache_info) */
DEFINE(ICACHE_BASE, offsetof(struct pdc_cache_info, ic_base));
DEFINE(ICACHE_STRIDE, offsetof(struct pdc_cache_info, ic_stride));
DEFINE(ICACHE_COUNT, offsetof(struct pdc_cache_info, ic_count));
DEFINE(ICACHE_LOOP, offsetof(struct pdc_cache_info, ic_loop));
DEFINE(DCACHE_BASE, offsetof(struct pdc_cache_info, dc_base));
DEFINE(DCACHE_STRIDE, offsetof(struct pdc_cache_info, dc_stride));
DEFINE(DCACHE_COUNT, offsetof(struct pdc_cache_info, dc_count));
DEFINE(DCACHE_LOOP, offsetof(struct pdc_cache_info, dc_loop));
DEFINE(ITLB_SID_BASE, offsetof(struct pdc_cache_info, it_sp_base));
DEFINE(ITLB_SID_STRIDE, offsetof(struct pdc_cache_info, it_sp_stride));
DEFINE(ITLB_SID_COUNT, offsetof(struct pdc_cache_info, it_sp_count));
DEFINE(ITLB_OFF_BASE, offsetof(struct pdc_cache_info, it_off_base));
DEFINE(ITLB_OFF_STRIDE, offsetof(struct pdc_cache_info, it_off_stride));
DEFINE(ITLB_OFF_COUNT, offsetof(struct pdc_cache_info, it_off_count));
DEFINE(ITLB_LOOP, offsetof(struct pdc_cache_info, it_loop));
DEFINE(DTLB_SID_BASE, offsetof(struct pdc_cache_info, dt_sp_base));
DEFINE(DTLB_SID_STRIDE, offsetof(struct pdc_cache_info, dt_sp_stride));
DEFINE(DTLB_SID_COUNT, offsetof(struct pdc_cache_info, dt_sp_count));
DEFINE(DTLB_OFF_BASE, offsetof(struct pdc_cache_info, dt_off_base));
DEFINE(DTLB_OFF_STRIDE, offsetof(struct pdc_cache_info, dt_off_stride));
DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count));
DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop));
BLANK();
/* thread flag bits translated via 31-x for assembly use */
DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP);
DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP);
BLANK();
/* page-table geometry constants */
DEFINE(ASM_PMD_SHIFT, PMD_SHIFT);
DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT);
DEFINE(ASM_BITS_PER_PGD, BITS_PER_PGD);
DEFINE(ASM_BITS_PER_PMD, BITS_PER_PMD);
DEFINE(ASM_BITS_PER_PTE, BITS_PER_PTE);
DEFINE(ASM_PGD_PMD_OFFSET, -(PAGE_SIZE << PGD_ORDER));
DEFINE(ASM_PMD_ENTRY, ((PAGE_OFFSET & PMD_MASK) >> PMD_SHIFT));
DEFINE(ASM_PGD_ENTRY, PAGE_OFFSET >> PGDIR_SHIFT);
DEFINE(ASM_PGD_ENTRY_SIZE, PGD_ENTRY_SIZE);
DEFINE(ASM_PMD_ENTRY_SIZE, PMD_ENTRY_SIZE);
DEFINE(ASM_PTE_ENTRY_SIZE, PTE_ENTRY_SIZE);
DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
DEFINE(ASM_PT_INITIAL, PT_INITIAL);
BLANK();
/* struct exception_data fault-record fields */
DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
BLANK();
DEFINE(ASM_PDC_RESULT_SIZE, NUM_PDC_RESULT * sizeof(unsigned long));
BLANK();
return 0;
}
| gpl-2.0 |
bachtk/linux | drivers/input/joystick/db9.c | 13336 | 21279 | /*
* Copyright (c) 1999-2001 Vojtech Pavlik
*
* Based on the work of:
* Andree Borrmann Mats Sjövall
*/
/*
* Atari, Amstrad, Commodore, Amiga, Sega, etc. joystick driver for Linux
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/parport.h>
#include <linux/input.h>
#include <linux/mutex.h>
#include <linux/slab.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Atari, Amstrad, Commodore, Amiga, Sega, etc. joystick driver");
MODULE_LICENSE("GPL");
struct db9_config {
int args[2];
unsigned int nargs;
};
#define DB9_MAX_PORTS		3
static struct db9_config db9_cfg[DB9_MAX_PORTS] __initdata;

/* Up to three devices, one per parallel port: dev, dev2, dev3. */
module_param_array_named(dev, db9_cfg[0].args, int, &db9_cfg[0].nargs, 0);
MODULE_PARM_DESC(dev, "Describes first attached device (<parport#>,<type>)");
module_param_array_named(dev2, db9_cfg[1].args, int, &db9_cfg[1].nargs, 0);
MODULE_PARM_DESC(dev2, "Describes second attached device (<parport#>,<type>)");
module_param_array_named(dev3, db9_cfg[2].args, int, &db9_cfg[2].nargs, 0);
MODULE_PARM_DESC(dev3, "Describes third attached device (<parport#>,<type>)");

/* Indices into db9_config.args[]. */
#define DB9_ARG_PARPORT		0
#define DB9_ARG_MODE		1

/* Pad/controller types selectable via the <type> module argument.
   These index db9_modes[]; 0x00 and 0x04 are unused slots. */
#define DB9_MULTI_STICK		0x01
#define DB9_MULTI2_STICK	0x02
#define DB9_GENESIS_PAD		0x03
#define DB9_GENESIS5_PAD	0x05
#define DB9_GENESIS6_PAD	0x06
#define DB9_SATURN_PAD		0x07
#define DB9_MULTI_0802		0x08
#define DB9_MULTI_0802_2	0x09
#define DB9_CD32_PAD		0x0A
#define DB9_SATURN_DPP		0x0B
#define DB9_SATURN_DPP_2	0x0C
#define DB9_MAX_PAD		0x0D

/* Bit masks for the DB9 lines as seen on the parallel port. */
#define DB9_UP			0x01
#define DB9_DOWN		0x02
#define DB9_LEFT		0x04
#define DB9_RIGHT		0x08
#define DB9_FIRE1		0x10
#define DB9_FIRE2		0x20
#define DB9_FIRE3		0x40
#define DB9_FIRE4		0x80

/* Control-port values used to toggle the Genesis SELECT line. */
#define DB9_NORMAL		0x0a
#define DB9_NOSELECT		0x08

#define DB9_GENESIS6_DELAY	14	/* us between 6-button select pulses */
/* Parenthesized so the macro is safe in any arithmetic context. */
#define DB9_REFRESH_TIME	(HZ / 100)

#define DB9_MAX_DEVICES		2
/* Static description of one supported pad type (one row of db9_modes[]). */
struct db9_mode_data {
	const char *name;	/* input device name; NULL marks an unused id */
	const short *buttons;	/* button codes to register */
	int n_buttons;		/* entries of buttons[] actually used */
	int n_pads;		/* pads this mode can deliver */
	int n_axis;		/* axes to register, taken from db9_abs[] */
	int bidirectional;	/* needs a PARPORT_MODE_TRISTATE port */
	int reverse;		/* put data lines in reverse mode on open */
};
/* Per-parport driver instance. */
struct db9 {
	struct input_dev *dev[DB9_MAX_DEVICES];	/* registered pads */
	struct timer_list timer;	/* polling timer (db9_timer) */
	struct pardevice *pd;		/* our registration on the parport */
	int mode;			/* DB9_* pad type */
	int used;			/* open count; timer runs while > 0 */
	struct mutex mutex;		/* serializes open/close */
	char phys[DB9_MAX_DEVICES][32];	/* phys path for each device */
};
static struct db9 *db9_base[3];
/* Button sets shared by the mode table below. */
static const short db9_multi_btn[] = { BTN_TRIGGER, BTN_THUMB };
static const short db9_genesis_btn[] = { BTN_START, BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z, BTN_MODE };
static const short db9_cd32_btn[] = { BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z, BTN_TL, BTN_TR, BTN_START };
/* Axes handed out in order; indices 0/1 are the digital X/Y pair. */
static const short db9_abs[] = { ABS_X, ABS_Y, ABS_RX, ABS_RY, ABS_RZ, ABS_Z, ABS_HAT0X, ABS_HAT0Y, ABS_HAT1X, ABS_HAT1Y };
/* Indexed by the DB9_* mode number; NULL rows are the unused ids. */
static const struct db9_mode_data db9_modes[] = {
	{ NULL, NULL, 0, 0, 0, 0, 0 },
	{ "Multisystem joystick", db9_multi_btn, 1, 1, 2, 1, 1 },
	{ "Multisystem joystick (2 fire)", db9_multi_btn, 2, 1, 2, 1, 1 },
	{ "Genesis pad", db9_genesis_btn, 4, 1, 2, 1, 1 },
	{ NULL, NULL, 0, 0, 0, 0, 0 },
	{ "Genesis 5 pad", db9_genesis_btn, 6, 1, 2, 1, 1 },
	{ "Genesis 6 pad", db9_genesis_btn, 8, 1, 2, 1, 1 },
	{ "Saturn pad", db9_cd32_btn, 9, 6, 7, 0, 1 },
	{ "Multisystem (0.8.0.2) joystick", db9_multi_btn, 1, 1, 2, 1, 1 },
	{ "Multisystem (0.8.0.2-dual) joystick", db9_multi_btn, 1, 2, 2, 1, 1 },
	{ "Amiga CD-32 pad", db9_cd32_btn, 7, 1, 2, 1, 1 },
	{ "Saturn dpp", db9_cd32_btn, 9, 6, 7, 0, 0 },
	{ "Saturn dpp dual", db9_cd32_btn, 9, 12, 7, 0, 0 },
};
/*
 * Saturn controllers
 */
#define DB9_SATURN_DELAY 300	/* us settle time between clock edges */
/* Per-button byte offset and bit mask within a decoded Saturn frame,
 * in the same order as db9_cd32_btn[]. */
static const int db9_saturn_byte[] = { 1, 1, 1, 2, 2, 2, 2, 2, 1 };
static const unsigned char db9_saturn_mask[] = { 0x04, 0x01, 0x02, 0x40, 0x20, 0x10, 0x08, 0x80, 0x08 };
/*
 * db9_saturn_write_sub() drives the Saturn select/clock lines with a
 * 2-bit value.  "type" picks the wiring: 0 = plain DB9 lead (control
 * lines), 1/2 = direct-parallel-port "DPP" leads (data lines).  The
 * power flags keep the pad's supply bits asserted while talking to it.
 */
static void db9_saturn_write_sub(struct parport *port, int type, unsigned char data, int powered, int pwr_sub)
{
	unsigned char out;

	if (type == 1) {
		/* DPP1: select bits in the low nibble of the data port */
		out = 0x80 | 0x30 | (powered ? 0x08 : 0) | (pwr_sub ? 0x04 : 0) | data;
		parport_write_data(port, out);
	} else if (type == 2) {
		/* DPP2: select bits shifted into the high nibble */
		out = 0x40 | data << 4 | (powered ? 0x08 : 0) | (pwr_sub ? 0x04 : 0) | 0x03;
		parport_write_data(port, out);
	} else if (type == 0) {
		/* plain DB9 lead: wiggle the control lines instead */
		out = ((((data & 2) ? 2 : 0) | ((data & 1) ? 4 : 0)) ^ 0x02) | !powered;
		parport_write_control(port, out);
	}
	/* any other type is silently ignored, as in the original switch */
}
/*
 * db9_saturn_read_sub() samples one 4-bit nibble from the pad and
 * returns it with the bit order reversed into bits 0..3.
 */
static unsigned char db9_saturn_read_sub(struct parport *port, int type)
{
	unsigned char raw;

	if (!type) {
		/* plain DB9 lead: nibble arrives on the data port */
		raw = parport_read_data(port) & 0x0f;
		return (raw & 0x8 ? 1 : 0) | (raw & 0x4 ? 2 : 0)
			| (raw & 0x2 ? 4 : 0) | (raw & 0x1 ? 8 : 0);
	}

	/* DPP lead: nibble arrives on the status port; the top status
	   bit is flipped before decoding */
	raw = parport_read_status(port) ^ 0x80;
	return (raw & 0x80 ? 1 : 0) | (raw & 0x40 ? 2 : 0)
		| (raw & 0x20 ? 4 : 0) | (raw & 0x10 ? 8 : 0);
}
/*
 * db9_saturn_read_analog() clocks the pad twice and assembles the two
 * nibbles into one byte; the first nibble read becomes the high half.
 */
static unsigned char db9_saturn_read_analog(struct parport *port, int type, int powered)
{
	unsigned char value;

	db9_saturn_write_sub(port, type, 0, powered, 0);
	udelay(DB9_SATURN_DELAY);
	value = db9_saturn_read_sub(port, type) << 4;

	db9_saturn_write_sub(port, type, 2, powered, 0);
	udelay(DB9_SATURN_DELAY);
	value |= db9_saturn_read_sub(port, type);

	return value;
}
/*
 * db9_saturn_read_packet() reads whole saturn packet at connector
 * and returns device identifier code.
 *
 * The low nibble of the first handshake byte selects the protocol:
 * 1111 = nothing connected, ?100 = digital pad, 0001 = analog pad or
 * multitap, 0000 = mouse.  On return data[0] holds the id byte that
 * db9_saturn_report() dispatches on.
 */
static unsigned char db9_saturn_read_packet(struct parport *port, unsigned char *data, int type, int powered)
{
	int i, j;
	unsigned char tmp;

	db9_saturn_write_sub(port, type, 3, powered, 0);
	data[0] = db9_saturn_read_sub(port, type);
	switch (data[0] & 0x0f) {
	case 0xf:
		/* 1111  no pad */
		return data[0] = 0xff;
	case 0x4: case 0x4 | 0x8:
		/* ?100 : digital controller */
		db9_saturn_write_sub(port, type, 0, powered, 1);
		data[2] = db9_saturn_read_sub(port, type) << 4;
		db9_saturn_write_sub(port, type, 2, powered, 1);
		data[1] = db9_saturn_read_sub(port, type) << 4;
		db9_saturn_write_sub(port, type, 1, powered, 1);
		data[1] |= db9_saturn_read_sub(port, type);
		db9_saturn_write_sub(port, type, 3, powered, 1);
		/* data[2] |= db9_saturn_read_sub(port, type); */
		data[2] |= data[0];
		return data[0] = 0x02;
	case 0x1:
		/* 0001 : analog controller or multitap */
		db9_saturn_write_sub(port, type, 2, powered, 0);
		udelay(DB9_SATURN_DELAY);
		data[0] = db9_saturn_read_analog(port, type, powered);
		if (data[0] != 0x41) {
			/* read analog controller; low nibble of the id
			   byte is the number of payload bytes to follow */
			for (i = 0; i < (data[0] & 0x0f); i++)
				data[i + 1] = db9_saturn_read_analog(port, type, powered);
			db9_saturn_write_sub(port, type, 3, powered, 0);
			return data[0];
		} else {
			/* read multitap: one 10-byte slot per pad */
			if (db9_saturn_read_analog(port, type, powered) != 0x60)
				return data[0] = 0xff;
			for (i = 0; i < 60; i += 10) {
				data[i] = db9_saturn_read_analog(port, type, powered);
				if (data[i] != 0xff)
					/* read each pad */
					for (j = 0; j < (data[i] & 0x0f); j++)
						data[i + j + 1] = db9_saturn_read_analog(port, type, powered);
			}
			db9_saturn_write_sub(port, type, 3, powered, 0);
			return 0x41;
		}
	case 0x0:
		/* 0000 : mouse */
		db9_saturn_write_sub(port, type, 2, powered, 0);
		udelay(DB9_SATURN_DELAY);
		tmp = db9_saturn_read_analog(port, type, powered);
		if (tmp == 0xff) {
			for (i = 0; i < 3; i++)
				data[i + 1] = db9_saturn_read_analog(port, type, powered);
			db9_saturn_write_sub(port, type, 3, powered, 0);
			return data[0] = 0xe3;
		}
		/* unexpected mouse signature: deliberate fall through,
		   returning the raw handshake byte unchanged */
	default:
		return data[0];
	}
}
/*
 * db9_saturn_report() analyzes packet and reports.
 *
 * data[] holds one 10-byte frame per pad (six frames for a multitap,
 * id 0x41).  n is the index of the first entry of devs[] to fill in;
 * the updated index is returned.  The case labels are the id bytes
 * produced by db9_saturn_read_packet(); the analog cases deliberately
 * fall through so each richer pad adds its extra axes on top of the
 * common digital decode.
 */
static int db9_saturn_report(unsigned char id, unsigned char data[60], struct input_dev *devs[], int n, int max_pads)
{
	struct input_dev *dev;
	int tmp, i, j;

	tmp = (id == 0x41) ? 60 : 10;	/* multitap: scan all six slots */
	for (j = 0; j < tmp && n < max_pads; j += 10, n++) {
		dev = devs[n];
		switch (data[j]) {
		case 0x16: /* multi controller (analog 4 axis) */
			input_report_abs(dev, db9_abs[5], data[j + 6]);
			/* fall through */
		case 0x15: /* mission stick (analog 3 axis) */
			input_report_abs(dev, db9_abs[3], data[j + 4]);
			input_report_abs(dev, db9_abs[4], data[j + 5]);
			/* fall through */
		case 0x13: /* racing controller (analog 1 axis) */
			input_report_abs(dev, db9_abs[2], data[j + 3]);
			/* fall through */
		case 0x34: /* saturn keyboard (udlr ZXC ASD QE Esc) */
		case 0x02: /* digital pad (digital 2 axis + buttons) */
			input_report_abs(dev, db9_abs[0], !(data[j + 1] & 128) - !(data[j + 1] & 64));
			input_report_abs(dev, db9_abs[1], !(data[j + 1] & 32) - !(data[j + 1] & 16));
			for (i = 0; i < 9; i++)
				/* buttons are active low in the frame */
				input_report_key(dev, db9_cd32_btn[i], ~data[j + db9_saturn_byte[i]] & db9_saturn_mask[i]);
			break;
		case 0x19: /* mission stick x2 (analog 6 axis + buttons) */
			input_report_abs(dev, db9_abs[0], !(data[j + 1] & 128) - !(data[j + 1] & 64));
			input_report_abs(dev, db9_abs[1], !(data[j + 1] & 32) - !(data[j + 1] & 16));
			for (i = 0; i < 9; i++)
				input_report_key(dev, db9_cd32_btn[i], ~data[j + db9_saturn_byte[i]] & db9_saturn_mask[i]);
			input_report_abs(dev, db9_abs[2], data[j + 3]);
			input_report_abs(dev, db9_abs[3], data[j + 4]);
			input_report_abs(dev, db9_abs[4], data[j + 5]);
			/*
			input_report_abs(dev, db9_abs[8], (data[j + 6] & 128 ? 0 : 1) - (data[j + 6] & 64 ? 0 : 1));
			input_report_abs(dev, db9_abs[9], (data[j + 6] & 32 ? 0 : 1) - (data[j + 6] & 16 ? 0 : 1));
			*/
			input_report_abs(dev, db9_abs[6], data[j + 7]);
			input_report_abs(dev, db9_abs[7], data[j + 8]);
			input_report_abs(dev, db9_abs[5], data[j + 9]);
			break;
		case 0xd3: /* sankyo ff (analog 1 axis + stop btn) */
			input_report_key(dev, BTN_A, data[j + 3] & 0x80);
			input_report_abs(dev, db9_abs[2], data[j + 3] & 0x7f);
			break;
		case 0xe3: /* shuttle mouse (analog 2 axis + buttons. signed value) */
			input_report_key(dev, BTN_START, data[j + 1] & 0x08);
			input_report_key(dev, BTN_A, data[j + 1] & 0x04);
			input_report_key(dev, BTN_C, data[j + 1] & 0x02);
			input_report_key(dev, BTN_B, data[j + 1] & 0x01);
			input_report_abs(dev, db9_abs[2], data[j + 2] ^ 0x80);
			/* Y axis is negated (mouse Y grows downward) */
			input_report_abs(dev, db9_abs[3], (0xff-(data[j + 3] ^ 0x80))+1); /* */
			break;
		case 0xff:
		default: /* no pad: park axes and release all buttons */
			input_report_abs(dev, db9_abs[0], 0);
			input_report_abs(dev, db9_abs[1], 0);
			for (i = 0; i < 9; i++)
				input_report_key(dev, db9_cd32_btn[i], 0);
			break;
		}
	}
	return n;
}
/*
 * db9_saturn() polls every Saturn lead attached in the given mode and
 * feeds the decoded packets to the input devices.  Returns 0 on
 * success, -1 for a mode this helper does not handle.
 */
static int db9_saturn(int mode, struct parport *port, struct input_dev *devs[])
{
	unsigned char id, data[60];
	int type, n, max_pads;
	int tmp, i;

	if (mode == DB9_SATURN_PAD) {		/* one pad, plain DB9 lead */
		type = 0;
		n = 1;
	} else if (mode == DB9_SATURN_DPP) {	/* one DPP lead */
		type = 1;
		n = 1;
	} else if (mode == DB9_SATURN_DPP_2) {	/* two DPP leads */
		type = 1;
		n = 2;
	} else {
		return -1;
	}

	max_pads = min(db9_modes[mode].n_pads, DB9_MAX_DEVICES);
	tmp = 0;
	for (i = 0; i < n; i++) {
		id = db9_saturn_read_packet(port, data, type + i, 1);
		tmp = db9_saturn_report(id, data, devs, tmp, max_pads);
	}
	return 0;
}
/*
 * db9_timer() - polling routine, re-armed every DB9_REFRESH_TIME.
 * Decodes the pad state for the configured mode from the parallel
 * port and pushes it into the input layer.
 */
static void db9_timer(unsigned long private)
{
	struct db9 *db9 = (void *) private;
	struct parport *port = db9->pd->port;
	struct input_dev *dev = db9->dev[0];
	struct input_dev *dev2 = db9->dev[1];
	int data, i;

	switch (db9->mode) {
	case DB9_MULTI_0802_2:
		/* second stick: data lines, bits 3..7 */
		data = parport_read_data(port) >> 3;
		input_report_abs(dev2, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev2, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		input_report_key(dev2, BTN_TRIGGER, ~data & DB9_FIRE1);
		/* fall through - first stick is decoded as DB9_MULTI_0802.
		   NOTE(review): only dev is input_sync()ed at the bottom,
		   never dev2 - confirm dev2 events reach userspace
		   promptly. */
	case DB9_MULTI_0802:
		/* first stick: status lines, bits 3..7 */
		data = parport_read_status(port) >> 3;
		input_report_abs(dev, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		/* fire is NOT inverted here, unlike every other mode -
		   presumably because it arrives on a hardware-inverted
		   status line; TODO confirm against the 0.8.0.2 wiring */
		input_report_key(dev, BTN_TRIGGER, data & DB9_FIRE1);
		break;
	case DB9_MULTI_STICK:
		data = parport_read_data(port);
		input_report_abs(dev, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		input_report_key(dev, BTN_TRIGGER, ~data & DB9_FIRE1);
		break;
	case DB9_MULTI2_STICK:
		data = parport_read_data(port);
		input_report_abs(dev, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		input_report_key(dev, BTN_TRIGGER, ~data & DB9_FIRE1);
		input_report_key(dev, BTN_THUMB, ~data & DB9_FIRE2);
		break;
	case DB9_GENESIS_PAD:
		/* SELECT low: direction lines + B/C on the fire bits */
		parport_write_control(port, DB9_NOSELECT);
		data = parport_read_data(port);
		input_report_abs(dev, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		input_report_key(dev, BTN_B, ~data & DB9_FIRE1);
		input_report_key(dev, BTN_C, ~data & DB9_FIRE2);
		/* SELECT high: A/Start take over the fire bits */
		parport_write_control(port, DB9_NORMAL);
		data = parport_read_data(port);
		input_report_key(dev, BTN_A, ~data & DB9_FIRE1);
		input_report_key(dev, BTN_START, ~data & DB9_FIRE2);
		break;
	case DB9_GENESIS5_PAD:
		parport_write_control(port, DB9_NOSELECT);
		data = parport_read_data(port);
		input_report_abs(dev, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		input_report_key(dev, BTN_B, ~data & DB9_FIRE1);
		input_report_key(dev, BTN_C, ~data & DB9_FIRE2);
		parport_write_control(port, DB9_NORMAL);
		data = parport_read_data(port);
		input_report_key(dev, BTN_A, ~data & DB9_FIRE1);
		input_report_key(dev, BTN_X, ~data & DB9_FIRE2);
		input_report_key(dev, BTN_Y, ~data & DB9_LEFT);
		input_report_key(dev, BTN_START, ~data & DB9_RIGHT);
		break;
	case DB9_GENESIS6_PAD:
		/* 6-button pads need four timed SELECT pulses; the extra
		   X/Y/Z/Mode buttons show up on the direction lines after
		   the third pulse */
		parport_write_control(port, DB9_NOSELECT); /* 1 */
		udelay(DB9_GENESIS6_DELAY);
		data = parport_read_data(port);
		input_report_abs(dev, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		input_report_key(dev, BTN_B, ~data & DB9_FIRE1);
		input_report_key(dev, BTN_C, ~data & DB9_FIRE2);
		parport_write_control(port, DB9_NORMAL);
		udelay(DB9_GENESIS6_DELAY);
		data = parport_read_data(port);
		input_report_key(dev, BTN_A, ~data & DB9_FIRE1);
		input_report_key(dev, BTN_START, ~data & DB9_FIRE2);
		parport_write_control(port, DB9_NOSELECT); /* 2 */
		udelay(DB9_GENESIS6_DELAY);
		parport_write_control(port, DB9_NORMAL);
		udelay(DB9_GENESIS6_DELAY);
		parport_write_control(port, DB9_NOSELECT); /* 3 */
		udelay(DB9_GENESIS6_DELAY);
		data = parport_read_data(port);
		input_report_key(dev, BTN_X, ~data & DB9_LEFT);
		input_report_key(dev, BTN_Y, ~data & DB9_DOWN);
		input_report_key(dev, BTN_Z, ~data & DB9_UP);
		input_report_key(dev, BTN_MODE, ~data & DB9_RIGHT);
		parport_write_control(port, DB9_NORMAL);
		udelay(DB9_GENESIS6_DELAY);
		parport_write_control(port, DB9_NOSELECT); /* 4 */
		udelay(DB9_GENESIS6_DELAY);
		parport_write_control(port, DB9_NORMAL);
		break;
	case DB9_SATURN_PAD:
	case DB9_SATURN_DPP:
	case DB9_SATURN_DPP_2:
		db9_saturn(db9->mode, port, db9->dev);
		break;
	case DB9_CD32_PAD:
		data = parport_read_data(port);
		input_report_abs(dev, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		/* latch the button shift register, then clock out the
		   first 7 entries of db9_cd32_btn[] one bit at a time */
		parport_write_control(port, 0x0a);
		for (i = 0; i < 7; i++) {
			data = parport_read_data(port);
			parport_write_control(port, 0x02);
			parport_write_control(port, 0x0a);
			input_report_key(dev, db9_cd32_btn[i], ~data & DB9_FIRE2);
		}
		parport_write_control(port, 0x00);
		break;
	}

	input_sync(dev);
	mod_timer(&db9->timer, jiffies + DB9_REFRESH_TIME);
}
/*
 * db9_open() - first opener starts the polling.
 * Claims the parport, puts the data lines into the state the pad type
 * expects and kicks the poll timer.  The claim is reference counted
 * through db9->used, so further openers only bump the count.
 * Returns 0 or a negative error.
 */
static int db9_open(struct input_dev *dev)
{
	struct db9 *db9 = input_get_drvdata(dev);
	struct parport *port = db9->pd->port;
	int err;

	err = mutex_lock_interruptible(&db9->mutex);
	if (err)
		return err;

	if (!db9->used++) {
		/* parport_claim() can fail (e.g. port grabbed by another
		   driver); the original code ignored the return value and
		   then wrote to an unclaimed port. */
		err = parport_claim(db9->pd);
		if (err) {
			db9->used--;
			goto out;
		}
		parport_write_data(port, 0xff);
		if (db9_modes[db9->mode].reverse) {
			parport_data_reverse(port);
			parport_write_control(port, DB9_NORMAL);
		}
		mod_timer(&db9->timer, jiffies + DB9_REFRESH_TIME);
	}

 out:
	mutex_unlock(&db9->mutex);
	return err;
}
/*
 * db9_close() - last closer stops the polling: kill the timer, quiesce
 * the port and release the parport claim taken in db9_open().
 */
static void db9_close(struct input_dev *dev)
{
	struct db9 *db9 = input_get_drvdata(dev);

	mutex_lock(&db9->mutex);
	if (--db9->used == 0) {
		del_timer_sync(&db9->timer);
		parport_write_control(db9->pd->port, 0x00);
		parport_data_forward(db9->pd->port);
		parport_release(db9->pd);
	}
	mutex_unlock(&db9->mutex);
}
/*
 * db9_probe() - create one driver instance on parport number 'parport'
 * running pad type 'mode'.  Registers an input device for every pad
 * the mode supports and returns the instance, or an ERR_PTR() code on
 * failure.  The parport reference taken by parport_find_number() is
 * dropped again on every exit path.
 */
static struct db9 __init *db9_probe(int parport, int mode)
{
	struct db9 *db9;
	const struct db9_mode_data *db9_mode;
	struct parport *pp;
	struct pardevice *pd;
	struct input_dev *input_dev;
	int i, j;
	int err;

	/* reject out-of-range ids and the unused db9_modes[] slots */
	if (mode < 1 || mode >= DB9_MAX_PAD || !db9_modes[mode].n_buttons) {
		printk(KERN_ERR "db9.c: Bad device type %d\n", mode);
		err = -EINVAL;
		goto err_out;
	}

	db9_mode = &db9_modes[mode];

	pp = parport_find_number(parport);
	if (!pp) {
		printk(KERN_ERR "db9.c: no such parport\n");
		err = -ENODEV;
		goto err_out;
	}

	/* modes that read the data lines need a tristate-capable port */
	if (db9_mode->bidirectional && !(pp->modes & PARPORT_MODE_TRISTATE)) {
		printk(KERN_ERR "db9.c: specified parport is not bidirectional\n");
		err = -EINVAL;
		goto err_put_pp;
	}

	pd = parport_register_device(pp, "db9", NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
	if (!pd) {
		printk(KERN_ERR "db9.c: parport busy already - lp.o loaded?\n");
		err = -EBUSY;
		goto err_put_pp;
	}

	db9 = kzalloc(sizeof(struct db9), GFP_KERNEL);
	if (!db9) {
		printk(KERN_ERR "db9.c: Not enough memory\n");
		err = -ENOMEM;
		goto err_unreg_pardev;
	}

	mutex_init(&db9->mutex);
	db9->pd = pd;
	db9->mode = mode;
	init_timer(&db9->timer);
	db9->timer.data = (long) db9;	/* db9_timer() casts this back */
	db9->timer.function = db9_timer;

	/* one input device per pad, capped at DB9_MAX_DEVICES */
	for (i = 0; i < (min(db9_mode->n_pads, DB9_MAX_DEVICES)); i++) {
		db9->dev[i] = input_dev = input_allocate_device();
		if (!input_dev) {
			printk(KERN_ERR "db9.c: Not enough memory for input device\n");
			err = -ENOMEM;
			goto err_unreg_devs;
		}
		snprintf(db9->phys[i], sizeof(db9->phys[i]),
			"%s/input%d", db9->pd->port->name, i);
		input_dev->name = db9_mode->name;
		input_dev->phys = db9->phys[i];
		input_dev->id.bustype = BUS_PARPORT;
		input_dev->id.vendor = 0x0002;
		input_dev->id.product = mode;
		input_dev->id.version = 0x0100;
		input_set_drvdata(input_dev, db9);
		input_dev->open = db9_open;
		input_dev->close = db9_close;
		input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
		for (j = 0; j < db9_mode->n_buttons; j++)
			set_bit(db9_mode->buttons[j], input_dev->keybit);
		for (j = 0; j < db9_mode->n_axis; j++) {
			if (j < 2)
				/* first two axes are the digital pair */
				input_set_abs_params(input_dev, db9_abs[j], -1, 1, 0, 0);
			else
				/* remaining axes carry raw 8-bit values */
				input_set_abs_params(input_dev, db9_abs[j], 1, 255, 0, 0);
		}
		err = input_register_device(input_dev);
		if (err)
			goto err_free_dev;
	}

	parport_put_port(pp);
	return db9;

	/* unwind in reverse order of acquisition */
 err_free_dev:
	input_free_device(db9->dev[i]);
 err_unreg_devs:
	while (--i >= 0)
		input_unregister_device(db9->dev[i]);
	kfree(db9);
 err_unreg_pardev:
	parport_unregister_device(pd);
 err_put_pp:
	parport_put_port(pp);
 err_out:
	return ERR_PTR(err);
}
/* Tear down one instance: unregister its pads and drop the parport. */
static void db9_remove(struct db9 *db9)
{
	int i, n_devs;

	n_devs = min(db9_modes[db9->mode].n_pads, DB9_MAX_DEVICES);
	for (i = 0; i < n_devs; i++)
		input_unregister_device(db9->dev[i]);
	parport_unregister_device(db9->pd);
	kfree(db9);
}
/*
 * db9_init() - probe every configured port slot.  Succeeds if at least
 * one device came up; on any probe error everything created so far is
 * rolled back and the error is returned.
 */
static int __init db9_init(void)
{
	int i;
	int found = 0;
	int err = 0;

	for (i = 0; i < DB9_MAX_PORTS; i++) {
		/* slot not configured on the command line */
		if (db9_cfg[i].nargs == 0 || db9_cfg[i].args[DB9_ARG_PARPORT] < 0)
			continue;

		if (db9_cfg[i].nargs < 2) {
			printk(KERN_ERR "db9.c: Device type must be specified.\n");
			err = -EINVAL;
			break;
		}

		db9_base[i] = db9_probe(db9_cfg[i].args[DB9_ARG_PARPORT],
					db9_cfg[i].args[DB9_ARG_MODE]);
		if (IS_ERR(db9_base[i])) {
			err = PTR_ERR(db9_base[i]);
			break;
		}

		found = 1;
	}

	if (err) {
		/* roll back every instance probed before the failure */
		while (--i >= 0)
			if (db9_base[i])
				db9_remove(db9_base[i]);
		return err;
	}

	return found ? 0 : -ENODEV;
}
/* Module unload: dismantle every instance created by db9_init(). */
static void __exit db9_exit(void)
{
	int i;

	for (i = DB9_MAX_PORTS - 1; i >= 0; i--)
		if (db9_base[i])
			db9_remove(db9_base[i]);
}
module_init(db9_init);
module_exit(db9_exit);
| gpl-2.0 |
modulexcite/linux | drivers/input/joystick/db9.c | 13336 | 21279 | /*
* Copyright (c) 1999-2001 Vojtech Pavlik
*
* Based on the work of:
* Andree Borrmann Mats Sjövall
*/
/*
* Atari, Amstrad, Commodore, Amiga, Sega, etc. joystick driver for Linux
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/parport.h>
#include <linux/input.h>
#include <linux/mutex.h>
#include <linux/slab.h>
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Atari, Amstrad, Commodore, Amiga, Sega, etc. joystick driver");
MODULE_LICENSE("GPL");
struct db9_config {
int args[2];
unsigned int nargs;
};
#define DB9_MAX_PORTS		3
static struct db9_config db9_cfg[DB9_MAX_PORTS] __initdata;

/* Up to three devices, one per parallel port: dev, dev2, dev3. */
module_param_array_named(dev, db9_cfg[0].args, int, &db9_cfg[0].nargs, 0);
MODULE_PARM_DESC(dev, "Describes first attached device (<parport#>,<type>)");
module_param_array_named(dev2, db9_cfg[1].args, int, &db9_cfg[1].nargs, 0);
MODULE_PARM_DESC(dev2, "Describes second attached device (<parport#>,<type>)");
module_param_array_named(dev3, db9_cfg[2].args, int, &db9_cfg[2].nargs, 0);
MODULE_PARM_DESC(dev3, "Describes third attached device (<parport#>,<type>)");

/* Indices into db9_config.args[]. */
#define DB9_ARG_PARPORT		0
#define DB9_ARG_MODE		1

/* Pad/controller types selectable via the <type> module argument.
   These index db9_modes[]; 0x00 and 0x04 are unused slots. */
#define DB9_MULTI_STICK		0x01
#define DB9_MULTI2_STICK	0x02
#define DB9_GENESIS_PAD		0x03
#define DB9_GENESIS5_PAD	0x05
#define DB9_GENESIS6_PAD	0x06
#define DB9_SATURN_PAD		0x07
#define DB9_MULTI_0802		0x08
#define DB9_MULTI_0802_2	0x09
#define DB9_CD32_PAD		0x0A
#define DB9_SATURN_DPP		0x0B
#define DB9_SATURN_DPP_2	0x0C
#define DB9_MAX_PAD		0x0D

/* Bit masks for the DB9 lines as seen on the parallel port. */
#define DB9_UP			0x01
#define DB9_DOWN		0x02
#define DB9_LEFT		0x04
#define DB9_RIGHT		0x08
#define DB9_FIRE1		0x10
#define DB9_FIRE2		0x20
#define DB9_FIRE3		0x40
#define DB9_FIRE4		0x80

/* Control-port values used to toggle the Genesis SELECT line. */
#define DB9_NORMAL		0x0a
#define DB9_NOSELECT		0x08

#define DB9_GENESIS6_DELAY	14	/* us between 6-button select pulses */
/* Parenthesized so the macro is safe in any arithmetic context. */
#define DB9_REFRESH_TIME	(HZ / 100)

#define DB9_MAX_DEVICES		2
/* Static description of one supported pad type (one row of db9_modes[]). */
struct db9_mode_data {
	const char *name;	/* input device name; NULL marks an unused id */
	const short *buttons;	/* button codes to register */
	int n_buttons;		/* entries of buttons[] actually used */
	int n_pads;		/* pads this mode can deliver */
	int n_axis;		/* axes to register, taken from db9_abs[] */
	int bidirectional;	/* needs a PARPORT_MODE_TRISTATE port */
	int reverse;		/* put data lines in reverse mode on open */
};
/* Per-parport driver instance. */
struct db9 {
	struct input_dev *dev[DB9_MAX_DEVICES];	/* registered pads */
	struct timer_list timer;	/* polling timer (db9_timer) */
	struct pardevice *pd;		/* our registration on the parport */
	int mode;			/* DB9_* pad type */
	int used;			/* open count; timer runs while > 0 */
	struct mutex mutex;		/* serializes open/close */
	char phys[DB9_MAX_DEVICES][32];	/* phys path for each device */
};
static struct db9 *db9_base[3];
/* Button sets shared by the mode table below. */
static const short db9_multi_btn[] = { BTN_TRIGGER, BTN_THUMB };
static const short db9_genesis_btn[] = { BTN_START, BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z, BTN_MODE };
static const short db9_cd32_btn[] = { BTN_A, BTN_B, BTN_C, BTN_X, BTN_Y, BTN_Z, BTN_TL, BTN_TR, BTN_START };
/* Axes handed out in order; indices 0/1 are the digital X/Y pair. */
static const short db9_abs[] = { ABS_X, ABS_Y, ABS_RX, ABS_RY, ABS_RZ, ABS_Z, ABS_HAT0X, ABS_HAT0Y, ABS_HAT1X, ABS_HAT1Y };
/* Indexed by the DB9_* mode number; NULL rows are the unused ids. */
static const struct db9_mode_data db9_modes[] = {
	{ NULL, NULL, 0, 0, 0, 0, 0 },
	{ "Multisystem joystick", db9_multi_btn, 1, 1, 2, 1, 1 },
	{ "Multisystem joystick (2 fire)", db9_multi_btn, 2, 1, 2, 1, 1 },
	{ "Genesis pad", db9_genesis_btn, 4, 1, 2, 1, 1 },
	{ NULL, NULL, 0, 0, 0, 0, 0 },
	{ "Genesis 5 pad", db9_genesis_btn, 6, 1, 2, 1, 1 },
	{ "Genesis 6 pad", db9_genesis_btn, 8, 1, 2, 1, 1 },
	{ "Saturn pad", db9_cd32_btn, 9, 6, 7, 0, 1 },
	{ "Multisystem (0.8.0.2) joystick", db9_multi_btn, 1, 1, 2, 1, 1 },
	{ "Multisystem (0.8.0.2-dual) joystick", db9_multi_btn, 1, 2, 2, 1, 1 },
	{ "Amiga CD-32 pad", db9_cd32_btn, 7, 1, 2, 1, 1 },
	{ "Saturn dpp", db9_cd32_btn, 9, 6, 7, 0, 0 },
	{ "Saturn dpp dual", db9_cd32_btn, 9, 12, 7, 0, 0 },
};
/*
 * Saturn controllers
 */
#define DB9_SATURN_DELAY 300	/* us settle time between clock edges */
/* Per-button byte offset and bit mask within a decoded Saturn frame,
 * in the same order as db9_cd32_btn[]. */
static const int db9_saturn_byte[] = { 1, 1, 1, 2, 2, 2, 2, 2, 1 };
static const unsigned char db9_saturn_mask[] = { 0x04, 0x01, 0x02, 0x40, 0x20, 0x10, 0x08, 0x80, 0x08 };
/*
 * db9_saturn_write_sub() writes 2 bit data.
 * type selects the lead wiring: 1/2 = DPP leads driven via the data
 * port, 0 = plain DB9 lead driven via the control lines.  The power
 * flags keep the pad's supply bits asserted while talking to it.
 */
static void db9_saturn_write_sub(struct parport *port, int type, unsigned char data, int powered, int pwr_sub)
{
	unsigned char c;

	switch (type) {
	case 1: /* DPP1: select bits in the low nibble */
		c = 0x80 | 0x30 | (powered ? 0x08 : 0) | (pwr_sub ? 0x04 : 0) | data;
		parport_write_data(port, c);
		break;
	case 2: /* DPP2: select bits shifted into the high nibble */
		c = 0x40 | data << 4 | (powered ? 0x08 : 0) | (pwr_sub ? 0x04 : 0) | 0x03;
		parport_write_data(port, c);
		break;
	case 0: /* DB9: wiggle the control lines instead */
		c = ((((data & 2) ? 2 : 0) | ((data & 1) ? 4 : 0)) ^ 0x02) | !powered;
		parport_write_control(port, c);
		break;
	}
}
/*
 * gc_saturn_read_sub() reads 4 bit data.
 * (The name in this old comment is stale - the function below is
 * db9_saturn_read_sub.)  Returns the nibble with its bit order
 * reversed into bits 0..3.
 */
static unsigned char db9_saturn_read_sub(struct parport *port, int type)
{
	unsigned char data;

	if (type) {
		/* DPP: nibble arrives on the status port; the top status
		   bit is flipped before decoding */
		data = parport_read_status(port) ^ 0x80;
		return (data & 0x80 ? 1 : 0) | (data & 0x40 ? 2 : 0)
			| (data & 0x20 ? 4 : 0) | (data & 0x10 ? 8 : 0);
	} else {
		/* DB9: nibble arrives on the data port */
		data = parport_read_data(port) & 0x0f;
		return (data & 0x8 ? 1 : 0) | (data & 0x4 ? 2 : 0)
			| (data & 0x2 ? 4 : 0) | (data & 0x1 ? 8 : 0);
	}
}
/*
 * db9_saturn_read_analog() sends clock and reads 8 bit data.
 * Two nibble reads; the first one lands in the high half of the byte.
 */
static unsigned char db9_saturn_read_analog(struct parport *port, int type, int powered)
{
	unsigned char data;

	db9_saturn_write_sub(port, type, 0, powered, 0);
	udelay(DB9_SATURN_DELAY);
	data = db9_saturn_read_sub(port, type) << 4;
	db9_saturn_write_sub(port, type, 2, powered, 0);
	udelay(DB9_SATURN_DELAY);
	data |= db9_saturn_read_sub(port, type);
	return data;
}
/*
 * db9_saturn_read_packet() reads whole saturn packet at connector
 * and returns device identifier code.
 *
 * The low nibble of the first handshake byte selects the protocol:
 * 1111 = nothing connected, ?100 = digital pad, 0001 = analog pad or
 * multitap, 0000 = mouse.  On return data[0] holds the id byte that
 * db9_saturn_report() dispatches on.
 */
static unsigned char db9_saturn_read_packet(struct parport *port, unsigned char *data, int type, int powered)
{
	int i, j;
	unsigned char tmp;

	db9_saturn_write_sub(port, type, 3, powered, 0);
	data[0] = db9_saturn_read_sub(port, type);
	switch (data[0] & 0x0f) {
	case 0xf:
		/* 1111  no pad */
		return data[0] = 0xff;
	case 0x4: case 0x4 | 0x8:
		/* ?100 : digital controller */
		db9_saturn_write_sub(port, type, 0, powered, 1);
		data[2] = db9_saturn_read_sub(port, type) << 4;
		db9_saturn_write_sub(port, type, 2, powered, 1);
		data[1] = db9_saturn_read_sub(port, type) << 4;
		db9_saturn_write_sub(port, type, 1, powered, 1);
		data[1] |= db9_saturn_read_sub(port, type);
		db9_saturn_write_sub(port, type, 3, powered, 1);
		/* data[2] |= db9_saturn_read_sub(port, type); */
		data[2] |= data[0];
		return data[0] = 0x02;
	case 0x1:
		/* 0001 : analog controller or multitap */
		db9_saturn_write_sub(port, type, 2, powered, 0);
		udelay(DB9_SATURN_DELAY);
		data[0] = db9_saturn_read_analog(port, type, powered);
		if (data[0] != 0x41) {
			/* read analog controller; low nibble of the id
			   byte is the number of payload bytes to follow */
			for (i = 0; i < (data[0] & 0x0f); i++)
				data[i + 1] = db9_saturn_read_analog(port, type, powered);
			db9_saturn_write_sub(port, type, 3, powered, 0);
			return data[0];
		} else {
			/* read multitap: one 10-byte slot per pad */
			if (db9_saturn_read_analog(port, type, powered) != 0x60)
				return data[0] = 0xff;
			for (i = 0; i < 60; i += 10) {
				data[i] = db9_saturn_read_analog(port, type, powered);
				if (data[i] != 0xff)
					/* read each pad */
					for (j = 0; j < (data[i] & 0x0f); j++)
						data[i + j + 1] = db9_saturn_read_analog(port, type, powered);
			}
			db9_saturn_write_sub(port, type, 3, powered, 0);
			return 0x41;
		}
	case 0x0:
		/* 0000 : mouse */
		db9_saturn_write_sub(port, type, 2, powered, 0);
		udelay(DB9_SATURN_DELAY);
		tmp = db9_saturn_read_analog(port, type, powered);
		if (tmp == 0xff) {
			for (i = 0; i < 3; i++)
				data[i + 1] = db9_saturn_read_analog(port, type, powered);
			db9_saturn_write_sub(port, type, 3, powered, 0);
			return data[0] = 0xe3;
		}
		/* unexpected mouse signature: deliberate fall through,
		   returning the raw handshake byte unchanged */
	default:
		return data[0];
	}
}
/*
 * db9_saturn_report() analyzes packet and reports.
 *
 * data[] holds one 10-byte frame per pad (six frames for a multitap,
 * id 0x41).  n is the index of the first entry of devs[] to fill in;
 * the updated index is returned.  The case labels are the id bytes
 * produced by db9_saturn_read_packet(); the analog cases deliberately
 * fall through so each richer pad adds its extra axes on top of the
 * common digital decode.
 */
static int db9_saturn_report(unsigned char id, unsigned char data[60], struct input_dev *devs[], int n, int max_pads)
{
	struct input_dev *dev;
	int tmp, i, j;

	tmp = (id == 0x41) ? 60 : 10;	/* multitap: scan all six slots */
	for (j = 0; j < tmp && n < max_pads; j += 10, n++) {
		dev = devs[n];
		switch (data[j]) {
		case 0x16: /* multi controller (analog 4 axis) */
			input_report_abs(dev, db9_abs[5], data[j + 6]);
			/* fall through */
		case 0x15: /* mission stick (analog 3 axis) */
			input_report_abs(dev, db9_abs[3], data[j + 4]);
			input_report_abs(dev, db9_abs[4], data[j + 5]);
			/* fall through */
		case 0x13: /* racing controller (analog 1 axis) */
			input_report_abs(dev, db9_abs[2], data[j + 3]);
			/* fall through */
		case 0x34: /* saturn keyboard (udlr ZXC ASD QE Esc) */
		case 0x02: /* digital pad (digital 2 axis + buttons) */
			input_report_abs(dev, db9_abs[0], !(data[j + 1] & 128) - !(data[j + 1] & 64));
			input_report_abs(dev, db9_abs[1], !(data[j + 1] & 32) - !(data[j + 1] & 16));
			for (i = 0; i < 9; i++)
				/* buttons are active low in the frame */
				input_report_key(dev, db9_cd32_btn[i], ~data[j + db9_saturn_byte[i]] & db9_saturn_mask[i]);
			break;
		case 0x19: /* mission stick x2 (analog 6 axis + buttons) */
			input_report_abs(dev, db9_abs[0], !(data[j + 1] & 128) - !(data[j + 1] & 64));
			input_report_abs(dev, db9_abs[1], !(data[j + 1] & 32) - !(data[j + 1] & 16));
			for (i = 0; i < 9; i++)
				input_report_key(dev, db9_cd32_btn[i], ~data[j + db9_saturn_byte[i]] & db9_saturn_mask[i]);
			input_report_abs(dev, db9_abs[2], data[j + 3]);
			input_report_abs(dev, db9_abs[3], data[j + 4]);
			input_report_abs(dev, db9_abs[4], data[j + 5]);
			/*
			input_report_abs(dev, db9_abs[8], (data[j + 6] & 128 ? 0 : 1) - (data[j + 6] & 64 ? 0 : 1));
			input_report_abs(dev, db9_abs[9], (data[j + 6] & 32 ? 0 : 1) - (data[j + 6] & 16 ? 0 : 1));
			*/
			input_report_abs(dev, db9_abs[6], data[j + 7]);
			input_report_abs(dev, db9_abs[7], data[j + 8]);
			input_report_abs(dev, db9_abs[5], data[j + 9]);
			break;
		case 0xd3: /* sankyo ff (analog 1 axis + stop btn) */
			input_report_key(dev, BTN_A, data[j + 3] & 0x80);
			input_report_abs(dev, db9_abs[2], data[j + 3] & 0x7f);
			break;
		case 0xe3: /* shuttle mouse (analog 2 axis + buttons. signed value) */
			input_report_key(dev, BTN_START, data[j + 1] & 0x08);
			input_report_key(dev, BTN_A, data[j + 1] & 0x04);
			input_report_key(dev, BTN_C, data[j + 1] & 0x02);
			input_report_key(dev, BTN_B, data[j + 1] & 0x01);
			input_report_abs(dev, db9_abs[2], data[j + 2] ^ 0x80);
			/* Y axis is negated (mouse Y grows downward) */
			input_report_abs(dev, db9_abs[3], (0xff-(data[j + 3] ^ 0x80))+1); /* */
			break;
		case 0xff:
		default: /* no pad: park axes and release all buttons */
			input_report_abs(dev, db9_abs[0], 0);
			input_report_abs(dev, db9_abs[1], 0);
			for (i = 0; i < 9; i++)
				input_report_key(dev, db9_cd32_btn[i], 0);
			break;
		}
	}
	return n;
}
/*
 * db9_saturn() - poll every Saturn lead attached in the given mode and
 * feed the decoded packets to the input devices.  Returns 0 on
 * success, -1 for a mode this helper does not handle.
 */
static int db9_saturn(int mode, struct parport *port, struct input_dev *devs[])
{
	unsigned char id, data[60];
	int type, n, max_pads;
	int tmp, i;

	switch (mode) {
	case DB9_SATURN_PAD:	/* one pad on a plain DB9 lead */
		type = 0;
		n = 1;
		break;
	case DB9_SATURN_DPP:	/* one DPP lead */
		type = 1;
		n = 1;
		break;
	case DB9_SATURN_DPP_2:	/* two DPP leads */
		type = 1;
		n = 2;
		break;
	default:
		return -1;
	}
	max_pads = min(db9_modes[mode].n_pads, DB9_MAX_DEVICES);
	for (tmp = 0, i = 0; i < n; i++) {
		id = db9_saturn_read_packet(port, data, type + i, 1);
		tmp = db9_saturn_report(id, data, devs, tmp, max_pads);
	}
	return 0;
}
/*
 * Periodic poll routine: reads the pad(s) attached to the parallel
 * port according to db9->mode, reports the state through the input
 * layer, and re-arms itself.
 *
 * Fix: the DB9_MULTI_0802_2 case reports events on dev2 but only
 * `dev' was synced at the bottom of the function, so dev2's events
 * were never flushed to evdev readers; sync dev2 explicitly before
 * falling through to the single-pad case.
 */
static void db9_timer(unsigned long private)
{
	struct db9 *db9 = (void *) private;
	struct parport *port = db9->pd->port;
	struct input_dev *dev = db9->dev[0];
	struct input_dev *dev2 = db9->dev[1];
	int data, i;

	switch (db9->mode) {
	case DB9_MULTI_0802_2:
		/* second pad is read from the data lines */
		data = parport_read_data(port) >> 3;
		input_report_abs(dev2, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev2, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		input_report_key(dev2, BTN_TRIGGER, ~data & DB9_FIRE1);
		input_sync(dev2);	/* dev2 is not covered by the input_sync(dev) below */
		/* fall through - first pad is handled like DB9_MULTI_0802 */
	case DB9_MULTI_0802:
		/* first pad comes in on the status lines (note: fire bit
		 * polarity differs from the data-line cases below) */
		data = parport_read_status(port) >> 3;
		input_report_abs(dev, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		input_report_key(dev, BTN_TRIGGER, data & DB9_FIRE1);
		break;
	case DB9_MULTI_STICK:
		data = parport_read_data(port);
		input_report_abs(dev, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		input_report_key(dev, BTN_TRIGGER, ~data & DB9_FIRE1);
		break;
	case DB9_MULTI2_STICK:
		data = parport_read_data(port);
		input_report_abs(dev, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		input_report_key(dev, BTN_TRIGGER, ~data & DB9_FIRE1);
		input_report_key(dev, BTN_THUMB, ~data & DB9_FIRE2);
		break;
	case DB9_GENESIS_PAD:
		/* SELECT low: read directions + B/C */
		parport_write_control(port, DB9_NOSELECT);
		data = parport_read_data(port);
		input_report_abs(dev, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		input_report_key(dev, BTN_B, ~data & DB9_FIRE1);
		input_report_key(dev, BTN_C, ~data & DB9_FIRE2);
		/* SELECT high: read A/Start */
		parport_write_control(port, DB9_NORMAL);
		data = parport_read_data(port);
		input_report_key(dev, BTN_A, ~data & DB9_FIRE1);
		input_report_key(dev, BTN_START, ~data & DB9_FIRE2);
		break;
	case DB9_GENESIS5_PAD:
		parport_write_control(port, DB9_NOSELECT);
		data = parport_read_data(port);
		input_report_abs(dev, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		input_report_key(dev, BTN_B, ~data & DB9_FIRE1);
		input_report_key(dev, BTN_C, ~data & DB9_FIRE2);
		parport_write_control(port, DB9_NORMAL);
		data = parport_read_data(port);
		input_report_key(dev, BTN_A, ~data & DB9_FIRE1);
		input_report_key(dev, BTN_X, ~data & DB9_FIRE2);
		input_report_key(dev, BTN_Y, ~data & DB9_LEFT);
		input_report_key(dev, BTN_START, ~data & DB9_RIGHT);
		break;
	case DB9_GENESIS6_PAD:
		/* the 6-button pad needs a sequence of SELECT toggles;
		 * the extra buttons appear on the 3rd low phase */
		parport_write_control(port, DB9_NOSELECT); /* 1 */
		udelay(DB9_GENESIS6_DELAY);
		data = parport_read_data(port);
		input_report_abs(dev, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		input_report_key(dev, BTN_B, ~data & DB9_FIRE1);
		input_report_key(dev, BTN_C, ~data & DB9_FIRE2);
		parport_write_control(port, DB9_NORMAL);
		udelay(DB9_GENESIS6_DELAY);
		data = parport_read_data(port);
		input_report_key(dev, BTN_A, ~data & DB9_FIRE1);
		input_report_key(dev, BTN_START, ~data & DB9_FIRE2);
		parport_write_control(port, DB9_NOSELECT); /* 2 */
		udelay(DB9_GENESIS6_DELAY);
		parport_write_control(port, DB9_NORMAL);
		udelay(DB9_GENESIS6_DELAY);
		parport_write_control(port, DB9_NOSELECT); /* 3 */
		udelay(DB9_GENESIS6_DELAY);
		data = parport_read_data(port);
		input_report_key(dev, BTN_X, ~data & DB9_LEFT);
		input_report_key(dev, BTN_Y, ~data & DB9_DOWN);
		input_report_key(dev, BTN_Z, ~data & DB9_UP);
		input_report_key(dev, BTN_MODE, ~data & DB9_RIGHT);
		parport_write_control(port, DB9_NORMAL);
		udelay(DB9_GENESIS6_DELAY);
		parport_write_control(port, DB9_NOSELECT); /* 4 */
		udelay(DB9_GENESIS6_DELAY);
		parport_write_control(port, DB9_NORMAL);
		break;
	case DB9_SATURN_PAD:
	case DB9_SATURN_DPP:
	case DB9_SATURN_DPP_2:
		db9_saturn(db9->mode, port, db9->dev);
		break;
	case DB9_CD32_PAD:
		data = parport_read_data(port);
		input_report_abs(dev, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1));
		input_report_abs(dev, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 0 : 1));
		/* clock the 7 button bits out of the pad's shift register */
		parport_write_control(port, 0x0a);
		for (i = 0; i < 7; i++) {
			data = parport_read_data(port);
			parport_write_control(port, 0x02);
			parport_write_control(port, 0x0a);
			input_report_key(dev, db9_cd32_btn[i], ~data & DB9_FIRE2);
		}
		parport_write_control(port, 0x00);
		break;
	}

	input_sync(dev);
	mod_timer(&db9->timer, jiffies + DB9_REFRESH_TIME);
}
static int db9_open(struct input_dev *dev)
{
struct db9 *db9 = input_get_drvdata(dev);
struct parport *port = db9->pd->port;
int err;
err = mutex_lock_interruptible(&db9->mutex);
if (err)
return err;
if (!db9->used++) {
parport_claim(db9->pd);
parport_write_data(port, 0xff);
if (db9_modes[db9->mode].reverse) {
parport_data_reverse(port);
parport_write_control(port, DB9_NORMAL);
}
mod_timer(&db9->timer, jiffies + DB9_REFRESH_TIME);
}
mutex_unlock(&db9->mutex);
return 0;
}
/*
 * input core close callback: when the last user goes away, stop the
 * polling timer, quiesce the port and release it.
 */
static void db9_close(struct input_dev *dev)
{
	struct db9 *db9 = input_get_drvdata(dev);
	struct parport *port = db9->pd->port;

	mutex_lock(&db9->mutex);

	db9->used--;
	if (db9->used == 0) {
		del_timer_sync(&db9->timer);
		parport_write_control(port, 0x00);
		parport_data_forward(port);
		parport_release(db9->pd);
	}

	mutex_unlock(&db9->mutex);
}
/*
 * Probe one configured port: validate the mode, find and register the
 * parallel port, allocate the driver state and register one input
 * device per pad supported by the mode.
 *
 * Returns the new struct db9 on success, or ERR_PTR(-errno) on
 * failure; the error path unwinds exactly what was set up so far.
 */
static struct db9 __init *db9_probe(int parport, int mode)
{
	struct db9 *db9;
	const struct db9_mode_data *db9_mode;
	struct parport *pp;
	struct pardevice *pd;
	struct input_dev *input_dev;
	int i, j;
	int err;

	/* reject out-of-range or unpopulated mode table entries */
	if (mode < 1 || mode >= DB9_MAX_PAD || !db9_modes[mode].n_buttons) {
		printk(KERN_ERR "db9.c: Bad device type %d\n", mode);
		err = -EINVAL;
		goto err_out;
	}

	db9_mode = &db9_modes[mode];

	pp = parport_find_number(parport);
	if (!pp) {
		printk(KERN_ERR "db9.c: no such parport\n");
		err = -ENODEV;
		goto err_out;
	}

	/* some pad protocols need to read via the data lines */
	if (db9_mode->bidirectional && !(pp->modes & PARPORT_MODE_TRISTATE)) {
		printk(KERN_ERR "db9.c: specified parport is not bidirectional\n");
		err = -EINVAL;
		goto err_put_pp;
	}

	pd = parport_register_device(pp, "db9", NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
	if (!pd) {
		printk(KERN_ERR "db9.c: parport busy already - lp.o loaded?\n");
		err = -EBUSY;
		goto err_put_pp;
	}

	db9 = kzalloc(sizeof(struct db9), GFP_KERNEL);
	if (!db9) {
		printk(KERN_ERR "db9.c: Not enough memory\n");
		err = -ENOMEM;
		goto err_unreg_pardev;
	}

	mutex_init(&db9->mutex);
	db9->pd = pd;
	db9->mode = mode;
	init_timer(&db9->timer);
	db9->timer.data = (long) db9;
	db9->timer.function = db9_timer;

	/* one input device per pad, capped at DB9_MAX_DEVICES */
	for (i = 0; i < (min(db9_mode->n_pads, DB9_MAX_DEVICES)); i++) {

		db9->dev[i] = input_dev = input_allocate_device();
		if (!input_dev) {
			printk(KERN_ERR "db9.c: Not enough memory for input device\n");
			err = -ENOMEM;
			goto err_unreg_devs;
		}

		snprintf(db9->phys[i], sizeof(db9->phys[i]),
			 "%s/input%d", db9->pd->port->name, i);

		input_dev->name = db9_mode->name;
		input_dev->phys = db9->phys[i];
		input_dev->id.bustype = BUS_PARPORT;
		input_dev->id.vendor = 0x0002;
		input_dev->id.product = mode;
		input_dev->id.version = 0x0100;

		input_set_drvdata(input_dev, db9);

		input_dev->open = db9_open;
		input_dev->close = db9_close;

		input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
		for (j = 0; j < db9_mode->n_buttons; j++)
			set_bit(db9_mode->buttons[j], input_dev->keybit);
		for (j = 0; j < db9_mode->n_axis; j++) {
			/* first two axes are digital directions (-1..1),
			 * the rest are analog (1..255) */
			if (j < 2)
				input_set_abs_params(input_dev, db9_abs[j], -1, 1, 0, 0);
			else
				input_set_abs_params(input_dev, db9_abs[j], 1, 255, 0, 0);
		}

		err = input_register_device(input_dev);
		if (err)
			goto err_free_dev;
	}

	/* registered devices keep their own references; drop ours */
	parport_put_port(pp);
	return db9;

 err_free_dev:
	input_free_device(db9->dev[i]);
 err_unreg_devs:
	while (--i >= 0)
		input_unregister_device(db9->dev[i]);
	kfree(db9);
 err_unreg_pardev:
	parport_unregister_device(pd);
 err_put_pp:
	parport_put_port(pp);
 err_out:
	return ERR_PTR(err);
}
/* Tear down one probed port: unregister its input devices and the
 * parallel-port device, then free the driver state. */
static void db9_remove(struct db9 *db9)
{
	int i, n_devs;

	n_devs = min(db9_modes[db9->mode].n_pads, DB9_MAX_DEVICES);
	for (i = 0; i < n_devs; i++)
		input_unregister_device(db9->dev[i]);

	parport_unregister_device(db9->pd);
	kfree(db9);
}
/*
 * Module init: probe every configured port.  On any failure, undo the
 * ports probed so far and return the error; otherwise succeed only if
 * at least one device came up.
 */
static int __init db9_init(void)
{
	int i;
	int have_dev = 0;
	int err = 0;

	for (i = 0; i < DB9_MAX_PORTS; i++) {
		/* skip unconfigured slots */
		if (db9_cfg[i].nargs == 0 || db9_cfg[i].args[DB9_ARG_PARPORT] < 0)
			continue;

		/* a port number without a device type is a hard error */
		if (db9_cfg[i].nargs < 2) {
			printk(KERN_ERR "db9.c: Device type must be specified.\n");
			err = -EINVAL;
			break;
		}

		db9_base[i] = db9_probe(db9_cfg[i].args[DB9_ARG_PARPORT],
					db9_cfg[i].args[DB9_ARG_MODE]);
		if (IS_ERR(db9_base[i])) {
			err = PTR_ERR(db9_base[i]);
			break;
		}

		have_dev = 1;
	}

	if (err) {
		/* unwind only the slots before the failing one */
		while (--i >= 0)
			if (db9_base[i])
				db9_remove(db9_base[i]);
		return err;
	}

	return have_dev ? 0 : -ENODEV;
}
/* Module exit: remove every port that was successfully probed. */
static void __exit db9_exit(void)
{
	int i;

	for (i = 0; i < DB9_MAX_PORTS; i++) {
		struct db9 *db9 = db9_base[i];

		if (db9)
			db9_remove(db9);
	}
}
module_init(db9_init);
module_exit(db9_exit);
| gpl-2.0 |
zefie/kernel_moto_shamu | arch/powerpc/boot/ep8248e.c | 14104 | 1260 | /*
* Embedded Planet EP8248E with PlanetCore firmware
*
* Author: Scott Wood <scottwood@freescale.com>
*
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "ops.h"
#include "stdio.h"
#include "planetcore.h"
#include "pq2.h"
static char *table;
static u64 mem_size;
#include <io.h>
/*
 * Device-tree fixups: patch in the RAM size and MAC addresses from
 * the PlanetCore table, then fix up the clocks if the crystal
 * frequency key is present.
 */
static void platform_fixups(void)
{
	u64 crystal_hz;

	dt_fixup_memory(0, mem_size);
	planetcore_set_mac_addrs(table);

	if (planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ,
				   &crystal_hz))
		pq2_fixup_clocks(crystal_hz);
	else
		printf("No PlanetCore crystal frequency key.\r\n");
}
/*
 * Bootwrapper entry point.  The PlanetCore firmware passes its
 * key/value table in r3; everything else is derived from it.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	table = (char *)r3;
	planetcore_prepare_table(table);

	/* without a RAM-size key we cannot set up the allocator */
	if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
		return;

	mem_size *= 1024 * 1024;	/* key is in megabytes */
	simple_alloc_init(_end, mem_size - (unsigned long)_end, 32, 64);
	fdt_init(_dtb_start);
	planetcore_set_stdout_path(table);
	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
| gpl-2.0 |
semdoc/linux-block | arch/powerpc/boot/ep8248e.c | 14104 | 1260 | /*
* Embedded Planet EP8248E with PlanetCore firmware
*
* Author: Scott Wood <scottwood@freescale.com>
*
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "ops.h"
#include "stdio.h"
#include "planetcore.h"
#include "pq2.h"
static char *table;
static u64 mem_size;
#include <io.h>
/*
 * Device-tree fixups driven by the PlanetCore key table: memory size,
 * MAC addresses, and (when the crystal key exists) the clocks.
 */
static void platform_fixups(void)
{
	u64 val;

	dt_fixup_memory(0, mem_size);
	planetcore_set_mac_addrs(table);

	/* planetcore_get_decimal() returning 0 means the key is absent */
	if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &val)) {
		printf("No PlanetCore crystal frequency key.\r\n");
		return;
	}

	pq2_fixup_clocks(val);
}
/*
 * Bootwrapper entry point: parse the PlanetCore table passed in r3,
 * size the heap from the RAM key, and hook up console and fixups.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	table = (char *)r3;
	planetcore_prepare_table(table);

	if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
		return;

	mem_size <<= 20;	/* megabytes -> bytes */

	simple_alloc_init(_end, mem_size - (unsigned long)_end, 32, 64);
	fdt_init(_dtb_start);

	planetcore_set_stdout_path(table);
	serial_console_init();

	platform_ops.fixups = platform_fixups;
}
| gpl-2.0 |
Dosis/geeksphone-kernel-zero-2.6.35 | arch/powerpc/boot/cuboot-824x.c | 14104 | 1224 | /*
* Old U-boot compatibility for 824x
*
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "ops.h"
#include "stdio.h"
#include "cuboot.h"
#define TARGET_824x
#include "ppcboot.h"
static bd_t bd;
/*
 * Device-tree fixups from the U-Boot bd_t: memory layout, MAC
 * addresses, CPU clocks, and the bus/serial clock frequencies under
 * the soc node.
 */
static void platform_fixups(void)
{
	void *soc;

	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
	dt_fixup_mac_addresses(bd.bi_enetaddr);
	/* timebase runs at busfreq/4 on these parts */
	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 4, bd.bi_busfreq);

	soc = find_node_by_devtype(NULL, "soc");
	if (soc) {
		void *serial = NULL;

		setprop(soc, "bus-frequency", &bd.bi_busfreq,
			sizeof(bd.bi_busfreq));

		/* only patch serial nodes that sit directly under the soc */
		while ((serial = find_node_by_devtype(serial, "serial"))) {
			if (get_parent(serial) != soc)
				continue;

			setprop(serial, "clock-frequency", &bd.bi_busfreq,
				sizeof(bd.bi_busfreq));
		}
	}
}
/*
 * Bootwrapper entry point for old-U-Boot 824x boards: CUBOOT_INIT()
 * captures the firmware's bd_t/registers, then the wrapper brings up
 * the device tree, console and fixup hook.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	fdt_init(_dtb_start);
	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
| gpl-2.0 |
LordNerevar/kernel_htc_pyramid | arch/powerpc/boot/ep8248e.c | 14104 | 1260 | /*
* Embedded Planet EP8248E with PlanetCore firmware
*
* Author: Scott Wood <scottwood@freescale.com>
*
* Copyright (c) 2007 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*/
#include "ops.h"
#include "stdio.h"
#include "planetcore.h"
#include "pq2.h"
static char *table;
static u64 mem_size;
#include <io.h>
/*
 * Apply PlanetCore-derived fixups to the device tree: memory size,
 * MAC addresses, and clock frequencies from the crystal key.
 */
static void platform_fixups(void)
{
	u64 freq;

	dt_fixup_memory(0, mem_size);
	planetcore_set_mac_addrs(table);

	if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &freq)) {
		printf("No PlanetCore crystal frequency key.\r\n");
		return;
	}

	pq2_fixup_clocks(freq);
}
/*
 * Bootwrapper entry point.  r3 carries the PlanetCore key table;
 * the RAM-size key determines the heap and memory fixup.
 */
void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	table = (char *)r3;
	planetcore_prepare_table(table);

	/* bail out silently if the firmware did not report RAM size */
	if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
		return;

	mem_size *= 1024 * 1024;	/* key value is in megabytes */
	simple_alloc_init(_end, mem_size - (unsigned long)_end, 32, 64);
	fdt_init(_dtb_start);
	planetcore_set_stdout_path(table);
	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
| gpl-2.0 |
kzlin129/tt-gpl | go12/linux-2.6.28.10/drivers/net/ppp_generic.c | 25 | 67910 | /*
* Generic PPP layer for Linux.
*
* Copyright 1999-2002 Paul Mackerras.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* The generic PPP layer handles the PPP network interfaces, the
* /dev/ppp device, packet and VJ compression, and multilink.
* It talks to PPP `channels' via the interface defined in
* include/linux/ppp_channel.h. Channels provide the basic means for
* sending and receiving PPP frames on some kind of communications
* channel.
*
* Part of the code in this driver was inspired by the old async-only
* PPP driver, written by Michael Callahan and Al Longyear, and
* subsequently hacked by Paul Mackerras.
*
* ==FILEVERSION 20041108==
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/filter.h>
#include <linux/if_ppp.h>
#include <linux/ppp_channel.h>
#include <linux/ppp-comp.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <net/slhc_vj.h>
#include <asm/atomic.h>
#define PPP_VERSION "2.4.2"
/*
* Network protocols we support.
*/
#define NP_IP 0 /* Internet Protocol V4 */
#define NP_IPV6 1 /* Internet Protocol V6 */
#define NP_IPX 2 /* IPX protocol */
#define NP_AT 3 /* Appletalk protocol */
#define NP_MPLS_UC 4 /* MPLS unicast */
#define NP_MPLS_MC 5 /* MPLS multicast */
#define NUM_NP 6 /* Number of NPs. */
#define MPHDRLEN 6 /* multilink protocol header length */
#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */
#define MIN_FRAG_SIZE 64
/*
* An instance of /dev/ppp can be associated with either a ppp
* interface unit or a ppp channel. In both cases, file->private_data
* points to one of these.
*/
struct ppp_file {
	enum {
		INTERFACE=1, CHANNEL	/* what this open file is bound to */
	} kind;
	struct sk_buff_head xq;		/* pppd transmit queue */
	struct sk_buff_head rq;		/* receive queue for pppd */
	wait_queue_head_t rwait;	/* for poll on reading /dev/ppp */
	atomic_t refcnt;		/* # refs (incl /dev/ppp attached) */
	int hdrlen;			/* space to leave for headers */
	int index;			/* interface unit / channel number */
	int dead;			/* unit/channel has been shut down */
};
#define PF_TO_X(pf, X) container_of(pf, X, file)
#define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp)
#define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel)
/*
* Data structure describing one ppp unit.
* A ppp unit corresponds to a ppp network interface device
* and represents a multilink bundle.
* It can have 0 or more ppp channels connected to it.
*/
struct ppp {
	struct ppp_file file;		/* stuff for read/write/poll 0 */
	struct file *owner;		/* file that owns this unit 48 */
	struct list_head channels;	/* list of attached channels 4c */
	int n_channels;			/* how many channels are attached 54 */
	spinlock_t rlock;		/* lock for receive side 58 */
	spinlock_t wlock;		/* lock for transmit side 5c */
	int mru;			/* max receive unit 60 */
	unsigned int flags;		/* control bits 64 */
	unsigned int xstate;		/* transmit state bits 68 */
	unsigned int rstate;		/* receive state bits 6c */
	int debug;			/* debug flags 70 */
	struct slcompress *vj;		/* state for VJ header compression */
	enum NPmode npmode[NUM_NP];	/* what to do with each net proto 78 */
	struct sk_buff *xmit_pending;	/* a packet ready to go out 88 */
	struct compressor *xcomp;	/* transmit packet compressor 8c */
	void *xc_state;			/* its internal state 90 */
	struct compressor *rcomp;	/* receive decompressor 94 */
	void *rc_state;			/* its internal state 98 */
	unsigned long last_xmit;	/* jiffies when last pkt sent 9c */
	unsigned long last_recv;	/* jiffies when last pkt rcvd a0 */
	struct net_device *dev;		/* network interface device a4 */
	int closing;			/* is device closing down? a8 */
#ifdef CONFIG_PPP_MULTILINK
	/* multilink (MP) fragmentation/reassembly state */
	int nxchan;		/* next channel to send something on */
	u32 nxseq;		/* next sequence number to send */
	int mrru;		/* MP: max reconst. receive unit */
	u32 nextseq;		/* MP: seq no of next packet */
	u32 minseq;		/* MP: min of most recent seqnos */
	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	/* BPF filters installed via PPPIOCSPASS/PPPIOCSACTIVE */
	struct sock_filter *pass_filter;	/* filter for packets to pass */
	struct sock_filter *active_filter;/* filter for pkts to reset idle */
	unsigned pass_len, active_len;
#endif /* CONFIG_PPP_FILTER */
};
/*
* Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
* SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
* SC_MUST_COMP
* Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
* Bits in xstate: SC_COMP_RUN
*/
#define SC_FLAG_BITS (SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
|SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
|SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)
/*
* Private data structure for each channel.
* This includes the data structure used for multilink.
*/
struct channel {
	struct ppp_file file;		/* stuff for read/write/poll */
	struct list_head list;		/* link in all/new_channels list */
	struct ppp_channel *chan;	/* public channel data structure */
	struct rw_semaphore chan_sem;	/* protects `chan' during chan ioctl */
	spinlock_t downl;		/* protects `chan', file.xq dequeue */
	struct ppp *ppp;		/* ppp unit we're connected to */
	struct list_head clist;		/* link in list of channels per unit */
	rwlock_t upl;			/* protects `ppp' */
#ifdef CONFIG_PPP_MULTILINK
	/* per-channel multilink transmit/receive bookkeeping */
	u8 avail;		/* flag used in multilink stuff */
	u8 had_frag;		/* >= 1 fragments have been sent */
	u32 lastseq;		/* MP: last sequence # received */
#endif /* CONFIG_PPP_MULTILINK */
};
/*
* SMP locking issues:
* Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
* list and the ppp.n_channels field, you need to take both locks
* before you modify them.
* The lock ordering is: channel.upl -> ppp.wlock -> ppp.rlock ->
* channel.downl.
*/
/*
* A cardmap represents a mapping from unsigned integers to pointers,
* and provides a fast "find lowest unused number" operation.
* It uses a broad (32-way) tree with a bitmap at each level.
* It is designed to be space-efficient for small numbers of entries
* and time-efficient for large numbers of entries.
*/
#define CARDMAP_ORDER 5
#define CARDMAP_WIDTH (1U << CARDMAP_ORDER)
#define CARDMAP_MASK (CARDMAP_WIDTH - 1)
struct cardmap {
	int shift;			/* bit shift of this level's index */
	unsigned long inuse;		/* bitmap of occupied slots below */
	struct cardmap *parent;		/* NULL at the root */
	void *ptr[CARDMAP_WIDTH];	/* child nodes or leaf values */
};
static void *cardmap_get(struct cardmap *map, unsigned int nr);
static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
static unsigned int cardmap_find_first_free(struct cardmap *map);
static void cardmap_destroy(struct cardmap **map);
/*
* all_ppp_mutex protects the all_ppp_units mapping.
* It also ensures that finding a ppp unit in the all_ppp_units map
* and updating its file.refcnt field is atomic.
*/
static DEFINE_MUTEX(all_ppp_mutex);
static struct cardmap *all_ppp_units;
static atomic_t ppp_unit_count = ATOMIC_INIT(0);
/*
* all_channels_lock protects all_channels and last_channel_index,
* and the atomicity of find a channel and updating its file.refcnt
* field.
*/
static DEFINE_SPINLOCK(all_channels_lock);
static LIST_HEAD(all_channels);
static LIST_HEAD(new_channels);
static int last_channel_index;
static atomic_t channel_count = ATOMIC_INIT(0);
/* Get the PPP protocol number from a skb */
#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1])
/* We limit the length of ppp->file.rq to this (arbitrary) value */
#define PPP_MAX_RQLEN 32
/*
* Maximum number of multilink fragments queued up.
* This has to be large enough to cope with the maximum latency of
* the slowest channel relative to the others. Strictly it should
* depend on the number of channels and their characteristics.
*/
#define PPP_MP_MAX_QLEN 128
/* Multilink header bits. */
#define B 0x80 /* this fragment begins a packet */
#define E 0x40 /* this fragment ends a packet */
/* Compare multilink sequence numbers (assumed to be 32 bits wide) */
#define seq_before(a, b) ((s32)((a) - (b)) < 0)
#define seq_after(a, b) ((s32)((a) - (b)) > 0)
/* Prototypes. */
static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
unsigned int cmd, unsigned long arg);
static void ppp_xmit_process(struct ppp *ppp);
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
static void ppp_push(struct ppp *ppp);
static void ppp_channel_push(struct channel *pch);
static void ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb,
struct channel *pch);
static void ppp_receive_error(struct ppp *ppp);
static void ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_decompress_frame(struct ppp *ppp,
struct sk_buff *skb);
#ifdef CONFIG_PPP_MULTILINK
static void ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb,
struct channel *pch);
static void ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb);
static struct sk_buff *ppp_mp_reconstruct(struct ppp *ppp);
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb);
#endif /* CONFIG_PPP_MULTILINK */
static int ppp_set_compress(struct ppp *ppp, unsigned long arg);
static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
static struct ppp *ppp_create_interface(int unit, int *retp);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_shutdown_interface(struct ppp *ppp);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(int unit);
static struct channel *ppp_find_channel(int unit);
static int ppp_connect_channel(struct channel *pch, int unit);
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);
static struct class *ppp_class;
/* Translates a PPP protocol number to a NP index (NP == network protocol) */
/* Map a PPP protocol number to its NP index, or -EINVAL if the
 * protocol is not one of the network protocols we handle. */
static inline int proto_to_npindex(int proto)
{
	if (proto == PPP_IP)
		return NP_IP;
	if (proto == PPP_IPV6)
		return NP_IPV6;
	if (proto == PPP_IPX)
		return NP_IPX;
	if (proto == PPP_AT)
		return NP_AT;
	if (proto == PPP_MPLS_UC)
		return NP_MPLS_UC;
	if (proto == PPP_MPLS_MC)
		return NP_MPLS_MC;
	return -EINVAL;
}
/* Translates an NP index into a PPP protocol number */
/* Inverse of proto_to_npindex(): indexed by NP_* value. */
static const int npindex_to_proto[NUM_NP] = {
	PPP_IP,		/* NP_IP */
	PPP_IPV6,	/* NP_IPV6 */
	PPP_IPX,	/* NP_IPX */
	PPP_AT,		/* NP_AT */
	PPP_MPLS_UC,	/* NP_MPLS_UC */
	PPP_MPLS_MC,	/* NP_MPLS_MC */
};
/* Translates an ethertype into an NP index */
/* Map an ethertype to its NP index, or -1 for unsupported types.
 * Both AppleTalk ethertypes map to the single NP_AT protocol. */
static inline int ethertype_to_npindex(int ethertype)
{
	if (ethertype == ETH_P_IP)
		return NP_IP;
	if (ethertype == ETH_P_IPV6)
		return NP_IPV6;
	if (ethertype == ETH_P_IPX)
		return NP_IPX;
	if (ethertype == ETH_P_PPPTALK || ethertype == ETH_P_ATALK)
		return NP_AT;
	if (ethertype == ETH_P_MPLS_UC)
		return NP_MPLS_UC;
	if (ethertype == ETH_P_MPLS_MC)
		return NP_MPLS_MC;
	return -1;
}
/* Translates an NP index into an ethertype */
/* NP index -> ethertype; note NP_AT maps back to ETH_P_PPPTALK only. */
static const int npindex_to_ethertype[NUM_NP] = {
	ETH_P_IP,	/* NP_IP */
	ETH_P_IPV6,	/* NP_IPV6 */
	ETH_P_IPX,	/* NP_IPX */
	ETH_P_PPPTALK,	/* NP_AT */
	ETH_P_MPLS_UC,	/* NP_MPLS_UC */
	ETH_P_MPLS_MC,	/* NP_MPLS_MC */
};
/*
* Locking shorthand.
*/
#define ppp_xmit_lock(ppp) spin_lock_bh(&(ppp)->wlock)
#define ppp_xmit_unlock(ppp) spin_unlock_bh(&(ppp)->wlock)
#define ppp_recv_lock(ppp) spin_lock_bh(&(ppp)->rlock)
#define ppp_recv_unlock(ppp) spin_unlock_bh(&(ppp)->rlock)
#define ppp_lock(ppp) do { ppp_xmit_lock(ppp); \
ppp_recv_lock(ppp); } while (0)
#define ppp_unlock(ppp) do { ppp_recv_unlock(ppp); \
ppp_xmit_unlock(ppp); } while (0)
/*
* /dev/ppp device routines.
* The /dev/ppp device is used by pppd to control the ppp unit.
* It supports the read, write, ioctl and poll functions.
* Open instances of /dev/ppp can be in one of three states:
* unattached, attached to a ppp unit, or attached to a ppp channel.
*/
/*
 * Open /dev/ppp.  The new instance starts unattached; binding to a
 * unit or channel happens later via ioctl.
 */
static int ppp_open(struct inode *inode, struct file *file)
{
	cycle_kernel_lock();
	/*
	 * This could (should?) be enforced by the permissions on /dev/ppp.
	 */
	return capable(CAP_NET_ADMIN) ? 0 : -EPERM;
}
/*
 * Close an open instance of /dev/ppp.  If this file owns an interface
 * unit, shut the unit down; then drop our reference on the attached
 * unit/channel and destroy it when the last reference goes away.
 */
static int ppp_release(struct inode *unused, struct file *file)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;

	if (pf) {
		file->private_data = NULL;
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			/* only the creating fd may tear down the unit */
			if (file == ppp->owner)
				ppp_shutdown_interface(ppp);
		}
		if (atomic_dec_and_test(&pf->refcnt)) {
			switch (pf->kind) {
			case INTERFACE:
				ppp_destroy_interface(PF_TO_PPP(pf));
				break;
			case CHANNEL:
				ppp_destroy_channel(PF_TO_CHANNEL(pf));
				break;
			}
		}
	}
	return 0;
}
/*
 * Read one queued packet from the unit/channel into the user buffer.
 * Blocks (unless O_NONBLOCK) until a packet arrives, the unit/channel
 * dies (returns 0/EOF), or a signal is pending (-ERESTARTSYS).
 * Returns -EOVERFLOW if the caller's buffer is too small for the
 * packet, which is then discarded.
 */
static ssize_t ppp_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t ret;
	struct sk_buff *skb = NULL;

	ret = count;

	if (!pf)
		return -ENXIO;
	add_wait_queue(&pf->rwait, &wait);
	for (;;) {
		/* set state before checking the queue to avoid missing
		 * a wakeup between the check and schedule() */
		set_current_state(TASK_INTERRUPTIBLE);
		skb = skb_dequeue(&pf->rq);
		if (skb)
			break;
		ret = 0;
		if (pf->dead)
			break;
		if (pf->kind == INTERFACE) {
			/*
			 * Return 0 (EOF) on an interface that has no
			 * channels connected, unless it is looping
			 * network traffic (demand mode).
			 */
			struct ppp *ppp = PF_TO_PPP(pf);
			if (ppp->n_channels == 0
			    && (ppp->flags & SC_LOOP_TRAFFIC) == 0)
				break;
		}
		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&pf->rwait, &wait);

	if (!skb)
		goto out;

	ret = -EOVERFLOW;
	if (skb->len > count)
		goto outf;
	ret = -EFAULT;
	if (copy_to_user(buf, skb->data, skb->len))
		goto outf;
	ret = skb->len;

 outf:
	kfree_skb(skb);
 out:
	return ret;
}
/*
 * Queue a user-supplied PPP frame for transmission on the attached
 * unit or channel and kick the transmit machinery.  Returns the byte
 * count on success.
 */
static ssize_t ppp_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct ppp_file *pf = file->private_data;
	struct sk_buff *skb;

	if (!pf)
		return -ENXIO;

	/* leave headroom so lower layers can prepend their headers */
	skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, pf->hdrlen);

	if (copy_from_user(skb_put(skb, count), buf, count)) {
		kfree_skb(skb);
		return -EFAULT;
	}

	skb_queue_tail(&pf->xq, skb);

	if (pf->kind == INTERFACE)
		ppp_xmit_process(PF_TO_PPP(pf));
	else
		ppp_channel_push(PF_TO_CHANNEL(pf));

	return count;
}
/* No kernel lock - fine */
/* No kernel lock - fine */
/*
 * poll/select support: always writable; readable when a packet is
 * queued, the unit/channel is dead (POLLHUP too), or an interface has
 * no channels and is not looping traffic (so read would return EOF).
 */
static unsigned int ppp_poll(struct file *file, poll_table *wait)
{
	struct ppp_file *pf = file->private_data;
	unsigned int mask = 0;

	if (!pf)
		return 0;

	poll_wait(file, &pf->rwait, wait);

	mask = POLLOUT | POLLWRNORM;
	if (skb_peek(&pf->rq))
		mask |= POLLIN | POLLRDNORM;

	if (pf->dead) {
		mask |= POLLHUP;
	} else if (pf->kind == INTERFACE) {
		/* see comment in ppp_read */
		struct ppp *ppp = PF_TO_PPP(pf);

		if (ppp->n_channels == 0
		    && (ppp->flags & SC_LOOP_TRAFFIC) == 0)
			mask |= POLLIN | POLLRDNORM;
	}

	return mask;
}
#ifdef CONFIG_PPP_FILTER
/*
 * Copy a BPF filter program from userspace and validate it.
 * On success *p is the kmalloc'd program (or NULL for an empty one)
 * and the return value is the instruction count; on failure a
 * negative errno is returned and nothing is allocated.
 */
static int get_filter(void __user *arg, struct sock_filter **p)
{
	struct sock_fprog uprog;
	struct sock_filter *code;
	int len, err;

	if (copy_from_user(&uprog, arg, sizeof(uprog)))
		return -EFAULT;

	/* an empty program clears the filter */
	if (uprog.len == 0) {
		*p = NULL;
		return 0;
	}

	len = uprog.len * sizeof(struct sock_filter);
	code = kmalloc(len, GFP_KERNEL);
	if (!code)
		return -ENOMEM;

	if (copy_from_user(code, uprog.filter, len))
		err = -EFAULT;
	else
		err = sk_chk_filter(code, uprog.len);

	if (err) {
		kfree(code);
		return err;
	}

	*p = code;
	return uprog.len;
}
#endif /* CONFIG_PPP_FILTER */
/*
 * Main ioctl handler for /dev/ppp.  Dispatch depends on what the file
 * is attached to: unattached files go to ppp_unattached_ioctl(),
 * channel files handle connect/disconnect and forward everything else
 * to the channel driver, and interface (unit) files handle the PPP
 * configuration requests below.  Runs under the BKL.
 */
static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ppp_file *pf = file->private_data;
	struct ppp *ppp;
	int err = -EFAULT, val, val2, i;
	struct ppp_idle idle;
	struct npioctl npi;
	int unit, cflags;
	struct slcompress *vj;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	if (!pf)
		return ppp_unattached_ioctl(pf, file, cmd, arg);

	if (cmd == PPPIOCDETACH) {
		/*
		 * We have to be careful here... if the file descriptor
		 * has been dup'd, we could have another process in the
		 * middle of a poll using the same file *, so we had
		 * better not free the interface data structures -
		 * instead we fail the ioctl.  Even in this case, we
		 * shut down the interface if we are the owner of it.
		 * Actually, we should get rid of PPPIOCDETACH, userland
		 * (i.e. pppd) could achieve the same effect by closing
		 * this fd and reopening /dev/ppp.
		 */
		err = -EINVAL;
		lock_kernel();
		if (pf->kind == INTERFACE) {
			ppp = PF_TO_PPP(pf);
			if (file == ppp->owner)
				ppp_shutdown_interface(ppp);
		}
		if (atomic_long_read(&file->f_count) <= 2) {
			ppp_release(NULL, file);
			err = 0;
		} else
			printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
			       atomic_long_read(&file->f_count));
		unlock_kernel();
		return err;
	}

	if (pf->kind == CHANNEL) {
		struct channel *pch;
		struct ppp_channel *chan;

		lock_kernel();
		pch = PF_TO_CHANNEL(pf);

		switch (cmd) {
		case PPPIOCCONNECT:
			if (get_user(unit, p))
				break;
			err = ppp_connect_channel(pch, unit);
			break;

		case PPPIOCDISCONN:
			err = ppp_disconnect_channel(pch);
			break;

		default:
			/* forward anything else to the channel driver;
			 * chan_sem keeps `chan' alive across the call */
			down_read(&pch->chan_sem);
			chan = pch->chan;
			err = -ENOTTY;
			if (chan && chan->ops->ioctl)
				err = chan->ops->ioctl(chan, cmd, arg);
			up_read(&pch->chan_sem);
		}
		unlock_kernel();
		return err;
	}

	if (pf->kind != INTERFACE) {
		/* can't happen */
		printk(KERN_ERR "PPP: not interface or channel??\n");
		return -EINVAL;
	}

	lock_kernel();
	ppp = PF_TO_PPP(pf);
	switch (cmd) {
	case PPPIOCSMRU:
		if (get_user(val, p))
			break;
		ppp->mru = val;
		err = 0;
		break;

	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		ppp_lock(ppp);
		cflags = ppp->flags & ~val;
		ppp->flags = val & SC_FLAG_BITS;
		ppp_unlock(ppp);
		/* tear down compression state if CCP was just closed */
		if (cflags & SC_CCP_OPEN)
			ppp_ccp_closed(ppp);
		err = 0;
		break;

	case PPPIOCGFLAGS:
		val = ppp->flags | ppp->xstate | ppp->rstate;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	case PPPIOCSCOMPRESS:
		err = ppp_set_compress(ppp, arg);
		break;

	case PPPIOCGUNIT:
		if (put_user(ppp->file.index, p))
			break;
		err = 0;
		break;

	case PPPIOCSDEBUG:
		if (get_user(val, p))
			break;
		ppp->debug = val;
		err = 0;
		break;

	case PPPIOCGDEBUG:
		if (put_user(ppp->debug, p))
			break;
		err = 0;
		break;

	case PPPIOCGIDLE:
		idle.xmit_idle = (jiffies - ppp->last_xmit) / HZ;
		idle.recv_idle = (jiffies - ppp->last_recv) / HZ;
		if (copy_to_user(argp, &idle, sizeof(idle)))
			break;
		err = 0;
		break;

	case PPPIOCSMAXCID:
		if (get_user(val, p))
			break;
		/* high half of val optionally carries the rx slot count */
		val2 = 15;
		if ((val >> 16) != 0) {
			val2 = val >> 16;
			val &= 0xffff;
		}
		vj = slhc_init(val2+1, val+1);
		if (!vj) {
			printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
			err = -ENOMEM;
			break;
		}
		ppp_lock(ppp);
		if (ppp->vj)
			slhc_free(ppp->vj);
		ppp->vj = vj;
		ppp_unlock(ppp);
		err = 0;
		break;

	case PPPIOCGNPMODE:
	case PPPIOCSNPMODE:
		if (copy_from_user(&npi, argp, sizeof(npi)))
			break;
		err = proto_to_npindex(npi.protocol);
		if (err < 0)
			break;
		i = err;
		if (cmd == PPPIOCGNPMODE) {
			err = -EFAULT;
			npi.mode = ppp->npmode[i];
			if (copy_to_user(argp, &npi, sizeof(npi)))
				break;
		} else {
			ppp->npmode[i] = npi.mode;
			/* we may be able to transmit more packets now (??) */
			netif_wake_queue(ppp->dev);
		}
		err = 0;
		break;

#ifdef CONFIG_PPP_FILTER
	case PPPIOCSPASS:
	{
		struct sock_filter *code;
		err = get_filter(argp, &code);
		if (err >= 0) {
			ppp_lock(ppp);
			kfree(ppp->pass_filter);
			ppp->pass_filter = code;
			ppp->pass_len = err;
			ppp_unlock(ppp);
			err = 0;
		}
		break;
	}
	case PPPIOCSACTIVE:
	{
		struct sock_filter *code;
		err = get_filter(argp, &code);
		if (err >= 0) {
			ppp_lock(ppp);
			kfree(ppp->active_filter);
			ppp->active_filter = code;
			ppp->active_len = err;
			ppp_unlock(ppp);
			err = 0;
		}
		break;
	}
#endif /* CONFIG_PPP_FILTER */

#ifdef CONFIG_PPP_MULTILINK
	case PPPIOCSMRRU:
		if (get_user(val, p))
			break;
		ppp_recv_lock(ppp);
		ppp->mrru = val;
		ppp_recv_unlock(ppp);
		err = 0;
		break;
#endif /* CONFIG_PPP_MULTILINK */

	default:
		err = -ENOTTY;
	}
	unlock_kernel();
	return err;
}
/*
 * Handle ioctls on a /dev/ppp fd that is not yet attached to a unit or
 * channel: PPPIOCNEWUNIT creates a unit, PPPIOCATTACH binds the fd to
 * an existing unit, PPPIOCATTCHAN binds it to an existing channel.
 * Returns 0 on success or a negative errno.
 */
static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
                unsigned int cmd, unsigned long arg)
{
    int unit, err = -EFAULT;
    struct ppp *ppp;
    struct channel *chan;
    int __user *p = (int __user *)arg;

    lock_kernel();  /* BKL serializes against the other ppp ioctl paths */
    switch (cmd) {
    case PPPIOCNEWUNIT:
        /* Create a new ppp unit */
        if (get_user(unit, p))
            break;  /* err stays -EFAULT */
        ppp = ppp_create_interface(unit, &err);
        if (!ppp)
            break;  /* err was set by ppp_create_interface() */
        file->private_data = &ppp->file;
        ppp->owner = file;
        err = -EFAULT;
        /* report the actual unit number back to userland */
        if (put_user(ppp->file.index, p))
            break;
        err = 0;
        break;

    case PPPIOCATTACH:
        /* Attach to an existing ppp unit */
        if (get_user(unit, p))
            break;
        mutex_lock(&all_ppp_mutex);
        err = -ENXIO;
        ppp = ppp_find_unit(unit);
        if (ppp) {
            /* take a reference on the unit's file on behalf of this fd */
            atomic_inc(&ppp->file.refcnt);
            file->private_data = &ppp->file;
            err = 0;
        }
        mutex_unlock(&all_ppp_mutex);
        break;

    case PPPIOCATTCHAN:
        /* Attach to an existing channel by its index */
        if (get_user(unit, p))
            break;
        spin_lock_bh(&all_channels_lock);
        err = -ENXIO;
        chan = ppp_find_channel(unit);
        if (chan) {
            atomic_inc(&chan->file.refcnt);
            file->private_data = &chan->file;
            err = 0;
        }
        spin_unlock_bh(&all_channels_lock);
        break;

    default:
        err = -ENOTTY;
    }
    unlock_kernel();
    return err;
}
/* File operations for the /dev/ppp character device. */
static const struct file_operations ppp_device_fops = {
    .owner          = THIS_MODULE,
    .read           = ppp_read,
    .write          = ppp_write,
    .poll           = ppp_poll,
    .unlocked_ioctl = ppp_ioctl,
    .open           = ppp_open,
    .release        = ppp_release
};
/* Fixed major number of the /dev/ppp character device. */
#define PPP_MAJOR 108
/* Called at boot time if ppp is compiled into the kernel,
   or at module load time (from init_module) if compiled as a module. */
/*
 * Registers the /dev/ppp char device, creates the "ppp" device class
 * and the device node.  On class_create() failure the chrdev is
 * unregistered again.  Returns 0 on success or a negative errno.
 * NOTE(review): device_create()'s return value is not checked here —
 * presumably tolerated because only the class/chrdev are fatal.
 */
static int __init ppp_init(void)
{
    int err;

    printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
    err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
    if (!err) {
        ppp_class = class_create(THIS_MODULE, "ppp");
        if (IS_ERR(ppp_class)) {
            err = PTR_ERR(ppp_class);
            goto out_chrdev;
        }
        device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL,
                  "ppp");
    }

out:
    /* common exit: err != 0 means some registration step failed */
    if (err)
        printk(KERN_ERR "failed to register PPP device (%d)\n", err);
    return err;

out_chrdev:
    /* undo the chrdev registration before reporting the error */
    unregister_chrdev(PPP_MAJOR, "ppp");
    goto out;
}
/*
 * Network interface unit routines.
 */

/*
 * Transmit a packet handed down by the network stack.  The 2-byte PPP
 * protocol number is pushed on the front, the skb is queued on the
 * unit's transmit queue and ppp_xmit_process() is kicked to push it
 * down a channel.  Always returns 0; the skb is consumed either way.
 */
static int
ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct ppp *ppp = (struct ppp *) dev->priv;
    int npi, proto;
    unsigned char *pp;

    npi = ethertype_to_npindex(ntohs(skb->protocol));
    if (npi < 0)
        goto outf;  /* not a protocol we carry */

    /* Drop, accept or reject the packet */
    switch (ppp->npmode[npi]) {
    case NPMODE_PASS:
        break;
    case NPMODE_QUEUE:
        /* it would be nice to have a way to tell the network
           system to queue this one up for later. */
        goto outf;
    case NPMODE_DROP:
    case NPMODE_ERROR:
        goto outf;
    }

    /* Put the 2-byte PPP protocol number on the front,
       making sure there is room for the address and control fields. */
    if (skb_cow_head(skb, PPP_HDRLEN))
        goto outf;
    pp = skb_push(skb, 2);
    proto = npindex_to_proto[npi];
    pp[0] = proto >> 8;
    pp[1] = proto;

    /* stop the queue; ppp_xmit_process() rewakes it once drained */
    netif_stop_queue(dev);
    skb_queue_tail(&ppp->file.xq, skb);
    ppp_xmit_process(ppp);
    return 0;

 outf:
    kfree_skb(skb);
    ++ppp->dev->stats.tx_dropped;
    return 0;
}
/*
 * Handle SIOCGPPP* ioctls on the network interface: copy link,
 * compressor and version information out to userland.
 * Returns 0 on success, -EFAULT if the copyout fails, -EINVAL for
 * unrecognized commands.
 */
static int
ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    struct ppp *ppp = dev->priv;
    int err = -EFAULT;
    void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
    struct ppp_stats stats;
    struct ppp_comp_stats cstats;
    char *vers;

    switch (cmd) {
    case SIOCGPPPSTATS:
        ppp_get_stats(ppp, &stats);
        if (copy_to_user(addr, &stats, sizeof(stats)))
            break;
        err = 0;
        break;

    case SIOCGPPPCSTATS:
        /* compression stats; left zeroed when no [de]compressor is active */
        memset(&cstats, 0, sizeof(cstats));
        if (ppp->xc_state)
            ppp->xcomp->comp_stat(ppp->xc_state, &cstats.c);
        if (ppp->rc_state)
            ppp->rcomp->decomp_stat(ppp->rc_state, &cstats.d);
        if (copy_to_user(addr, &cstats, sizeof(cstats)))
            break;
        err = 0;
        break;

    case SIOCGPPPVER:
        vers = PPP_VERSION;
        /* copy including the terminating NUL */
        if (copy_to_user(addr, vers, strlen(vers) + 1))
            break;
        err = 0;
        break;

    default:
        err = -EINVAL;
    }

    return err;
}
/*
 * Fill in the net_device defaults for a PPP interface.
 */
static void ppp_setup(struct net_device *dev)
{
    /* Point-to-point link: no hardware address, no ARP. */
    dev->type = ARPHRD_PPP;
    dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
    dev->addr_len = 0;

    /* Framing and queueing defaults. */
    dev->hard_header_len = PPP_HDRLEN;
    dev->mtu = PPP_MTU;
    dev->tx_queue_len = 3;
}
/*
 * Transmit-side routines.
 */
/*
 * Called to do any work queued up on the transmit side
 * that can now be done.
 *
 * Pushes any pending fragment down a channel, then feeds queued
 * packets from the unit's transmit queue into ppp_send_frame() until
 * one becomes pending again.  Wakes the netdev queue once the backlog
 * is fully drained.  Takes the unit's xmit lock.
 */
static void
ppp_xmit_process(struct ppp *ppp)
{
    struct sk_buff *skb;

    ppp_xmit_lock(ppp);
    if (!ppp->closing) {
        ppp_push(ppp);
        while (!ppp->xmit_pending
               && (skb = skb_dequeue(&ppp->file.xq)))
            ppp_send_frame(ppp, skb);
        /* If there's no work left to do, tell the core net
           code that we can accept some more. */
        if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
            netif_wake_queue(ppp->dev);
    }
    ppp_xmit_unlock(ppp);
}
/*
 * Allocate a suitably sized skb and compress the given frame into it.
 * Returns the skb carrying the frame to transmit — the new skb when it
 * compressed, the original skb when it did not — or NULL when the
 * frame must be dropped.  Consumes whichever skb it does not return.
 */
static inline struct sk_buff *
pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
{
    struct sk_buff *new_skb;
    int len;
    int new_skb_size = ppp->dev->mtu +
        ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
    int compressor_skb_size = ppp->dev->mtu +
        ppp->xcomp->comp_extra + PPP_HDRLEN;

    new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
    if (!new_skb) {
        if (net_ratelimit())
            printk(KERN_ERR "PPP: no memory (comp pkt)\n");
        return NULL;
    }
    if (ppp->dev->hard_header_len > PPP_HDRLEN)
        skb_reserve(new_skb,
                ppp->dev->hard_header_len - PPP_HDRLEN);

    /* compressor still expects A/C bytes in hdr */
    len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
                   new_skb->data, skb->len + 2,
                   compressor_skb_size);
    if (len > 0 && (ppp->flags & SC_CCP_UP)) {
        /* compressed: return new_skb without the A/C bytes */
        kfree_skb(skb);
        skb = new_skb;
        skb_put(skb, len);
        skb_pull(skb, 2);   /* pull off A/C bytes */
    } else if (len == 0) {
        /* didn't compress, or CCP not up yet */
        kfree_skb(new_skb);
        new_skb = skb;
    } else {
        /*
         * (len < 0)
         * MPPE requires that we do not send unencrypted
         * frames. The compressor will return -1 if we
         * should drop the frame. We cannot simply test
         * the compress_proto because MPPE and MPPC share
         * the same number.
         */
        if (net_ratelimit())
            printk(KERN_ERR "ppp: compressor dropped pkt\n");
        kfree_skb(skb);
        kfree_skb(new_skb);
        new_skb = NULL;
    }
    return new_skb;
}
/*
 * Compress and send a frame.
 * The caller should have locked the xmit path,
 * and xmit_pending should be 0.
 *
 * Applies (in order): pass/active socket filters, VJ TCP header
 * compression for PPP_IP, CCP peeking for PPP_CCP, then whole-frame
 * compression.  The frame either becomes xmit_pending and is pushed,
 * is looped back to pppd (demand dialling), or is dropped.
 * Consumes the skb in all cases.
 */
static void
ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
{
    int proto = PPP_PROTO(skb);
    struct sk_buff *new_skb;
    int len;
    unsigned char *cp;

    if (proto < 0x8000) {   /* data (not control) protocols only */
#ifdef CONFIG_PPP_FILTER
        /* check if we should pass this packet */
        /* the filter instructions are constructed assuming
           a four-byte PPP header on each packet */
        *skb_push(skb, 2) = 1;  /* 1 marks outbound for the filter */
        if (ppp->pass_filter
            && sk_run_filter(skb, ppp->pass_filter,
                     ppp->pass_len) == 0) {
            if (ppp->debug & 1)
                printk(KERN_DEBUG "PPP: outbound frame not passed\n");
            kfree_skb(skb);
            return;
        }
        /* if this packet passes the active filter, record the time */
        if (!(ppp->active_filter
              && sk_run_filter(skb, ppp->active_filter,
                       ppp->active_len) == 0))
            ppp->last_xmit = jiffies;
        skb_pull(skb, 2);   /* drop the filter pseudo-header byte pair */
#else
        /* for data packets, record the time */
        ppp->last_xmit = jiffies;
#endif /* CONFIG_PPP_FILTER */
    }

    ++ppp->dev->stats.tx_packets;
    ppp->dev->stats.tx_bytes += skb->len - 2;   /* exclude protocol bytes */

    switch (proto) {
    case PPP_IP:
        if (!ppp->vj || (ppp->flags & SC_COMP_TCP) == 0)
            break;
        /* try to do VJ TCP header compression */
        new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
                    GFP_ATOMIC);
        if (!new_skb) {
            printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
            goto drop;
        }
        skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
        cp = skb->data + 2;
        len = slhc_compress(ppp->vj, cp, skb->len - 2,
                    new_skb->data + 2, &cp,
                    !(ppp->flags & SC_NO_TCP_CCID));
        if (cp == skb->data + 2) {
            /* didn't compress */
            kfree_skb(new_skb);
        } else {
            /* slhc tags the packet type in the first data byte */
            if (cp[0] & SL_TYPE_COMPRESSED_TCP) {
                proto = PPP_VJC_COMP;
                cp[0] &= ~SL_TYPE_COMPRESSED_TCP;
            } else {
                proto = PPP_VJC_UNCOMP;
                cp[0] = skb->data[2];
            }
            kfree_skb(skb);
            skb = new_skb;
            cp = skb_put(skb, len + 2);
            cp[0] = 0;
            cp[1] = proto;
        }
        break;

    case PPP_CCP:
        /* peek at outbound CCP frames */
        ppp_ccp_peek(ppp, skb, 0);
        break;
    }

    /* try to do packet compression */
    if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state
        && proto != PPP_LCP && proto != PPP_CCP) {
        if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
            /* e.g. MPPE: never send plaintext when compression is required */
            if (net_ratelimit())
                printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
            goto drop;
        }
        skb = pad_compress_skb(ppp, skb);
        if (!skb)
            goto drop;
    }

    /*
     * If we are waiting for traffic (demand dialling),
     * queue it up for pppd to receive.
     */
    if (ppp->flags & SC_LOOP_TRAFFIC) {
        if (ppp->file.rq.qlen > PPP_MAX_RQLEN)
            goto drop;
        skb_queue_tail(&ppp->file.rq, skb);
        wake_up_interruptible(&ppp->file.rwait);
        return;
    }

    ppp->xmit_pending = skb;
    ppp_push(ppp);
    return;

 drop:
    if (skb)
        kfree_skb(skb);
    ++ppp->dev->stats.tx_errors;
}
/*
 * Try to send the frame in xmit_pending.
 * The caller should have the xmit path locked.
 *
 * Without multilink the frame goes down the first channel in the
 * list; with multilink it is fragmented across channels by
 * ppp_mp_explode().  xmit_pending is cleared once the frame has been
 * handed off (or dropped because there is nowhere to send it).
 */
static void
ppp_push(struct ppp *ppp)
{
    struct list_head *list;
    struct channel *pch;
    struct sk_buff *skb = ppp->xmit_pending;

    if (!skb)
        return;

    list = &ppp->channels;
    if (list_empty(list)) {
        /* nowhere to send the packet, just drop it */
        ppp->xmit_pending = NULL;
        kfree_skb(skb);
        return;
    }

    if ((ppp->flags & SC_MULTILINK) == 0) {
        /* not doing multilink: send it down the first channel */
        list = list->next;
        pch = list_entry(list, struct channel, clist);

        spin_lock_bh(&pch->downl);  /* protects pch->chan against unregister */
        if (pch->chan) {
            if (pch->chan->ops->start_xmit(pch->chan, skb))
                ppp->xmit_pending = NULL;
            /* else: channel is busy; leave the frame pending */
        } else {
            /* channel got unregistered */
            kfree_skb(skb);
            ppp->xmit_pending = NULL;
        }
        spin_unlock_bh(&pch->downl);
        return;
    }

#ifdef CONFIG_PPP_MULTILINK
    /* Multilink: fragment the packet over as many links
       as can take the packet at the moment. */
    if (!ppp_mp_explode(ppp, skb))
        return; /* frame stays in xmit_pending for a later retry */
#endif /* CONFIG_PPP_MULTILINK */

    ppp->xmit_pending = NULL;
    kfree_skb(skb);
}
#ifdef CONFIG_PPP_MULTILINK
/*
 * Divide a packet to be transmitted into fragments and
 * send them out the individual links.
 *
 * Returns 1 when the frame has been fully handed off (or abandoned),
 * 0 when too few channels are free and the frame should stay in
 * xmit_pending.  Each fragment gets an MP header (long or short
 * sequence format per SC_MP_XSHORTSEQ) carrying the B/E bits and the
 * transmit sequence number.
 */
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{
    int len, fragsize;
    int i, bits, hdrlen, mtu;
    int flen;
    int navail, nfree;
    int nbigger;
    unsigned char *p, *q;
    struct list_head *list;
    struct channel *pch;
    struct sk_buff *frag;
    struct ppp_channel *chan;

    nfree = 0;  /* # channels which have no packet already queued */
    navail = 0; /* total # of usable channels (not deregistered) */
    hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
    i = 0;
    list_for_each_entry(pch, &ppp->channels, clist) {
        navail += pch->avail = (pch->chan != NULL);
        if (pch->avail) {
            /* avail == 2 marks a channel that is free right now */
            if (skb_queue_empty(&pch->file.xq) ||
                !pch->had_frag) {
                pch->avail = 2;
                ++nfree;
            }
            if (!pch->had_frag && i < ppp->nxchan)
                ppp->nxchan = i;
        }
        ++i;
    }

    /*
     * Don't start sending this packet unless at least half of
     * the channels are free.  This gives much better TCP
     * performance if we have a lot of channels.
     */
    if (nfree == 0 || nfree < navail / 2)
        return 0;   /* can't take now, leave it in xmit_pending */

    /* Do protocol field compression (XXX this should be optional) */
    p = skb->data;
    len = skb->len;
    if (*p == 0) {
        ++p;
        --len;
    }

    /*
     * Decide on fragment size.
     * We create a fragment for each free channel regardless of
     * how small they are (i.e. even 0 length) in order to minimize
     * the time that it will take to detect when a channel drops
     * a fragment.
     */
    fragsize = len;
    if (nfree > 1)
        fragsize = DIV_ROUND_UP(fragsize, nfree);
    /* nbigger channels get fragsize bytes, the rest get fragsize-1,
       except if nbigger==0, then they all get fragsize. */
    nbigger = len % nfree;

    /* skip to the channel after the one we last used
       and start at that one */
    list = &ppp->channels;
    for (i = 0; i < ppp->nxchan; ++i) {
        list = list->next;
        if (list == &ppp->channels) {
            i = 0;
            break;
        }
    }

    /* create a fragment for each channel */
    bits = B;   /* first fragment carries the Begin bit */
    while (nfree > 0 || len > 0) {
        list = list->next;
        if (list == &ppp->channels) {
            i = 0;
            continue;   /* wrapped past the list head */
        }
        pch = list_entry(list, struct channel, clist);
        ++i;
        if (!pch->avail)
            continue;

        /*
         * Skip this channel if it has a fragment pending already and
         * we haven't given a fragment to all of the free channels.
         */
        if (pch->avail == 1) {
            if (nfree > 0)
                continue;
        } else {
            --nfree;
            pch->avail = 1;
        }

        /* check the channel's mtu and whether it is still attached. */
        spin_lock_bh(&pch->downl);
        if (pch->chan == NULL) {
            /* can't use this channel, it's being deregistered */
            spin_unlock_bh(&pch->downl);
            pch->avail = 0;
            if (--navail == 0)
                break;
            continue;
        }

        /*
         * Create a fragment for this channel of
         * min(max(mtu+2-hdrlen, 4), fragsize, len) bytes.
         * If mtu+2-hdrlen < 4, that is a ridiculously small
         * MTU, so we use mtu = 2 + hdrlen.
         */
        if (fragsize > len)
            fragsize = len;
        flen = fragsize;
        mtu = pch->chan->mtu + 2 - hdrlen;
        if (mtu < 4)
            mtu = 4;
        if (flen > mtu)
            flen = mtu;
        if (flen == len && nfree == 0)
            bits |= E;  /* last fragment carries the End bit */
        frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
        if (!frag)
            goto noskb;
        q = skb_put(frag, flen + hdrlen);

        /* make the MP header */
        q[0] = PPP_MP >> 8;
        q[1] = PPP_MP;
        if (ppp->flags & SC_MP_XSHORTSEQ) {
            /* short format: B/E bits + 12-bit sequence number */
            q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
            q[3] = ppp->nxseq;
        } else {
            /* long format: B/E bits + 24-bit sequence number */
            q[2] = bits;
            q[3] = ppp->nxseq >> 16;
            q[4] = ppp->nxseq >> 8;
            q[5] = ppp->nxseq;
        }

        /*
         * Copy the data in.
         * Unfortunately there is a bug in older versions of
         * the Linux PPP multilink reconstruction code where it
         * drops 0-length fragments.  Therefore we make sure the
         * fragment has at least one byte of data.  Any bytes
         * we add in this situation will end up as padding on the
         * end of the reconstructed packet.
         */
        if (flen == 0)
            *skb_put(frag, 1) = 0;
        else
            memcpy(q + hdrlen, p, flen);

        /* try to send it down the channel */
        chan = pch->chan;
        if (!skb_queue_empty(&pch->file.xq) ||
            !chan->ops->start_xmit(chan, frag))
            skb_queue_tail(&pch->file.xq, frag);
        pch->had_frag = 1;
        p += flen;
        len -= flen;
        ++ppp->nxseq;
        bits = 0;   /* only the first fragment has B set */
        spin_unlock_bh(&pch->downl);

        if (--nbigger == 0 && fragsize > 0)
            --fragsize; /* remaining channels get one byte less */
    }
    ppp->nxchan = i;    /* remember where to start next time */

    return 1;

 noskb:
    spin_unlock_bh(&pch->downl);
    if (ppp->debug & 1)
        printk(KERN_ERR "PPP: no memory (fragment)\n");
    ++ppp->dev->stats.tx_errors;
    ++ppp->nxseq;
    return 1;   /* abandon the frame */
}
#endif /* CONFIG_PPP_MULTILINK */
/*
 * Try to send data out on a channel.
 *
 * Drains the channel's own transmit queue as far as the channel will
 * accept, then — if the queue is empty — kicks the attached unit's
 * transmit machinery so it can feed more frames down.
 */
static void
ppp_channel_push(struct channel *pch)
{
    struct sk_buff *skb;
    struct ppp *ppp;

    spin_lock_bh(&pch->downl);
    if (pch->chan) {
        while (!skb_queue_empty(&pch->file.xq)) {
            skb = skb_dequeue(&pch->file.xq);
            if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
                /* put the packet back and try again later */
                skb_queue_head(&pch->file.xq, skb);
                break;
            }
        }
    } else {
        /* channel got deregistered */
        skb_queue_purge(&pch->file.xq);
    }
    spin_unlock_bh(&pch->downl);

    /* see if there is anything from the attached unit to be sent */
    if (skb_queue_empty(&pch->file.xq)) {
        read_lock_bh(&pch->upl);    /* stabilizes pch->ppp */
        ppp = pch->ppp;
        if (ppp)
            ppp_xmit_process(ppp);
        read_unlock_bh(&pch->upl);
    }
}
/*
 * Receive-side routines.
 */

/*
 * Misuse a few fields of the skb for MP reconstruction:
 * skb->sequence (aliasing skb->priority) holds the expanded 32-bit MP
 * sequence number, and skb->BEbits (aliasing skb->cb[0]) holds the
 * begin/end fragment bits from the MP header.
 */
#define sequence priority
#define BEbits cb[0]
/*
 * Hand a received frame to the unit's receive path under the
 * receive-side lock.  A unit that is being torn down simply
 * discards the frame.
 */
static inline void
ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
    ppp_recv_lock(ppp);
    if (ppp->closing)
        kfree_skb(skb);
    else
        ppp_receive_frame(ppp, skb, pch);
    ppp_recv_unlock(ppp);
}
/*
 * Entry point for channel drivers delivering a received frame.
 * Control frames (protocol >= 0xc000 or CCP fragments) and frames on
 * unattached channels are queued for the channel's own /dev/ppp
 * reader; everything else goes to the attached unit via ppp_do_recv().
 * Consumes the skb.
 */
void
ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
{
    struct channel *pch = chan->ppp;
    int proto;

    if (!pch || skb->len == 0) {
        kfree_skb(skb);
        return;
    }

    proto = PPP_PROTO(skb);
    read_lock_bh(&pch->upl);    /* stabilizes pch->ppp */
    if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
        /* put it on the channel queue */
        skb_queue_tail(&pch->file.rq, skb);
        /* drop old frames if queue too long */
        while (pch->file.rq.qlen > PPP_MAX_RQLEN
               && (skb = skb_dequeue(&pch->file.rq)))
            kfree_skb(skb);
        wake_up_interruptible(&pch->file.rwait);
    } else {
        ppp_do_recv(pch->ppp, skb, pch);
    }
    read_unlock_bh(&pch->upl);
}
/* Put a 0-length skb in the receive queue as an error indication */
/*
 * Called by channel drivers to report a receive error; the error code
 * is carried in cb[0] of a zero-length skb so the unit's receive path
 * can account it.
 */
void
ppp_input_error(struct ppp_channel *chan, int code)
{
    struct channel *pch = chan->ppp;
    struct sk_buff *skb;

    if (!pch)
        return;

    read_lock_bh(&pch->upl);
    if (pch->ppp) {
        skb = alloc_skb(0, GFP_ATOMIC);
        if (skb) {
            skb->len = 0;       /* probably unnecessary */
            skb->cb[0] = code;  /* stash the error code for the receiver */
            ppp_do_recv(pch->ppp, skb, pch);
        }
    }
    read_unlock_bh(&pch->upl);
}
/*
 * We come in here to process a received frame.
 * The receive side of the ppp unit is locked.
 *
 * Dispatches the frame to multilink reconstruction or the normal
 * receive path; frames too short to carry a protocol number (and the
 * 0-length error indications from ppp_input_error()) are counted and
 * discarded.
 */
static void
ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
    if (pskb_may_pull(skb, 2)) {    /* need at least the protocol field */
#ifdef CONFIG_PPP_MULTILINK
        /* XXX do channel-level decompression here */
        if (PPP_PROTO(skb) == PPP_MP)
            ppp_receive_mp_frame(ppp, skb, pch);
        else
#endif /* CONFIG_PPP_MULTILINK */
            ppp_receive_nonmp_frame(ppp, skb);
        return;
    }

    if (skb->len > 0)
        /* note: a 0-length skb is used as an error indication */
        ++ppp->dev->stats.rx_length_errors;

    kfree_skb(skb);
    ppp_receive_error(ppp);
}
/*
 * Account a receive error on the unit and toss any in-progress VJ
 * decompression state so a corrupted frame cannot desynchronize it.
 */
static void
ppp_receive_error(struct ppp *ppp)
{
    ++ppp->dev->stats.rx_errors;
    if (ppp->vj)
        slhc_toss(ppp->vj);
}
/*
 * Process a complete (non-multilink) received frame: run it through
 * the [de]compressor, undo VJ header compression, peek at CCP frames,
 * then either queue it for pppd (control/unknown protocols) or run the
 * receive filters and hand it to the network stack.  Consumes the skb.
 */
static void
ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
{
    struct sk_buff *ns;
    int proto, len, npi;

    /*
     * Decompress the frame, if compressed.
     * Note that some decompressors need to see uncompressed frames
     * that come in as well as compressed frames.
     */
    if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)
        && (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
        skb = ppp_decompress_frame(ppp, skb);

    /* a fatal decompressor error with mandatory compression kills the frame */
    if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
        goto err;

    proto = PPP_PROTO(skb);
    switch (proto) {
    case PPP_VJC_COMP:
        /* decompress VJ compressed packets */
        if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
            goto err;

        if (skb_tailroom(skb) < 124 || skb_cloned(skb)) {
            /* copy to a new sk_buff with more tailroom */
            ns = dev_alloc_skb(skb->len + 128);
            if (!ns) {
                printk(KERN_ERR"PPP: no memory (VJ decomp)\n");
                goto err;
            }
            skb_reserve(ns, 2);
            skb_copy_bits(skb, 0, skb_put(ns, skb->len), skb->len);
            kfree_skb(skb);
            skb = ns;
        }
        else
            skb->ip_summed = CHECKSUM_NONE;

        len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
        if (len <= 0) {
            printk(KERN_DEBUG "PPP: VJ decompression error\n");
            goto err;
        }
        len += 2;   /* re-add the protocol bytes */
        if (len > skb->len)
            skb_put(skb, len - skb->len);
        else if (len < skb->len)
            skb_trim(skb, len);
        proto = PPP_IP;
        break;

    case PPP_VJC_UNCOMP:
        if (!ppp->vj || (ppp->flags & SC_REJ_COMP_TCP))
            goto err;

        /* Until we fix the decompressor need to make sure
         * data portion is linear.
         */
        if (!pskb_may_pull(skb, skb->len))
            goto err;

        if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
            printk(KERN_ERR "PPP: VJ uncompressed error\n");
            goto err;
        }
        proto = PPP_IP;
        break;

    case PPP_CCP:
        /* inspect inbound CCP negotiation frames */
        ppp_ccp_peek(ppp, skb, 1);
        break;
    }

    ++ppp->dev->stats.rx_packets;
    ppp->dev->stats.rx_bytes += skb->len - 2;   /* exclude protocol bytes */

    npi = proto_to_npindex(proto);
    if (npi < 0) {
        /* control or unknown frame - pass it to pppd */
        skb_queue_tail(&ppp->file.rq, skb);
        /* limit queue length by dropping old frames */
        while (ppp->file.rq.qlen > PPP_MAX_RQLEN
               && (skb = skb_dequeue(&ppp->file.rq)))
            kfree_skb(skb);
        /* wake up any process polling or blocking on read */
        wake_up_interruptible(&ppp->file.rwait);

    } else {
        /* network protocol frame - give it to the kernel */

#ifdef CONFIG_PPP_FILTER
        /* check if the packet passes the pass and active filters */
        /* the filter instructions are constructed assuming
           a four-byte PPP header on each packet */
        if (ppp->pass_filter || ppp->active_filter) {
            if (skb_cloned(skb) &&
                pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                goto err;

            *skb_push(skb, 2) = 0;  /* 0 marks inbound for the filter */
            if (ppp->pass_filter
                && sk_run_filter(skb, ppp->pass_filter,
                         ppp->pass_len) == 0) {
                if (ppp->debug & 1)
                    printk(KERN_DEBUG "PPP: inbound frame "
                           "not passed\n");
                kfree_skb(skb);
                return;
            }
            if (!(ppp->active_filter
                  && sk_run_filter(skb, ppp->active_filter,
                           ppp->active_len) == 0))
                ppp->last_recv = jiffies;
            __skb_pull(skb, 2);
        } else
#endif /* CONFIG_PPP_FILTER */
            ppp->last_recv = jiffies;

        if ((ppp->dev->flags & IFF_UP) == 0
            || ppp->npmode[npi] != NPMODE_PASS) {
            kfree_skb(skb);
        } else {
            /* chop off protocol */
            skb_pull_rcsum(skb, 2);
            skb->dev = ppp->dev;
            skb->protocol = htons(npindex_to_ethertype[npi]);
            skb_reset_mac_header(skb);
            netif_rx(skb);
            ppp->dev->last_rx = jiffies;
        }
    }
    return;

 err:
    kfree_skb(skb);
    ppp_receive_error(ppp);
}
/*
 * Run a received frame through the decompressor.  PPP_COMP frames are
 * decompressed into a fresh skb (which replaces the original);
 * uncompressed frames are shown to the decompressor so it can keep
 * its dictionary in sync.  On error, sets SC_DC_ERROR (and possibly
 * SC_DC_FERROR) and returns the original skb so the caller can pass
 * it to pppd as an error indication.
 */
static struct sk_buff *
ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
{
    int proto = PPP_PROTO(skb);
    struct sk_buff *ns;
    int len;

    /* Until we fix all the decompressor's need to make sure
     * data portion is linear.
     */
    if (!pskb_may_pull(skb, skb->len))
        goto err;

    if (proto == PPP_COMP) {
        int obuff_size;

        switch(ppp->rcomp->compress_proto) {
        case CI_MPPE:
            /* MPPE needs one extra output byte */
            obuff_size = ppp->mru + PPP_HDRLEN + 1;
            break;
        default:
            obuff_size = ppp->mru + PPP_HDRLEN;
            break;
        }

        ns = dev_alloc_skb(obuff_size);
        if (!ns) {
            printk(KERN_ERR "ppp_decompress_frame: no memory\n");
            goto err;
        }
        /* the decompressor still expects the A/C bytes in the hdr */
        len = ppp->rcomp->decompress(ppp->rc_state, skb->data - 2,
                skb->len + 2, ns->data, obuff_size);
        if (len < 0) {
            /* Pass the compressed frame to pppd as an
               error indication. */
            if (len == DECOMP_FATALERROR)
                ppp->rstate |= SC_DC_FERROR;
            kfree_skb(ns);
            goto err;
        }

        kfree_skb(skb);
        skb = ns;
        skb_put(skb, len);
        skb_pull(skb, 2);   /* pull off the A/C bytes */

    } else {
        /* Uncompressed frame - pass to decompressor so it
           can update its dictionary if necessary. */
        if (ppp->rcomp->incomp)
            ppp->rcomp->incomp(ppp->rc_state, skb->data - 2,
                       skb->len + 2);
    }

    return skb;

 err:
    ppp->rstate |= SC_DC_ERROR;
    ppp_receive_error(ppp);
    return skb;
}
#ifdef CONFIG_PPP_MULTILINK
/*
 * Receive a multilink frame.
 * We put it on the reconstruction queue and then pull off
 * as many completed frames as we can.
 *
 * Decodes the MP header (short or long sequence format per
 * SC_MP_SHORTSEQ), expands the sequence number to 32 bits near
 * ppp->minseq, drops fragments that arrive before nextseq, updates
 * minseq across all channels, inserts the fragment, then drains any
 * now-complete packets through ppp_receive_nonmp_frame().
 */
static void
ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
    u32 mask, seq;
    struct channel *ch;
    int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;

    if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
        goto err;       /* no good, throw it away */

    /* Decode sequence number and begin/end bits */
    if (ppp->flags & SC_MP_SHORTSEQ) {
        seq = ((skb->data[2] & 0x0f) << 8) | skb->data[3];
        mask = 0xfff;   /* 12-bit sequence space */
    } else {
        seq = (skb->data[3] << 16) | (skb->data[4] << 8)| skb->data[5];
        mask = 0xffffff;    /* 24-bit sequence space */
    }
    skb->BEbits = skb->data[2];
    skb_pull(skb, mphdrlen);    /* pull off PPP and MP headers */

    /*
     * Do protocol ID decompression on the first fragment of each packet.
     */
    if ((skb->BEbits & B) && (skb->data[0] & 1))
        *skb_push(skb, 1) = 0;

    /*
     * Expand sequence number to 32 bits, making it as close
     * as possible to ppp->minseq.
     */
    seq |= ppp->minseq & ~mask;
    if ((int)(ppp->minseq - seq) > (int)(mask >> 1))
        seq += mask + 1;
    else if ((int)(seq - ppp->minseq) > (int)(mask >> 1))
        seq -= mask + 1;    /* should never happen */
    skb->sequence = seq;
    pch->lastseq = seq;

    /*
     * If this packet comes before the next one we were expecting,
     * drop it.
     */
    if (seq_before(seq, ppp->nextseq)) {
        kfree_skb(skb);
        ++ppp->dev->stats.rx_dropped;
        ppp_receive_error(ppp);
        return;
    }

    /*
     * Reevaluate minseq, the minimum over all channels of the
     * last sequence number received on each channel.  Because of
     * the increasing sequence number rule, we know that any fragment
     * before `minseq' which hasn't arrived is never going to arrive.
     * The list of channels can't change because we have the receive
     * side of the ppp unit locked.
     */
    list_for_each_entry(ch, &ppp->channels, clist) {
        if (seq_before(ch->lastseq, seq))
            seq = ch->lastseq;
    }
    if (seq_before(ppp->minseq, seq))
        ppp->minseq = seq;

    /* Put the fragment on the reconstruction queue */
    ppp_mp_insert(ppp, skb);

    /* If the queue is getting long, don't wait any longer for packets
       before the start of the queue. */
    if (skb_queue_len(&ppp->mrq) >= PPP_MP_MAX_QLEN) {
        struct sk_buff *skb = skb_peek(&ppp->mrq);
        if (seq_before(ppp->minseq, skb->sequence))
            ppp->minseq = skb->sequence;
    }

    /* Pull completed packets off the queue and receive them. */
    while ((skb = ppp_mp_reconstruct(ppp)))
        ppp_receive_nonmp_frame(ppp, skb);

    return;

 err:
    kfree_skb(skb);
    ppp_receive_error(ppp);
}
/*
 * Insert a fragment on the MP reconstruction queue.
 * The queue is ordered by increasing sequence number.
 */
static void
ppp_mp_insert(struct ppp *ppp, struct sk_buff *skb)
{
    struct sk_buff *p;
    struct sk_buff_head *list = &ppp->mrq;
    u32 seq = skb->sequence;

    /* N.B. we don't need to lock the list lock because we have the
       ppp unit receive-side lock. */
    skb_queue_walk(list, p) {
        if (seq_before(seq, p->sequence))
            break;  /* insert in front of the first later fragment */
    }
    __skb_queue_before(list, p, skb);
}
/*
 * Reconstruct a packet from the MP fragment queue.
 * We go through increasing sequence numbers until we find a
 * complete packet, or we get to the sequence number for a fragment
 * which hasn't arrived but might still do so.
 *
 * Returns the reconstructed skb, or NULL if no complete packet is
 * available yet.  Fragments that have been consumed, or that belong
 * to packets proven incomplete, are unlinked and freed.
 */
static struct sk_buff *
ppp_mp_reconstruct(struct ppp *ppp)
{
    u32 seq = ppp->nextseq;
    u32 minseq = ppp->minseq;
    struct sk_buff_head *list = &ppp->mrq;
    struct sk_buff *p, *next;
    struct sk_buff *head, *tail;
    struct sk_buff *skb = NULL;
    int lost = 0, len = 0;

    if (ppp->mrru == 0)     /* do nothing until mrru is set */
        return NULL;
    head = list->next;      /* start of the span we may discard */
    tail = NULL;            /* non-NULL once a complete packet is found */
    for (p = head; p != (struct sk_buff *) list; p = next) {
        next = p->next;
        if (seq_before(p->sequence, seq)) {
            /* this can't happen, anyway ignore the skb */
            printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
                   p->sequence, seq);
            head = next;
            continue;
        }
        if (p->sequence != seq) {
            /* Fragment `seq' is missing.  If it is after
               minseq, it might arrive later, so stop here. */
            if (seq_after(seq, minseq))
                break;
            /* Fragment `seq' is lost, keep going. */
            lost = 1;
            seq = seq_before(minseq, p->sequence)?
                minseq + 1: p->sequence;
            next = p;   /* re-examine p at the new seq */
            continue;
        }

        /*
         * At this point we know that all the fragments from
         * ppp->nextseq to seq are either present or lost.
         * Also, there are no complete packets in the queue
         * that have no missing fragments and end before this
         * fragment.
         */

        /* B bit set indicates this fragment starts a packet */
        if (p->BEbits & B) {
            head = p;
            lost = 0;
            len = 0;
        }

        len += p->len;

        /* Got a complete packet yet? */
        if (lost == 0 && (p->BEbits & E) && (head->BEbits & B)) {
            if (len > ppp->mrru + 2) {
                ++ppp->dev->stats.rx_length_errors;
                printk(KERN_DEBUG "PPP: reconstructed packet"
                       " is too long (%d)\n", len);
            } else if (p == head) {
                /* fragment is complete packet - reuse skb */
                tail = p;
                skb = skb_get(p);
                break;
            } else if ((skb = dev_alloc_skb(len)) == NULL) {
                ++ppp->dev->stats.rx_missed_errors;
                printk(KERN_DEBUG "PPP: no memory for "
                       "reconstructed packet");
            } else {
                tail = p;
                break;
            }
            ppp->nextseq = seq + 1; /* skip past the bad/unusable packet */
        }

        /*
         * If this is the ending fragment of a packet,
         * and we haven't found a complete valid packet yet,
         * we can discard up to and including this fragment.
         */
        if (p->BEbits & E)
            head = next;

        ++seq;
    }

    /* If we have a complete packet, copy it all into one skb. */
    if (tail != NULL) {
        /* If we have discarded any fragments,
           signal a receive error. */
        if (head->sequence != ppp->nextseq) {
            if (ppp->debug & 1)
                printk(KERN_DEBUG "  missed pkts %u..%u\n",
                       ppp->nextseq, head->sequence-1);
            ++ppp->dev->stats.rx_dropped;
            ppp_receive_error(ppp);
        }

        if (head != tail)
            /* copy to a single skb */
            for (p = head; p != tail->next; p = p->next)
                skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
        ppp->nextseq = tail->sequence + 1;
        head = tail->next;
    }

    /* Discard all the skbuffs that we have copied the data out of
       or that we can't use. */
    while ((p = list->next) != head) {
        __skb_unlink(p, list);
        kfree_skb(p);
    }

    return skb;
}
#endif /* CONFIG_PPP_MULTILINK */
/*
 * Channel interface.
 */

/*
 * Create a new, unattached ppp channel.
 *
 * Allocates the internal channel struct, links it to the driver's
 * ppp_channel, initializes its locks/queues, assigns it a channel
 * index and puts it on the new_channels list.
 * Returns 0 on success or -ENOMEM.
 */
int
ppp_register_channel(struct ppp_channel *chan)
{
    struct channel *pch;

    pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
    if (!pch)
        return -ENOMEM;
    pch->ppp = NULL;        /* not attached to a unit yet */
    pch->chan = chan;
    chan->ppp = pch;
    init_ppp_file(&pch->file, CHANNEL);
    pch->file.hdrlen = chan->hdrlen;
#ifdef CONFIG_PPP_MULTILINK
    pch->lastseq = -1;      /* no MP fragment received yet */
#endif /* CONFIG_PPP_MULTILINK */
    init_rwsem(&pch->chan_sem);
    spin_lock_init(&pch->downl);
    rwlock_init(&pch->upl);
    spin_lock_bh(&all_channels_lock);
    pch->file.index = ++last_channel_index;
    list_add(&pch->list, &new_channels);
    atomic_inc(&channel_count);
    spin_unlock_bh(&all_channels_lock);
    return 0;
}
/*
 * Return the index of a channel, or -1 if the channel has no
 * generic-layer state attached.
 */
int ppp_channel_index(struct ppp_channel *chan)
{
    struct channel *pch = chan->ppp;

    return pch ? pch->file.index : -1;
}
/*
 * Return the PPP unit number to which a channel is connected.
 * Returns -1 if the channel is not attached to a unit.
 */
int ppp_unit_number(struct ppp_channel *chan)
{
    struct channel *pch = chan->ppp;
    int unit = -1;

    if (pch) {
        read_lock_bh(&pch->upl);    /* stabilizes pch->ppp */
        if (pch->ppp)
            unit = pch->ppp->file.index;
        read_unlock_bh(&pch->upl);
    }
    return unit;
}
/*
 * Disconnect a channel from the generic layer.
 * This must be called in process context.
 *
 * Order matters here: clear chan->ppp first, then quiesce in-flight
 * start_xmit/ioctl calls via chan_sem, clear pch->chan under downl,
 * detach from any unit, unlink from the channel list, mark the file
 * dead and drop the reference (freeing the channel if it was last).
 */
void
ppp_unregister_channel(struct ppp_channel *chan)
{
    struct channel *pch = chan->ppp;

    if (!pch)
        return;     /* should never happen */

    chan->ppp = NULL;

    /*
     * This ensures that we have returned from any calls into the
     * the channel's start_xmit or ioctl routine before we proceed.
     */
    down_write(&pch->chan_sem);
    spin_lock_bh(&pch->downl);
    pch->chan = NULL;
    spin_unlock_bh(&pch->downl);
    up_write(&pch->chan_sem);
    ppp_disconnect_channel(pch);
    spin_lock_bh(&all_channels_lock);
    list_del(&pch->list);
    spin_unlock_bh(&all_channels_lock);
    pch->file.dead = 1;     /* wake readers so they see the channel is gone */
    wake_up_interruptible(&pch->file.rwait);
    if (atomic_dec_and_test(&pch->file.refcnt))
        ppp_destroy_channel(pch);
}
/*
 * Callback from a channel when it can accept more to transmit.
 * This should be called at BH/softirq level, not interrupt level.
 */
void
ppp_output_wakeup(struct ppp_channel *chan)
{
    struct channel *pch = chan->ppp;

    if (pch)
        ppp_channel_push(pch);
}
/*
 * Compression control.
 */

/* Process the PPPIOCSCOMPRESS ioctl. */
/*
 * Copy a CCP option from userland, look up (loading if necessary) the
 * matching compressor module, allocate new [de]compressor state and
 * swap it in under the appropriate lock, freeing the old state.
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOBUFS on failure.
 */
static int
ppp_set_compress(struct ppp *ppp, unsigned long arg)
{
    int err;
    struct compressor *cp, *ocomp;
    struct ppp_option_data data;
    void *state, *ostate;
    unsigned char ccp_option[CCP_MAX_OPTION_LENGTH];

    err = -EFAULT;
    if (copy_from_user(&data, (void __user *) arg, sizeof(data))
        || (data.length <= CCP_MAX_OPTION_LENGTH
        && copy_from_user(ccp_option, (void __user *) data.ptr, data.length)))
        goto out;
    err = -EINVAL;
    /*
     * A CCP option is at least 2 bytes (type, length).  Rejecting
     * data.length < 2 up front also prevents reading ccp_option[1]
     * from uninitialized stack when userland passes length 0 or 1;
     * all valid options already satisfy ccp_option[1] >= 2 and
     * ccp_option[1] <= data.length, so no valid input is affected.
     */
    if (data.length > CCP_MAX_OPTION_LENGTH || data.length < 2
        || ccp_option[1] < 2 || ccp_option[1] > data.length)
        goto out;

    cp = try_then_request_module(
        find_compressor(ccp_option[0]),
        "ppp-compress-%d", ccp_option[0]);
    if (!cp)
        goto out;

    err = -ENOBUFS;
    if (data.transmit) {
        state = cp->comp_alloc(ccp_option, data.length);
        if (state) {
            /* swap in the new compressor under the xmit lock */
            ppp_xmit_lock(ppp);
            ppp->xstate &= ~SC_COMP_RUN;
            ocomp = ppp->xcomp;
            ostate = ppp->xc_state;
            ppp->xcomp = cp;
            ppp->xc_state = state;
            ppp_xmit_unlock(ppp);
            if (ostate) {
                ocomp->comp_free(ostate);
                module_put(ocomp->owner);
            }
            err = 0;
        } else
            module_put(cp->owner);

    } else {
        state = cp->decomp_alloc(ccp_option, data.length);
        if (state) {
            /* swap in the new decompressor under the recv lock */
            ppp_recv_lock(ppp);
            ppp->rstate &= ~SC_DECOMP_RUN;
            ocomp = ppp->rcomp;
            ostate = ppp->rc_state;
            ppp->rcomp = cp;
            ppp->rc_state = state;
            ppp_recv_unlock(ppp);
            if (ostate) {
                ocomp->decomp_free(ostate);
                module_put(ocomp->owner);
            }
            err = 0;
        } else
            module_put(cp->owner);
    }

 out:
    return err;
}
/*
 * Look at a CCP packet and update our state accordingly.
 * We assume the caller has the xmit or recv path locked.
 *
 * ConfReq/TermReq/TermAck take [de]compression down; ConfAck brings
 * the corresponding direction up by (re)initializing the
 * [de]compressor from the negotiated option; ResetAck resets it.
 * `inbound' is 1 for received frames, 0 for frames we are sending.
 */
static void
ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
{
    unsigned char *dp;
    int len;

    if (!pskb_may_pull(skb, CCP_HDRLEN + 2))
        return; /* no header */
    dp = skb->data + 2; /* skip the 2-byte PPP protocol field */

    switch (CCP_CODE(dp)) {
    case CCP_CONFREQ:

        /* A ConfReq starts negotiation of compression
         * in one direction of transmission,
         * and hence brings it down...but which way?
         *
         * Remember:
         * A ConfReq indicates what the sender would like to receive
         */
        if(inbound)
            /* He is proposing what I should send */
            ppp->xstate &= ~SC_COMP_RUN;
        else
            /* I am proposing to what he should send */
            ppp->rstate &= ~SC_DECOMP_RUN;

        break;

    case CCP_TERMREQ:
    case CCP_TERMACK:
        /*
         * CCP is going down, both directions of transmission
         */
        ppp->rstate &= ~SC_DECOMP_RUN;
        ppp->xstate &= ~SC_COMP_RUN;
        break;

    case CCP_CONFACK:
        if ((ppp->flags & (SC_CCP_OPEN | SC_CCP_UP)) != SC_CCP_OPEN)
            break;
        len = CCP_LENGTH(dp);
        if (!pskb_may_pull(skb, len + 2))
            return;     /* too short */
        dp += CCP_HDRLEN;   /* advance to the option data */
        len -= CCP_HDRLEN;
        if (len < CCP_OPT_MINLEN || len < CCP_OPT_LENGTH(dp))
            break;
        if (inbound) {
            /* we will start receiving compressed packets */
            if (!ppp->rc_state)
                break;
            if (ppp->rcomp->decomp_init(ppp->rc_state, dp, len,
                    ppp->file.index, 0, ppp->mru, ppp->debug)) {
                ppp->rstate |= SC_DECOMP_RUN;
                ppp->rstate &= ~(SC_DC_ERROR | SC_DC_FERROR);
            }
        } else {
            /* we will soon start sending compressed packets */
            if (!ppp->xc_state)
                break;
            if (ppp->xcomp->comp_init(ppp->xc_state, dp, len,
                    ppp->file.index, 0, ppp->debug))
                ppp->xstate |= SC_COMP_RUN;
        }
        break;

    case CCP_RESETACK:
        /* reset the [de]compressor */
        if ((ppp->flags & SC_CCP_UP) == 0)
            break;
        if (inbound) {
            if (ppp->rc_state && (ppp->rstate & SC_DECOMP_RUN)) {
                ppp->rcomp->decomp_reset(ppp->rc_state);
                ppp->rstate &= ~SC_DC_ERROR;
            }
        } else {
            if (ppp->xc_state && (ppp->xstate & SC_COMP_RUN))
                ppp->xcomp->comp_reset(ppp->xc_state);
        }
        break;
    }
}
/* Free up compression resources. */
static void
ppp_ccp_closed(struct ppp *ppp)
{
	void *xstate, *rstate;
	struct compressor *xcomp, *rcomp;

	/* Detach both (de)compressors and clear the CCP flags under the
	 * ppp lock; the actual freeing is deferred until after the lock
	 * is dropped. */
	ppp_lock(ppp);
	ppp->flags &= ~(SC_CCP_OPEN | SC_CCP_UP);
	ppp->xstate = 0;
	xcomp = ppp->xcomp;
	xstate = ppp->xc_state;
	ppp->xc_state = NULL;
	ppp->rstate = 0;
	rcomp = ppp->rcomp;
	rstate = ppp->rc_state;
	ppp->rc_state = NULL;
	ppp_unlock(ppp);

	if (xstate) {
		xcomp->comp_free(xstate);
		/* drop the module reference taken in find_compressor() */
		module_put(xcomp->owner);
	}
	if (rstate) {
		rcomp->decomp_free(rstate);
		module_put(rcomp->owner);
	}
}
/* List of compressors. */
static LIST_HEAD(compressor_list);
static DEFINE_SPINLOCK(compressor_list_lock);
struct compressor_entry {
struct list_head list;
struct compressor *comp;
};
/*
 * Look up the registry entry for the compressor that handles protocol
 * @proto.  Returns NULL if none is registered.  The caller must hold
 * compressor_list_lock.
 */
static struct compressor_entry *
find_comp_entry(int proto)
{
	struct compressor_entry *ent;

	list_for_each_entry(ent, &compressor_list, list) {
		if (ent->comp->compress_proto == proto)
			return ent;
	}
	return NULL;
}
/*
 * Register a compressor with the PPP core.  Returns 0 on success,
 * -EEXIST if a compressor for the same protocol is already registered,
 * or -ENOMEM if the registry entry cannot be allocated.
 */
int
ppp_register_compressor(struct compressor *cp)
{
	struct compressor_entry *ce;
	int ret = -EEXIST;

	spin_lock(&compressor_list_lock);
	if (find_comp_entry(cp->compress_proto))
		goto out;

	ret = -ENOMEM;
	ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
	if (!ce)
		goto out;

	ce->comp = cp;
	list_add(&ce->list, &compressor_list);
	ret = 0;
 out:
	spin_unlock(&compressor_list_lock);
	return ret;
}
/*
 * Remove a previously registered compressor.  A no-op if @cp is not
 * (or is no longer) the registered handler for its protocol.
 */
void
ppp_unregister_compressor(struct compressor *cp)
{
	struct compressor_entry *ent;

	spin_lock(&compressor_list_lock);
	ent = find_comp_entry(cp->compress_proto);
	if (ent && ent->comp == cp) {
		list_del(&ent->list);
		kfree(ent);
	}
	spin_unlock(&compressor_list_lock);
}
/*
 * Find the compressor registered for CCP option @type and take a
 * reference on its module.  Returns NULL if none is registered or the
 * module reference cannot be obtained.
 */
static struct compressor *
find_compressor(int type)
{
	struct compressor *found = NULL;
	struct compressor_entry *ent;

	spin_lock(&compressor_list_lock);
	ent = find_comp_entry(type);
	if (ent && try_module_get(ent->comp->owner))
		found = ent->comp;
	spin_unlock(&compressor_list_lock);
	return found;
}
/*
 * Miscellaneous stuff.
 */
static void
ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
{
struct slcompress *vj = ppp->vj;
memset(st, 0, sizeof(*st));
st->p.ppp_ipackets = ppp->dev->stats.rx_packets;
st->p.ppp_ierrors = ppp->dev->stats.rx_errors;
st->p.ppp_ibytes = ppp->dev->stats.rx_bytes;
st->p.ppp_opackets = ppp->dev->stats.tx_packets;
st->p.ppp_oerrors = ppp->dev->stats.tx_errors;
st->p.ppp_obytes = ppp->dev->stats.tx_bytes;
if (!vj)
return;
st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed;
st->vj.vjs_compressed = vj->sls_o_compressed;
st->vj.vjs_searches = vj->sls_o_searches;
st->vj.vjs_misses = vj->sls_o_misses;
st->vj.vjs_errorin = vj->sls_i_error;
st->vj.vjs_tossed = vj->sls_i_tossed;
st->vj.vjs_uncompressedin = vj->sls_i_uncompressed;
st->vj.vjs_compressedin = vj->sls_i_compressed;
}
/*
* Stuff for handling the lists of ppp units and channels
* and for initialization.
*/
/*
 * Create a new ppp interface unit.  Fails if it can't allocate memory
 * or if there is already a unit with the requested number.
 * unit == -1 means allocate a new number.
 *
 * On success returns the new unit and sets *retp to 0; on failure
 * returns NULL and stores a negative errno in *retp.
 */
static struct ppp *
ppp_create_interface(int unit, int *retp)
{
	struct ppp *ppp;
	struct net_device *dev = NULL;
	int ret = -ENOMEM;
	int i;

	ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL);
	if (!ppp)
		goto out;
	/* No priv area: the struct ppp is attached via dev->priv below. */
	dev = alloc_netdev(0, "", ppp_setup);
	if (!dev)
		goto out1;

	ppp->mru = PPP_MRU;
	init_ppp_file(&ppp->file, INTERFACE);
	ppp->file.hdrlen = PPP_HDRLEN - 2;	/* don't count proto bytes */
	for (i = 0; i < NUM_NP; ++i)
		ppp->npmode[i] = NPMODE_PASS;	/* pass all protocols by default */
	INIT_LIST_HEAD(&ppp->channels);
	spin_lock_init(&ppp->rlock);
	spin_lock_init(&ppp->wlock);
#ifdef CONFIG_PPP_MULTILINK
	ppp->minseq = -1;
	skb_queue_head_init(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
	ppp->dev = dev;
	dev->priv = ppp;

	dev->hard_start_xmit = ppp_start_xmit;
	dev->do_ioctl = ppp_net_ioctl;

	ret = -EEXIST;
	mutex_lock(&all_ppp_mutex);
	if (unit < 0)
		/* caller asked for any free unit number */
		unit = cardmap_find_first_free(all_ppp_units);
	else if (cardmap_get(all_ppp_units, unit) != NULL)
		goto out2;	/* unit already exists */

	/* Initialize the new ppp unit */
	ppp->file.index = unit;
	sprintf(dev->name, "ppp%d", unit);

	ret = register_netdev(dev);
	if (ret != 0) {
		printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
		       dev->name, ret);
		goto out2;
	}

	atomic_inc(&ppp_unit_count);
	ret = cardmap_set(&all_ppp_units, unit, ppp);
	if (ret != 0)
		goto out3;	/* -ENOMEM growing the unit map */

	mutex_unlock(&all_ppp_mutex);
	*retp = 0;
	return ppp;

	/* Error unwind: undo exactly what succeeded, in reverse order. */
out3:
	atomic_dec(&ppp_unit_count);
	unregister_netdev(dev);
out2:
	mutex_unlock(&all_ppp_mutex);
	free_netdev(dev);
out1:
	kfree(ppp);
out:
	*retp = ret;
	return NULL;
}
/*
 * Initialize a ppp_file structure: record whether it represents an
 * interface unit or a channel, set up its transmit/receive queues and
 * read wait queue, and give it an initial reference count of one.
 */
static void
init_ppp_file(struct ppp_file *pf, int kind)
{
	pf->kind = kind;
	atomic_set(&pf->refcnt, 1);
	init_waitqueue_head(&pf->rwait);
	skb_queue_head_init(&pf->xq);
	skb_queue_head_init(&pf->rq);
}
/*
 * Take down a ppp interface unit - called when the owning file
 * (the one that created the unit) is closed or detached.
 */
static void ppp_shutdown_interface(struct ppp *ppp)
{
	mutex_lock(&all_ppp_mutex);
	/* This will call dev_close() for us. */
	ppp_lock(ppp);
	if (!ppp->closing) {
		/* ->closing guards against unregistering the netdev twice;
		 * only the first caller does it. */
		ppp->closing = 1;
		ppp_unlock(ppp);
		unregister_netdev(ppp->dev);
	} else
		ppp_unlock(ppp);

	/* Remove the unit from the unit map, mark it dead so readers see
	 * it is gone, and wake anyone sleeping on its read wait queue. */
	cardmap_set(&all_ppp_units, ppp->file.index, NULL);
	ppp->file.dead = 1;
	ppp->owner = NULL;
	wake_up_interruptible(&ppp->file.rwait);
	mutex_unlock(&all_ppp_mutex);
}
/*
 * Free the memory used by a ppp unit.  This is only called once
 * there are no channels connected to the unit and no file structs
 * that reference the unit.
 */
static void ppp_destroy_interface(struct ppp *ppp)
{
	atomic_dec(&ppp_unit_count);

	if (!ppp->file.dead || ppp->n_channels) {
		/* "can't happen" */
		printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d "
		       "n_channels=%d !\n", ppp, ppp->file.dead,
		       ppp->n_channels);
		return;
	}

	ppp_ccp_closed(ppp);		/* release (de)compressor state */
	if (ppp->vj) {
		slhc_free(ppp->vj);	/* VJ header-compression state */
		ppp->vj = NULL;
	}
	skb_queue_purge(&ppp->file.xq);
	skb_queue_purge(&ppp->file.rq);
#ifdef CONFIG_PPP_MULTILINK
	skb_queue_purge(&ppp->mrq);	/* multilink reassembly queue */
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
	kfree(ppp->pass_filter);
	ppp->pass_filter = NULL;
	kfree(ppp->active_filter);
	ppp->active_filter = NULL;
#endif /* CONFIG_PPP_FILTER */

	if (ppp->xmit_pending)
		kfree_skb(ppp->xmit_pending);

	/* NOTE(review): the struct ppp itself was kzalloc()ed separately
	 * in ppp_create_interface() and is not visibly kfree()d here --
	 * verify it is released elsewhere (e.g. via the netdev teardown)
	 * or this leaks one struct ppp per destroyed unit. */
	free_netdev(ppp->dev);
}
/*
 * Locate an existing ppp unit.
 * The caller should have locked the all_ppp_mutex.
 *
 * Returns the unit, or NULL if no unit with that number exists.  No
 * reference is taken; the caller relies on holding the mutex.
 */
static struct ppp *
ppp_find_unit(int unit)
{
	return cardmap_get(all_ppp_units, unit);
}
/*
 * Locate an existing ppp channel by its index.
 * The caller should have locked the all_channels_lock.
 *
 * Recently registered channels live on new_channels; a hit there is
 * promoted to all_channels so that later lookups stay cheap when many
 * channels are in use.  all_channels is searched second.
 */
static struct channel *
ppp_find_channel(int unit)
{
	struct channel *chan;

	list_for_each_entry(chan, &new_channels, list) {
		if (chan->file.index != unit)
			continue;
		list_move(&chan->list, &all_channels);
		return chan;
	}

	list_for_each_entry(chan, &all_channels, list) {
		if (chan->file.index == unit)
			return chan;
	}

	return NULL;
}
/*
 * Connect a PPP channel to a PPP interface unit.
 *
 * Returns 0 on success, -ENXIO if no unit numbered @unit exists, or
 * -EINVAL if the channel is already attached to a unit.
 */
static int
ppp_connect_channel(struct channel *pch, int unit)
{
	struct ppp *ppp;
	int ret = -ENXIO;
	int hdrlen;

	mutex_lock(&all_ppp_mutex);
	ppp = ppp_find_unit(unit);
	if (!ppp)
		goto out;
	write_lock_bh(&pch->upl);
	ret = -EINVAL;
	if (pch->ppp)
		goto outl;	/* already attached to some unit */

	ppp_lock(ppp);
	/* Grow the unit's header reservation to cover this channel's
	 * per-packet header needs. */
	if (pch->file.hdrlen > ppp->file.hdrlen)
		ppp->file.hdrlen = pch->file.hdrlen;
	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
	if (hdrlen > ppp->dev->hard_header_len)
		ppp->dev->hard_header_len = hdrlen;
	list_add_tail(&pch->clist, &ppp->channels);
	++ppp->n_channels;
	pch->ppp = ppp;
	/* The channel holds a reference on the unit until disconnect. */
	atomic_inc(&ppp->file.refcnt);
	ppp_unlock(ppp);
	ret = 0;

 outl:
	write_unlock_bh(&pch->upl);
 out:
	mutex_unlock(&all_ppp_mutex);
	return ret;
}
/*
 * Disconnect a channel from its ppp unit.
 *
 * Returns 0 if the channel was attached to a unit, -EINVAL otherwise.
 */
static int
ppp_disconnect_channel(struct channel *pch)
{
	struct ppp *ppp;
	int err = -EINVAL;

	/* Atomically detach the channel from its unit. */
	write_lock_bh(&pch->upl);
	ppp = pch->ppp;
	pch->ppp = NULL;
	write_unlock_bh(&pch->upl);

	if (ppp) {
		/* remove it from the ppp unit's list */
		ppp_lock(ppp);
		list_del(&pch->clist);
		/* Wake anyone waiting for the last channel to go away. */
		if (--ppp->n_channels == 0)
			wake_up_interruptible(&ppp->file.rwait);
		ppp_unlock(ppp);
		/* Drop the reference taken in ppp_connect_channel(); the
		 * unit is destroyed when the last reference goes. */
		if (atomic_dec_and_test(&ppp->file.refcnt))
			ppp_destroy_interface(ppp);
		err = 0;
	}
	return err;
}
/*
 * Free up the resources used by a ppp channel once its last reference
 * has been dropped.  The channel must already be marked dead.
 */
static void ppp_destroy_channel(struct channel *pch)
{
	atomic_dec(&channel_count);

	if (pch->file.dead) {
		skb_queue_purge(&pch->file.xq);
		skb_queue_purge(&pch->file.rq);
		kfree(pch);
		return;
	}

	/* "can't happen" */
	printk(KERN_ERR "ppp: destroying undead channel %p !\n",
	       pch);
}
/*
 * Module unload: tear down the unit map and the /dev/ppp character
 * device.  All units and channels should already be gone by the time
 * the module can be removed.
 */
static void __exit ppp_cleanup(void)
{
	/* should never happen */
	if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
		printk(KERN_ERR "PPP: removing module but units remain!\n");
	cardmap_destroy(&all_ppp_units);
	unregister_chrdev(PPP_MAJOR, "ppp");
	device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
	class_destroy(ppp_class);
}
/*
 * Cardmap implementation.
 *
 * A cardmap is a small radix tree mapping unsigned integer indices to
 * pointers.  Each node has CARDMAP_WIDTH slots; ->shift is the bit
 * position this node decodes (0 for leaves) and ->inuse tracks which
 * slots are occupied.
 */

/* Return the pointer stored at index @nr, or NULL if absent or out of
 * the map's current range. */
static void *cardmap_get(struct cardmap *map, unsigned int nr)
{
	struct cardmap *p;
	int i;

	for (p = map; p != NULL; ) {
		/* Index beyond what this (top) node can address? */
		if ((i = nr >> p->shift) >= CARDMAP_WIDTH)
			return NULL;
		if (p->shift == 0)
			return p->ptr[i];	/* leaf: the stored pointer */
		/* Strip the bits decoded at this level and descend. */
		nr &= ~(CARDMAP_MASK << p->shift);
		p = p->ptr[i];
	}
	return NULL;
}
/*
 * Store @ptr at index @nr, growing the tree as necessary.  Storing
 * NULL clears the slot and the inuse bits along the path.
 * Returns 0 on success or -ENOMEM.
 */
static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
{
	struct cardmap *p;
	int i;

	p = *pmap;
	if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) {
		do {
			/* need a new top level */
			struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
			if (!np)
				goto enomem;
			np->ptr[0] = p;		/* old tree becomes slot 0 */
			if (p != NULL) {
				np->shift = p->shift + CARDMAP_ORDER;
				p->parent = np;
			} else
				np->shift = 0;
			p = np;
		} while ((nr >> p->shift) >= CARDMAP_WIDTH);
		*pmap = p;
	}
	/* Walk down to the leaf, creating interior nodes as needed. */
	while (p->shift > 0) {
		i = (nr >> p->shift) & CARDMAP_MASK;
		if (p->ptr[i] == NULL) {
			struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
			if (!np)
				goto enomem;
			np->shift = p->shift - CARDMAP_ORDER;
			np->parent = p;
			p->ptr[i] = np;
		}
		if (ptr == NULL)
			/* clearing: this subtree can no longer be full */
			clear_bit(i, &p->inuse);
		p = p->ptr[i];
	}
	i = nr & CARDMAP_MASK;
	p->ptr[i] = ptr;
	if (ptr != NULL)
		set_bit(i, &p->inuse);
	else
		clear_bit(i, &p->inuse);
	return 0;

 enomem:
	/* NOTE(review): top-level nodes created in the grow loop before a
	 * later allocation failure are not linked from *pmap and appear
	 * to be leaked here -- confirm whether this matters in practice. */
	return -ENOMEM;
}
/*
 * Return the lowest index whose slot is free.  As a side effect, when
 * a node turns out to be completely full its slot is marked in-use in
 * the parent, so later searches skip the full subtree.
 */
static unsigned int cardmap_find_first_free(struct cardmap *map)
{
	struct cardmap *p;
	unsigned int nr = 0;
	int i;

	if ((p = map) == NULL)
		return 0;
	for (;;) {
		i = find_first_zero_bit(&p->inuse, CARDMAP_WIDTH);
		if (i >= CARDMAP_WIDTH) {
			/* Node full: record that in the parent and resume
			 * the search one level up. */
			if (p->parent == NULL)
				/* whole map full: first index past its range */
				return CARDMAP_WIDTH << p->shift;
			p = p->parent;
			i = (nr >> p->shift) & CARDMAP_MASK;
			set_bit(i, &p->inuse);
			continue;
		}
		/* Fold this level's free-slot position into the index. */
		nr = (nr & (~CARDMAP_MASK << p->shift)) | (i << p->shift);
		if (p->shift == 0 || p->ptr[i] == NULL)
			return nr;	/* free leaf slot, or absent subtree */
		p = p->ptr[i];
	}
}
/*
 * Free every node of the tree (iteratively, children before parents)
 * and NULL out the map pointer.  The user pointers stored in leaves
 * are not freed.
 */
static void cardmap_destroy(struct cardmap **pmap)
{
	struct cardmap *p, *np;
	int i;

	for (p = *pmap; p != NULL; p = np) {
		if (p->shift != 0) {
			/* Interior node: descend into its first remaining
			 * child before freeing the node itself. */
			for (i = 0; i < CARDMAP_WIDTH; ++i)
				if (p->ptr[i] != NULL)
					break;
			if (i < CARDMAP_WIDTH) {
				np = p->ptr[i];
				p->ptr[i] = NULL;
				continue;
			}
		}
		/* Leaf, or interior node with no children left: free it
		 * and resume at its parent. */
		np = p->parent;
		kfree(p);
	}
	*pmap = NULL;
}
/* Module/initialization stuff */
module_init(ppp_init);
module_exit(ppp_cleanup);
/* Entry points called by PPP channel drivers. */
EXPORT_SYMBOL(ppp_register_channel);
EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
EXPORT_SYMBOL(ppp_input);
EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
/* Entry points called by compressor modules. */
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(PPP_MAJOR);
MODULE_ALIAS("/dev/ppp");
| gpl-2.0 |
devmapal/linux | drivers/block/floppy.c | 25 | 120165 | /*
* linux/drivers/block/floppy.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1993, 1994 Alain Knaff
* Copyright (C) 1998 Alan Cox
*/
/*
* 02.12.91 - Changed to static variables to indicate need for reset
* and recalibrate. This makes some things easier (output_byte reset
* checking etc), and means less interrupt jumping in case of errors,
* so the code is hopefully easier to understand.
*/
/*
* This file is certainly a mess. I've tried my best to get it working,
* but I don't like programming floppies, and I have only one anyway.
* Urgel. I should check for more errors, and do more graceful error
* recovery. Seems there are problems with several drives. I've tried to
* correct them. No promises.
*/
/*
* As with hd.c, all routines within this file can (and will) be called
* by interrupts, so extreme caution is needed. A hardware interrupt
* handler may not sleep, or a kernel panic will happen. Thus I cannot
* call "floppy-on" directly, but have to set a special timer interrupt
* etc.
*/
/*
* 28.02.92 - made track-buffering routines, based on the routines written
* by entropy@wintermute.wpi.edu (Lawrence Foard). Linus.
*/
/*
* Automatic floppy-detection and formatting written by Werner Almesberger
* (almesber@nessie.cs.id.ethz.ch), who also corrected some problems with
* the floppy-change signal detection.
*/
/*
* 1992/7/22 -- Hennus Bergman: Added better error reporting, fixed
* FDC data overrun bug, added some preliminary stuff for vertical
* recording support.
*
* 1992/9/17: Added DMA allocation & DMA functions. -- hhb.
*
* TODO: Errors are still not counted properly.
*/
/* 1992/9/20
* Modifications for ``Sector Shifting'' by Rob Hooft (hooft@chem.ruu.nl)
* modeled after the freeware MS-DOS program fdformat/88 V1.8 by
* Christoph H. Hochst\"atter.
* I have fixed the shift values to the ones I always use. Maybe a new
* ioctl() should be created to be able to modify them.
* There is a bug in the driver that makes it impossible to format a
* floppy as the first thing after bootup.
*/
/*
* 1993/4/29 -- Linus -- cleaned up the timer handling in the kernel, and
* this helped the floppy driver as well. Much cleaner, and still seems to
* work.
*/
/* 1994/6/24 --bbroad-- added the floppy table entries and made
* minor modifications to allow 2.88 floppies to be run.
*/
/* 1994/7/13 -- Paul Vojta -- modified the probing code to allow three or more
* disk types.
*/
/*
* 1994/8/8 -- Alain Knaff -- Switched to fdpatch driver: Support for bigger
* format bug fixes, but unfortunately some new bugs too...
*/
/* 1994/9/17 -- Koen Holtman -- added logging of physical floppy write
* errors to allow safe writing by specialized programs.
*/
/* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks
* by defining bit 1 of the "stretch" parameter to mean put sectors on the
* opposite side of the disk, leaving the sector IDs alone (i.e. Commodore's
* drives are "upside-down").
*/
/*
* 1995/8/26 -- Andreas Busse -- added Mips support.
*/
/*
* 1995/10/18 -- Ralf Baechle -- Portability cleanup; move machine dependent
* features to asm/floppy.h.
*/
/*
* 1998/1/21 -- Richard Gooch <rgooch@atnf.csiro.au> -- devfs support
*/
/*
* 1998/05/07 -- Russell King -- More portability cleanups; moved definition of
* interrupt and dma channel to asm/floppy.h. Cleaned up some formatting &
* use of '0' for NULL.
*/
/*
* 1998/06/07 -- Alan Cox -- Merged the 2.0.34 fixes for resource allocation
* failures.
*/
/*
* 1998/09/20 -- David Weinehall -- Added slow-down code for buggy PS/2-drives.
*/
/*
* 1999/08/13 -- Paul Slootman -- floppy stopped working on Alpha after 24
* days, 6 hours, 32 minutes and 32 seconds (i.e. MAXINT jiffies; ints were
* being used to store jiffies, which are unsigned longs).
*/
/*
* 2000/08/28 -- Arnaldo Carvalho de Melo <acme@conectiva.com.br>
* - get rid of check_region
* - s/suser/capable/
*/
/*
* 2001/08/26 -- Paul Gortmaker - fix insmod oops on machines with no
* floppy controller (lingering task on list after module is gone... boom.)
*/
/*
* 2002/02/07 -- Anton Altaparmakov - Fix io ports reservation to correct range
* (0x3f2-0x3f5, 0x3f7). This fix is a bit of a hack but the proper fix
* requires many non-obvious changes in arch dependent code.
*/
/* 2003/07/28 -- Daniele Bellucci <bellucda@tiscali.it>.
* Better audit of register_blkdev.
*/
#undef FLOPPY_SILENT_DCL_CLEAR
#define REALLY_SLOW_IO
#define DEBUGT 2
#define DPRINT(format, args...) \
pr_info("floppy%d: " format, current_drive, ##args)
#define DCL_DEBUG /* debug disk change line */
#ifdef DCL_DEBUG
#define debug_dcl(test, fmt, args...) \
do { if ((test) & FD_DEBUG) DPRINT(fmt, ##args); } while (0)
#else
#define debug_dcl(test, fmt, args...) \
do { if (0) DPRINT(fmt, ##args); } while (0)
#endif
/* do print messages for unexpected interrupts */
static int print_unex = 1;
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#define FDPATCHES
#include <linux/fdreg.h>
#include <linux/fd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h> /* CMOS defines */
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/async.h>
/*
 * PS/2 floppies have much slower step rates than regular floppies.
 * It's been recommended that they take about 1/4 of the default speed
 * in some more extreme cases.
 */
static DEFINE_MUTEX(floppy_mutex);
static int slow_floppy;
#include <asm/dma.h>
#include <asm/irq.h>
static int FLOPPY_IRQ = 6;
static int FLOPPY_DMA = 2;
static int can_use_virtual_dma = 2;
/* =======
* can use virtual DMA:
* 0 = use of virtual DMA disallowed by config
* 1 = use of virtual DMA prescribed by config
* 2 = no virtual DMA preference configured. By default try hard DMA,
* but fall back on virtual DMA when not enough memory available
*/
static int use_virtual_dma;
/* =======
* use virtual DMA
* 0 using hard DMA
* 1 using virtual DMA
* This variable is set to virtual when a DMA mem problem arises, and
* reset back in floppy_grab_irq_and_dma.
* It is not safe to reset it in other circumstances, because the floppy
* driver may have several buffers in use at once, and we do currently not
* record each buffers capabilities
*/
static DEFINE_SPINLOCK(floppy_lock);
static unsigned short virtual_dma_port = 0x3f0;
irqreturn_t floppy_interrupt(int irq, void *dev_id);
static int set_dor(int fdc, char mask, char data);
#define K_64 0x10000 /* 64KB */
/* the following is the mask of allowed drives. By default units 2 and
* 3 of both floppy controllers are disabled, because switching on the
* motor of these drives causes system hangs on some PCI computers. drive
* 0 is the low bit (0x1), and drive 7 is the high bit (0x80). Bits are on if
* a drive is allowed.
*
* NOTE: This must come before we include the arch floppy header because
* some ports reference this variable from there. -DaveM
*/
static int allowed_drive_mask = 0x33;
#include <asm/floppy.h>
static int irqdma_allocated;
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/cdrom.h> /* for the compatibility eject ioctl */
#include <linux/completion.h>
static struct request *current_req;
static void do_fd_request(struct request_queue *q);
static int set_next_request(void);
#ifndef fd_get_dma_residue
#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
#endif
/* Dma Memory related stuff */
#ifndef fd_dma_mem_free
#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
#endif
#ifndef fd_dma_mem_alloc
#define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL, get_order(size))
#endif
/*
 * If a DMA-capable allocation failed (*addr == NULL) and the config
 * permits it (can_use_virtual_dma == 2, i.e. no explicit preference),
 * allocate ordinary memory for use with virtual DMA instead.  A no-op
 * on ports that do not define FLOPPY_CAN_FALLBACK_ON_NODMA.
 */
static inline void fallback_on_nodma_alloc(char **addr, size_t l)
{
#ifdef FLOPPY_CAN_FALLBACK_ON_NODMA
	if (*addr)
		return;		/* we have the memory */
	if (can_use_virtual_dma != 2)
		return;		/* no fallback allowed */
	pr_info("DMA memory shortage. Temporarily falling back on virtual DMA\n");
	*addr = (char *)nodma_mem_alloc(l);
#else
	return;
#endif
}
/* End dma memory related stuff */
static unsigned long fake_change;
static bool initialized;
#define ITYPE(x) (((x) >> 2) & 0x1f)
#define TOMINOR(x) ((x & 3) | ((x & 4) << 5))
#define UNIT(x) ((x) & 0x03) /* drive on fdc */
#define FDC(x) (((x) & 0x04) >> 2) /* fdc of drive */
/* reverse mapping from unit and fdc to drive */
#define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2))
#define DP (&drive_params[current_drive])
#define DRS (&drive_state[current_drive])
#define DRWE (&write_errors[current_drive])
#define FDCS (&fdc_state[fdc])
#define UDP (&drive_params[drive])
#define UDRS (&drive_state[drive])
#define UDRWE (&write_errors[drive])
#define UFDCS (&fdc_state[FDC(drive)])
#define PH_HEAD(floppy, head) (((((floppy)->stretch & 2) >> 1) ^ head) << 2)
#define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH)
/* read/write */
#define COMMAND (raw_cmd->cmd[0])
#define DR_SELECT (raw_cmd->cmd[1])
#define TRACK (raw_cmd->cmd[2])
#define HEAD (raw_cmd->cmd[3])
#define SECTOR (raw_cmd->cmd[4])
#define SIZECODE (raw_cmd->cmd[5])
#define SECT_PER_TRACK (raw_cmd->cmd[6])
#define GAP (raw_cmd->cmd[7])
#define SIZECODE2 (raw_cmd->cmd[8])
#define NR_RW 9
/* format */
#define F_SIZECODE (raw_cmd->cmd[2])
#define F_SECT_PER_TRACK (raw_cmd->cmd[3])
#define F_GAP (raw_cmd->cmd[4])
#define F_FILL (raw_cmd->cmd[5])
#define NR_F 6
/*
* Maximum disk size (in kilobytes).
* This default is used whenever the current disk size is unknown.
* [Now it is rather a minimum]
*/
#define MAX_DISK_SIZE 4 /* 3984 */
/*
* globals used by 'result()'
*/
#define MAX_REPLIES 16
static unsigned char reply_buffer[MAX_REPLIES];
static int inr; /* size of reply buffer, when called from interrupt */
#define ST0 (reply_buffer[0])
#define ST1 (reply_buffer[1])
#define ST2 (reply_buffer[2])
#define ST3 (reply_buffer[0]) /* result of GETSTATUS */
#define R_TRACK (reply_buffer[3])
#define R_HEAD (reply_buffer[4])
#define R_SECTOR (reply_buffer[5])
#define R_SIZECODE (reply_buffer[6])
#define SEL_DLY (2 * HZ / 100)
/*
* this struct defines the different floppy drive types.
*/
static struct {
struct floppy_drive_params params;
const char *name; /* name printed while booting */
} default_drive_params[] = {
/* NOTE: the time values in jiffies should be in msec!
CMOS drive type
| Maximum data rate supported by drive type
| | Head load time, msec
| | | Head unload time, msec (not used)
| | | | Step rate interval, usec
| | | | | Time needed for spinup time (jiffies)
| | | | | | Timeout for spinning down (jiffies)
| | | | | | | Spindown offset (where disk stops)
| | | | | | | | Select delay
| | | | | | | | | RPS
| | | | | | | | | | Max number of tracks
| | | | | | | | | | | Interrupt timeout
| | | | | | | | | | | | Max nonintlv. sectors
| | | | | | | | | | | | | -Max Errors- flags */
{{0, 500, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 80, 3*HZ, 20, {3,1,2,0,2}, 0,
0, { 7, 4, 8, 2, 1, 5, 3,10}, 3*HZ/2, 0 }, "unknown" },
{{1, 300, 16, 16, 8000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 40, 3*HZ, 17, {3,1,2,0,2}, 0,
0, { 1, 0, 0, 0, 0, 0, 0, 0}, 3*HZ/2, 1 }, "360K PC" }, /*5 1/4 360 KB PC*/
{{2, 500, 16, 16, 6000, 4*HZ/10, 3*HZ, 14, SEL_DLY, 6, 83, 3*HZ, 17, {3,1,2,0,2}, 0,
0, { 2, 5, 6,23,10,20,12, 0}, 3*HZ/2, 2 }, "1.2M" }, /*5 1/4 HD AT*/
{{3, 250, 16, 16, 3000, 1*HZ, 3*HZ, 0, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
0, { 4,22,21,30, 3, 0, 0, 0}, 3*HZ/2, 4 }, "720k" }, /*3 1/2 DD*/
{{4, 500, 16, 16, 4000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 20, {3,1,2,0,2}, 0,
0, { 7, 4,25,22,31,21,29,11}, 3*HZ/2, 7 }, "1.44M" }, /*3 1/2 HD*/
{{5, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M AMI BIOS" }, /*3 1/2 ED*/
{{6, 1000, 15, 8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5, 83, 3*HZ, 40, {3,1,2,0,2}, 0,
0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M" } /*3 1/2 ED*/
/* | --autodetected formats--- | | |
* read_track | | Name printed when booting
* | Native format
* Frequency of disk change checks */
};
static struct floppy_drive_params drive_params[N_DRIVE];
static struct floppy_drive_struct drive_state[N_DRIVE];
static struct floppy_write_errors write_errors[N_DRIVE];
static struct timer_list motor_off_timer[N_DRIVE];
static struct gendisk *disks[N_DRIVE];
static struct block_device *opened_bdev[N_DRIVE];
static DEFINE_MUTEX(open_lock);
static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
static int fdc_queue;
/*
* This struct defines the different floppy types.
*
* Bit 0 of 'stretch' tells if the tracks need to be doubled for some
* types (e.g. 360kB diskette in 1.2MB drive, etc.). Bit 1 of 'stretch'
* tells if the disk is in Commodore 1581 format, which means side 0 sectors
* are located on side 1 of the disk but with a side 0 ID, and vice-versa.
* This is the same as the Sharp MZ-80 5.25" CP/M disk format, except that the
* 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
* side 0 is on physical side 0 (but with the misnamed sector IDs).
* 'stretch' should probably be renamed to something more general, like
* 'options'.
*
* Bits 2 through 9 of 'stretch' tell the number of the first sector.
* The LSB (bit 2) is flipped. For most disks, the first sector
* is 1 (represented by 0x00<<2). For some CP/M and music sampler
* disks (such as Ensoniq EPS 16plus) it is 0 (represented as 0x01<<2).
* For Amstrad CPC disks it is 0xC1 (represented as 0xC0<<2).
*
* Other parameters should be self-explanatory (see also setfdprm(8)).
*/
/*
Size
| Sectors per track
| | Head
| | | Tracks
| | | | Stretch
| | | | | Gap 1 size
| | | | | | Data rate, | 0x40 for perp
| | | | | | | Spec1 (stepping rate, head unload
| | | | | | | | /fmt gap (gap2) */
static struct floppy_struct floppy_type[32] = {
{ 0, 0,0, 0,0,0x00,0x00,0x00,0x00,NULL }, /* 0 no testing */
{ 720, 9,2,40,0,0x2A,0x02,0xDF,0x50,"d360" }, /* 1 360KB PC */
{ 2400,15,2,80,0,0x1B,0x00,0xDF,0x54,"h1200" }, /* 2 1.2MB AT */
{ 720, 9,1,80,0,0x2A,0x02,0xDF,0x50,"D360" }, /* 3 360KB SS 3.5" */
{ 1440, 9,2,80,0,0x2A,0x02,0xDF,0x50,"D720" }, /* 4 720KB 3.5" */
{ 720, 9,2,40,1,0x23,0x01,0xDF,0x50,"h360" }, /* 5 360KB AT */
{ 1440, 9,2,80,0,0x23,0x01,0xDF,0x50,"h720" }, /* 6 720KB AT */
{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,"H1440" }, /* 7 1.44MB 3.5" */
{ 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"E2880" }, /* 8 2.88MB 3.5" */
{ 6240,39,2,80,0,0x1B,0x43,0xAF,0x28,"E3120" }, /* 9 3.12MB 3.5" */
{ 2880,18,2,80,0,0x25,0x00,0xDF,0x02,"h1440" }, /* 10 1.44MB 5.25" */
{ 3360,21,2,80,0,0x1C,0x00,0xCF,0x0C,"H1680" }, /* 11 1.68MB 3.5" */
{ 820,10,2,41,1,0x25,0x01,0xDF,0x2E,"h410" }, /* 12 410KB 5.25" */
{ 1640,10,2,82,0,0x25,0x02,0xDF,0x2E,"H820" }, /* 13 820KB 3.5" */
{ 2952,18,2,82,0,0x25,0x00,0xDF,0x02,"h1476" }, /* 14 1.48MB 5.25" */
{ 3444,21,2,82,0,0x25,0x00,0xDF,0x0C,"H1722" }, /* 15 1.72MB 3.5" */
{ 840,10,2,42,1,0x25,0x01,0xDF,0x2E,"h420" }, /* 16 420KB 5.25" */
{ 1660,10,2,83,0,0x25,0x02,0xDF,0x2E,"H830" }, /* 17 830KB 3.5" */
{ 2988,18,2,83,0,0x25,0x00,0xDF,0x02,"h1494" }, /* 18 1.49MB 5.25" */
{ 3486,21,2,83,0,0x25,0x00,0xDF,0x0C,"H1743" }, /* 19 1.74 MB 3.5" */
{ 1760,11,2,80,0,0x1C,0x09,0xCF,0x00,"h880" }, /* 20 880KB 5.25" */
{ 2080,13,2,80,0,0x1C,0x01,0xCF,0x00,"D1040" }, /* 21 1.04MB 3.5" */
{ 2240,14,2,80,0,0x1C,0x19,0xCF,0x00,"D1120" }, /* 22 1.12MB 3.5" */
{ 3200,20,2,80,0,0x1C,0x20,0xCF,0x2C,"h1600" }, /* 23 1.6MB 5.25" */
{ 3520,22,2,80,0,0x1C,0x08,0xCF,0x2e,"H1760" }, /* 24 1.76MB 3.5" */
{ 3840,24,2,80,0,0x1C,0x20,0xCF,0x00,"H1920" }, /* 25 1.92MB 3.5" */
{ 6400,40,2,80,0,0x25,0x5B,0xCF,0x00,"E3200" }, /* 26 3.20MB 3.5" */
{ 7040,44,2,80,0,0x25,0x5B,0xCF,0x00,"E3520" }, /* 27 3.52MB 3.5" */
{ 7680,48,2,80,0,0x25,0x63,0xCF,0x00,"E3840" }, /* 28 3.84MB 3.5" */
{ 3680,23,2,80,0,0x1C,0x10,0xCF,0x00,"H1840" }, /* 29 1.84MB 3.5" */
{ 1600,10,2,80,0,0x25,0x02,0xDF,0x2E,"D800" }, /* 30 800KB 3.5" */
{ 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */
};
#define SECTSIZE (_FD_SECTSIZE(*floppy))
/* Auto-detection: Disk type used until the next media change occurs. */
static struct floppy_struct *current_type[N_DRIVE];
/*
* User-provided type information. current_type points to
* the respective entry of this array.
*/
static struct floppy_struct user_params[N_DRIVE];
static sector_t floppy_sizes[256];
static char floppy_device_name[] = "floppy";
/*
* The driver is trying to determine the correct media format
* while probing is set. rw_interrupt() clears it after a
* successful access.
*/
/* Nonzero while geometry auto-detection is in progress; cleared by the
 * interrupt path after a successful access. */
static int probing;
/* Synchronization of FDC access. */
#define FD_COMMAND_NONE -1
#define FD_COMMAND_ERROR 2
#define FD_COMMAND_OKAY 3
/* State of the command currently owning the FDC; waiters sleep on
 * command_done until it reaches >= 2 (ERROR or OKAY). */
static volatile int command_status = FD_COMMAND_NONE;
/* Bit 0 is the driver-wide lock; taken/released via test_and_set_bit. */
static unsigned long fdc_busy;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(command_done);
/* Errors during formatting are counted here. */
static int format_errors;
/* Format request descriptor. */
static struct format_descr format_req;
/*
 * Rate is 0 for 500kb/s, 1 for 300kbps, 2 for 250kbps
 * Spec1 is 0xSH, where S is stepping rate (F=1ms, E=2ms, D=3ms etc),
 * H is head unload time (1=16ms, 2=32ms, etc)
 */
/*
 * Track buffer
 * Because these are written to by the DMA controller, they must
 * not contain a 64k byte boundary crossing, or data will be
 * corrupted/lost.
 */
static char *floppy_track_buffer;
static int max_buffer_sectors;
/* Points at the error counter of the current operation. */
static int *errors;
typedef void (*done_f)(int);
/* Continuation: the set of callbacks driving the current operation's
 * state machine (retry / error tally / completion). */
static const struct cont_t {
	void (*interrupt)(void);
				/* this is called after the interrupt of the
				 * main command */
	void (*redo)(void);	/* this is called to retry the operation */
	void (*error)(void);	/* this is called to tally an error */
	done_f done;		/* this is called to say if the operation has
				 * succeeded/failed */
} *cont;
static void floppy_ready(void);
static void floppy_start(void);
static void process_fd_request(void);
static void recalibrate_floppy(void);
static void floppy_shutdown(struct work_struct *);
static int floppy_request_regions(int);
static void floppy_release_regions(int);
static int floppy_grab_irq_and_dma(void);
static void floppy_release_irq_and_dma(void);
/*
 * The "reset" variable should be tested whenever an interrupt is scheduled,
 * after the commands have been sent. This is to ensure that the driver doesn't
 * get wedged when the interrupt doesn't come because of a failed command.
 * reset doesn't need to be tested before sending commands, because
 * output_byte is automatically disabled when reset is set.
 */
static void reset_fdc(void);
/*
 * These are global variables, as that's the easiest way to give
 * information to interrupts. They are the data used for the current
 * request.
 */
#define NO_TRACK -1
#define NEED_1_RECAL -2
#define NEED_2_RECAL -3
static atomic_t usage_count = ATOMIC_INIT(0);
/* buffer related variables */
static int buffer_track = -1;
static int buffer_drive = -1;
static int buffer_min = -1;
static int buffer_max = -1;
/* fdc related variables, should end up in a struct */
static struct floppy_fdc_state fdc_state[N_FDC];
static int fdc;			/* current fdc */
static struct workqueue_struct *floppy_wq;
static struct floppy_struct *_floppy = floppy_type;
static unsigned char current_drive;
static long current_count_sectors;
static unsigned char fsector_t;	/* sector in track */
static unsigned char in_sector_offset;	/* offset within physical sector,
					 * expressed in units of 512 bytes */
/* True when the drive has neither an explicitly set type nor a type
 * encoded in its device minor — i.e. geometry must be auto-probed. */
static inline bool drive_no_geom(int drive)
{
	return !current_type[drive] && !ITYPE(UDRS->fd_device);
}
/* Fallback for architectures that provide no hardware eject hook. */
#ifndef fd_eject
static inline int fd_eject(int drive)
{
	return -EINVAL;
}
#endif
/*
 * Debugging
 * =========
 */
#ifdef DEBUGT
/* Timestamp taken by set_debugt(); debugt() prints the elapsed jiffies. */
static long unsigned debugtimer;
static inline void set_debugt(void)
{
	debugtimer = jiffies;
}
static inline void debugt(const char *func, const char *msg)
{
	if (DP->flags & DEBUGT)
		pr_info("%s:%s dtime=%lu\n", func, msg, jiffies - debugtimer);
}
#else
/* No-op stubs when timing debug is compiled out. */
static inline void set_debugt(void) { }
static inline void debugt(const char *func, const char *msg) { }
#endif /* DEBUGT */
/* Watchdog work: fires floppy_shutdown() if an operation stalls. */
static DECLARE_DELAYED_WORK(fd_timeout, floppy_shutdown);
static const char *timeout_message;
/* Sanity check: while the FDC is locked and a command is pending, the
 * timeout watchdog must be armed; complain loudly if it is not. */
static void is_alive(const char *func, const char *message)
{
	/* this routine checks whether the floppy driver is "alive" */
	if (test_bit(0, &fdc_busy) && command_status < 2 &&
	    !delayed_work_pending(&fd_timeout)) {
		DPRINT("%s: timeout handler died. %s\n", func, message);
	}
}
/* Handler that floppy_interrupt() dispatches to; NULL means no
 * interrupt is expected. */
static void (*do_floppy)(void) = NULL;
#define OLOGSIZE 20
/* Diagnostics dumped by show_floppy(): last handler, timing of the last
 * interrupt/result, and a ring buffer of bytes sent to the FDC. */
static void (*lasthandler)(void);
static unsigned long interruptjiffies;
static unsigned long resultjiffies;
static int resultsize;
static unsigned long lastredo;
static struct output_log {
	unsigned char data;
	unsigned char status;
	unsigned long jiffies;
} output_log[OLOGSIZE];
static int output_log_pos;
/* Special "drive" arguments for reschedule_timeout(). */
#define current_reqD -1
#define MAXTIMEOUT -2
/*
 * Re-arm the shutdown watchdog. current_reqD means "use current_drive";
 * an out-of-range drive (e.g. MAXTIMEOUT) gets a long 20 s delay,
 * otherwise the per-drive timeout parameter is used.
 * Caller must hold floppy_lock (see reschedule_timeout()).
 */
static void __reschedule_timeout(int drive, const char *message)
{
	unsigned long delay;
	if (drive == current_reqD)
		drive = current_drive;
	if (drive < 0 || drive >= N_DRIVE) {
		delay = 20UL * HZ;
		drive = 0;	/* so UDP below indexes a valid drive */
	} else
		delay = UDP->timeout;
	mod_delayed_work(floppy_wq, &fd_timeout, delay);
	if (UDP->flags & FD_DEBUG)
		DPRINT("reschedule timeout %s\n", message);
	timeout_message = message;
}
/* Locked wrapper around __reschedule_timeout(). */
static void reschedule_timeout(int drive, const char *message)
{
	unsigned long flags;
	spin_lock_irqsave(&floppy_lock, flags);
	__reschedule_timeout(drive, message);
	spin_unlock_irqrestore(&floppy_lock, flags);
}
/* Clamp (a) from below / above by (b); note both assign back into a. */
#define INFBOUND(a, b) (a) = max_t(int, a, b)
#define SUPBOUND(a, b) (a) = min_t(int, a, b)
/*
* Bottom half floppy driver.
* ==========================
*
* This part of the file contains the code talking directly to the hardware,
* and also the main service loop (seek-configure-spinup-command)
*/
/*
* disk change.
* This routine is responsible for maintaining the FD_DISK_CHANGE flag,
* and the last_checked date.
*
* last_checked is the date of the last check which showed 'no disk change'
* FD_DISK_CHANGE is set under two conditions:
* 1. The floppy has been changed after some i/o to that floppy already
* took place.
* 2. No floppy disk is in the drive. This is done in order to ensure that
* requests are quickly flushed in case there is no disk in the drive. It
* follows that FD_DISK_CHANGE can only be cleared if there is a disk in
* the drive.
*
* For 1., maxblock is observed. Maxblock is 0 if no i/o has taken place yet.
* For 2., FD_DISK_NEWCHANGE is watched. FD_DISK_NEWCHANGE is cleared on
* each seek. If a disk is present, the disk change line should also be
* cleared on each seek. Thus, if FD_DISK_NEWCHANGE is clear, but the disk
* change line is set, this means either that no disk is in the drive, or
* that it has been removed since the last seek.
*
* This means that we really have a third possibility too:
* The floppy has been changed after the last seek.
*/
/*
 * Sample the disk-change line for @drive and maintain the change flags
 * (see the big comment above). Returns 1 if the line reports a change
 * (or no disk), 0 otherwise. The drive must currently be selected with
 * its motor on, otherwise the reading is meaningless — hence the
 * warnings below.
 */
static int disk_change(int drive)
{
	int fdc = FDC(drive);	/* shadows the global fdc on purpose */
	if (time_before(jiffies, UDRS->select_date + UDP->select_delay))
		DPRINT("WARNING disk change called early\n");
	if (!(FDCS->dor & (0x10 << UNIT(drive))) ||
	    (FDCS->dor & 3) != UNIT(drive) || fdc != FDC(drive)) {
		DPRINT("probing disk change on unselected drive\n");
		DPRINT("drive=%d fdc=%d dor=%x\n", drive, FDC(drive),
		       (unsigned int)FDCS->dor);
	}
	debug_dcl(UDP->flags,
		  "checking disk change line for drive %d\n", drive);
	debug_dcl(UDP->flags, "jiffies=%lu\n", jiffies);
	debug_dcl(UDP->flags, "disk change line=%x\n", fd_inb(FD_DIR) & 0x80);
	debug_dcl(UDP->flags, "flags=%lx\n", UDRS->flags);
	if (UDP->flags & FD_BROKEN_DCL)
		return test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
	/* XOR with UDP->flags: bit 0x80 of the per-drive flags apparently
	 * inverts the DCL polarity — NOTE(review): confirm against the
	 * FD_INVERTED_DCL definition elsewhere in the file. */
	if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80) {
		set_bit(FD_VERIFY_BIT, &UDRS->flags);
		/* verify write protection */
		if (UDRS->maxblock)	/* mark it changed */
			set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
		/* invalidate its geometry */
		if (UDRS->keep_data >= 0) {
			if ((UDP->flags & FTD_MSG) &&
			    current_type[drive] != NULL)
				DPRINT("Disk type is undefined after disk change\n");
			current_type[drive] = NULL;
			floppy_sizes[TOMINOR(drive)] = MAX_DISK_SIZE << 1;
		}
		return 1;
	} else {
		UDRS->last_checked = jiffies;
		clear_bit(FD_DISK_NEWCHANGE_BIT, &UDRS->flags);
	}
	return 0;
}
/*
 * A unit is selected in a DOR value when its motor-enable bit
 * (0x10 << unit) is set and the low two drive-select bits address
 * that same unit.
 */
static inline int is_selected(int dor, int unit)
{
	int motor_on = (dor & (0x10 << unit)) != 0;
	int unit_addressed = (dor & 3) == unit;

	return motor_on && unit_addressed;
}
/*
 * The FDC is ready to accept a new command byte when READY is set
 * while both DIR and DMA are clear in the main status register.
 */
static bool is_ready_state(int status)
{
	return (status & (STATUS_READY | STATUS_DIR | STATUS_DMA)) ==
		STATUS_READY;
}
/*
 * Update the Digital Output Register of @fdc:
 * new = (old & mask) | data.  Returns the previous DOR value, or -1 if
 * this FDC has no I/O base.  Deselecting a unit triggers a disk-change
 * check for it; selecting one stamps its select_date for the
 * select-delay bookkeeping.
 */
static int set_dor(int fdc, char mask, char data)
{
	unsigned char unit;
	unsigned char drive;
	unsigned char newdor;
	unsigned char olddor;
	if (FDCS->address == -1)
		return -1;
	olddor = FDCS->dor;
	newdor = (olddor & mask) | data;
	if (newdor != olddor) {
		unit = olddor & 0x3;
		if (is_selected(olddor, unit) && !is_selected(newdor, unit)) {
			/* unit being deselected: last chance to sample DCL */
			drive = REVDRIVE(fdc, unit);
			debug_dcl(UDP->flags,
				  "calling disk change from set_dor\n");
			disk_change(drive);
		}
		FDCS->dor = newdor;
		fd_outb(newdor, FD_DOR);
		unit = newdor & 0x3;
		if (!is_selected(olddor, unit) && is_selected(newdor, unit)) {
			drive = REVDRIVE(fdc, unit);
			UDRS->select_date = jiffies;
		}
	}
	return olddor;
}
/*
 * Pulse the current drive's motor bit off and back on.  Clears the
 * disk-change latch on certain drive/controller combinations (see the
 * caller in floppy_ready()).  Skipped when a select delay is
 * configured, since the pulse would then be unsafe/ineffective.
 */
static void twaddle(void)
{
	if (DP->select_delay)
		return;
	fd_outb(FDCS->dor & ~(0x10 << UNIT(current_drive)), FD_DOR);
	fd_outb(FDCS->dor, FD_DOR);
	DRS->select_date = jiffies;
}
/*
 * Reset all driver information about the current fdc.
 * This is needed after a reset, and after a raw command.
 * @mode nonzero forces every drive on this FDC to NEED_2_RECAL;
 * otherwise drives already marked NEED_1_RECAL keep that state.
 */
static void reset_fdc_info(int mode)
{
	int drive;
	FDCS->spec1 = FDCS->spec2 = -1;	/* force re-SPECIFY */
	FDCS->need_configure = 1;	/* force re-CONFIGURE */
	FDCS->perp_mode = 1;
	FDCS->rawcmd = 0;
	for (drive = 0; drive < N_DRIVE; drive++)
		if (FDC(drive) == fdc && (mode || UDRS->track != NEED_1_RECAL))
			UDRS->track = NEED_2_RECAL;
}
/* selects the fdc and drive, and enables the fdc's input/dma. */
static void set_fdc(int drive)
{
	if (drive >= 0 && drive < N_DRIVE) {
		fdc = FDC(drive);
		current_drive = drive;
	}
	/* out-of-range drive leaves the globals untouched; bail if the
	 * resulting fdc index is bogus */
	if (fdc != 1 && fdc != 0) {
		pr_info("bad fdc value\n");
		return;
	}
	set_dor(fdc, ~0, 8);		/* enable this FDC's DMA/IRQ gate */
#if N_FDC > 1
	set_dor(1 - fdc, ~8, 0);	/* and disable the other FDC's */
#endif
	if (FDCS->rawcmd == 2)
		reset_fdc_info(1);	/* a raw command clobbered the state */
	if (fd_inb(FD_STATUS) != STATUS_READY)
		FDCS->reset = 1;
}
/* locks the driver */
/*
 * Take the driver-wide lock (bit 0 of fdc_busy), arm the watchdog and
 * select @drive.  Returns 0 on success, -EINTR if interrupted while
 * waiting, -1 if called with a zero usage count (a driver bug).
 */
static int lock_fdc(int drive)
{
	if (WARN(atomic_read(&usage_count) == 0,
		 "Trying to lock fdc while usage count=0\n"))
		return -1;
	if (wait_event_interruptible(fdc_wait, !test_and_set_bit(0, &fdc_busy)))
		return -EINTR;
	command_status = FD_COMMAND_NONE;
	reschedule_timeout(drive, "lock fdc");
	set_fdc(drive);
	return 0;
}
/* unlocks the driver */
/*
 * Drop the driver-wide lock: clear all in-flight state (raw command,
 * continuation, pending handler and watchdog) and wake the next waiter.
 */
static void unlock_fdc(void)
{
	if (!test_bit(0, &fdc_busy))
		DPRINT("FDC access conflict!\n");
	raw_cmd = NULL;
	command_status = FD_COMMAND_NONE;
	cancel_delayed_work(&fd_timeout);
	do_floppy = NULL;
	cont = NULL;
	clear_bit(0, &fdc_busy);
	wake_up(&fdc_wait);
}
/* switches the motor off after a given timeout */
/* Timer callback; @nr is the drive number. Clears only the motor bit. */
static void motor_off_callback(unsigned long nr)
{
	unsigned char mask = ~(0x10 << UNIT(nr));
	set_dor(FDC(nr), mask, 0);
}
/* schedules motor off */
/*
 * Arm the per-drive motor-off timer.  When the drive's rotations-per-
 * second are known, the expiry is adjusted so the spindle stops at a
 * rotational position that minimizes the next spinup time.
 */
static void floppy_off(unsigned int drive)
{
	unsigned long volatile delta;
	int fdc = FDC(drive);	/* shadows the global fdc for FDCS below */
	if (!(FDCS->dor & (0x10 << UNIT(drive))))
		return;		/* motor already off */
	del_timer(motor_off_timer + drive);
	/* make spindle stop in a position which minimizes spinup time
	 * next time */
	if (UDP->rps) {
		delta = jiffies - UDRS->first_read_date + HZ -
		    UDP->spindown_offset;
		delta = ((delta * UDP->rps) % HZ) / UDP->rps;
		motor_off_timer[drive].expires =
		    jiffies + UDP->spindown - delta;
	}
	add_timer(motor_off_timer + drive);
}
/*
 * cycle through all N_DRIVE floppy drives, for disk change testing.
 * stopping at current drive. This is done before any long operation, to
 * be sure to have up to date disk change information.
 */
static void scandrives(void)
{
	int i;
	int drive;
	int saved_drive;
	if (DP->select_delay)
		return;		/* selection too slow to cycle through */
	saved_drive = current_drive;
	for (i = 0; i < N_DRIVE; i++) {
		drive = (saved_drive + i + 1) % N_DRIVE;
		if (UDRS->fd_ref == 0 || UDP->select_delay != 0)
			continue;	/* skip closed drives */
		set_fdc(drive);
		/* selecting a drive samples its DCL via set_dor() */
		if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) &
		      (0x10 << UNIT(drive))))
			/* switch the motor off again, if it was off to
			 * begin with */
			set_dor(fdc, ~(0x10 << UNIT(drive)), 0);
	}
	set_fdc(saved_drive);	/* restore original selection */
}
/* Do-nothing continuation callback. */
static void empty(void)
{
}
/* Indirection so the single floppy_work item can run any handler. */
static void (*floppy_work_fn)(void);
static void floppy_work_workfn(struct work_struct *work)
{
	floppy_work_fn();
}
static DECLARE_WORK(floppy_work, floppy_work_workfn);
/* Run @handler from workqueue context ("bottom half"). Only one may be
 * pending at a time — hence the WARN_ON. */
static void schedule_bh(void (*handler)(void))
{
	WARN_ON(work_pending(&floppy_work));
	floppy_work_fn = handler;
	queue_work(floppy_wq, &floppy_work);
}
/* Same indirection for the delayed (timer-like) work item. */
static void (*fd_timer_fn)(void) = NULL;
static void fd_timer_workfn(struct work_struct *work)
{
	fd_timer_fn();
}
static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn);
/* Stop everything in flight: pending handler, timer work, bh work. */
static void cancel_activity(void)
{
	do_floppy = NULL;
	cancel_delayed_work_sync(&fd_timer);
	cancel_work_sync(&floppy_work);
}
/* this function makes sure that the disk stays in the drive during the
 * transfer */
/*
 * Poll the disk-change line every HZ/10 while a transfer runs; abort
 * the operation and reset the FDC if the disk disappears.
 */
static void fd_watchdog(void)
{
	debug_dcl(DP->flags, "calling disk change from watchdog\n");
	if (disk_change(current_drive)) {
		DPRINT("disk removed during i/o\n");
		cancel_activity();
		cont->done(0);
		reset_fdc();
	} else {
		/* re-arm ourselves */
		cancel_delayed_work(&fd_timer);
		fd_timer_fn = fd_watchdog;
		queue_delayed_work(floppy_wq, &fd_timer, HZ / 10);
	}
}
/* Interrupt stage of the main command: stop the watchdog and hand off
 * to the current continuation. */
static void main_command_interrupt(void)
{
	cancel_delayed_work(&fd_timer);
	cont->interrupt();
}
/* waits for a delay (spinup or select) to pass */
/*
 * If @expires is still in the future, schedule @function to run then
 * and return 1 (caller must back out and wait).  Returns 0 when the
 * delay has already passed.  A pending FDC reset is serviced here too,
 * also returning 1.
 */
static int fd_wait_for_completion(unsigned long expires,
				  void (*function)(void))
{
	if (FDCS->reset) {
		reset_fdc();	/* do the reset during sleep to win time
				 * if we don't need to sleep, it's a good
				 * occasion anyways */
		return 1;
	}
	if (time_before(jiffies, expires)) {
		cancel_delayed_work(&fd_timer);
		fd_timer_fn = function;
		queue_delayed_work(floppy_wq, &fd_timer, expires - jiffies);
		return 1;
	}
	return 0;
}
/*
 * Program the DMA controller for the current raw command.  Sanity-
 * checks the transfer first (non-zero length, 512-byte aligned buffer);
 * on any failure the operation is failed via cont->done(0) and the FDC
 * flagged for reset.
 */
static void setup_DMA(void)
{
	unsigned long f;
	if (raw_cmd->length == 0) {
		int i;
		pr_info("zero dma transfer size:");
		for (i = 0; i < raw_cmd->cmd_count; i++)
			pr_cont("%x,", raw_cmd->cmd[i]);
		pr_cont("\n");
		cont->done(0);
		FDCS->reset = 1;
		return;
	}
	if (((unsigned long)raw_cmd->kernel_data) % 512) {
		pr_info("non aligned address: %p\n", raw_cmd->kernel_data);
		cont->done(0);
		FDCS->reset = 1;
		return;
	}
	f = claim_dma_lock();
	fd_disable_dma();
#ifdef fd_dma_setup
	/* arch provides a one-shot setup helper */
	if (fd_dma_setup(raw_cmd->kernel_data, raw_cmd->length,
			 (raw_cmd->flags & FD_RAW_READ) ?
			 DMA_MODE_READ : DMA_MODE_WRITE, FDCS->address) < 0) {
		release_dma_lock(f);
		cont->done(0);
		FDCS->reset = 1;
		return;
	}
	release_dma_lock(f);
#else
	/* classic ISA DMA programming sequence */
	fd_clear_dma_ff();
	fd_cacheflush(raw_cmd->kernel_data, raw_cmd->length);
	fd_set_dma_mode((raw_cmd->flags & FD_RAW_READ) ?
			DMA_MODE_READ : DMA_MODE_WRITE);
	fd_set_dma_addr(raw_cmd->kernel_data);
	fd_set_dma_count(raw_cmd->length);
	virtual_dma_port = FDCS->address;
	fd_enable_dma();
	release_dma_lock(f);
#endif
}
static void show_floppy(void);
/* waits until the fdc becomes ready */
/*
 * Busy-poll the main status register (up to 10000 reads) until
 * STATUS_READY is set.  Returns the status byte, or -1 on timeout or
 * pending reset (and flags the FDC for reset on timeout).
 */
static int wait_til_ready(void)
{
	int status;
	int counter;
	if (FDCS->reset)
		return -1;
	for (counter = 0; counter < 10000; counter++) {
		status = fd_inb(FD_STATUS);
		if (status & STATUS_READY)
			return status;
	}
	if (initialized) {
		DPRINT("Getstatus times out (%x) on fdc %d\n", status, fdc);
		show_floppy();
	}
	FDCS->reset = 1;
	return -1;
}
/* sends a command byte to the fdc */
/*
 * Write one command byte to the data register once the FDC reports
 * ready-for-input; each byte is also recorded in the output_log ring
 * for post-mortem debugging.  Returns 0 on success, -1 on failure
 * (FDC is then flagged for reset).
 */
static int output_byte(char byte)
{
	int status = wait_til_ready();
	if (status < 0)
		return -1;
	if (is_ready_state(status)) {
		fd_outb(byte, FD_DATA);
		output_log[output_log_pos].data = byte;
		output_log[output_log_pos].status = status;
		output_log[output_log_pos].jiffies = jiffies;
		output_log_pos = (output_log_pos + 1) % OLOGSIZE;
		return 0;
	}
	FDCS->reset = 1;
	if (initialized) {
		DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n",
		       byte, fdc, status);
		show_floppy();
	}
	return -1;
}
/* gets the response from the fdc */
/*
 * Read the FDC's result phase into reply_buffer[].  Returns the number
 * of bytes read (possibly 0), or -1 on protocol error, in which case
 * the FDC is flagged for reset.
 */
static int result(void)
{
	int i;
	int status = 0;
	for (i = 0; i < MAX_REPLIES; i++) {
		status = wait_til_ready();
		if (status < 0)
			break;
		status &= STATUS_DIR | STATUS_READY | STATUS_BUSY | STATUS_DMA;
		if ((status & ~STATUS_BUSY) == STATUS_READY) {
			/* result phase over: READY with DIR clear */
			resultjiffies = jiffies;
			resultsize = i;
			return i;
		}
		if (status == (STATUS_DIR | STATUS_READY | STATUS_BUSY))
			reply_buffer[i] = fd_inb(FD_DATA);
		else
			break;	/* unexpected status combination */
	}
	if (initialized) {
		DPRINT("get result error. Fdc=%d Last status=%x Read bytes=%d\n",
		       fdc, status, i);
		show_floppy();
	}
	FDCS->reset = 1;
	return -1;
}
#define MORE_OUTPUT -2
/* does the fdc need more output? */
/*
 * Returns MORE_OUTPUT if the FDC is waiting for further command bytes,
 * -1 on error, or the result() byte count if it has instead entered
 * the result phase.
 */
static int need_more_output(void)
{
	int status = wait_til_ready();
	if (status < 0)
		return -1;
	if (is_ready_state(status))
		return MORE_OUTPUT;
	return result();
}
/* Set perpendicular mode as required, based on data rate, if supported.
 * 82077 Now tested. 1Mbps data rate only possible with 82077-1.
 */
/*
 * Bit 0x40 in raw_cmd->rate requests perpendicular recording; the low
 * two bits select the data rate (0 = 500 kbps -> mode 2,
 * 3 = 1 Mbps -> mode 3; anything else is invalid).  The PERPENDICULAR
 * command is only issued when the mode actually changes and the chip
 * supports it.
 */
static void perpendicular_mode(void)
{
	unsigned char perp_mode;
	if (raw_cmd->rate & 0x40) {
		switch (raw_cmd->rate & 3) {
		case 0:
			perp_mode = 2;
			break;
		case 3:
			perp_mode = 3;
			break;
		default:
			DPRINT("Invalid data rate for perpendicular mode!\n");
			cont->done(0);
			FDCS->reset = 1;
					/*
					 * convenient way to return to
					 * redo without too much hassle
					 * (deep stack et al.)
					 */
			return;
		}
	} else
		perp_mode = 0;
	if (FDCS->perp_mode == perp_mode)
		return;
	if (FDCS->version >= FDC_82077_ORIG) {
		output_byte(FD_PERPENDICULAR);
		output_byte(perp_mode);
		FDCS->perp_mode = perp_mode;
	} else if (perp_mode) {
		DPRINT("perpendicular mode not supported by this FDC.\n");
	}
}			/* perpendicular_mode */
/* Module parameters controlling the CONFIGURE command below. */
static int fifo_depth = 0xa;
static int no_fifo;
/*
 * Issue the CONFIGURE command (enable implied seek, set FIFO depth and
 * the no-FIFO bit).  Returns 1 on success, 0 if the chip rejected the
 * command (i.e. it never asked for the parameter bytes).
 */
static int fdc_configure(void)
{
	/* Turn on FIFO */
	output_byte(FD_CONFIGURE);
	if (need_more_output() != MORE_OUTPUT)
		return 0;
	output_byte(0);
	output_byte(0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf));
	output_byte(0);	/* pre-compensation from track
			   0 upwards */
	return 1;
}
#define NOMINAL_DTR 500
/* Issue a "SPECIFY" command to set the step rate time, head unload time,
 * head load time, and DMA disable flag to values needed by floppy.
 *
 * The value "dtr" is the data transfer rate in Kbps. It is needed
 * to account for the data rate-based scaling done by the 82072 and 82077
 * FDC types. This parameter is ignored for other types of FDCs (i.e.
 * 8272a).
 *
 * Note that changing the data transfer rate has a (probably deleterious)
 * effect on the parameters subject to scaling for 82072/82077 FDCs, so
 * fdc_specify is called again after each data transfer rate
 * change.
 *
 * srt: 1000 to 16000 in microseconds
 * hut: 16 to 240 milliseconds
 * hlt: 2 to 254 milliseconds
 *
 * These values are rounded up to the next highest available delay time.
 */
static void fdc_specify(void)
{
	unsigned char spec1;
	unsigned char spec2;
	unsigned long srt;
	unsigned long hlt;
	unsigned long hut;
	unsigned long dtr = NOMINAL_DTR;
	unsigned long scale_dtr = NOMINAL_DTR;
	int hlt_max_code = 0x7f;
	int hut_max_code = 0xf;
	/* make sure the chip is configured before the first SPECIFY */
	if (FDCS->need_configure && FDCS->version >= FDC_82072A) {
		fdc_configure();
		FDCS->need_configure = 0;
	}
	switch (raw_cmd->rate & 0x03) {
	case 3:
		dtr = 1000;
		break;
	case 1:
		dtr = 300;
		if (FDCS->version >= FDC_82078) {
			/* chose the default rate table, not the one
			 * where 1 = 2 Mbps */
			output_byte(FD_DRIVESPEC);
			if (need_more_output() == MORE_OUTPUT) {
				output_byte(UNIT(current_drive));
				output_byte(0xc0);
			}
		}
		break;
	case 2:
		dtr = 250;
		break;
	}
	if (FDCS->version >= FDC_82072) {
		scale_dtr = dtr;
		hlt_max_code = 0x00;	/* 0==256msec*dtr0/dtr (not linear!) */
		hut_max_code = 0x0;	/* 0==256msec*dtr0/dtr (not linear!) */
	}
	/* Convert step rate from microseconds to milliseconds and 4 bits */
	srt = 16 - DIV_ROUND_UP(DP->srt * scale_dtr / 1000, NOMINAL_DTR);
	if (slow_floppy)
		srt = srt / 4;
	SUPBOUND(srt, 0xf);
	INFBOUND(srt, 0);
	hlt = DIV_ROUND_UP(DP->hlt * scale_dtr / 2, NOMINAL_DTR);
	if (hlt < 0x01)
		hlt = 0x01;
	else if (hlt > 0x7f)
		hlt = hlt_max_code;
	hut = DIV_ROUND_UP(DP->hut * scale_dtr / 16, NOMINAL_DTR);
	if (hut < 0x1)
		hut = 0x1;
	else if (hut > 0xf)
		hut = hut_max_code;
	/* pack: spec1 = SRT<<4 | HUT, spec2 = HLT<<1 | non-DMA bit */
	spec1 = (srt << 4) | hut;
	spec2 = (hlt << 1) | (use_virtual_dma & 1);
	/* If these parameters did not change, just return with success */
	if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) {
		/* Go ahead and set spec1 and spec2 */
		output_byte(FD_SPECIFY);
		output_byte(FDCS->spec1 = spec1);
		output_byte(FDCS->spec2 = spec2);
	}
}			/* fdc_specify */
/* Set the FDC's data transfer rate on behalf of the specified drive.
 * NOTE: with 82072/82077 FDCs, changing the data rate requires a reissue
 * of the specify command (i.e. using the fdc_specify function).
 */
/*
 * Returns 0 if the rate was already correct; otherwise programs the DCR
 * and returns nonzero after scheduling a ~20 ms stabilization wait via
 * fd_wait_for_completion (which re-enters floppy_ready).
 */
static int fdc_dtr(void)
{
	/* If data rate not already set to desired value, set it. */
	if ((raw_cmd->rate & 3) == FDCS->dtr)
		return 0;
	/* Set dtr */
	fd_outb(raw_cmd->rate & 3, FD_DCR);
	/* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB)
	 * need a stabilization period of several milliseconds to be
	 * enforced after data rate changes before R/W operations.
	 * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
	 */
	FDCS->dtr = raw_cmd->rate & 3;
	return fd_wait_for_completion(jiffies + 2UL * HZ / 100, floppy_ready);
}			/* fdc_dtr */
/* Append the CHS position from the last FDC reply to the current
 * console line (continuation of a DPRINT in print_errors()). */
static void tell_sector(void)
{
	pr_cont(": track %d, head %d, sector %d, size %d",
		R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE);
}			/* tell_sector */
/* Decode the ST0/ST1/ST2 status bytes of a failed operation into a
 * human-readable console message. */
static void print_errors(void)
{
	DPRINT("");
	if (ST0 & ST0_ECE) {
		pr_cont("Recalibrate failed!");
	} else if (ST2 & ST2_CRC) {
		pr_cont("data CRC error");
		tell_sector();
	} else if (ST1 & ST1_CRC) {
		pr_cont("CRC error");
		tell_sector();
	} else if ((ST1 & (ST1_MAM | ST1_ND)) ||
		   (ST2 & ST2_MAM)) {
		/* missing address mark / no data: expected during probing */
		if (!probing) {
			pr_cont("sector not found");
			tell_sector();
		} else
			pr_cont("probe failed...");
	} else if (ST2 & ST2_WC) {	/* seek error */
		pr_cont("wrong cylinder");
	} else if (ST2 & ST2_BC) {	/* cylinder marked as bad */
		pr_cont("bad cylinder");
	} else {
		pr_cont("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x",
			ST0, ST1, ST2);
		tell_sector();
	}
	pr_cont("\n");
}
/*
 * OK, this error interpreting routine is called after a
 * DMA read/write has succeeded
 * or failed, so we check the results, and copy any buffers.
 * hhb: Added better error reporting.
 * ak: Made this into a separate routine.
 */
/*
 * Returns 0 on success, 1 on a retryable error, 2 on a fatal error
 * (the operation has already been failed via cont->done(0)).
 */
static int interpret_errors(void)
{
	char bad;
	if (inr != 7) {
		/* a complete result phase is always 7 bytes */
		DPRINT("-- FDC reply error\n");
		FDCS->reset = 1;
		return 1;
	}
	/* check IC to find cause of interrupt */
	switch (ST0 & ST0_INTR) {
	case 0x40:		/* error occurred during command execution */
		if (ST1 & ST1_EOC)
			return 0;	/* occurs with pseudo-DMA */
		bad = 1;
		if (ST1 & ST1_WP) {
			DPRINT("Drive is write protected\n");
			clear_bit(FD_DISK_WRITABLE_BIT, &DRS->flags);
			cont->done(0);
			bad = 2;
		} else if (ST1 & ST1_ND) {
			set_bit(FD_NEED_TWADDLE_BIT, &DRS->flags);
		} else if (ST1 & ST1_OR) {
			if (DP->flags & FTD_MSG)
				DPRINT("Over/Underrun - retrying\n");
			bad = 0;	/* DMA overrun: plain retry */
		} else if (*errors >= DP->max_errors.reporting) {
			print_errors();
		}
		if (ST2 & ST2_WC || ST2 & ST2_BC)
			/* wrong cylinder => recal */
			DRS->track = NEED_2_RECAL;
		return bad;
	case 0x80:		/* invalid command given */
		DPRINT("Invalid FDC command given!\n");
		cont->done(0);
		return 2;
	case 0xc0:
		DPRINT("Abnormal termination caused by polling\n");
		cont->error();
		return 2;
	default:		/* (0) Normal command termination */
		return 0;
	}
}
/*
 * This routine is called when everything should be correctly set up
 * for the transfer (i.e. floppy motor is on, the correct floppy is
 * selected, and the head is sitting on the right track).
 */
static void setup_rw_floppy(void)
{
	int i;
	int r;
	int flags;
	int dflags;
	unsigned long ready_date;
	void (*function)(void);
	flags = raw_cmd->flags;
	if (flags & (FD_RAW_READ | FD_RAW_WRITE))
		flags |= FD_RAW_INTR;	/* data transfers always complete
					 * via interrupt */
	if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)) {
		ready_date = DRS->spinup_date + DP->spinup;
		/* If spinup will take a long time, rerun scandrives
		 * again just before spinup completion. Beware that
		 * after scandrives, we must again wait for selection.
		 */
		if (time_after(ready_date, jiffies + DP->select_delay)) {
			ready_date -= DP->select_delay;
			function = floppy_start;
		} else
			function = setup_rw_floppy;
		/* wait until the floppy is spinning fast enough */
		if (fd_wait_for_completion(ready_date, function))
			return;
	}
	dflags = DRS->flags;
	if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
		setup_DMA();
	if (flags & FD_RAW_INTR)
		do_floppy = main_command_interrupt;
	/* push the whole command sequence; r accumulates any failure */
	r = 0;
	for (i = 0; i < raw_cmd->cmd_count; i++)
		r |= output_byte(raw_cmd->cmd[i]);
	debugt(__func__, "rw_command");
	if (r) {
		cont->error();
		reset_fdc();
		return;
	}
	if (!(flags & FD_RAW_INTR)) {
		/* no interrupt expected: read the result synchronously */
		inr = result();
		cont->interrupt();
	} else if (flags & FD_RAW_NEED_DISK)
		fd_watchdog();	/* watch for disk removal mid-transfer */
}
/* Set when we seek with the drive deselected to silently clear DCL;
 * ST1 is then not a trustworthy track number. */
static int blind_seek;
/*
 * This is the routine called after every seek (or recalibrate) interrupt
 * from the floppy controller.
 */
static void seek_interrupt(void)
{
	debugt(__func__, "");
	/* expect a 2-byte SENSEI result with the seek-end bit pattern */
	if (inr != 2 || (ST0 & 0xF8) != 0x20) {
		DPRINT("seek failed\n");
		DRS->track = NEED_2_RECAL;
		cont->error();
		cont->redo();
		return;
	}
	if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek) {
		debug_dcl(DP->flags,
			  "clearing NEWCHANGE flag because of effective seek\n");
		debug_dcl(DP->flags, "jiffies=%lu\n", jiffies);
		clear_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
					/* effective seek */
		DRS->select_date = jiffies;
	}
	DRS->track = ST1;	/* new head position reported by the FDC */
	floppy_ready();
}
/*
 * If the drive is flagged for verification, issue GETSTATUS and update
 * the writable bit from ST3 (bit 0x40 = write protected).
 */
static void check_wp(void)
{
	if (test_bit(FD_VERIFY_BIT, &DRS->flags)) {
					/* check write protection */
		output_byte(FD_GETSTATUS);
		output_byte(UNIT(current_drive));
		if (result() != 1) {
			FDCS->reset = 1;
			return;
		}
		clear_bit(FD_VERIFY_BIT, &DRS->flags);
		clear_bit(FD_NEED_TWADDLE_BIT, &DRS->flags);
		debug_dcl(DP->flags,
			  "checking whether disk is write protected\n");
		debug_dcl(DP->flags, "wp=%x\n", ST3 & 0x40);
		if (!(ST3 & 0x40))
			set_bit(FD_DISK_WRITABLE_BIT, &DRS->flags);
		else
			clear_bit(FD_DISK_WRITABLE_BIT, &DRS->flags);
	}
}
/*
 * Decide whether a SEEK is needed before the current raw command, and
 * issue it.  Handles three special cases: the drive needs recalibration
 * first; a dummy seek is needed just to clear the media-changed latch;
 * or no seek is needed at all, in which case we go straight to
 * setup_rw_floppy().
 */
static void seek_floppy(void)
{
	int track;
	blind_seek = 0;
	debug_dcl(DP->flags, "calling disk change from %s\n", __func__);
	if (!test_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags) &&
	    disk_change(current_drive) && (raw_cmd->flags & FD_RAW_NEED_DISK)) {
		/* the media changed flag should be cleared after the seek.
		 * If it isn't, this means that there is really no disk in
		 * the drive.
		 */
		set_bit(FD_DISK_CHANGED_BIT, &DRS->flags);
		cont->done(0);
		cont->redo();
		return;
	}
	if (DRS->track <= NEED_1_RECAL) {
		recalibrate_floppy();
		return;
	} else if (test_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags) &&
		   (raw_cmd->flags & FD_RAW_NEED_DISK) &&
		   (DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) {
		/* we seek to clear the media-changed condition. Does anybody
		 * know a more elegant way, which works on all drives? */
		if (raw_cmd->track)
			track = raw_cmd->track - 1;
		else {
			if (DP->flags & FD_SILENT_DCL_CLEAR) {
				/* deselect so the dummy seek is inaudible */
				set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0);
				blind_seek = 1;
				raw_cmd->flags |= FD_RAW_NEED_SEEK;
			}
			track = 1;
		}
	} else {
		check_wp();
		if (raw_cmd->track != DRS->track &&
		    (raw_cmd->flags & FD_RAW_NEED_SEEK))
			track = raw_cmd->track;
		else {
			setup_rw_floppy();
			return;
		}
	}
	do_floppy = seek_interrupt;
	output_byte(FD_SEEK);
	output_byte(UNIT(current_drive));
	if (output_byte(track) < 0) {
		reset_fdc();
		return;
	}
	debugt(__func__, "");
}
/*
 * Completion handler for RECALIBRATE.  ST0_ECE means the head did not
 * reach track 0 within the command's step budget; the drive's track
 * field tracks how many recalibration attempts remain.
 */
static void recal_interrupt(void)
{
	debugt(__func__, "");
	if (inr != 2)
		FDCS->reset = 1;	/* malformed SENSEI reply */
	else if (ST0 & ST0_ECE) {
		switch (DRS->track) {
		case NEED_1_RECAL:
			debugt(__func__, "need 1 recal");
			/* after a second recalibrate, we still haven't
			 * reached track 0. Probably no drive. Raise an
			 * error, as failing immediately might upset
			 * computers possessed by the Devil :-) */
			cont->error();
			cont->redo();
			return;
		case NEED_2_RECAL:
			debugt(__func__, "need 2 recal");
			/* If we already did a recalibrate,
			 * and we are not at track 0, this
			 * means we have moved. (The only way
			 * not to move at recalibration is to
			 * be already at track 0.) Clear the
			 * new change flag */
			debug_dcl(DP->flags,
				  "clearing NEWCHANGE flag because of second recalibrate\n");
			clear_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
			DRS->select_date = jiffies;
			/* fall through */
		default:
			debugt(__func__, "default");
			/* Recalibrate moves the head by at
			 * most 80 steps. If after one
			 * recalibrate we don't have reached
			 * track 0, this might mean that we
			 * started beyond track 80. Try
			 * again. */
			DRS->track = NEED_1_RECAL;
			break;
		}
	} else
		DRS->track = ST1;	/* head position from the FDC */
	floppy_ready();
}
/* Dump the @inr reply bytes currently in reply_buffer[], prefixed by
 * @message. A negative @inr prints just the message. */
static void print_result(char *message, int inr)
{
	int i;
	DPRINT("%s ", message);
	if (inr >= 0)
		for (i = 0; i < inr; i++)
			pr_cont("repl[%d]=%x ", i, reply_buffer[i]);
	pr_cont("\n");
}
/* interrupt handler. Note that this can be called externally on the Sparc */
/*
 * Top-half IRQ handler: disables DMA, reads any result phase, drains
 * drive-polling interrupts with SENSEI, and defers the real work
 * (whatever do_floppy pointed at) to the workqueue via schedule_bh().
 * An interrupt with no expected handler flags the FDC for reset.
 */
irqreturn_t floppy_interrupt(int irq, void *dev_id)
{
	int do_print;
	unsigned long f;
	void (*handler)(void) = do_floppy;
	lasthandler = handler;
	interruptjiffies = jiffies;
	f = claim_dma_lock();
	fd_disable_dma();
	release_dma_lock(f);
	do_floppy = NULL;
	if (fdc >= N_FDC || FDCS->address == -1) {
		/* we don't even know which FDC is the culprit */
		pr_info("DOR0=%x\n", fdc_state[0].dor);
		pr_info("floppy interrupt on bizarre fdc %d\n", fdc);
		pr_info("handler=%pf\n", handler);
		is_alive(__func__, "bizarre fdc");
		return IRQ_NONE;
	}
	FDCS->reset = 0;
	/* We have to clear the reset flag here, because apparently on boxes
	 * with level triggered interrupts (PS/2, Sparc, ...), it is needed to
	 * emit SENSEI's to clear the interrupt line. And FDCS->reset blocks the
	 * emission of the SENSEI's.
	 * It is OK to emit floppy commands because we are in an interrupt
	 * handler here, and thus we have to fear no interference of other
	 * activity.
	 */
	do_print = !handler && print_unex && initialized;
	inr = result();
	if (do_print)
		print_result("unexpected interrupt", inr);
	if (inr == 0) {
		/* empty result: drive-polling interrupt; acknowledge with
		 * SENSEI until our unit answers (at most 4 times) */
		int max_sensei = 4;
		do {
			output_byte(FD_SENSEI);
			inr = result();
			if (do_print)
				print_result("sensei", inr);
			max_sensei--;
		} while ((ST0 & 0x83) != UNIT(current_drive) &&
			 inr == 2 && max_sensei);
	}
	if (!handler) {
		FDCS->reset = 1;
		return IRQ_NONE;
	}
	schedule_bh(handler);
	is_alive(__func__, "normal interrupt end");
	/* FIXME! Was it really for us? */
	return IRQ_HANDLED;
}
/* Issue RECALIBRATE for the current drive; completion arrives via
 * recal_interrupt(). */
static void recalibrate_floppy(void)
{
	debugt(__func__, "");
	do_floppy = recal_interrupt;
	output_byte(FD_RECALIBRATE);
	if (output_byte(UNIT(current_drive)) < 0)
		reset_fdc();
}
/*
 * Must do 4 FD_SENSEIs after reset because of ``drive polling''.
 */
/* Runs when the post-reset interrupt arrives; restarts the pending
 * operation through the continuation's redo(). */
static void reset_interrupt(void)
{
	debugt(__func__, "");
	result();		/* get the status ready for set_fdc */
	if (FDCS->reset) {
		pr_info("reset set in interrupt, calling %pf\n", cont->error);
		cont->error();	/* a reset just after a reset. BAD! */
	}
	cont->redo();
}
/*
 * reset is done by pulling bit 2 of DOR low for a while (old FDCs),
 * or by setting the self clearing bit 7 of STATUS (newer FDCs)
 */
static void reset_fdc(void)
{
	unsigned long flags;
	do_floppy = reset_interrupt;	/* completion comes as an IRQ */
	FDCS->reset = 0;
	reset_fdc_info(0);
	/* Pseudo-DMA may intercept 'reset finished' interrupt. */
	/* Irrelevant for systems with true DMA (i386).         */
	flags = claim_dma_lock();
	fd_disable_dma();
	release_dma_lock(flags);
	if (FDCS->version >= FDC_82072A)
		fd_outb(0x80 | (FDCS->dtr & 3), FD_STATUS);
	else {
		fd_outb(FDCS->dor & ~0x04, FD_DOR);
		udelay(FD_RESET_DELAY);
		fd_outb(FDCS->dor, FD_DOR);
	}
}
/* Dump the driver's internal state to the console for debugging
 * (called on timeouts and protocol errors). */
static void show_floppy(void)
{
	int i;
	pr_info("\n");
	pr_info("floppy driver state\n");
	pr_info("-------------------\n");
	pr_info("now=%lu last interrupt=%lu diff=%lu last called handler=%pf\n",
		jiffies, interruptjiffies, jiffies - interruptjiffies,
		lasthandler);
	pr_info("timeout_message=%s\n", timeout_message);
	pr_info("last output bytes:\n");
	/* replay the output_log ring in chronological order */
	for (i = 0; i < OLOGSIZE; i++)
		pr_info("%2x %2x %lu\n",
			output_log[(i + output_log_pos) % OLOGSIZE].data,
			output_log[(i + output_log_pos) % OLOGSIZE].status,
			output_log[(i + output_log_pos) % OLOGSIZE].jiffies);
	pr_info("last result at %lu\n", resultjiffies);
	pr_info("last redo_fd_request at %lu\n", lastredo);
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
		       reply_buffer, resultsize, true);
	pr_info("status=%x\n", fd_inb(FD_STATUS));
	pr_info("fdc_busy=%lu\n", fdc_busy);
	if (do_floppy)
		pr_info("do_floppy=%pf\n", do_floppy);
	if (work_pending(&floppy_work))
		pr_info("floppy_work.func=%pf\n", floppy_work.func);
	if (delayed_work_pending(&fd_timer))
		pr_info("delayed work.function=%p expires=%ld\n",
			fd_timer.work.func,
			fd_timer.timer.expires - jiffies);
	if (delayed_work_pending(&fd_timeout))
		pr_info("timer_function=%p expires=%ld\n",
			fd_timeout.work.func,
			fd_timeout.timer.expires - jiffies);
	pr_info("cont=%p\n", cont);
	pr_info("current_req=%p\n", current_req);
	pr_info("command_status=%d\n", command_status);
	pr_info("\n");
}
/*
 * Watchdog handler (fd_timeout expired): the current operation hung.
 * Stop all activity, flag the FDC for reset and fail/retry the
 * operation through its continuation.
 */
static void floppy_shutdown(struct work_struct *arg)
{
	unsigned long flags;
	if (initialized)
		show_floppy();
	cancel_activity();
	flags = claim_dma_lock();
	fd_disable_dma();
	release_dma_lock(flags);
	/* avoid dma going to a random drive after shutdown */
	if (initialized)
		DPRINT("floppy timeout called\n");
	FDCS->reset = 1;
	if (cont) {
		cont->done(0);
		cont->redo();	/* this will recall reset when needed */
	} else {
		pr_info("no cont in shutdown!\n");
		process_fd_request();
	}
	is_alive(__func__, "");
}
/* start motor, check media-changed condition and write protection */
/*
 * Select the current drive (and start its motor unless FD_RAW_NO_MOTOR),
 * then wait out the select delay.  Returns nonzero if @function has
 * been scheduled to run after the delay (caller must back out).
 */
static int start_motor(void (*function)(void))
{
	int mask;
	int data;
	mask = 0xfc;	/* keep motor bits, replace the select bits */
	data = UNIT(current_drive);
	if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)) {
		if (!(FDCS->dor & (0x10 << UNIT(current_drive)))) {
			set_debugt();
			/* no read since this drive is running */
			DRS->first_read_date = 0;
			/* note motor start time if motor is not yet running */
			DRS->spinup_date = jiffies;
			data |= (0x10 << UNIT(current_drive));
		}
	} else if (FDCS->dor & (0x10 << UNIT(current_drive)))
		mask &= ~(0x10 << UNIT(current_drive));
	/* starts motor and selects floppy */
	del_timer(motor_off_timer + current_drive);
	set_dor(fdc, mask, data);
	/* wait_for_completion also schedules reset if needed. */
	return fd_wait_for_completion(DRS->select_date + DP->select_delay,
				      function);
}
/*
 * Central step of the service loop: once motor/selection/data-rate are
 * settled, configure perpendicular mode and SPECIFY parameters, then
 * either seek or go straight to the transfer.  Re-entered via the
 * fd_timer after every wait it schedules.
 */
static void floppy_ready(void)
{
	if (FDCS->reset) {
		reset_fdc();
		return;
	}
	if (start_motor(floppy_ready))
		return;		/* will be re-entered after the delay */
	if (fdc_dtr())
		return;		/* ditto, after rate stabilization */
	debug_dcl(DP->flags, "calling disk change from floppy_ready\n");
	if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) &&
	    disk_change(current_drive) && !DP->select_delay)
		twaddle();	/* this clears the dcl on certain
				 * drive/controller combinations */
#ifdef fd_chose_dma_mode
	if ((raw_cmd->flags & FD_RAW_READ) || (raw_cmd->flags & FD_RAW_WRITE)) {
		unsigned long flags = claim_dma_lock();
		fd_chose_dma_mode(raw_cmd->kernel_data, raw_cmd->length);
		release_dma_lock(flags);
	}
#endif
	if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)) {
		perpendicular_mode();
		fdc_specify();	/* must be done here because of hut, hlt ... */
		seek_floppy();
	} else {
		if ((raw_cmd->flags & FD_RAW_READ) ||
		    (raw_cmd->flags & FD_RAW_WRITE))
			fdc_specify();
		setup_rw_floppy();
	}
}
/* Entry point of the service loop: refresh disk-change info on all
 * drives, then fall into floppy_ready(). */
static void floppy_start(void)
{
	reschedule_timeout(current_reqD, "floppy start");
	scandrives();
	debug_dcl(DP->flags, "setting NEWCHANGE in floppy_start\n");
	set_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);
	floppy_ready();
}
/*
* ========================================================================
* here ends the bottom half. Exported routines are:
* floppy_start, floppy_off, floppy_ready, lock_fdc, unlock_fdc, set_fdc,
* start_motor, reset_fdc, reset_fdc_info, interpret_errors.
* Initialization also uses output_byte, result, set_dor, floppy_interrupt
* and set_dor.
* ========================================================================
*/
/*
* General purpose continuations.
* ==============================
*/
/*
 * Terminal continuation: stop the short command timeout, mark the
 * command finished (command_status >= 2) and wake the task sleeping
 * in wait_til_done().
 */
static void do_wakeup(void)
{
	reschedule_timeout(MAXTIMEOUT, "do wakeup");
	cont = NULL;
	command_status += 2;
	wake_up(&command_done);
}
/* Continuation that just wakes the waiter on the next redo. */
static const struct cont_t wakeup_cont = {
	.interrupt = empty,
	.redo = do_wakeup,
	.error = empty,
	.done = (done_f)empty
};
/* Continuation used after an interrupted wait: resume request processing. */
static const struct cont_t intr_cont = {
	.interrupt = empty,
	.redo = process_fd_request,
	.error = empty,
	.done = (done_f)empty
};
static int wait_til_done(void (*handler)(void), bool interruptible)
{
int ret;
schedule_bh(handler);
if (interruptible)
wait_event_interruptible(command_done, command_status >= 2);
else
wait_event(command_done, command_status >= 2);
if (command_status < 2) {
cancel_activity();
cont = &intr_cont;
reset_fdc();
return -EINTR;
}
if (FDCS->reset)
command_status = FD_COMMAND_ERROR;
if (command_status == FD_COMMAND_OKAY)
ret = 0;
else
ret = -EIO;
command_status = FD_COMMAND_NONE;
return ret;
}
/* Record the command result and arrange for the waiter to be woken. */
static void generic_done(int result)
{
	command_status = result;
	cont = &wakeup_cont;
}
/* Report success through the current continuation. */
static void generic_success(void)
{
	cont->done(1);
}
/* Report failure through the current continuation. */
static void generic_failure(void)
{
	cont->done(0);
}
/* Report success, then run the (possibly updated) continuation's redo. */
static void success_and_wakeup(void)
{
	generic_success();
	cont->redo();
}
/*
* formatting and rw support.
* ==========================
*/
/*
 * Advance DRS->probed_format to the next autodetect entry that names a
 * usable format (non-zero sector count in floppy_type).
 *
 * Returns 0 when a candidate was found (stored in DRS->probed_format),
 * or 1 when the autodetect list is exhausted (probed_format reset to 0).
 */
static int next_valid_format(void)
{
	int fmt;

	for (fmt = DRS->probed_format; fmt < 8 && DP->autodetect[fmt]; fmt++) {
		if (floppy_type[DP->autodetect[fmt]].sect) {
			DRS->probed_format = fmt;
			return 0;
		}
	}
	/* ran off the end of the autodetect table */
	DRS->probed_format = 0;
	return 1;
}
/*
 * Error accounting after a failed operation.  While probing, just step
 * to the next candidate format.  Otherwise bump the per-request error
 * count and, against the configured thresholds, abort the request,
 * schedule a controller reset, or force a recalibration.
 */
static void bad_flp_intr(void)
{
	int err_count;

	if (probing) {
		DRS->probed_format++;
		if (!next_valid_format())
			return;
	}
	err_count = ++(*errors);
	INFBOUND(DRWE->badness, err_count);
	if (err_count > DP->max_errors.abort)
		cont->done(0);
	if (err_count > DP->max_errors.reset)
		FDCS->reset = 1;
	else if (err_count > DP->max_errors.recal)
		DRS->track = NEED_2_RECAL;
}
/*
 * Select the geometry (_floppy) for @drive: a fixed entry from
 * floppy_type when the minor encodes a type, else the drive's
 * current autodetected/user-set type.
 */
static void set_floppy(int drive)
{
	int type = ITYPE(UDRS->fd_device);

	_floppy = type ? floppy_type + type : current_type[drive];
}
/*
* formatting support.
* ===================
*/
/*
 * Interrupt handler for a format command: translate the controller
 * status into continuation calls, then always run redo.
 */
static void format_interrupt(void)
{
	switch (interpret_errors()) {
	case 1:
		cont->error();
		/* fall through */
	case 2:
		break;
	case 0:
		cont->done(1);
	}
	cont->redo();
}
/* Clear the MFM bit of command y when geometry x uses FM (single density). */
#define FM_MODE(x, y) ((y) & ~(((x)->rate & 0x80) >> 1))
/* Mask in the MT/MFM modifier bits so commands compare by opcode only. */
#define CT(x) ((x) | 0xc0)
/*
 * Build the raw command and the per-sector ID field table (in
 * floppy_track_buffer) for formatting @track, including interleave
 * and skew so logical sector 1 lands near the index after stepping.
 *
 * NOTE(review): if _floppy->sect and the size code combine so that
 * F_SECT_PER_TRACK is 0, the "% F_SECT_PER_TRACK" below divides by
 * zero — callers must have validated the geometry (see set_geometry).
 */
static void setup_format_params(int track)
{
	int n;
	int il;
	int count;
	int head_shift;
	int track_shift;
	struct fparm {
		unsigned char track, head, sect, size;
	} *here = (struct fparm *)floppy_track_buffer;

	raw_cmd = &default_raw_cmd;
	raw_cmd->track = track;

	raw_cmd->flags = (FD_RAW_WRITE | FD_RAW_INTR | FD_RAW_SPIN |
			  FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK);
	raw_cmd->rate = _floppy->rate & 0x43;
	raw_cmd->cmd_count = NR_F;
	COMMAND = FM_MODE(_floppy, FD_FORMAT);
	DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, format_req.head);
	F_SIZECODE = FD_SIZECODE(_floppy);
	F_SECT_PER_TRACK = _floppy->sect << 2 >> F_SIZECODE;
	F_GAP = _floppy->fmt_gap;
	F_FILL = FD_FILL_BYTE;
	raw_cmd->kernel_data = floppy_track_buffer;
	/* 4 ID bytes (track, head, sect, size) per sector */
	raw_cmd->length = 4 * F_SECT_PER_TRACK;

	/* allow for about 30ms for data transport per track */
	head_shift = (F_SECT_PER_TRACK + 5) / 6;

	/* a ``cylinder'' is two tracks plus a little stepping time */
	track_shift = 2 * head_shift + 3;

	/* position of logical sector 1 on this track */
	n = (track_shift * format_req.track + head_shift * format_req.head)
	    % F_SECT_PER_TRACK;

	/* determine interleave */
	il = 1;
	if (_floppy->fmt_gap < 0x22)
		il++;

	/* initialize field */
	for (count = 0; count < F_SECT_PER_TRACK; ++count) {
		here[count].track = format_req.track;
		here[count].head = format_req.head;
		here[count].sect = 0;
		here[count].size = F_SIZECODE;
	}
	/* place logical sectors */
	for (count = 1; count <= F_SECT_PER_TRACK; ++count) {
		here[n].sect = count;
		n = (n + il) % F_SECT_PER_TRACK;
		if (here[n].sect) {	/* sector busy, find next free sector */
			++n;
			if (n >= F_SECT_PER_TRACK) {
				n -= F_SECT_PER_TRACK;
				while (here[n].sect)
					++n;
			}
		}
	}
	/* shift sector numbers when the format uses a non-1 base */
	if (_floppy->stretch & FD_SECTBASEMASK) {
		for (count = 0; count < F_SECT_PER_TRACK; count++)
			here[count].sect += FD_SECTBASE(_floppy) - 1;
	}
}
/* (Re)issue the format operation described by format_req. */
static void redo_format(void)
{
	buffer_track = -1;	/* invalidate the track buffer */
	setup_format_params(format_req.track << STRETCH(_floppy));
	floppy_start();
	debugt(__func__, "queue format request");
}
/* Continuation driving the format state machine. */
static const struct cont_t format_cont = {
	.interrupt = format_interrupt,
	.redo = redo_format,
	.error = bad_flp_intr,
	.done = generic_done
};
/*
 * FDFMTTRK worker: validate the requested track/head against the
 * drive's geometry, then run the format operation to completion.
 * Returns 0 on success, -EINTR if interrupted, -EINVAL on bad
 * parameters, -EIO on failure.
 */
static int do_format(int drive, struct format_descr *tmp_format_req)
{
	int ret;

	if (lock_fdc(drive))
		return -EINTR;

	set_floppy(drive);
	if (!_floppy ||
	    _floppy->track > DP->tracks ||
	    tmp_format_req->track >= _floppy->track ||
	    tmp_format_req->head >= _floppy->head ||
	    (_floppy->sect << 2) % (1 << FD_SIZECODE(_floppy)) ||
	    !_floppy->fmt_gap) {
		process_fd_request();
		return -EINVAL;
	}
	format_req = *tmp_format_req;
	format_errors = 0;
	cont = &format_cont;
	errors = &format_errors;
	ret = wait_til_done(redo_format, true);
	if (ret == -EINTR)
		return -EINTR;
	process_fd_request();
	return ret;
}
/*
* Buffer read/write and support
* =============================
*/
/*
 * Complete (part of) @req.  On error only the current chunk is failed;
 * on success current_count_sectors sectors are acknowledged.  When the
 * whole request is finished, release the motor and clear current_req.
 */
static void floppy_end_request(struct request *req, int error)
{
	unsigned int nr_sectors = current_count_sectors;
	unsigned int drive = (unsigned long)req->rq_disk->private_data;

	/* current_count_sectors can be zero if transfer failed */
	if (error)
		nr_sectors = blk_rq_cur_sectors(req);
	if (__blk_end_request(req, error, nr_sectors << 9))
		return;	/* request not yet fully completed */

	/* We're done with the request */
	floppy_off(drive);
	current_req = NULL;
}
/* new request_done. Can handle physical sectors which are smaller than a
* logical buffer */
static void request_done(int uptodate)
{
struct request *req = current_req;
struct request_queue *q;
unsigned long flags;
int block;
char msg[sizeof("request done ") + sizeof(int) * 3];
probing = 0;
snprintf(msg, sizeof(msg), "request done %d", uptodate);
reschedule_timeout(MAXTIMEOUT, msg);
if (!req) {
pr_info("floppy.c: no request in request_done\n");
return;
}
q = req->q;
if (uptodate) {
/* maintain values for invalidation on geometry
* change */
block = current_count_sectors + blk_rq_pos(req);
INFBOUND(DRS->maxblock, block);
if (block > _floppy->sect)
DRS->maxtrack = 1;
/* unlock chained buffers */
spin_lock_irqsave(q->queue_lock, flags);
floppy_end_request(req, 0);
spin_unlock_irqrestore(q->queue_lock, flags);
} else {
if (rq_data_dir(req) == WRITE) {
/* record write error information */
DRWE->write_errors++;
if (DRWE->write_errors == 1) {
DRWE->first_error_sector = blk_rq_pos(req);
DRWE->first_error_generation = DRS->generation;
}
DRWE->last_error_sector = blk_rq_pos(req);
DRWE->last_error_generation = DRS->generation;
}
spin_lock_irqsave(q->queue_lock, flags);
floppy_end_request(req, -EIO);
spin_unlock_irqrestore(q->queue_lock, flags);
}
}
/* Interrupt handler evaluating the result of the r/w operation */
static void rw_interrupt(void)
{
int eoc;
int ssize;
int heads;
int nr_sectors;
if (R_HEAD >= 2) {
/* some Toshiba floppy controllers occasionnally seem to
* return bogus interrupts after read/write operations, which
* can be recognized by a bad head number (>= 2) */
return;
}
if (!DRS->first_read_date)
DRS->first_read_date = jiffies;
nr_sectors = 0;
ssize = DIV_ROUND_UP(1 << SIZECODE, 4);
if (ST1 & ST1_EOC)
eoc = 1;
else
eoc = 0;
if (COMMAND & 0x80)
heads = 2;
else
heads = 1;
nr_sectors = (((R_TRACK - TRACK) * heads +
R_HEAD - HEAD) * SECT_PER_TRACK +
R_SECTOR - SECTOR + eoc) << SIZECODE >> 2;
if (nr_sectors / ssize >
DIV_ROUND_UP(in_sector_offset + current_count_sectors, ssize)) {
DPRINT("long rw: %x instead of %lx\n",
nr_sectors, current_count_sectors);
pr_info("rs=%d s=%d\n", R_SECTOR, SECTOR);
pr_info("rh=%d h=%d\n", R_HEAD, HEAD);
pr_info("rt=%d t=%d\n", R_TRACK, TRACK);
pr_info("heads=%d eoc=%d\n", heads, eoc);
pr_info("spt=%d st=%d ss=%d\n",
SECT_PER_TRACK, fsector_t, ssize);
pr_info("in_sector_offset=%d\n", in_sector_offset);
}
nr_sectors -= in_sector_offset;
INFBOUND(nr_sectors, 0);
SUPBOUND(current_count_sectors, nr_sectors);
switch (interpret_errors()) {
case 2:
cont->redo();
return;
case 1:
if (!current_count_sectors) {
cont->error();
cont->redo();
return;
}
break;
case 0:
if (!current_count_sectors) {
cont->redo();
return;
}
current_type[current_drive] = _floppy;
floppy_sizes[TOMINOR(current_drive)] = _floppy->size;
break;
}
if (probing) {
if (DP->flags & FTD_MSG)
DPRINT("Auto-detected floppy type %s in fd%d\n",
_floppy->name, current_drive);
current_type[current_drive] = _floppy;
floppy_sizes[TOMINOR(current_drive)] = _floppy->size;
probing = 0;
}
if (CT(COMMAND) != FD_READ ||
raw_cmd->kernel_data == bio_data(current_req->bio)) {
/* transfer directly from buffer */
cont->done(1);
} else if (CT(COMMAND) == FD_READ) {
buffer_track = raw_cmd->track;
buffer_drive = current_drive;
INFBOUND(buffer_max, nr_sectors + fsector_t);
}
cont->redo();
}
/* Compute maximal contiguous buffer size. */
static int buffer_chain_size(void)
{
struct bio_vec bv;
int size;
struct req_iterator iter;
char *base;
base = bio_data(current_req->bio);
size = 0;
rq_for_each_segment(bv, current_req, iter) {
if (page_address(bv.bv_page) + bv.bv_offset != base + size)
break;
size += bv.bv_len;
}
return size >> 9;
}
/* Compute the maximal transfer size */
static int transfer_size(int ssize, int max_sector, int max_size)
{
SUPBOUND(max_sector, fsector_t + max_size);
/* alignment */
max_sector -= (max_sector % _floppy->sect) % ssize;
/* transfer size, beginning not aligned */
current_count_sectors = max_sector - fsector_t;
return max_sector;
}
/*
* Move data from/to the track buffer to/from the buffer cache.
*/
static void copy_buffer(int ssize, int max_sector, int max_sector_2)
{
int remaining; /* number of transferred 512-byte sectors */
struct bio_vec bv;
char *buffer;
char *dma_buffer;
int size;
struct req_iterator iter;
max_sector = transfer_size(ssize,
min(max_sector, max_sector_2),
blk_rq_sectors(current_req));
if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
buffer_max > fsector_t + blk_rq_sectors(current_req))
current_count_sectors = min_t(int, buffer_max - fsector_t,
blk_rq_sectors(current_req));
remaining = current_count_sectors << 9;
if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) {
DPRINT("in copy buffer\n");
pr_info("current_count_sectors=%ld\n", current_count_sectors);
pr_info("remaining=%d\n", remaining >> 9);
pr_info("current_req->nr_sectors=%u\n",
blk_rq_sectors(current_req));
pr_info("current_req->current_nr_sectors=%u\n",
blk_rq_cur_sectors(current_req));
pr_info("max_sector=%d\n", max_sector);
pr_info("ssize=%d\n", ssize);
}
buffer_max = max(max_sector, buffer_max);
dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
size = blk_rq_cur_bytes(current_req);
rq_for_each_segment(bv, current_req, iter) {
if (!remaining)
break;
size = bv.bv_len;
SUPBOUND(size, remaining);
buffer = page_address(bv.bv_page) + bv.bv_offset;
if (dma_buffer + size >
floppy_track_buffer + (max_buffer_sectors << 10) ||
dma_buffer < floppy_track_buffer) {
DPRINT("buffer overrun in copy buffer %d\n",
(int)((floppy_track_buffer - dma_buffer) >> 9));
pr_info("fsector_t=%d buffer_min=%d\n",
fsector_t, buffer_min);
pr_info("current_count_sectors=%ld\n",
current_count_sectors);
if (CT(COMMAND) == FD_READ)
pr_info("read\n");
if (CT(COMMAND) == FD_WRITE)
pr_info("write\n");
break;
}
if (((unsigned long)buffer) % 512)
DPRINT("%p buffer not aligned\n", buffer);
if (CT(COMMAND) == FD_READ)
memcpy(buffer, dma_buffer, size);
else
memcpy(dma_buffer, buffer, size);
remaining -= size;
dma_buffer += size;
}
if (remaining) {
if (remaining > 0)
max_sector -= remaining >> 9;
DPRINT("weirdness: remaining %d\n", remaining >> 9);
}
}
/* work around a bug in pseudo DMA
* (on some FDCs) pseudo DMA does not stop when the CPU stops
* sending data. Hence we need a different way to signal the
* transfer length: We use SECT_PER_TRACK. Unfortunately, this
* does not work with MT, hence we can only transfer one head at
* a time
*/
/*
 * For writes under pseudo DMA: disable multi-track mode and shrink
 * SECT_PER_TRACK so the controller itself stops at the end of the
 * transfer (see the comment block above).
 */
static void virtualdmabug_workaround(void)
{
	int hard_sectors;
	int end_sector;

	if (CT(COMMAND) == FD_WRITE) {
		COMMAND &= ~0x80;	/* switch off multiple track mode */

		hard_sectors = raw_cmd->length >> (7 + SIZECODE);
		end_sector = SECTOR + hard_sectors - 1;
		if (end_sector > SECT_PER_TRACK) {
			pr_info("too many sectors %d > %d\n",
				end_sector, SECT_PER_TRACK);
			return;
		}
		SECT_PER_TRACK = end_sector;
					/* make sure SECT_PER_TRACK
					 * points to end of transfer */
	}
}
/*
* Formulate a read/write request.
* this routine decides where to load the data (directly to buffer, or to
* tmp floppy area), how much data to load (the size of the buffer, the whole
* track, or a single sector)
* All floppy_track_buffer handling goes in here. If we ever add track buffer
* allocation on the fly, it should be done here. No other part should need
* modification.
*/
/*
 * See the comment block above: builds the raw controller command for
 * the current request and decides between direct-to-bio DMA and the
 * track buffer.  Returns 2 when a transfer should be issued, 1 when
 * the request was satisfied without hardware I/O, 0 on error.
 */
static int make_raw_rw_request(void)
{
	int aligned_sector_t;
	int max_sector;
	int max_size;
	int tracksize;
	int ssize;

	if (WARN(max_buffer_sectors == 0, "VFS: Block I/O scheduled on unopened device\n"))
		return 0;

	set_fdc((long)current_req->rq_disk->private_data);

	raw_cmd = &default_raw_cmd;
	raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
	raw_cmd->cmd_count = NR_RW;
	if (rq_data_dir(current_req) == READ) {
		raw_cmd->flags |= FD_RAW_READ;
		COMMAND = FM_MODE(_floppy, FD_READ);
	} else if (rq_data_dir(current_req) == WRITE) {
		raw_cmd->flags |= FD_RAW_WRITE;
		COMMAND = FM_MODE(_floppy, FD_WRITE);
	} else {
		DPRINT("%s: unknown command\n", __func__);
		return 0;
	}

	max_sector = _floppy->sect * _floppy->head;

	TRACK = (int)blk_rq_pos(current_req) / max_sector;
	fsector_t = (int)blk_rq_pos(current_req) % max_sector;
	if (_floppy->track && TRACK >= _floppy->track) {
		/* request beyond the last track */
		if (blk_rq_cur_sectors(current_req) & 1) {
			current_count_sectors = 1;
			return 1;
		} else
			return 0;
	}
	HEAD = fsector_t / _floppy->sect;

	if (((_floppy->stretch & (FD_SWAPSIDES | FD_SECTBASEMASK)) ||
	     test_bit(FD_NEED_TWADDLE_BIT, &DRS->flags)) &&
	    fsector_t < _floppy->sect)
		max_sector = _floppy->sect;

	/* 2M disks have phantom sectors on the first track */
	if ((_floppy->rate & FD_2M) && (!TRACK) && (!HEAD)) {
		max_sector = 2 * _floppy->sect / 3;
		if (fsector_t >= max_sector) {
			current_count_sectors =
			    min_t(int, _floppy->sect - fsector_t,
				  blk_rq_sectors(current_req));
			return 1;
		}
		SIZECODE = 2;
	} else
		SIZECODE = FD_SIZECODE(_floppy);
	raw_cmd->rate = _floppy->rate & 0x43;
	if ((_floppy->rate & FD_2M) && (TRACK || HEAD) && raw_cmd->rate == 2)
		raw_cmd->rate = 1;

	if (SIZECODE)
		SIZECODE2 = 0xff;
	else
		SIZECODE2 = 0x80;
	raw_cmd->track = TRACK << STRETCH(_floppy);
	DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, HEAD);
	GAP = _floppy->gap;
	ssize = DIV_ROUND_UP(1 << SIZECODE, 4);
	SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
	SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) +
	    FD_SECTBASE(_floppy);

	/* tracksize describes the size which can be filled up with sectors
	 * of size ssize.
	 */
	tracksize = _floppy->sect - _floppy->sect % ssize;
	if (tracksize < _floppy->sect) {
		SECT_PER_TRACK++;
		if (tracksize <= fsector_t % _floppy->sect)
			SECTOR--;

		/* if we are beyond tracksize, fill up using smaller sectors */
		while (tracksize <= fsector_t % _floppy->sect) {
			while (tracksize + ssize > _floppy->sect) {
				SIZECODE--;
				ssize >>= 1;
			}
			SECTOR++;
			SECT_PER_TRACK++;
			tracksize += ssize;
		}
		max_sector = HEAD * _floppy->sect + tracksize;
	} else if (!TRACK && !HEAD && !(_floppy->rate & FD_2M) && probing) {
		max_sector = _floppy->sect;
	} else if (!HEAD && CT(COMMAND) == FD_WRITE) {
		/* for virtual DMA bug workaround */
		max_sector = _floppy->sect;
	}

	in_sector_offset = (fsector_t % _floppy->sect) % ssize;
	aligned_sector_t = fsector_t - in_sector_offset;
	max_size = blk_rq_sectors(current_req);
	if ((raw_cmd->track == buffer_track) &&
	    (current_drive == buffer_drive) &&
	    (fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
		/* data already in track buffer */
		if (CT(COMMAND) == FD_READ) {
			copy_buffer(1, max_sector, buffer_max);
			return 1;
		}
	} else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
		/* unaligned write: must read the physical sector first */
		if (CT(COMMAND) == FD_WRITE) {
			unsigned int sectors;

			sectors = fsector_t + blk_rq_sectors(current_req);
			if (sectors > ssize && sectors < ssize + ssize)
				max_size = ssize + ssize;
			else
				max_size = ssize;
		}
		raw_cmd->flags &= ~FD_RAW_WRITE;
		raw_cmd->flags |= FD_RAW_READ;
		COMMAND = FM_MODE(_floppy, FD_READ);
	} else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) {
		/* bio memory is DMA-able: consider a direct transfer */
		unsigned long dma_limit;
		int direct, indirect;

		indirect =
		    transfer_size(ssize, max_sector,
				  max_buffer_sectors * 2) - fsector_t;

		/*
		 * Do NOT use minimum() here---MAX_DMA_ADDRESS is 64 bits wide
		 * on a 64 bit machine!
		 */
		max_size = buffer_chain_size();
		dma_limit = (MAX_DMA_ADDRESS -
			     ((unsigned long)bio_data(current_req->bio))) >> 9;
		if ((unsigned long)max_size > dma_limit)
			max_size = dma_limit;
		/* 64 kb boundaries */
		if (CROSS_64KB(bio_data(current_req->bio), max_size << 9))
			max_size = (K_64 -
				    ((unsigned long)bio_data(current_req->bio)) %
				    K_64) >> 9;
		direct = transfer_size(ssize, max_sector, max_size) - fsector_t;
		/*
		 * We try to read tracks, but if we get too many errors, we
		 * go back to reading just one sector at a time.
		 *
		 * This means we should be able to read a sector even if there
		 * are other bad sectors on this track.
		 */
		if (!direct ||
		    (indirect * 2 > direct * 3 &&
		     *errors < DP->max_errors.read_track &&
		     ((!probing ||
		       (DP->read_track & (1 << DRS->probed_format)))))) {
			max_size = blk_rq_sectors(current_req);
		} else {
			raw_cmd->kernel_data = bio_data(current_req->bio);
			raw_cmd->length = current_count_sectors << 9;
			if (raw_cmd->length == 0) {
				DPRINT("%s: zero dma transfer attempted\n", __func__);
				DPRINT("indirect=%d direct=%d fsector_t=%d\n",
				       indirect, direct, fsector_t);
				return 0;
			}
			virtualdmabug_workaround();
			return 2;
		}
	}

	if (CT(COMMAND) == FD_READ)
		max_size = max_sector;	/* unbounded */

	/* claim buffer track if needed */
	if (buffer_track != raw_cmd->track ||	/* bad track */
	    buffer_drive != current_drive ||	/* bad drive */
	    fsector_t > buffer_max ||
	    fsector_t < buffer_min ||
	    ((CT(COMMAND) == FD_READ ||
	      (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
	     max_sector > 2 * max_buffer_sectors + buffer_min &&
	     max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)) {
		/* not enough space */
		buffer_track = -1;
		buffer_drive = current_drive;
		buffer_max = buffer_min = aligned_sector_t;
	}
	raw_cmd->kernel_data = floppy_track_buffer +
		((aligned_sector_t - buffer_min) << 9);

	if (CT(COMMAND) == FD_WRITE) {
		/* copy write buffer to track buffer.
		 * if we get here, we know that the write
		 * is either aligned or the data already in the buffer
		 * (buffer will be overwritten) */
		if (in_sector_offset && buffer_track == -1)
			DPRINT("internal error offset !=0 on write\n");
		buffer_track = raw_cmd->track;
		buffer_drive = current_drive;
		copy_buffer(ssize, max_sector,
			    2 * max_buffer_sectors + buffer_min);
	} else
		transfer_size(ssize, max_sector,
			      2 * max_buffer_sectors + buffer_min -
			      aligned_sector_t);

	/* round up current_count_sectors to get dma xfer size */
	raw_cmd->length = in_sector_offset + current_count_sectors;
	raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1;
	raw_cmd->length <<= 9;
	/* sanity-check the computed transfer before issuing it */
	if ((raw_cmd->length < current_count_sectors << 9) ||
	    (raw_cmd->kernel_data != bio_data(current_req->bio) &&
	     CT(COMMAND) == FD_WRITE &&
	     (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max ||
	      aligned_sector_t < buffer_min)) ||
	    raw_cmd->length % (128 << SIZECODE) ||
	    raw_cmd->length <= 0 || current_count_sectors <= 0) {
		DPRINT("fractionary current count b=%lx s=%lx\n",
		       raw_cmd->length, current_count_sectors);
		if (raw_cmd->kernel_data != bio_data(current_req->bio))
			pr_info("addr=%d, length=%ld\n",
				(int)((raw_cmd->kernel_data -
				       floppy_track_buffer) >> 9),
				current_count_sectors);
		pr_info("st=%d ast=%d mse=%d msi=%d\n",
			fsector_t, aligned_sector_t, max_sector, max_size);
		pr_info("ssize=%x SIZECODE=%d\n", ssize, SIZECODE);
		pr_info("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n",
			COMMAND, SECTOR, HEAD, TRACK);
		pr_info("buffer drive=%d\n", buffer_drive);
		pr_info("buffer track=%d\n", buffer_track);
		pr_info("buffer_min=%d\n", buffer_min);
		pr_info("buffer_max=%d\n", buffer_max);
		return 0;
	}

	if (raw_cmd->kernel_data != bio_data(current_req->bio)) {
		if (raw_cmd->kernel_data < floppy_track_buffer ||
		    current_count_sectors < 0 ||
		    raw_cmd->length < 0 ||
		    raw_cmd->kernel_data + raw_cmd->length >
		    floppy_track_buffer + (max_buffer_sectors << 10)) {
			DPRINT("buffer overrun in schedule dma\n");
			pr_info("fsector_t=%d buffer_min=%d current_count=%ld\n",
				fsector_t, buffer_min, raw_cmd->length >> 9);
			pr_info("current_count_sectors=%ld\n",
				current_count_sectors);
			if (CT(COMMAND) == FD_READ)
				pr_info("read\n");
			if (CT(COMMAND) == FD_WRITE)
				pr_info("write\n");
			return 0;
		}
	} else if (raw_cmd->length > blk_rq_bytes(current_req) ||
		   current_count_sectors > blk_rq_sectors(current_req)) {
		DPRINT("buffer overrun in direct transfer\n");
		return 0;
	} else if (raw_cmd->length < current_count_sectors << 9) {
		DPRINT("more sectors than bytes\n");
		pr_info("bytes=%ld\n", raw_cmd->length >> 9);
		pr_info("sectors=%ld\n", current_count_sectors);
	}
	if (raw_cmd->length == 0) {
		DPRINT("zero dma transfer attempted from make_raw_request\n");
		return 0;
	}

	virtualdmabug_workaround();
	return 2;
}
/*
* Round-robin between our available drives, doing one request from each
*/
/*
 * Round-robin between our available drives, doing one request from each
 * (see comment above).  Returns non-zero when current_req was set.
 */
static int set_next_request(void)
{
	struct request_queue *q;
	int old_pos = fdc_queue;

	do {
		q = disks[fdc_queue]->queue;
		if (++fdc_queue == N_DRIVE)
			fdc_queue = 0;
		if (q) {
			current_req = blk_fetch_request(q);
			if (current_req) {
				current_req->error_count = 0;
				break;
			}
		}
	} while (fdc_queue != old_pos);	/* stop after one full lap */

	return current_req != NULL;
}
/*
 * Main request loop: fetch the next request, spin up the drive,
 * handle disk change and format autodetection, then build the raw
 * command and schedule the transfer via floppy_start().
 */
static void redo_fd_request(void)
{
	int drive;
	int tmp;

	lastredo = jiffies;
	if (current_drive < N_DRIVE)
		floppy_off(current_drive);

do_request:
	if (!current_req) {
		int pending;

		spin_lock_irq(&floppy_lock);
		pending = set_next_request();
		spin_unlock_irq(&floppy_lock);
		if (!pending) {
			/* nothing queued on any drive: go idle */
			do_floppy = NULL;
			unlock_fdc();
			return;
		}
	}
	drive = (long)current_req->rq_disk->private_data;
	set_fdc(drive);
	reschedule_timeout(current_reqD, "redo fd request");

	set_floppy(drive);
	raw_cmd = &default_raw_cmd;
	raw_cmd->flags = 0;
	/* start_motor() re-enters redo_fd_request after spin-up */
	if (start_motor(redo_fd_request))
		return;

	disk_change(current_drive);
	if (test_bit(current_drive, &fake_change) ||
	    test_bit(FD_DISK_CHANGED_BIT, &DRS->flags)) {
		DPRINT("disk absent or changed during operation\n");
		request_done(0);
		goto do_request;
	}
	if (!_floppy) {	/* Autodetection */
		if (!probing) {
			DRS->probed_format = 0;
			if (next_valid_format()) {
				DPRINT("no autodetectable formats\n");
				_floppy = NULL;
				request_done(0);
				goto do_request;
			}
		}
		probing = 1;
		_floppy = floppy_type + DP->autodetect[DRS->probed_format];
	} else
		probing = 0;
	errors = &(current_req->error_count);
	tmp = make_raw_rw_request();
	if (tmp < 2) {
		/* request satisfied (1) or failed (0) without hardware I/O */
		request_done(tmp);
		goto do_request;
	}

	if (test_bit(FD_NEED_TWADDLE_BIT, &DRS->flags))
		twaddle();
	schedule_bh(floppy_start);
	debugt(__func__, "queue fd request");
	return;
}
/* Continuation driving ordinary read/write requests. */
static const struct cont_t rw_cont = {
	.interrupt = rw_interrupt,
	.redo = redo_fd_request,
	.error = bad_flp_intr,
	.done = request_done
};
/* Resume normal request processing in the bottom half. */
static void process_fd_request(void)
{
	cont = &rw_cont;
	schedule_bh(redo_fd_request);
}
/*
 * Block-layer request_fn: claim the FDC (if free) and kick off
 * request processing.  If the FDC is busy the new request is picked
 * up when the current one finishes.
 */
static void do_fd_request(struct request_queue *q)
{
	if (WARN(max_buffer_sectors == 0,
		 "VFS: %s called on non-open device\n", __func__))
		return;

	/* NOTE(review): this WARN dereferences current_req for its message;
	 * looks like current_req may be NULL here — confirm before relying
	 * on this path. */
	if (WARN(atomic_read(&usage_count) == 0,
		 "warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
		 current_req, (long)blk_rq_pos(current_req),
		 (unsigned long long) current_req->cmd_flags))
		return;

	if (test_and_set_bit(0, &fdc_busy)) {
		/* fdc busy, this new request will be treated when the
		   current one is done */
		is_alive(__func__, "old request running");
		return;
	}
	command_status = FD_COMMAND_NONE;
	__reschedule_timeout(MAXTIMEOUT, "fd_request");
	set_fdc(0);
	process_fd_request();
	is_alive(__func__, "");
}
/* Continuation used by poll_drive() to sense drive status. */
static const struct cont_t poll_cont = {
	.interrupt = success_and_wakeup,
	.redo = floppy_ready,
	.error = generic_failure,
	.done = generic_done
};
/*
 * Issue an empty command with the given raw flags to poll the drive
 * (clears the disk-change line).  Returns wait_til_done()'s result.
 */
static int poll_drive(bool interruptible, int flag)
{
	/* no auto-sense, just clear dcl */
	raw_cmd = &default_raw_cmd;
	raw_cmd->flags = flag;
	raw_cmd->track = 0;
	raw_cmd->cmd_count = 0;
	cont = &poll_cont;
	debug_dcl(DP->flags, "setting NEWCHANGE in poll_drive\n");
	set_bit(FD_DISK_NEWCHANGE_BIT, &DRS->flags);

	return wait_til_done(floppy_ready, interruptible);
}
/*
* User triggered reset
* ====================
*/
/* A reset should never generate an interrupt; log if one arrives. */
static void reset_intr(void)
{
	pr_info("weird, reset interrupt called\n");
}
/* Continuation for a user-requested controller reset. */
static const struct cont_t reset_cont = {
	.interrupt = reset_intr,
	.redo = success_and_wakeup,
	.error = generic_failure,
	.done = generic_done
};
/*
 * FDRESET worker: reset the controller if requested (or already
 * pending) and wait for completion.  Returns 0, or -EINTR if the
 * lock or the wait was interrupted.
 */
static int user_reset_fdc(int drive, int arg, bool interruptible)
{
	int ret;

	if (lock_fdc(drive))
		return -EINTR;

	if (arg == FD_RESET_ALWAYS)
		FDCS->reset = 1;
	if (FDCS->reset) {
		cont = &reset_cont;
		ret = wait_til_done(reset_fdc, interruptible);
		if (ret == -EINTR)
			return -EINTR;
	}
	process_fd_request();
	return 0;
}
/*
* Misc Ioctl's and support
* ========================
*/
/* Copy @size bytes to user space; 0 on success, -EFAULT on fault. */
static inline int fd_copyout(void __user *param, const void *address,
			     unsigned long size)
{
	if (copy_to_user(param, address, size))
		return -EFAULT;
	return 0;
}
/* Copy @size bytes from user space; 0 on success, -EFAULT on fault. */
static inline int fd_copyin(void __user *param, void *address,
			    unsigned long size)
{
	if (copy_from_user(address, param, size))
		return -EFAULT;
	return 0;
}
static const char *drive_name(int type, int drive)
{
struct floppy_struct *floppy;
if (type)
floppy = floppy_type + type;
else {
if (UDP->native_format)
floppy = floppy_type + UDP->native_format;
else
return "(null)";
}
if (floppy->name)
return floppy->name;
else
return "(null)";
}
/* raw commands */
/* raw commands
 *
 * Completion for FDRAWCMD: record the controller reply bytes and DMA
 * residue into raw_cmd, update the failure/disk-change flags, and
 * either chain to raw_cmd->next (per the STOP_IF flags) or finish.
 */
static void raw_cmd_done(int flag)
{
	int i;

	if (!flag) {
		raw_cmd->flags |= FD_RAW_FAILURE;
		raw_cmd->flags |= FD_RAW_HARDFAILURE;
	} else {
		raw_cmd->reply_count = inr;
		if (raw_cmd->reply_count > MAX_REPLIES)
			raw_cmd->reply_count = 0;
		for (i = 0; i < raw_cmd->reply_count; i++)
			raw_cmd->reply[i] = reply_buffer[i];

		if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
			unsigned long flags;
			flags = claim_dma_lock();
			raw_cmd->length = fd_get_dma_residue();
			release_dma_lock(flags);
		}

		if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) &&
		    (!raw_cmd->reply_count || (raw_cmd->reply[0] & 0xc0)))
			raw_cmd->flags |= FD_RAW_FAILURE;

		if (disk_change(current_drive))
			raw_cmd->flags |= FD_RAW_DISK_CHANGE;
		else
			raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
		if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
			motor_off_callback(current_drive);

		/* chain to the next command unless a STOP_IF flag applies */
		if (raw_cmd->next &&
		    (!(raw_cmd->flags & FD_RAW_FAILURE) ||
		     !(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) &&
		    ((raw_cmd->flags & FD_RAW_FAILURE) ||
		     !(raw_cmd->flags & FD_RAW_STOP_IF_SUCCESS))) {
			raw_cmd = raw_cmd->next;
			return;
		}
	}
	generic_done(flag);
}
/* Continuation driving FDRAWCMD command chains. */
static const struct cont_t raw_cmd_cont = {
	.interrupt = success_and_wakeup,
	.redo = floppy_start,
	.error = generic_failure,
	.done = raw_cmd_done
};
/*
 * Copy the results of a raw command chain back to user space.  The
 * kernel-only members (next, kernel_data) are scrubbed from the copy
 * so kernel pointers never leak to user space.  For read commands,
 * also copy out the transferred data (buffer_length - residue bytes).
 */
static int raw_cmd_copyout(int cmd, void __user *param,
			   struct floppy_raw_cmd *ptr)
{
	int ret;

	while (ptr) {
		struct floppy_raw_cmd cmd = *ptr;
		cmd.next = NULL;
		cmd.kernel_data = NULL;
		ret = copy_to_user(param, &cmd, sizeof(cmd));
		if (ret)
			return -EFAULT;
		param += sizeof(struct floppy_raw_cmd);
		if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length) {
			if (ptr->length >= 0 &&
			    ptr->length <= ptr->buffer_length) {
				/* length holds the DMA residue here */
				long length = ptr->buffer_length - ptr->length;
				ret = fd_copyout(ptr->data, ptr->kernel_data,
						 length);
				if (ret)
					return ret;
			}
		}
		ptr = ptr->next;
	}

	return 0;
}
/*
 * Free a whole raw-command chain, including any DMA buffers attached
 * to its elements, and NULL out the caller's head pointer.
 */
static void raw_cmd_free(struct floppy_raw_cmd **ptr)
{
	struct floppy_raw_cmd *cur = *ptr;

	*ptr = NULL;
	while (cur) {
		struct floppy_raw_cmd *next = cur->next;

		if (cur->buffer_length) {
			fd_dma_mem_free((unsigned long)cur->kernel_data,
					cur->buffer_length);
			cur->buffer_length = 0;
		}
		kfree(cur);
		cur = next;
	}
}
/*
 * Copy a raw command chain in from user space, allocating a DMA
 * buffer per element that transfers data.  The kernel-only members
 * (next, buffer_length, kernel_data) are overwritten right after the
 * copy so user-supplied values in them are ignored.  On any failure
 * the partially built chain is left in *rcmd for the caller to free
 * with raw_cmd_free().
 */
static int raw_cmd_copyin(int cmd, void __user *param,
			  struct floppy_raw_cmd **rcmd)
{
	struct floppy_raw_cmd *ptr;
	int ret;
	int i;

	*rcmd = NULL;

loop:
	ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;
	*rcmd = ptr;
	ret = copy_from_user(ptr, param, sizeof(*ptr));
	/* sanitize kernel-only members regardless of what user passed */
	ptr->next = NULL;
	ptr->buffer_length = 0;
	ptr->kernel_data = NULL;
	if (ret)
		return -EFAULT;
	param += sizeof(struct floppy_raw_cmd);
	if (ptr->cmd_count > 33)
			/* the command may now also take up the space
			 * initially intended for the reply & the
			 * reply count. Needed for long 82078 commands
			 * such as RESTORE, which takes ... 17 command
			 * bytes. Murphy's law #137: When you reserve
			 * 16 bytes for a structure, you'll one day
			 * discover that you really need 17...
			 */
		return -EINVAL;

	for (i = 0; i < 16; i++)
		ptr->reply[i] = 0;
	ptr->resultcode = 0;

	if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
		if (ptr->length <= 0)
			return -EINVAL;
		ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length);
		fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
		if (!ptr->kernel_data)
			return -ENOMEM;
		ptr->buffer_length = ptr->length;
	}
	if (ptr->flags & FD_RAW_WRITE) {
		ret = fd_copyin(ptr->data, ptr->kernel_data, ptr->length);
		if (ret)
			return ret;
	}

	if (ptr->flags & FD_RAW_MORE) {
		/* append the next user command to the chain */
		rcmd = &(ptr->next);
		ptr->rate &= 0x43;
		goto loop;
	}

	return 0;
}
/*
 * FDRAWCMD worker: copy in the command chain, run it through the
 * raw_cmd continuation, and copy results back.  Sets rawcmd = 2 when
 * other drives on this FDC are open, which forces extra cleanup later.
 */
static int raw_cmd_ioctl(int cmd, void __user *param)
{
	struct floppy_raw_cmd *my_raw_cmd;
	int drive;
	int ret2;
	int ret;

	if (FDCS->rawcmd <= 1)
		FDCS->rawcmd = 1;
	for (drive = 0; drive < N_DRIVE; drive++) {
		if (FDC(drive) != fdc)
			continue;
		if (drive == current_drive) {
			if (UDRS->fd_ref > 1) {
				FDCS->rawcmd = 2;
				break;
			}
		} else if (UDRS->fd_ref) {
			FDCS->rawcmd = 2;
			break;
		}
	}

	if (FDCS->reset)
		return -EIO;

	ret = raw_cmd_copyin(cmd, param, &my_raw_cmd);
	if (ret) {
		raw_cmd_free(&my_raw_cmd);
		return ret;
	}

	raw_cmd = my_raw_cmd;
	cont = &raw_cmd_cont;
	ret = wait_til_done(floppy_start, true);
	debug_dcl(DP->flags, "calling disk change from raw_cmd ioctl\n");

	if (ret != -EINTR && FDCS->reset)
		ret = -EIO;

	DRS->track = NO_TRACK;	/* position is unknown after a raw command */

	ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd);
	if (!ret)
		ret = ret2;
	raw_cmd_free(&my_raw_cmd);
	return ret;
}
static int invalidate_drive(struct block_device *bdev)
{
	/* invalidate the buffer track to force a reread */
	set_bit((long)bdev->bd_disk->private_data, &fake_change);
	process_fd_request();
	check_disk_change(bdev);
	return 0;
}
/*
 * FDSETPRM/FDDEFPRM worker: install a new geometry either into the
 * global floppy_type table (@type != 0, privileged) or as the drive's
 * per-user parameters, invalidating cached data where the change
 * could make it stale.  Returns 0, -EINVAL on bad geometry, -EPERM,
 * or -EINTR if interrupted.
 *
 * The sanity checks below mirror upstream commit f3554aeb9912
 * ("floppy: fix div-by-zero in setup_format_params"): the fields are
 * unsigned, so plain "<= 0" only caught zero; casting to int also
 * rejects huge values, the sect*head product is checked for overflow,
 * and a geometry whose F_SECT_PER_TRACK would compute to 0 is refused
 * (setup_format_params would otherwise divide by zero).
 */
static int set_geometry(unsigned int cmd, struct floppy_struct *g,
			int drive, int type, struct block_device *bdev)
{
	int cnt;

	/* sanity checking for parameters. */
	if ((int)g->sect <= 0 ||
	    (int)g->head <= 0 ||
	    /* check for overflow in max_sector */
	    (int)(g->sect * g->head) <= 0 ||
	    /* check for zero in F_SECT_PER_TRACK */
	    (unsigned char)((g->sect << 2) >> FD_SIZECODE(g)) == 0 ||
	    g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
	    /* check if reserved bits are set */
	    (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
		return -EINVAL;
	if (type) {
		/* redefining a global format type requires privilege */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		mutex_lock(&open_lock);
		if (lock_fdc(drive)) {
			mutex_unlock(&open_lock);
			return -EINTR;
		}
		floppy_type[type] = *g;
		floppy_type[type].name = "user format";
		for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
			floppy_sizes[cnt] = floppy_sizes[cnt + 0x80] =
			    floppy_type[type].size + 1;
		process_fd_request();
		/* drop cached pages of every open device using this type */
		for (cnt = 0; cnt < N_DRIVE; cnt++) {
			struct block_device *bdev = opened_bdev[cnt];
			if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
				continue;
			__invalidate_device(bdev, true);
		}
		mutex_unlock(&open_lock);
	} else {
		int oldStretch;

		if (lock_fdc(drive))
			return -EINTR;
		if (cmd != FDDEFPRM) {
			/* notice a disk change immediately, else
			 * we lose our settings immediately*/
			if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
				return -EINTR;
		}
		oldStretch = g->stretch;
		user_params[drive] = *g;
		if (buffer_drive == drive)
			SUPBOUND(buffer_max, user_params[drive].sect);
		current_type[drive] = &user_params[drive];
		floppy_sizes[drive] = user_params[drive].size;
		if (cmd == FDDEFPRM)
			DRS->keep_data = -1;
		else
			DRS->keep_data = 1;
		/* invalidation. Invalidate only when needed, i.e.
		 * when there are already sectors in the buffer cache
		 * whose number will change. This is useful, because
		 * mtools often changes the geometry of the disk after
		 * looking at the boot block */
		if (DRS->maxblock > user_params[drive].sect ||
		    DRS->maxtrack ||
		    ((user_params[drive].sect ^ oldStretch) &
		     (FD_SWAPSIDES | FD_SECTBASEMASK)))
			invalidate_drive(bdev);
		else
			process_fd_request();
	}
	return 0;
}
/* handle obsolete ioctl's */
/*
 * All floppy ioctls this driver accepts, in their modern encoding.
 * normalize_ioctl() matches a user-supplied (possibly old-style) command
 * against this table by its low 16 bits.
 */
static unsigned int ioctl_table[] = {
	FDCLRPRM,
	FDSETPRM,
	FDDEFPRM,
	FDGETPRM,
	FDMSGON,
	FDMSGOFF,
	FDFMTBEG,
	FDFMTTRK,
	FDFMTEND,
	FDSETEMSGTRESH,
	FDFLUSH,
	FDSETMAXERRS,
	FDGETMAXERRS,
	FDGETDRVTYP,
	FDSETDRVPRM,
	FDGETDRVPRM,
	FDGETDRVSTAT,
	FDPOLLDRVSTAT,
	FDRESET,
	FDGETFDCSTAT,
	FDWERRORCLR,
	FDWERRORGET,
	FDRAWCMD,
	FDEJECT,
	FDTWADDLE
};
/*
 * Map an old-style floppy ioctl number onto its modern equivalent.
 * On a match, *cmd is rewritten to the table entry and *size is set to
 * the payload size encoded in the caller's command word.  A caller that
 * encodes a larger payload than the modern command supports is refused.
 *
 * Returns 0 on success, -EINVAL if the command is unknown, -EFAULT if
 * the requested transfer size is unsupported.
 */
static int normalize_ioctl(unsigned int *cmd, int *size)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(ioctl_table); i++) {
		unsigned int known = ioctl_table[i];

		if ((*cmd & 0xffff) != (known & 0xffff))
			continue;

		*size = _IOC_SIZE(*cmd);
		*cmd = known;
		if (*size > _IOC_SIZE(*cmd)) {
			pr_info("ioctl not yet supported\n");
			return -EFAULT;
		}
		return 0;
	}
	return -EINVAL;
}
/*
 * Look up the geometry in effect for @drive.  A non-zero @type selects a
 * fixed entry from floppy_type[]; otherwise the drive is polled and the
 * auto-detected current_type[] entry is used.
 *
 * Returns 0 with *g pointing at the geometry, -EINTR if interrupted,
 * -ENODEV if no geometry is known.
 */
static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
{
	struct floppy_struct *geom;

	if (type) {
		geom = &floppy_type[type];
	} else {
		if (lock_fdc(drive))
			return -EINTR;
		if (poll_drive(false, 0) == -EINTR)
			return -EINTR;
		process_fd_request();
		geom = current_type[drive];
	}
	*g = geom;
	return geom ? 0 : -ENODEV;
}
static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
int drive = (long)bdev->bd_disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
struct floppy_struct *g;
int ret;
ret = get_floppy_geometry(drive, type, &g);
if (ret)
return ret;
geo->heads = g->head;
geo->sectors = g->sect;
geo->cylinders = g->track;
return 0;
}
/*
 * Core floppy ioctl dispatcher.  Runs with floppy_mutex held (taken by
 * fd_ioctl()).  Copies the payload in (if the command writes to the
 * driver), executes the command, and copies the result back out (if the
 * command reads from the driver).
 *
 * Returns 0 or a negative errno.
 */
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
			   unsigned long param)
{
	int drive = (long)bdev->bd_disk->private_data;
	int type = ITYPE(UDRS->fd_device);
	int i;
	int ret;
	int size;
	/* one buffer big enough for any inbound payload */
	union inparam {
		struct floppy_struct g;	/* geometry */
		struct format_descr f;
		struct floppy_max_errors max_errors;
		struct floppy_drive_params dp;
	} inparam;		/* parameters coming from user space */
	const void *outparam;	/* parameters passed back to user space */
	/* convert compatibility eject ioctls into floppy eject ioctl.
	 * We do this in order to provide a means to eject floppy disks before
	 * installing the new fdutils package */
	if (cmd == CDROMEJECT ||	/* CD-ROM eject */
	    cmd == 0x6470) {		/* SunOS floppy eject */
		DPRINT("obsolete eject ioctl\n");
		DPRINT("please use floppycontrol --eject\n");
		cmd = FDEJECT;
	}
	/* only the 0x02 ('F') ioctl group belongs to this driver */
	if (!((cmd & 0xff00) == 0x0200))
		return -EINVAL;
	/* convert the old style command into a new style command */
	ret = normalize_ioctl(&cmd, &size);
	if (ret)
		return ret;
	/* permission checks: bit 0x40 = needs write access,
	 * bit 0x80 = needs CAP_SYS_ADMIN */
	if (((cmd & 0x40) && !(mode & (FMODE_WRITE | FMODE_WRITE_IOCTL))) ||
	    ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;
	if (WARN_ON(size < 0 || size > sizeof(inparam)))
		return -EINVAL;
	/* copyin */
	memset(&inparam, 0, sizeof(inparam));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		ret = fd_copyin((void __user *)param, &inparam, size);
		if (ret)
			return ret;
	}
	switch (cmd) {
	case FDEJECT:
		if (UDRS->fd_ref != 1)
			/* somebody else has this drive open */
			return -EBUSY;
		if (lock_fdc(drive))
			return -EINTR;
		/* do the actual eject. Fails on
		 * non-Sparc architectures */
		ret = fd_eject(UNIT(drive));
		/* force re-detection on the next access */
		set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
		set_bit(FD_VERIFY_BIT, &UDRS->flags);
		process_fd_request();
		return ret;
	case FDCLRPRM:
		if (lock_fdc(drive))
			return -EINTR;
		current_type[drive] = NULL;
		floppy_sizes[drive] = MAX_DISK_SIZE << 1;
		UDRS->keep_data = 0;
		return invalidate_drive(bdev);
	case FDSETPRM:
	case FDDEFPRM:
		return set_geometry(cmd, &inparam.g, drive, type, bdev);
	case FDGETPRM:
		ret = get_floppy_geometry(drive, type,
					  (struct floppy_struct **)&outparam);
		if (ret)
			return ret;
		break;
	case FDMSGON:
		UDP->flags |= FTD_MSG;
		return 0;
	case FDMSGOFF:
		UDP->flags &= ~FTD_MSG;
		return 0;
	case FDFMTBEG:
		if (lock_fdc(drive))
			return -EINTR;
		if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
			return -EINTR;
		ret = UDRS->flags;
		process_fd_request();
		/* formatting needs a present, writable disk */
		if (ret & FD_VERIFY)
			return -ENODEV;
		if (!(ret & FD_DISK_WRITABLE))
			return -EROFS;
		return 0;
	case FDFMTTRK:
		if (UDRS->fd_ref != 1)
			return -EBUSY;
		return do_format(drive, &inparam.f);
	case FDFMTEND:
	case FDFLUSH:
		if (lock_fdc(drive))
			return -EINTR;
		return invalidate_drive(bdev);
	case FDSETEMSGTRESH:
		UDP->max_errors.reporting = (unsigned short)(param & 0x0f);
		return 0;
	case FDGETMAXERRS:
		outparam = &UDP->max_errors;
		break;
	case FDSETMAXERRS:
		UDP->max_errors = inparam.max_errors;
		break;
	case FDGETDRVTYP:
		outparam = drive_name(type, drive);
		SUPBOUND(size, strlen((const char *)outparam) + 1);
		break;
	case FDSETDRVPRM:
		*UDP = inparam.dp;
		break;
	case FDGETDRVPRM:
		outparam = UDP;
		break;
	case FDPOLLDRVSTAT:
		/* like FDGETDRVSTAT, but refresh the state first */
		if (lock_fdc(drive))
			return -EINTR;
		if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
			return -EINTR;
		process_fd_request();
		/* fall through */
	case FDGETDRVSTAT:
		outparam = UDRS;
		break;
	case FDRESET:
		return user_reset_fdc(drive, (int)param, true);
	case FDGETFDCSTAT:
		outparam = UFDCS;
		break;
	case FDWERRORCLR:
		memset(UDRWE, 0, sizeof(*UDRWE));
		return 0;
	case FDWERRORGET:
		outparam = UDRWE;
		break;
	case FDRAWCMD:
		if (type)
			return -EINVAL;
		if (lock_fdc(drive))
			return -EINTR;
		set_floppy(drive);
		i = raw_cmd_ioctl(cmd, (void __user *)param);
		if (i == -EINTR)
			return -EINTR;
		process_fd_request();
		return i;
	case FDTWADDLE:
		if (lock_fdc(drive))
			return -EINTR;
		twaddle();
		process_fd_request();
		return 0;
	default:
		return -EINVAL;
	}
	/* commands that break out of the switch return data via outparam */
	if (_IOC_DIR(cmd) & _IOC_READ)
		return fd_copyout((void __user *)param, outparam, size);
	return 0;
}
/*
 * block_device_operations ->ioctl entry point: serialize against
 * open/release and other ioctls, then hand off to the real handler.
 */
static int fd_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long param)
{
	int err;

	mutex_lock(&floppy_mutex);
	err = fd_locked_ioctl(bdev, mode, cmd, param);
	mutex_unlock(&floppy_mutex);

	return err;
}
/*
 * Read each drive's type out of CMOS (or module/boot parameters) and
 * install the matching default_drive_params entry, printing a one-line
 * inventory of detected drives.
 *
 * NOTE(review): UDP appears to select the per-drive params via the local
 * variable named 'drive' (macro expansion) -- do not rename 'drive'
 * without checking the macro definition.
 */
static void __init config_types(void)
{
	bool has_drive = false;
	int drive;
	/* read drive info out of physical CMOS */
	drive = 0;
	if (!UDP->cmos)
		UDP->cmos = FLOPPY0_TYPE;
	drive = 1;
	if (!UDP->cmos && FLOPPY1_TYPE)
		UDP->cmos = FLOPPY1_TYPE;
	/* FIXME: additional physical CMOS drive detection should go here */
	for (drive = 0; drive < N_DRIVE; drive++) {
		unsigned int type = UDP->cmos;
		struct floppy_drive_params *params;
		const char *name = NULL;
		char temparea[32];
		if (type < ARRAY_SIZE(default_drive_params)) {
			params = &default_drive_params[type].params;
			if (type) {
				name = default_drive_params[type].name;
				allowed_drive_mask |= 1 << drive;
			} else
				allowed_drive_mask &= ~(1 << drive);
		} else {
			/* out-of-range CMOS type: fall back to type 0 params */
			params = &default_drive_params[0].params;
			snprintf(temparea, sizeof(temparea),
				 "unknown type %d (usb?)", type);
			name = temparea;
		}
		if (name) {
			const char *prepend;
			if (!has_drive) {
				prepend = "";
				has_drive = true;
				pr_info("Floppy drive(s):");
			} else {
				prepend = ",";
			}
			pr_cont("%s fd%d is %s", prepend, drive, name);
		}
		*UDP = *params;
	}
	if (has_drive)
		pr_cont("\n");
}
/*
 * block_device_operations ->release: drop one open reference; forget the
 * cached block_device once the last opener is gone.
 */
static void floppy_release(struct gendisk *disk, fmode_t mode)
{
	int drive = (long)disk->private_data;
	mutex_lock(&floppy_mutex);
	mutex_lock(&open_lock);
	/* guard against refcount underflow (release without open) */
	if (!UDRS->fd_ref--) {
		DPRINT("floppy_release with fd_ref == 0");
		UDRS->fd_ref = 0;
	}
	if (!UDRS->fd_ref)
		opened_bdev[drive] = NULL;
	mutex_unlock(&open_lock);
	mutex_unlock(&floppy_mutex);
}
/*
 * floppy_open check for aliasing (/dev/fd0 can be the same as
 * /dev/PS0 etc), and disallows simultaneous access to the same
 * drive with different device numbers.
 */
static int floppy_open(struct block_device *bdev, fmode_t mode)
{
	int drive = (long)bdev->bd_disk->private_data;
	int old_dev, new_dev;
	int try;
	int res = -EBUSY;
	char *tmp;
	mutex_lock(&floppy_mutex);
	mutex_lock(&open_lock);
	old_dev = UDRS->fd_device;
	/* same drive already open through a different block_device: refuse */
	if (opened_bdev[drive] && opened_bdev[drive] != bdev)
		goto out2;
	if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)) {
		/* no working disk-change line: assume the disk changed */
		set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
		set_bit(FD_VERIFY_BIT, &UDRS->flags);
	}
	UDRS->fd_ref++;
	opened_bdev[drive] = bdev;
	res = -ENXIO;
	if (!floppy_track_buffer) {
		/* if opening an ED drive, reserve a big buffer,
		 * else reserve a small one */
		if ((UDP->cmos == 6) || (UDP->cmos == 5))
			try = 64;	/* Only 48 actually useful */
		else
			try = 32;	/* Only 24 actually useful */
		tmp = (char *)fd_dma_mem_alloc(1024 * try);
		if (!tmp && !floppy_track_buffer) {
			try >>= 1;	/* buffer only one side */
			INFBOUND(try, 16);
			tmp = (char *)fd_dma_mem_alloc(1024 * try);
		}
		if (!tmp && !floppy_track_buffer)
			fallback_on_nodma_alloc(&tmp, 2048 * try);
		if (!tmp && !floppy_track_buffer) {
			DPRINT("Unable to allocate DMA memory\n");
			goto out;
		}
		if (floppy_track_buffer) {
			/* someone else raced us to allocate; free ours */
			if (tmp)
				fd_dma_mem_free((unsigned long)tmp, try * 1024);
		} else {
			buffer_min = buffer_max = -1;
			floppy_track_buffer = tmp;
			max_buffer_sectors = try;
		}
	}
	new_dev = MINOR(bdev->bd_dev);
	UDRS->fd_device = new_dev;
	set_capacity(disks[drive], floppy_sizes[new_dev]);
	if (old_dev != -1 && old_dev != new_dev) {
		/* format changed under us: drop the cached track */
		if (buffer_drive == drive)
			buffer_track = -1;
	}
	if (UFDCS->rawcmd == 1)
		UFDCS->rawcmd = 2;
	if (!(mode & FMODE_NDELAY)) {
		if (mode & (FMODE_READ|FMODE_WRITE)) {
			UDRS->last_checked = 0;
			clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
			check_disk_change(bdev);
			if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
				goto out;
			if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
				goto out;
		}
		res = -EROFS;
		if ((mode & FMODE_WRITE) &&
		    !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
			goto out;
	}
	mutex_unlock(&open_lock);
	mutex_unlock(&floppy_mutex);
	return 0;
out:
	/* undo the reference taken above */
	UDRS->fd_ref--;
	if (!UDRS->fd_ref)
		opened_bdev[drive] = NULL;
out2:
	mutex_unlock(&open_lock);
	mutex_unlock(&floppy_mutex);
	return res;
}
/*
 * Check if the disk has been changed or if a change has been faked.
 *
 * This is a block_device_operations ->check_events callback and must
 * return a DISK_EVENT_* bitmask (or 0) -- never a negative errno.  The
 * previous code returned -EINTR when lock_fdc() was interrupted, which
 * an unsigned return type turns into a bogus, nearly-all-bits-set event
 * mask; report "no events" instead and let a later poll catch up.
 */
static unsigned int floppy_check_events(struct gendisk *disk,
					unsigned int clearing)
{
	int drive = (long)disk->private_data;

	if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
	    test_bit(FD_VERIFY_BIT, &UDRS->flags))
		return DISK_EVENT_MEDIA_CHANGE;

	/* re-poll the hardware at most once per checkfreq interval */
	if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
		if (lock_fdc(drive))
			return 0;	/* interrupted: report no events */
		poll_drive(false, 0);
		process_fd_request();
	}

	if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
	    test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
	    test_bit(drive, &fake_change) ||
	    drive_no_geom(drive))
		return DISK_EVENT_MEDIA_CHANGE;
	return 0;
}
/*
 * This implements "read block 0" for floppy_revalidate().
 * Needed for format autodetection, checking whether there is
 * a disk in the drive, and whether that disk is writable.
 */
/* completion context shared between __floppy_read_block_0() and its
 * bio end_io callback */
struct rb0_cbdata {
	int drive;			/* drive the read was issued on */
	struct completion complete;	/* signalled by floppy_rb0_cb() */
};
/*
 * bio end_io callback for the block-0 probe read: record a failure on
 * the drive and wake the waiter in __floppy_read_block_0().
 */
static void floppy_rb0_cb(struct bio *bio)
{
	struct rb0_cbdata *cbdata = (struct rb0_cbdata *)bio->bi_private;
	int drive = cbdata->drive;
	if (bio->bi_error) {
		pr_info("floppy: error %d while reading block 0\n",
			bio->bi_error);
		/* make the in-progress open fail cleanly */
		set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
	}
	complete(&cbdata->complete);
}
/*
 * Synchronously read block 0 of @drive, bypassing the page cache, using
 * an on-stack bio.  Used by floppy_revalidate() for format auto-sensing
 * and to detect disk presence/writability.
 *
 * Fix: the completion must be initialised BEFORE the bio is submitted.
 * floppy_rb0_cb() may run (and call complete()) as soon as submit_bio()
 * returns, so the old ordering -- init_completion() after submit_bio()
 * -- raced with the I/O completion path and could lose the wakeup.
 *
 * Returns 0, or -ENOMEM if the probe page cannot be allocated.
 */
static int __floppy_read_block_0(struct block_device *bdev, int drive)
{
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	struct rb0_cbdata cbdata;
	size_t size;

	page = alloc_page(GFP_NOIO);
	if (!page) {
		process_fd_request();
		return -ENOMEM;
	}

	size = bdev->bd_block_size;
	if (!size)
		size = 1024;

	cbdata.drive = drive;

	bio_init(&bio, &bio_vec, 1);
	bio.bi_bdev = bdev;
	bio_add_page(&bio, page, size, 0);

	bio.bi_iter.bi_sector = 0;
	bio.bi_flags |= (1 << BIO_QUIET);
	bio.bi_private = &cbdata;
	bio.bi_end_io = floppy_rb0_cb;
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	/* must be ready before the end_io callback can fire */
	init_completion(&cbdata.complete);

	submit_bio(&bio);
	process_fd_request();

	wait_for_completion(&cbdata.complete);

	__free_page(page);

	return 0;
}
/* revalidate the floppy disk, i.e. trigger format autodetection by reading
 * the bootblock (block 0). "Autodetection" is also needed to check whether
 * there is a disk in the drive at all... Thus we also do it for fixed
 * geometry formats */
static int floppy_revalidate(struct gendisk *disk)
{
	int drive = (long)disk->private_data;
	int cf;
	int res = 0;
	/* only do real work if a (possibly faked) change was flagged */
	if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
	    test_bit(FD_VERIFY_BIT, &UDRS->flags) ||
	    test_bit(drive, &fake_change) ||
	    drive_no_geom(drive)) {
		if (WARN(atomic_read(&usage_count) == 0,
			 "VFS: revalidate called on non-open device.\n"))
			return -EFAULT;
		res = lock_fdc(drive);
		if (res)
			return res;
		/* re-check under the lock: another thread may have
		 * already handled the change */
		cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
		      test_bit(FD_VERIFY_BIT, &UDRS->flags));
		if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) {
			process_fd_request();	/*already done by another thread */
			return 0;
		}
		/* invalidate cached geometry and track buffer */
		UDRS->maxblock = 0;
		UDRS->maxtrack = 0;
		if (buffer_drive == drive)
			buffer_track = -1;
		clear_bit(drive, &fake_change);
		clear_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
		if (cf)
			UDRS->generation++;
		if (drive_no_geom(drive)) {
			/* auto-sensing */
			res = __floppy_read_block_0(opened_bdev[drive], drive);
		} else {
			if (cf)
				poll_drive(false, FD_RAW_NEED_DISK);
			process_fd_request();
		}
	}
	set_capacity(disk, floppy_sizes[UDRS->fd_device]);
	return res;
}
/* block device operations for /dev/fd* */
static const struct block_device_operations floppy_fops = {
	.owner			= THIS_MODULE,
	.open			= floppy_open,
	.release		= floppy_release,
	.ioctl			= fd_ioctl,
	.getgeo			= fd_getgeo,
	.check_events		= floppy_check_events,
	.revalidate_disk	= floppy_revalidate,
};
/*
* Floppy Driver initialization
* =============================
*/
/* Determine the floppy disk controller type */
/* This routine was written by David C. Niemi */
/*
 * Probe the floppy controller by issuing a sequence of commands that
 * only progressively newer FDC generations understand (DUMPREGS,
 * CONFIGURE, PERPENDICULAR, LOCK/UNLOCK, PARTID) and classify it.
 * The command order is significant -- do not reorder.
 *
 * Returns one of the FDC_* version codes, or FDC_NONE/FDC_UNKNOWN.
 */
static char __init get_fdc_version(void)
{
	int r;
	output_byte(FD_DUMPREGS);	/* 82072 and better know DUMPREGS */
	if (FDCS->reset)
		return FDC_NONE;
	r = result();
	if (r <= 0x00)
		return FDC_NONE;	/* No FDC present ??? */
	if ((r == 1) && (reply_buffer[0] == 0x80)) {
		pr_info("FDC %d is an 8272A\n", fdc);
		return FDC_8272A;	/* 8272a/765 don't know DUMPREGS */
	}
	if (r != 10) {
		pr_info("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n",
			fdc, r);
		return FDC_UNKNOWN;
	}
	if (!fdc_configure()) {
		pr_info("FDC %d is an 82072\n", fdc);
		return FDC_82072;	/* 82072 doesn't know CONFIGURE */
	}
	output_byte(FD_PERPENDICULAR);
	if (need_more_output() == MORE_OUTPUT) {
		output_byte(0);
	} else {
		pr_info("FDC %d is an 82072A\n", fdc);
		return FDC_82072A;	/* 82072A as found on Sparcs. */
	}
	output_byte(FD_UNLOCK);
	r = result();
	if ((r == 1) && (reply_buffer[0] == 0x80)) {
		pr_info("FDC %d is a pre-1991 82077\n", fdc);
		return FDC_82077_ORIG;	/* Pre-1991 82077, doesn't know
					 * LOCK/UNLOCK */
	}
	if ((r != 1) || (reply_buffer[0] != 0x00)) {
		pr_info("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
			fdc, r);
		return FDC_UNKNOWN;
	}
	output_byte(FD_PARTID);
	r = result();
	if (r != 1) {
		pr_info("FDC %d init: PARTID: unexpected return of %d bytes.\n",
			fdc, r);
		return FDC_UNKNOWN;
	}
	if (reply_buffer[0] == 0x80) {
		pr_info("FDC %d is a post-1991 82077\n", fdc);
		return FDC_82077;	/* Revised 82077AA passes all the tests */
	}
	/* the top three PARTID bits identify the 82078 variant */
	switch (reply_buffer[0] >> 5) {
	case 0x0:
		/* Either a 82078-1 or a 82078SL running at 5Volt */
		pr_info("FDC %d is an 82078.\n", fdc);
		return FDC_82078;
	case 0x1:
		pr_info("FDC %d is a 44pin 82078\n", fdc);
		return FDC_82078;
	case 0x2:
		pr_info("FDC %d is a S82078B\n", fdc);
		return FDC_S82078B;
	case 0x3:
		pr_info("FDC %d is a National Semiconductor PC87306\n", fdc);
		return FDC_87306;
	default:
		pr_info("FDC %d init: 82078 variant with unknown PARTID=%d.\n",
			fdc, reply_buffer[0] >> 5);
		return FDC_82078_UNKN;
	}
}				/* get_fdc_version */
/* lilo configuration */
/*
 * Boot/module option helper: set (@param non-zero) or clear (@param
 * zero) the flag bits in @param2 in every default_drive_params entry.
 *
 * Fix: the debug print had its arguments swapped -- it tested @param2
 * (the flag mask, non-zero for every caller in config_params[]) to pick
 * the verb and printed @param (the 0/1 selector) as the "flag" value,
 * so it always said "Setting" and never showed the real flag.
 */
static void __init floppy_set_flags(int *ints, int param, int param2)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) {
		if (param)
			default_drive_params[i].params.flags |= param2;
		else
			default_drive_params[i].params.flags &= ~param2;
	}
	DPRINT("%s flag 0x%x\n", param ? "Setting" : "Clearing", param2);
}
/*
 * "daring" boot option: when @param is non-zero assume well-behaved
 * (standard) hardware -- no select delay, silent DCL clearing; when it
 * is zero fall back to the conservative settings for broken hardware.
 */
static void __init daring(int *ints, int param, int param2)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) {
		if (!param) {
			default_drive_params[i].params.select_delay =
				2 * HZ / 100;
			default_drive_params[i].params.flags &=
				~FD_SILENT_DCL_CLEAR;
			continue;
		}
		default_drive_params[i].params.select_delay = 0;
		default_drive_params[i].params.flags |= FD_SILENT_DCL_CLEAR;
	}
	DPRINT("Assuming %s floppy hardware\n", param ? "standard" : "broken");
}
/*
 * "cmos" boot option: force the CMOS drive-type code for one drive
 * (ints[1] = drive, ints[2] = type code).
 *
 * NOTE(review): DP presumably selects drive_params via the local
 * variable named 'current_drive' (macro expansion) -- do not rename it
 * without checking the macro definition.
 */
static void __init set_cmos(int *ints, int dummy, int dummy2)
{
	int current_drive = 0;
	if (ints[0] != 2) {
		DPRINT("wrong number of parameters for CMOS\n");
		return;
	}
	current_drive = ints[1];
	if (current_drive < 0 || current_drive >= 8) {
		DPRINT("bad drive for set_cmos\n");
		return;
	}
#if N_FDC > 1
	/* drives 4..7 live on the second controller */
	if (current_drive >= 4 && !FDC2)
		FDC2 = 0x370;
#endif
	DP->cmos = ints[2];
	DPRINT("setting CMOS code to %d\n", ints[2]);
}
/*
 * Table of recognized "floppy=" boot/module options.  Each entry either
 * stores a value into *var or invokes fn(ints, param, param2).
 */
static struct param_table {
	const char *name;	/* option keyword */
	void (*fn) (int *ints, int param, int param2);	/* handler, or NULL */
	int *var;		/* variable to store param into, or NULL */
	int def_param;		/* value used when no "=value" was given */
	int param2;		/* extra argument passed to fn */
} config_params[] __initdata = {
	{"allowed_drive_mask", NULL, &allowed_drive_mask, 0xff, 0}, /* obsolete */
	{"all_drives", NULL, &allowed_drive_mask, 0xff, 0},	/* obsolete */
	{"asus_pci", NULL, &allowed_drive_mask, 0x33, 0},
	{"irq", NULL, &FLOPPY_IRQ, 6, 0},
	{"dma", NULL, &FLOPPY_DMA, 2, 0},
	{"daring", daring, NULL, 1, 0},
#if N_FDC > 1
	{"two_fdc", NULL, &FDC2, 0x370, 0},
	{"one_fdc", NULL, &FDC2, 0, 0},
#endif
	{"thinkpad", floppy_set_flags, NULL, 1, FD_INVERTED_DCL},
	{"broken_dcl", floppy_set_flags, NULL, 1, FD_BROKEN_DCL},
	{"messages", floppy_set_flags, NULL, 1, FTD_MSG},
	{"silent_dcl_clear", floppy_set_flags, NULL, 1, FD_SILENT_DCL_CLEAR},
	{"debug", floppy_set_flags, NULL, 1, FD_DEBUG},
	{"nodma", NULL, &can_use_virtual_dma, 1, 0},
	{"omnibook", NULL, &can_use_virtual_dma, 1, 0},
	{"yesdma", NULL, &can_use_virtual_dma, 0, 0},
	{"fifo_depth", NULL, &fifo_depth, 0xa, 0},
	{"nofifo", NULL, &no_fifo, 0x20, 0},
	{"usefifo", NULL, &no_fifo, 0, 0},
	{"cmos", set_cmos, NULL, 0, 0},
	{"slow", NULL, &slow_floppy, 1, 0},
	{"unexpected_interrupts", NULL, &print_unex, 1, 0},
	{"no_unexpected_interrupts", NULL, &print_unex, 0, 0},
	{"L40SX", NULL, &print_unex, 0, 0}
	EXTRA_FLOPPY_PARAMS
};
/*
 * Parse one "floppy=" option string against config_params[].
 * Returns 1 if the option was recognized, 0 otherwise (after printing
 * the list of valid options).
 */
static int __init floppy_setup(char *str)
{
	int i;
	int param;
	int ints[11];
	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (str) {
		for (i = 0; i < ARRAY_SIZE(config_params); i++) {
			if (strcmp(str, config_params[i].name) == 0) {
				/* use the explicit value if one was given,
				 * else the table default */
				if (ints[0])
					param = ints[1];
				else
					param = config_params[i].def_param;
				if (config_params[i].fn)
					config_params[i].fn(ints, param,
							    config_params[i].
							    param2);
				if (config_params[i].var) {
					DPRINT("%s=%d\n", str, param);
					*config_params[i].var = param;
				}
				return 1;
			}
		}
	}
	if (str) {
		DPRINT("unknown floppy option [%s]\n", str);
		DPRINT("allowed options are:");
		for (i = 0; i < ARRAY_SIZE(config_params); i++)
			pr_cont(" %s", config_params[i].name);
		pr_cont("\n");
	} else
		DPRINT("botched floppy option\n");
	DPRINT("Read Documentation/blockdev/floppy.txt\n");
	return 0;
}
/* becomes 0 once at least one controller has been found at init time */
static int have_no_fdc = -ENODEV;

/* sysfs "cmos" attribute: expose the drive's CMOS type code */
static ssize_t floppy_cmos_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct platform_device *p = to_platform_device(dev);
	int drive;
	drive = p->id;	/* platform device id == drive number */
	return sprintf(buf, "%X\n", UDP->cmos);
}
/* read-only sysfs attribute plus its attribute group */
static DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL);
static struct attribute *floppy_dev_attrs[] = {
	&dev_attr_cmos.attr,
	NULL
};
ATTRIBUTE_GROUPS(floppy_dev);

/* floppy_device[] entries are static; nothing to free on release */
static void floppy_device_release(struct device *dev)
{
}
/*
 * PM resume/restore: reset every present controller so it is in a known
 * state after suspend.  (FDCS is keyed on the local 'fdc' variable --
 * presumably via macro expansion; do not rename it.)
 */
static int floppy_resume(struct device *dev)
{
	int fdc;
	for (fdc = 0; fdc < N_FDC; fdc++)
		if (FDCS->address != -1)
			user_reset_fdc(-1, FD_RESET_ALWAYS, false);
	return 0;
}
/* power-management hooks: a full controller reset suffices for both
 * resume and restore-from-hibernation */
static const struct dev_pm_ops floppy_pm_ops = {
	.resume = floppy_resume,
	.restore = floppy_resume,
};

static struct platform_driver floppy_driver = {
	.driver = {
		   .name = "floppy",
		   .pm = &floppy_pm_ops,
	},
};

/* one platform device per drive, registered in do_floppy_init() */
static struct platform_device floppy_device[N_DRIVE];
/*
 * A drive is usable iff it is enabled in allowed_drive_mask and its
 * controller was successfully identified at init time.
 */
static bool floppy_available(int drive)
{
	if (fdc_state[FDC(drive)].version == FDC_NONE)
		return false;
	return (allowed_drive_mask & (1 << drive)) != 0;
}
/*
 * blk_register_region() probe: decode the minor number into a drive
 * (low 2 bits plus bit 7 selecting the second controller) and a format
 * type (bits 2..6), and return the matching gendisk.
 */
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
{
	int drive = (*part & 3) | ((*part & 0x80) >> 5);
	if (drive >= N_DRIVE || !floppy_available(drive))
		return NULL;
	if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type))
		return NULL;
	*part = 0;	/* floppies have no partitions */
	return get_disk(disks[drive]);
}
/*
 * Driver initialization: allocate gendisks and request queues, register
 * the block device and platform driver, probe each controller, then
 * register a platform device + gendisk per usable drive.  Unwinds in
 * reverse order via the goto labels on failure.
 */
static int __init do_floppy_init(void)
{
	int i, unit, drive, err;
	set_debugt();
	interruptjiffies = resultjiffies = jiffies;
#if defined(CONFIG_PPC)
	if (check_legacy_ioport(FDC1))
		return -ENODEV;
#endif
	raw_cmd = NULL;
	floppy_wq = alloc_ordered_workqueue("floppy", 0);
	if (!floppy_wq)
		return -ENOMEM;
	/* per-drive gendisk / queue / motor-off timer */
	for (drive = 0; drive < N_DRIVE; drive++) {
		disks[drive] = alloc_disk(1);
		if (!disks[drive]) {
			err = -ENOMEM;
			goto out_put_disk;
		}
		disks[drive]->queue = blk_init_queue(do_fd_request, &floppy_lock);
		if (!disks[drive]->queue) {
			err = -ENOMEM;
			goto out_put_disk;
		}
		blk_queue_max_hw_sectors(disks[drive]->queue, 64);
		disks[drive]->major = FLOPPY_MAJOR;
		disks[drive]->first_minor = TOMINOR(drive);
		disks[drive]->fops = &floppy_fops;
		sprintf(disks[drive]->disk_name, "fd%d", drive);
		setup_timer(&motor_off_timer[drive], motor_off_callback, drive);
	}
	err = register_blkdev(FLOPPY_MAJOR, "fd");
	if (err)
		goto out_put_disk;
	err = platform_driver_register(&floppy_driver);
	if (err)
		goto out_unreg_blkdev;
	blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
			    floppy_find, NULL, NULL);
	/* minor -> capacity table for every drive/format combination */
	for (i = 0; i < 256; i++)
		if (ITYPE(i))
			floppy_sizes[i] = floppy_type[ITYPE(i)].size;
		else
			floppy_sizes[i] = MAX_DISK_SIZE << 1;
	reschedule_timeout(MAXTIMEOUT, "floppy init");
	config_types();
	/* reset per-controller software state (FDCS is keyed on the
	 * global 'fdc' -- presumably via macro expansion) */
	for (i = 0; i < N_FDC; i++) {
		fdc = i;
		memset(FDCS, 0, sizeof(*FDCS));
		FDCS->dtr = -1;
		FDCS->dor = 0x4;
#if defined(__sparc__) || defined(__mc68000__)
	/*sparcs/sun3x don't have a DOR reset which we can fall back on to */
#ifdef __mc68000__
		if (MACH_IS_SUN3X)
#endif
			FDCS->version = FDC_82072A;
#endif
	}
	use_virtual_dma = can_use_virtual_dma & 1;
	fdc_state[0].address = FDC1;
	if (fdc_state[0].address == -1) {
		cancel_delayed_work(&fd_timeout);
		err = -ENODEV;
		goto out_unreg_region;
	}
#if N_FDC > 1
	fdc_state[1].address = FDC2;
#endif
	fdc = 0;		/* reset fdc in case of unexpected interrupt */
	err = floppy_grab_irq_and_dma();
	if (err) {
		cancel_delayed_work(&fd_timeout);
		err = -EBUSY;
		goto out_unreg_region;
	}
	/* initialise drive state */
	for (drive = 0; drive < N_DRIVE; drive++) {
		memset(UDRS, 0, sizeof(*UDRS));
		memset(UDRWE, 0, sizeof(*UDRWE));
		set_bit(FD_DISK_NEWCHANGE_BIT, &UDRS->flags);
		set_bit(FD_DISK_CHANGED_BIT, &UDRS->flags);
		set_bit(FD_VERIFY_BIT, &UDRS->flags);
		UDRS->fd_device = -1;
		floppy_track_buffer = NULL;
		max_buffer_sectors = 0;
	}
	/*
	 * Small 10 msec delay to let through any interrupt that
	 * initialization might have triggered, to not
	 * confuse detection:
	 */
	msleep(10);
	/* probe and classify each controller */
	for (i = 0; i < N_FDC; i++) {
		fdc = i;
		FDCS->driver_version = FD_DRIVER_VERSION;
		for (unit = 0; unit < 4; unit++)
			FDCS->track[unit] = 0;
		if (FDCS->address == -1)
			continue;
		FDCS->rawcmd = 2;
		if (user_reset_fdc(-1, FD_RESET_ALWAYS, false)) {
			/* free ioports reserved by floppy_grab_irq_and_dma() */
			floppy_release_regions(fdc);
			FDCS->address = -1;
			FDCS->version = FDC_NONE;
			continue;
		}
		/* Try to determine the floppy controller type */
		FDCS->version = get_fdc_version();
		if (FDCS->version == FDC_NONE) {
			/* free ioports reserved by floppy_grab_irq_and_dma() */
			floppy_release_regions(fdc);
			FDCS->address = -1;
			continue;
		}
		if (can_use_virtual_dma == 2 && FDCS->version < FDC_82072A)
			can_use_virtual_dma = 0;
		have_no_fdc = 0;
		/* Not all FDCs seem to be able to handle the version command
		 * properly, so force a reset for the standard FDC clones,
		 * to avoid interrupt garbage.
		 */
		user_reset_fdc(-1, FD_RESET_ALWAYS, false);
	}
	fdc = 0;
	cancel_delayed_work(&fd_timeout);
	current_drive = 0;
	initialized = true;
	if (have_no_fdc) {
		DPRINT("no floppy controllers found\n");
		err = have_no_fdc;
		goto out_release_dma;
	}
	/* register platform devices and gendisks for usable drives */
	for (drive = 0; drive < N_DRIVE; drive++) {
		if (!floppy_available(drive))
			continue;
		floppy_device[drive].name = floppy_device_name;
		floppy_device[drive].id = drive;
		floppy_device[drive].dev.release = floppy_device_release;
		floppy_device[drive].dev.groups = floppy_dev_groups;
		err = platform_device_register(&floppy_device[drive]);
		if (err)
			goto out_remove_drives;
		/* to be cleaned up... */
		disks[drive]->private_data = (void *)(long)drive;
		disks[drive]->flags |= GENHD_FL_REMOVABLE;
		device_add_disk(&floppy_device[drive].dev, disks[drive]);
	}
	return 0;
out_remove_drives:
	while (drive--) {
		if (floppy_available(drive)) {
			del_gendisk(disks[drive]);
			platform_device_unregister(&floppy_device[drive]);
		}
	}
out_release_dma:
	if (atomic_read(&usage_count))
		floppy_release_irq_and_dma();
out_unreg_region:
	blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
	platform_driver_unregister(&floppy_driver);
out_unreg_blkdev:
	unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
	destroy_workqueue(floppy_wq);
	for (drive = 0; drive < N_DRIVE; drive++) {
		if (!disks[drive])
			break;
		if (disks[drive]->queue) {
			del_timer_sync(&motor_off_timer[drive]);
			blk_cleanup_queue(disks[drive]->queue);
			disks[drive]->queue = NULL;
		}
		put_disk(disks[drive]);
	}
	return err;
}
#ifndef MODULE
/* async wrapper so built-in initialization does not stall the boot */
static __init void floppy_async_init(void *data, async_cookie_t cookie)
{
	do_floppy_init();
}
#endif

static int __init floppy_init(void)
{
#ifdef MODULE
	/* modules initialize synchronously so insmod can report errors */
	return do_floppy_init();
#else
	/* Don't hold up the bootup by the floppy initialization */
	async_schedule(floppy_async_init, NULL);
	return 0;
#endif
}
/*
 * I/O port ranges claimed per controller, as offsets from the FDC base
 * address.  Deliberately sparse: some offsets are reserved by PnP BIOS
 * or IDE and must not be requested.
 */
static const struct io_region {
	int offset;	/* offset from FDCS->address */
	int size;	/* number of ports */
} io_regions[] = {
	{ 2, 1 },
	/* address + 3 is sometimes reserved by pnp bios for motherboard */
	{ 4, 2 },
	/* address + 6 is reserved, and may be taken by IDE.
	 * Unfortunately, Adaptec doesn't know this :-(, */
	{ 7, 1 },
};
/*
 * Release the io_regions[] entries before @p (i.e. those successfully
 * requested so far), walking backwards from @p to the table start.
 * Pass ARRAY_END(io_regions) to release everything.
 */
static void floppy_release_allocated_regions(int fdc, const struct io_region *p)
{
	while (p != io_regions) {
		p--;
		release_region(FDCS->address + p->offset, p->size);
	}
}
/* pointer one past the last element of a static array */
#define ARRAY_END(X) (&((X)[ARRAY_SIZE(X)]))

/*
 * Request every I/O port range of controller @fdc.  All-or-nothing: on
 * failure the regions already obtained are released again.
 * Returns 0 or -EBUSY.
 */
static int floppy_request_regions(int fdc)
{
	const struct io_region *p;
	for (p = io_regions; p < ARRAY_END(io_regions); p++) {
		if (!request_region(FDCS->address + p->offset,
				    p->size, "floppy")) {
			DPRINT("Floppy io-port 0x%04lx in use\n",
			       FDCS->address + p->offset);
			floppy_release_allocated_regions(fdc, p);
			return -EBUSY;
		}
	}
	return 0;
}
/* release all I/O port ranges of controller @fdc */
static void floppy_release_regions(int fdc)
{
	floppy_release_allocated_regions(fdc, ARRAY_END(io_regions));
}
/*
 * First-user setup: grab the IRQ, the DMA channel and the I/O regions,
 * and bring every present controller into a known state.  Reference
 * counted via usage_count; only the 0 -> 1 transition does real work.
 * Returns 0 on success, -1 on failure (with usage_count restored).
 *
 * NOTE(review): the loops below use the GLOBAL 'fdc' as loop variable --
 * FDCS presumably expands in terms of it; do not convert to a local.
 */
static int floppy_grab_irq_and_dma(void)
{
	if (atomic_inc_return(&usage_count) > 1)
		return 0;
	/*
	 * We might have scheduled a free_irq(), wait it to
	 * drain first:
	 */
	flush_workqueue(floppy_wq);
	if (fd_request_irq()) {
		DPRINT("Unable to grab IRQ%d for the floppy driver\n",
		       FLOPPY_IRQ);
		atomic_dec(&usage_count);
		return -1;
	}
	if (fd_request_dma()) {
		DPRINT("Unable to grab DMA%d for the floppy driver\n",
		       FLOPPY_DMA);
		/* fall back to virtual DMA if configured to allow it */
		if (can_use_virtual_dma & 2)
			use_virtual_dma = can_use_virtual_dma = 1;
		if (!(can_use_virtual_dma & 1)) {
			fd_free_irq();
			atomic_dec(&usage_count);
			return -1;
		}
	}
	for (fdc = 0; fdc < N_FDC; fdc++) {
		if (FDCS->address != -1) {
			if (floppy_request_regions(fdc))
				goto cleanup;
		}
	}
	for (fdc = 0; fdc < N_FDC; fdc++) {
		if (FDCS->address != -1) {
			reset_fdc_info(1);
			fd_outb(FDCS->dor, FD_DOR);
		}
	}
	fdc = 0;
	set_dor(0, ~0, 8);	/* avoid immediate interrupt */
	for (fdc = 0; fdc < N_FDC; fdc++)
		if (FDCS->address != -1)
			fd_outb(FDCS->dor, FD_DOR);
	/*
	 * The driver will try and free resources and relies on us
	 * to know if they were allocated or not.
	 */
	fdc = 0;
	irqdma_allocated = 1;
	return 0;
cleanup:
	fd_free_irq();
	fd_free_dma();
	/* release regions of the controllers processed before the failure */
	while (--fdc >= 0)
		floppy_release_regions(fdc);
	atomic_dec(&usage_count);
	return -1;
}
/*
 * Last-user teardown: release IRQ/DMA, free the track buffer and the
 * I/O regions.  Only the 1 -> 0 usage_count transition does real work.
 */
static void floppy_release_irq_and_dma(void)
{
	int old_fdc;
#ifndef __sparc__
	int drive;
#endif
	long tmpsize;
	unsigned long tmpaddr;
	if (!atomic_dec_and_test(&usage_count))
		return;
	if (irqdma_allocated) {
		fd_disable_dma();
		fd_free_dma();
		fd_free_irq();
		irqdma_allocated = 0;
	}
	set_dor(0, ~0, 8);
#if N_FDC > 1
	set_dor(1, ~8, 0);
#endif
	if (floppy_track_buffer && max_buffer_sectors) {
		/* detach the buffer before freeing so concurrent readers
		 * of the globals never see a stale pointer */
		tmpsize = max_buffer_sectors * 1024;
		tmpaddr = (unsigned long)floppy_track_buffer;
		floppy_track_buffer = NULL;
		max_buffer_sectors = 0;
		buffer_min = buffer_max = -1;
		fd_dma_mem_free(tmpaddr, tmpsize);
	}
	/* diagnostics: nothing should still be pending at this point */
#ifndef __sparc__
	for (drive = 0; drive < N_FDC * 4; drive++)
		if (timer_pending(motor_off_timer + drive))
			pr_info("motor off timer %d still active\n", drive);
#endif
	if (delayed_work_pending(&fd_timeout))
		pr_info("floppy timer still active:%s\n", timeout_message);
	if (delayed_work_pending(&fd_timer))
		pr_info("auxiliary floppy timer still active\n");
	if (work_pending(&floppy_work))
		pr_info("work still pending\n");
	/* preserve the global 'fdc' across the release loop */
	old_fdc = fdc;
	for (fdc = 0; fdc < N_FDC; fdc++)
		if (FDCS->address != -1)
			floppy_release_regions(fdc);
	fdc = old_fdc;
}
#ifdef MODULE
static char *floppy;
/*
 * Split the module's "floppy=" parameter string on spaces/tabs and feed
 * each non-empty token to floppy_setup().  The string is modified in
 * place (tokens are NUL-terminated).
 */
static void __init parse_floppy_cfg_string(char *cfg)
{
	while (*cfg) {
		char *opt = cfg;

		/* advance to the end of this whitespace-separated token */
		while (*cfg && *cfg != ' ' && *cfg != '\t')
			cfg++;
		if (*cfg)
			*cfg++ = '\0';
		if (*opt)
			floppy_setup(opt);
	}
}
/* module entry point: apply the "floppy=" parameter, then initialize */
static int __init floppy_module_init(void)
{
	if (floppy)
		parse_floppy_cfg_string(floppy);
	return floppy_init();
}
module_init(floppy_module_init);
/*
 * Module teardown: unregister everything do_floppy_init() registered,
 * stop timers/work, release IRQ/DMA, and eject any disk left in drive 0.
 */
static void __exit floppy_module_exit(void)
{
	int drive;
	blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
	unregister_blkdev(FLOPPY_MAJOR, "fd");
	platform_driver_unregister(&floppy_driver);
	destroy_workqueue(floppy_wq);
	for (drive = 0; drive < N_DRIVE; drive++) {
		del_timer_sync(&motor_off_timer[drive]);
		if (floppy_available(drive)) {
			del_gendisk(disks[drive]);
			platform_device_unregister(&floppy_device[drive]);
		}
		blk_cleanup_queue(disks[drive]->queue);
		/*
		 * These disks have not called add_disk().  Don't put down
		 * queue reference in put_disk().
		 */
		if (!(allowed_drive_mask & (1 << drive)) ||
		    fdc_state[FDC(drive)].version == FDC_NONE)
			disks[drive]->queue = NULL;
		put_disk(disks[drive]);
	}
	cancel_delayed_work_sync(&fd_timeout);
	cancel_delayed_work_sync(&fd_timer);
	if (atomic_read(&usage_count))
		floppy_release_irq_and_dma();
	/* eject disk, if any */
	fd_eject(0);
}
module_exit(floppy_module_exit);

/* module parameters mirroring the "floppy=", irq= and dma= boot options */
module_param(floppy, charp, 0);
module_param(FLOPPY_IRQ, int, 0);
module_param(FLOPPY_DMA, int, 0);
MODULE_AUTHOR("Alain L. Knaff");
MODULE_SUPPORTED_DEVICE("fd");
MODULE_LICENSE("GPL");

/* This doesn't actually get used other than for module information */
static const struct pnp_device_id floppy_pnpids[] = {
	{"PNP0700", 0},
	{}
};
MODULE_DEVICE_TABLE(pnp, floppy_pnpids);

#else

/* built-in: parse "floppy=" from the kernel command line */
__setup("floppy=", floppy_setup);
module_init(floppy_init)
#endif

MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
| gpl-2.0 |
pali/linux-n900 | arch/arm/mach-davinci/common.c | 25 | 2969 | /*
* Code commons to all DaVinci SoCs.
*
* Author: Mark A. Greer <mgreer@mvista.com>
*
* 2009 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/module.h>
#include <linux/io.h>
#include <linux/etherdevice.h>
#include <linux/davinci_emac.h>
#include <linux/dma-mapping.h>
#include <asm/tlb.h>
#include <asm/mach/map.h>
#include <mach/common.h>
#include <mach/cputype.h>
#include "clock.h"
/* SoC description filled in by davinci_common_init(); exported for the
 * rest of the mach-davinci code */
struct davinci_soc_info davinci_soc_info;
EXPORT_SYMBOL(davinci_soc_info);

/* interrupt controller mapping, set up elsewhere during init */
void __iomem *davinci_intc_base;
int davinci_intc_type;
void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context)
{
char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
off_t offset = (off_t)context;
/* Read MAC addr from EEPROM */
if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN)
pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
}
/*
 * Read the JTAG ID register and match its part number / variant against
 * the SoC's ID table to identify the exact silicon.
 *
 * Returns 0 on a match (soc_info->cpu_id filled in), -ENOMEM if the
 * register cannot be mapped, -EINVAL if the ID is unknown.
 */
static int __init davinci_init_id(struct davinci_soc_info *soc_info)
{
	void __iomem *base;
	struct davinci_id *dip;
	u8 variant;
	u16 part_no;
	int i;

	base = ioremap(soc_info->jtag_id_reg, SZ_4K);
	if (!base) {
		pr_err("Unable to map JTAG ID register\n");
		return -ENOMEM;
	}

	soc_info->jtag_id = __raw_readl(base);
	iounmap(base);

	variant = (soc_info->jtag_id & 0xf0000000) >> 28;
	part_no = (soc_info->jtag_id & 0x0ffff000) >> 12;

	for (i = 0; i < soc_info->ids_num; i++) {
		dip = &soc_info->ids[i];
		/* Don't care about the manufacturer right now */
		if (dip->part_no == part_no && dip->variant == variant) {
			soc_info->cpu_id = dip->cpu_id;
			pr_info("DaVinci %s variant 0x%x\n", dip->name,
				dip->variant);
			return 0;
		}
	}

	pr_err("Unknown DaVinci JTAG ID 0x%x\n", soc_info->jtag_id);
	return -EINVAL;
}
/*
 * Common early init for all DaVinci SoCs: install the SoC description,
 * map the static I/O regions, identify the silicon and register the CPU
 * clocks.  Any failure here is fatal (panics), since nothing else can
 * proceed without it.
 */
void __init davinci_common_init(struct davinci_soc_info *soc_info)
{
	int ret;
	if (!soc_info) {
		ret = -EINVAL;
		goto err;
	}
	memcpy(&davinci_soc_info, soc_info, sizeof(struct davinci_soc_info));
	if (davinci_soc_info.io_desc && (davinci_soc_info.io_desc_num > 0))
		iotable_init(davinci_soc_info.io_desc,
			     davinci_soc_info.io_desc_num);
	/*
	 * Normally devicemaps_init() would flush caches and tlb after
	 * mdesc->map_io(), but we must also do it here because of the CPU
	 * revision check below.
	 */
	local_flush_tlb_all();
	flush_cache_all();
	/*
	 * We want to check CPU revision early for cpu_is_xxxx() macros.
	 * IO space mapping must be initialized before we can do that.
	 */
	ret = davinci_init_id(&davinci_soc_info);
	if (ret < 0)
		goto err;
	if (davinci_soc_info.cpu_clks) {
		ret = davinci_clk_init(davinci_soc_info.cpu_clks);
		if (ret != 0)
			goto err;
	}
	return;
err:
	panic("davinci_common_init: SoC Initialization failed\n");
}
/*
 * davinci_init_late - late boot-time setup: cpufreq, power management,
 * and gating of clocks that remained unused after initialization.
 */
void __init davinci_init_late(void)
{
	davinci_cpufreq_init();
	davinci_pm_init();
	davinci_clk_disable_unused();
}
| gpl-2.0 |
huhuikevin/kernel_imx | drivers/scsi/device_handler/scsi_dh_rdac.c | 25 | 23221 | /*
* LSI/Engenio/NetApp E-Series RDAC SCSI Device Handler
*
* Copyright (C) 2005 Mike Christie. All rights reserved.
* Copyright (C) Chandra Seetharaman, IBM Corp. 2007
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>
#define RDAC_NAME "rdac"
#define RDAC_RETRY_COUNT 5	/* MODE SELECT retries in send_mode_select() */

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20

/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller modes definitions
 */
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_TIMEOUT	(60 * HZ)	/* block layer timeout per request */
#define RDAC_RETRIES	3		/* block layer retries per request */
/* MODE SENSE/SELECT(6) parameter header */
struct rdac_mode_6_hdr {
	u8	data_len;
	u8	medium_type;
	u8	device_params;
	u8	block_desc_len;
};

/* MODE SENSE/SELECT(10) parameter header */
struct rdac_mode_10_hdr {
	u16	data_len;
	u8	medium_type;
	u8	device_params;
	u16	reserved;
	u16	block_desc_len;
};

/* Fields common to the legacy and expanded redundant-controller pages */
struct rdac_mode_common {
	u8	controller_serial[16];
	u8	alt_controller_serial[16];
	u8	rdac_mode[2];
	u8	alt_rdac_mode[2];
	u8	quiescence_timeout;
	u8	rdac_options;
};

/* Redundant-controller page in its MODE SELECT(6) form (up to 32 LUNs) */
struct rdac_pg_legacy {
	struct rdac_mode_6_hdr	hdr;
	u8	page_code;
	u8	page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN	32
	u8	lun_table[MODE6_MAX_LUN];
	u8	reserved2[32];
	u8	reserved3;
	u8	reserved4;
};

/* Redundant-controller page in its MODE SELECT(10) form (up to 256 LUNs) */
struct rdac_pg_expanded {
	struct rdac_mode_10_hdr	hdr;
	u8	page_code;
	u8	subpage_code;
	u8	page_len[2];
	struct rdac_mode_common common;
	u8	lun_table[256];
	u8	reserved3;
	u8	reserved4;
};
/* Vendor-specific INQUIRY VPD page 0xC9, page id "vace" */
struct c9_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC9 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "vace" */
	u8	avte_cvp;	/* mode/ownership bits, see check_ownership() */
	u8	path_prio;	/* bit 0: path preferred */
	u8	reserved2[38];
};

#define SUBSYS_ID_LEN	16
#define SLOT_ID_LEN	2
#define ARRAY_LABEL_LEN	31

/* Vendor-specific INQUIRY VPD page 0xC4, page id "subs" */
struct c4_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC4 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "subs" */
	u8	subsys_id[SUBSYS_ID_LEN];
	u8	revision[4];
	u8	slot_id[SLOT_ID_LEN];
	u8	reserved[2];
};

#define UNIQUE_ID_LEN 16
/* Vendor-specific INQUIRY VPD page 0xC8, page id "edid" */
struct c8_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC8 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "edid" */
	u8	reserved2[3];
	u8	vol_uniq_id_len;
	u8	vol_uniq_id[16];
	u8	vol_user_label_len;
	u8	vol_user_label[60];
	u8	array_uniq_id_len;
	u8	array_unique_id[UNIQUE_ID_LEN];
	u8	array_user_label_len;
	u8	array_user_label[60];
	u8	lun[8];
};

/* One object per controller, shared by every LUN reached through it */
struct rdac_controller {
	u8			array_id[UNIQUE_ID_LEN];
	int			use_ms10;	/* -1 unknown, else boolean */
	struct kref		kref;
	struct list_head	node; /* list of all controllers */
	union			{
		struct rdac_pg_legacy legacy;
		struct rdac_pg_expanded expanded;
	} mode_select;			/* scratch buffer for MODE SELECT */
	u8			index;
	u8			array_name[ARRAY_LABEL_LEN];
	struct Scsi_Host	*host;
	spinlock_t		ms_lock;	/* protects the ms_* fields */
	int			ms_queued;
	struct work_struct	ms_work;
	struct scsi_device	*ms_sdev;
	struct list_head	ms_head;
};

/* Vendor-specific INQUIRY VPD page 0xC2, page id "swr4" */
struct c2_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC2 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "swr4" */
	u8	sw_version[3];
	u8	sw_date[3];
	u8	features_enabled;
	u8	max_lun_supported;
	u8	partitions[239]; /* Total allocation length should be 0xFF */
};
/* Per-sdev handler state, carved out of scsi_dh_data.buf */
struct rdac_dh_data {
	struct rdac_controller	*ctlr;
#define UNINITIALIZED_LUN	(1 << 8)
	unsigned		lun;

#define RDAC_MODE		0
#define RDAC_MODE_AVT		1
#define RDAC_MODE_IOSHIP	2
	unsigned char		mode;

#define RDAC_STATE_ACTIVE	0
#define RDAC_STATE_PASSIVE	1
	unsigned char		state;

#define RDAC_LUN_UNOWNED	0
#define RDAC_LUN_OWNED		1
	char			lun_state;

#define RDAC_PREFERRED		0
#define RDAC_NON_PREFERRED	1
	char			preferred;

	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
	/* scratch buffer shared by all the vendor INQUIRY pages */
	union {
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};
/* Printable names, indexed by rdac_dh_data.mode */
static const char *mode[] = {
	"RDAC",
	"AVT",
	"IOSHIP",
};

/* Printable names, indexed by rdac_dh_data.lun_state */
static const char *lun_state[] =
{
	"unowned",
	"owned",
};

/* One queued activation, drained in batch by send_mode_select() */
struct rdac_queue_data {
	struct list_head	entry;
	struct rdac_dh_data	*h;
	activate_complete	callback_fn;
	void			*callback_data;
};

static LIST_HEAD(ctlr_list);			/* all known controllers */
static DEFINE_SPINLOCK(list_lock);		/* protects ctlr_list */
static struct workqueue_struct *kmpath_rdacd;	/* mode-select workqueue */
static void send_mode_select(struct work_struct *work);
/*
 * module parameter to enable rdac debug logging.
 * 2 bits for each type of logging, only two types defined for now
 * Can be enhanced if required at later point
 */
static int rdac_logging = 1;
module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, "
		"Default is 1 - failover logging enabled, "
		"set it to 0xF to enable all the logs");

#define RDAC_LOG_FAILOVER	0
#define RDAC_LOG_SENSE		2

#define RDAC_LOG_BITS		2

#define RDAC_LOG_LEVEL(SHIFT)  \
	((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))

/*
 * Conditional logging, gated by the rdac_logging bitmask.  The
 * do/while(0) wrapper must carry no trailing semicolon so that
 * "RDAC_LOG(...);" behaves as a single statement even inside an
 * unbraced if/else.
 */
#define RDAC_LOG(SHIFT, sdev, f, arg...) \
do { \
	if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
		sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
} while (0)
/* Fetch our per-device state; the handler must already be attached. */
static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
{
	struct scsi_dh_data *dh_data = sdev->scsi_dh_data;

	BUG_ON(dh_data == NULL);
	return (struct rdac_dh_data *)dh_data->buf;
}
/*
 * Allocate a BLOCK_PC request for an RDAC command, mapping @buffer
 * into it when @buflen is non-zero.  Fail-fast flags are set so the
 * block layer does not stall on an unresponsive path.  Returns NULL
 * on allocation or mapping failure.
 */
static struct request *get_rdac_req(struct scsi_device *sdev,
			void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;

	rq = blk_get_request(q, rw, GFP_NOIO);
	if (!rq) {
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_get_request failed.\n");
		return NULL;
	}

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
		blk_put_request(rq);
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_rq_map_kern failed.\n");
		return NULL;
	}

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
	rq->retries = RDAC_RETRIES;
	rq->timeout = RDAC_TIMEOUT;

	return rq;
}
/*
 * Build the MODE SELECT request that asks the controller reached via
 * @sdev to take ownership of every LUN queued on @list.  Uses the
 * 10-byte CDB with the expanded page when the controller supports it,
 * the 6-byte CDB with the legacy page otherwise.  The page is built in
 * the controller-wide mode_select scratch buffer.
 */
static struct request *rdac_failover_get(struct scsi_device *sdev,
			struct rdac_dh_data *h, struct list_head *list)
{
	struct request *rq;
	struct rdac_mode_common *common;
	unsigned data_size;
	struct rdac_queue_data *qdata;
	u8 *lun_table;

	if (h->ctlr->use_ms10) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &h->ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		/* +0x40: presumably sets the sub-page format bit — confirm
		 * against the SPC mode page spec */
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
		lun_table = rdac_pg->lun_table;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &h->ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
		lun_table = rdac_pg->lun_table;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESENCE;

	/* Mark every queued LUN for transfer (0x81 per the LSI driver) */
	list_for_each_entry(qdata, list, entry) {
		lun_table[qdata->h->lun] = 0x81;
	}

	/* get request for block layer packet command */
	rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
	if (!rq)
		return NULL;

	/* Prepare the command. */
	if (h->ctlr->use_ms10) {
		rq->cmd[0] = MODE_SELECT_10;
		rq->cmd[7] = data_size >> 8;
		rq->cmd[8] = data_size & 0xff;
	} else {
		rq->cmd[0] = MODE_SELECT;
		rq->cmd[4] = data_size;
	}
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	return rq;
}
/* kref release: unlink the controller and free it once the last
 * reference is dropped.  Flushing the workqueue first ensures no
 * queued mode-select work can still touch the controller. */
static void release_controller(struct kref *kref)
{
	struct rdac_controller *ctlr =
		container_of(kref, struct rdac_controller, kref);

	flush_workqueue(kmpath_rdacd);

	spin_lock(&list_lock);
	list_del(&ctlr->node);
	spin_unlock(&list_lock);

	kfree(ctlr);
}
/*
 * Find the controller matching (@array_id, @index, host) on the global
 * list, taking a reference, or allocate and register a new one.
 * Returns NULL only on allocation failure.  GFP_ATOMIC is required
 * because the allocation happens under list_lock.
 */
static struct rdac_controller *get_controller(int index, char *array_name,
			u8 *array_id, struct scsi_device *sdev)
{
	struct rdac_controller *ctlr, *tmp;

	spin_lock(&list_lock);

	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
			  (tmp->index == index) &&
			  (tmp->host == sdev->host)) {
			kref_get(&tmp->kref);
			spin_unlock(&list_lock);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		goto done;

	/* initialize fields of controller */
	memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
	ctlr->index = index;
	ctlr->host = sdev->host;
	memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);

	kref_init(&ctlr->kref);
	ctlr->use_ms10 = -1;	/* unknown until set_mode_select() runs */
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_lock_init(&ctlr->ms_lock);
	INIT_WORK(&ctlr->ms_work, send_mode_select);
	INIT_LIST_HEAD(&ctlr->ms_head);
	list_add(&ctlr->node, &ctlr_list);

done:
	spin_unlock(&list_lock);

	return ctlr;
}
/*
 * Synchronously issue an INQUIRY for vital product data page
 * @page_code, reading @len bytes into h->inq.  Returns SCSI_DH_OK on
 * success, SCSI_DH_IO on I/O error, SCSI_DH_RES_TEMP_UNAVAIL if the
 * request could not be allocated.
 */
static int submit_inquiry(struct scsi_device *sdev, int page_code,
			  unsigned int len, struct rdac_dh_data *h)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;
	int err = SCSI_DH_RES_TEMP_UNAVAIL;

	rq = get_rdac_req(sdev, &h->inq, len, READ);
	if (!rq)
		goto done;

	/* Prepare the command. */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;		/* EVPD: request a VPD page */
	rq->cmd[2] = page_code;
	rq->cmd[4] = len;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	err = blk_execute_rq(q, NULL, rq, 1);
	if (err == -EIO)
		err = SCSI_DH_IO;

	blk_put_request(rq);
done:
	return err;
}
/*
 * Read VPD page 0xC8 and extract the LUN number, the array user label
 * (into @array_name) and the array unique id (into @array_id).
 * Returns SCSI_DH_OK on success, SCSI_DH_NOSYS if the page does not
 * look like an "edid" page, or the submit_inquiry() error.
 */
static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
			char *array_name, u8 *array_id)
{
	struct c8_inquiry *inqp;
	int err, i;

	err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
	if (err != SCSI_DH_OK)
		return err;

	inqp = &h->inq.c8;
	if (inqp->page_code != 0xc8)
		return SCSI_DH_NOSYS;
	if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
	    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
		return SCSI_DH_NOSYS;

	h->lun = inqp->lun[7];	/* Uses only the last byte */

	/* Label appears to use two bytes per character; keep the odd
	 * bytes — TODO confirm the on-wire encoding */
	for (i = 0; i < ARRAY_LABEL_LEN - 1; i++)
		array_name[i] = inqp->array_user_label[(2 * i) + 1];
	array_name[ARRAY_LABEL_LEN - 1] = '\0';

	memset(array_id, 0, UNIQUE_ID_LEN);
	memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);

	return err;
}
/*
 * Read VPD page 0xC9 and derive the operating mode (RDAC/AVT/IOSHIP),
 * LUN ownership and path priority.  Only in plain RDAC mode does an
 * unowned LUN make the path passive; in AVT/IOSHIP modes the path
 * stays active regardless of ownership.
 */
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err;
	struct c9_inquiry *inqp;

	h->state = RDAC_STATE_ACTIVE;
	err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c9;
		/* detect the operating mode */
		if ((inqp->avte_cvp >> 5) & 0x1)
			h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
		else if (inqp->avte_cvp >> 7)
			h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */
		else
			h->mode = RDAC_MODE; /* LUN in RDAC mode */

		/* Update ownership */
		if (inqp->avte_cvp & 0x1)
			h->lun_state = RDAC_LUN_OWNED;
		else {
			h->lun_state = RDAC_LUN_UNOWNED;
			if (h->mode == RDAC_MODE)
				h->state = RDAC_STATE_PASSIVE;
		}

		/* Update path prio*/
		if (inqp->path_prio & 0x1)
			h->preferred = RDAC_PREFERRED;
		else
			h->preferred = RDAC_NON_PREFERRED;
	}

	return err;
}
/*
 * Read VPD page 0xC4 to learn which controller slot we are talking to,
 * then look up or create the matching rdac_controller object.
 * h->ctlr is left NULL and SCSI_DH_RES_TEMP_UNAVAIL returned if the
 * controller object cannot be allocated.
 */
static int initialize_controller(struct scsi_device *sdev,
		struct rdac_dh_data *h, char *array_name, u8 *array_id)
{
	struct c4_inquiry *inqp;
	int err, index;

	err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
	if (err != SCSI_DH_OK)
		return err;

	inqp = &h->inq.c4;
	/* slot_id[1] == 0x31 (ASCII '1') selects index 0 */
	index = (inqp->slot_id[1] == 0x31) ? 0 : 1;

	h->ctlr = get_controller(index, array_name, array_id, sdev);
	if (!h->ctlr)
		err = SCSI_DH_RES_TEMP_UNAVAIL;

	return err;
}
/*
 * Read VPD page 0xC2 and record on the controller whether MODE
 * SELECT(10) must be used for failover commands.
 */
static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	struct c2_inquiry *inqp;
	int err;

	err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
	if (err != SCSI_DH_OK)
		return err;

	inqp = &h->inq.c2;
	/*
	 * If more than MODE6_MAX_LUN luns are supported, use
	 * mode select 10
	 */
	h->ctlr->use_ms10 = (inqp->max_lun_supported >= MODE6_MAX_LUN) ? 1 : 0;

	return err;
}
/*
 * Translate the sense data of a failed MODE SELECT into a SCSI_DH_*
 * status: SCSI_DH_RETRY for transient conditions, SCSI_DH_IO for
 * everything else (including unparsable sense).
 */
static int mode_select_handle_sense(struct scsi_device *sdev,
					unsigned char *sensebuf)
{
	struct scsi_sense_hdr sense_hdr;
	int err = SCSI_DH_IO, ret;
	struct rdac_dh_data *h = get_rdac_data(sdev);

	ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
	if (!ret)
		goto done;

	switch (sense_hdr.sense_key) {
	case NO_SENSE:
	case ABORTED_COMMAND:
	case UNIT_ATTENTION:
		err = SCSI_DH_RETRY;
		break;
	case NOT_READY:
		if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01)
			/* LUN Not Ready and is in the Process of Becoming
			 * Ready
			 */
			err = SCSI_DH_RETRY;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36)
			/*
			 * Command Lock contention
			 */
			err = SCSI_DH_RETRY;
		break;
	default:
		break;
	}

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"MODE_SELECT returned with sense %02x/%02x/%02x",
		(char *) h->ctlr->array_name, h->ctlr->index,
		sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);

done:
	return err;
}
/*
 * Workqueue handler: drain all activations queued on the controller's
 * ms_head and issue one MODE SELECT covering every queued LUN,
 * retrying up to RDAC_RETRY_COUNT times on retryable sense data.
 * The completion callback of each drained request is invoked with the
 * final status, and each device that succeeded is marked active.
 */
static void send_mode_select(struct work_struct *work)
{
	struct rdac_controller *ctlr =
		container_of(work, struct rdac_controller, ms_work);
	struct request *rq;
	struct scsi_device *sdev = ctlr->ms_sdev;
	struct rdac_dh_data *h = get_rdac_data(sdev);
	struct request_queue *q = sdev->request_queue;
	int err, retry_cnt = RDAC_RETRY_COUNT;
	struct rdac_queue_data *tmp, *qdata;
	LIST_HEAD(list);

	/* Take the whole batch; later queuers will schedule new work */
	spin_lock(&ctlr->ms_lock);
	list_splice_init(&ctlr->ms_head, &list);
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_unlock(&ctlr->ms_lock);

retry:
	err = SCSI_DH_RES_TEMP_UNAVAIL;
	rq = rdac_failover_get(sdev, h, &list);
	if (!rq)
		goto done;

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"%s MODE_SELECT command",
		(char *) h->ctlr->array_name, h->ctlr->index,
		(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");

	err = blk_execute_rq(q, NULL, rq, 1);
	blk_put_request(rq);
	if (err != SCSI_DH_OK) {
		err = mode_select_handle_sense(sdev, h->sense);
		if (err == SCSI_DH_RETRY && retry_cnt--)
			goto retry;
	}
	if (err == SCSI_DH_OK) {
		h->state = RDAC_STATE_ACTIVE;
		RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
				"MODE_SELECT completed",
				(char *) h->ctlr->array_name, h->ctlr->index);
	}

done:
	list_for_each_entry_safe(qdata, tmp, &list, entry) {
		list_del(&qdata->entry);
		if (err == SCSI_DH_OK)
			qdata->h->state = RDAC_STATE_ACTIVE;
		if (qdata->callback_fn)
			qdata->callback_fn(qdata->callback_data, err);
		kfree(qdata);
	}
	return;
}
/*
 * Queue an activation for the kmpath_rdacd workqueue.  Concurrent
 * activations against the same controller are coalesced into one MODE
 * SELECT: only the first queuer schedules the work item.  Returns
 * SCSI_DH_OK when queued, SCSI_DH_RETRY on allocation failure.
 */
static int queue_mode_select(struct scsi_device *sdev,
				activate_complete fn, void *data)
{
	struct rdac_queue_data *qdata;
	struct rdac_controller *ctlr;

	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
	if (!qdata)
		return SCSI_DH_RETRY;

	qdata->h = get_rdac_data(sdev);
	qdata->callback_fn = fn;
	qdata->callback_data = data;

	ctlr = qdata->h->ctlr;
	spin_lock(&ctlr->ms_lock);
	list_add_tail(&qdata->entry, &ctlr->ms_head);
	if (!ctlr->ms_queued) {
		ctlr->ms_queued = 1;
		ctlr->ms_sdev = sdev;
		queue_work(kmpath_rdacd, &ctlr->ms_work);
	}
	spin_unlock(&ctlr->ms_lock);

	return SCSI_DH_OK;
}
/*
 * scsi_dh activate callback: make the path through @sdev usable.
 * A MODE SELECT is needed only for an unowned LUN in RDAC mode, or an
 * unowned-but-preferred LUN in IOSHIP mode.  @fn is invoked with the
 * result, either here or by the workqueue once the queued MODE SELECT
 * completes.
 */
static int rdac_activate(struct scsi_device *sdev,
			activate_complete fn, void *data)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);
	int err = SCSI_DH_OK;
	int act = 0;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto done;

	switch (h->mode) {
	case RDAC_MODE:
		if (h->lun_state == RDAC_LUN_UNOWNED)
			act = 1;
		break;
	case RDAC_MODE_IOSHIP:
		if ((h->lun_state == RDAC_LUN_UNOWNED) &&
		    (h->preferred == RDAC_PREFERRED))
			act = 1;
		break;
	default:
		break;
	}

	if (act) {
		err = queue_mode_select(sdev, fn, data);
		if (err == SCSI_DH_OK)
			return 0;
	}
done:
	if (fn)
		fn(data, err);
	return 0;
}
/*
 * scsi_dh prep callback: allow I/O only while the path is active;
 * on a passive path kill the request quietly so upper layers retry
 * elsewhere without log noise.
 */
static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);

	if (h->state == RDAC_STATE_ACTIVE)
		return BLKPREP_OK;

	req->cmd_flags |= REQ_QUIET;
	return BLKPREP_KILL;
}
/*
 * scsi_dh check_sense callback: decide from the sense data whether a
 * failed command should be requeued (ADD_TO_MLQUEUE), the path failed
 * over (SUCCESS, after marking it passive where appropriate), or left
 * to normal SCSI error handling (SCSI_RETURN_NOT_HANDLED).
 */
static int rdac_check_sense(struct scsi_device *sdev,
				struct scsi_sense_hdr *sense_hdr)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);

	RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
			"I/O returned with sense %02x/%02x/%02x",
			(char *) h->ctlr->array_name, h->ctlr->index,
			sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);

	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
			/* LUN Not Ready - Logical Unit Not Ready and is in
			 * the process of becoming ready
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
			/* LUN Not Ready - Storage firmware incompatible
			 * Manual code synchonisation required.
			 *
			 * Nothing we can do here. Try to bypass the path.
			 */
			return SUCCESS;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
			/* LUN Not Ready - Quiescense in progress
			 *
			 * Just retry and wait.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02)
			/* LUN Not Ready - Quiescense in progress
			 * or has been achieved
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
			/* Invalid Request - Current Logical Unit Ownership.
			 * Controller is not the current owner of the LUN,
			 * Fail the path, so that the other path be used.
			 */
			h->state = RDAC_STATE_PASSIVE;
			return SUCCESS;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
			/*
			 * Power On, Reset, or Bus Device Reset, just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02)
			/*
			 * Quiescence in progress , just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	}
	/* success just means we do not care what scsi-ml does */
	return SCSI_RETURN_NOT_HANDLED;
}
/* Vendor/model (prefix) pairs this handler attaches to; terminated by
 * a NULL entry.  Matching is prefix-based, see rdac_match(). */
static const struct scsi_dh_devlist rdac_dev_list[] = {
	{"IBM", "1722"},
	{"IBM", "1724"},
	{"IBM", "1726"},
	{"IBM", "1742"},
	{"IBM", "1745"},
	{"IBM", "1746"},
	{"IBM", "1814"},
	{"IBM", "1815"},
	{"IBM", "1818"},
	{"IBM", "3526"},
	{"SGI", "TP9400"},
	{"SGI", "TP9500"},
	{"SGI", "TP9700"},
	{"SGI", "IS"},
	{"STK", "OPENstorage D280"},
	{"SUN", "CSM200_R"},
	{"SUN", "LCSM100_I"},
	{"SUN", "LCSM100_S"},
	{"SUN", "LCSM100_E"},
	{"SUN", "LCSM100_F"},
	{"DELL", "MD3000"},
	{"DELL", "MD3000i"},
	{"DELL", "MD32xx"},
	{"DELL", "MD32xxi"},
	{"DELL", "MD36xxi"},
	{"DELL", "MD36xxf"},
	{"LSI", "INF-01-00"},
	{"ENGENIO", "INF-01-00"},
	{"STK", "FLEXLINE 380"},
	{"SUN", "CSM100_R_FC"},
	{"SUN", "STK6580_6780"},
	{"SUN", "SUN_6180"},
	{"SUN", "ArrayStorage"},
	{NULL, NULL},
};
static bool rdac_match(struct scsi_device *sdev)
{
int i;
if (scsi_device_tpgs(sdev))
return false;
for (i = 0; rdac_dev_list[i].vendor; i++) {
if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
strlen(rdac_dev_list[i].vendor)) &&
!strncmp(sdev->model, rdac_dev_list[i].model,
strlen(rdac_dev_list[i].model))) {
return true;
}
}
return false;
}
static int rdac_bus_attach(struct scsi_device *sdev);
static void rdac_bus_detach(struct scsi_device *sdev);

/* Registration record handed to the scsi_dh core */
static struct scsi_device_handler rdac_dh = {
	.name = RDAC_NAME,
	.module = THIS_MODULE,
	.devlist = rdac_dev_list,
	.prep_fn = rdac_prep_fn,
	.check_sense = rdac_check_sense,
	.attach = rdac_bus_attach,
	.detach = rdac_bus_detach,
	.activate = rdac_activate,
	.match = rdac_match,
};
/*
 * scsi_dh attach callback: allocate per-device data, interrogate the
 * array (LUN info, controller identity, ownership, mode-select form)
 * and publish the handler data on @sdev.  Returns 0 on success and
 * also on allocation failure (nothing attached); -EINVAL when the
 * device does not respond like an RDAC array.
 */
static int rdac_bus_attach(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data;
	struct rdac_dh_data *h;
	unsigned long flags;
	int err;
	char array_name[ARRAY_LABEL_LEN];
	char array_id[UNIQUE_ID_LEN];

	/* rdac_dh_data lives in the flexible buf[] of scsi_dh_data */
	scsi_dh_data = kzalloc(sizeof(*scsi_dh_data)
			       + sizeof(*h) , GFP_KERNEL);
	if (!scsi_dh_data) {
		sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
			    RDAC_NAME);
		return 0;
	}

	scsi_dh_data->scsi_dh = &rdac_dh;
	h = (struct rdac_dh_data *) scsi_dh_data->buf;
	h->lun = UNINITIALIZED_LUN;
	h->state = RDAC_STATE_ACTIVE;

	err = get_lun_info(sdev, h, array_name, array_id);
	if (err != SCSI_DH_OK)
		goto failed;

	err = initialize_controller(sdev, h, array_name, array_id);
	if (err != SCSI_DH_OK)
		goto failed;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	err = set_mode_select(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	if (!try_module_get(THIS_MODULE))
		goto clean_ctlr;

	/* Publish under the queue lock so prep_fn sees consistent data */
	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
	sdev->scsi_dh_data = scsi_dh_data;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);

	sdev_printk(KERN_NOTICE, sdev,
		    "%s: LUN %d (%s) (%s)\n",
		    RDAC_NAME, h->lun, mode[(int)h->mode],
		    lun_state[(int)h->lun_state]);

	return 0;

clean_ctlr:
	kref_put(&h->ctlr->kref, release_controller);

failed:
	kfree(scsi_dh_data);
	sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
		    RDAC_NAME);
	return -EINVAL;
}
/*
 * scsi_dh detach callback: unpublish the handler data under the queue
 * lock, drop the controller reference and free the per-device state.
 */
static void rdac_bus_detach( struct scsi_device *sdev )
{
	struct scsi_dh_data *scsi_dh_data;
	struct rdac_dh_data *h;
	unsigned long flags;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
	scsi_dh_data = sdev->scsi_dh_data;
	sdev->scsi_dh_data = NULL;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);

	h = (struct rdac_dh_data *) scsi_dh_data->buf;
	if (h->ctlr)
		kref_put(&h->ctlr->kref, release_controller);
	kfree(scsi_dh_data);
	module_put(THIS_MODULE);
	sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME);
}
/*
 * Module init: register the device handler and create the workqueue
 * that services mode-select requests.  Returns 0 on success or a
 * negative errno; previously a workqueue-creation failure incorrectly
 * returned 0 (success) even though the handler had been unregistered.
 */
static int __init rdac_init(void)
{
	int r;

	r = scsi_register_device_handler(&rdac_dh);
	if (r != 0) {
		printk(KERN_ERR "Failed to register scsi device handler.");
		goto done;
	}

	/*
	 * Create workqueue to handle mode selects for rdac
	 */
	kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
	if (!kmpath_rdacd) {
		scsi_unregister_device_handler(&rdac_dh);
		printk(KERN_ERR "kmpath_rdacd creation failed.\n");
		/* Propagate the failure instead of returning success */
		r = -EINVAL;
	}
done:
	return r;
}
/* Module exit: tear down the workqueue, then unregister the handler. */
static void __exit rdac_exit(void)
{
	destroy_workqueue(kmpath_rdacd);
	scsi_unregister_device_handler(&rdac_dh);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_VERSION("01.00.0000.0000");
MODULE_LICENSE("GPL");
| gpl-2.0 |
puppybane/linux-cyrus | sound/soc/codecs/hdmi-codec.c | 25 | 12326 | /*
* ALSA SoC codec for HDMI encoder drivers
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
* Author: Jyri Sarha <jsarha@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/module.h>
#include <linux/string.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/pcm_drm_eld.h>
#include <sound/hdmi-codec.h>
#include <sound/pcm_iec958.h>
#include <drm/drm_crtc.h> /* This is only to get MAX_ELD_BYTES */
/* Tracks, per HDMI device, how many codec DAIs have been registered */
struct hdmi_device {
	struct device *dev;
	struct list_head list;
	int cnt;
};
#define pos_to_hdmi_device(pos)	container_of((pos), struct hdmi_device, list)
LIST_HEAD(hdmi_device_list);

#define DAI_NAME_SIZE 16

/* Per-codec private state */
struct hdmi_codec_priv {
	struct hdmi_codec_pdata hcd;
	struct snd_soc_dai_driver *daidrv;
	struct hdmi_codec_daifmt daifmt[2];	/* indexed by dai->id */
	struct mutex current_stream_lock;	/* protects current_stream */
	struct snd_pcm_substream *current_stream;
	struct snd_pcm_hw_constraint_list ratec;
	uint8_t eld[MAX_ELD_BYTES];		/* cached display ELD */
};

static const struct snd_soc_dapm_widget hdmi_widgets[] = {
	SND_SOC_DAPM_OUTPUT("TX"),
};

static const struct snd_soc_dapm_route hdmi_routes[] = {
	{ "TX", NULL, "Playback" },
};

/* DAI ids double as indices into hdmi_codec_priv.daifmt */
enum {
	DAI_ID_I2S = 0,
	DAI_ID_SPDIF,
};
/* Describe the ELD control to userspace: a fixed-size byte blob. */
static int hdmi_eld_ctl_info(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_info *uinfo)
{
	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
	struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);

	uinfo->count = sizeof(hcp->eld);
	uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;

	return 0;
}
/* Copy the cached ELD bytes out to the requesting control client. */
static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
			    struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
	struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);

	memcpy(ucontrol->value.bytes.data, hcp->eld, sizeof(hcp->eld));

	return 0;
}
/* Read-only, volatile "ELD" control exposing the display's ELD data */
static const struct snd_kcontrol_new hdmi_controls[] = {
	{
		.access = SNDRV_CTL_ELEM_ACCESS_READ |
			  SNDRV_CTL_ELEM_ACCESS_VOLATILE,
		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
		.name = "ELD",
		.info = hdmi_eld_ctl_info,
		.get = hdmi_eld_ctl_get,
	},
};
/*
 * Claim the codec for @substream.  Succeeds when the codec is free or
 * already owned by the same substream; fails with -EINVAL otherwise,
 * since only one simultaneous stream is supported.
 */
static int hdmi_codec_new_stream(struct snd_pcm_substream *substream,
				 struct snd_soc_dai *dai)
{
	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
	int ret;

	mutex_lock(&hcp->current_stream_lock);
	if (!hcp->current_stream) {
		hcp->current_stream = substream;
		ret = 0;
	} else if (hcp->current_stream == substream) {
		ret = 0;	/* already ours */
	} else {
		dev_err(dai->dev, "Only one simultaneous stream supported!\n");
		ret = -EINVAL;
	}
	mutex_unlock(&hcp->current_stream_lock);

	return ret;
}
/*
 * DAI startup: claim the codec for this substream, start the video
 * side's audio path (optional op) and, when available, constrain the
 * runtime rates/channels from the display's ELD.
 *
 * NOTE(review): if snd_pcm_hw_constraint_eld() fails, current_stream
 * stays claimed and audio_startup() is not undone — verify whether
 * shutdown always runs on this error path.
 */
static int hdmi_codec_startup(struct snd_pcm_substream *substream,
			      struct snd_soc_dai *dai)
{
	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
	int ret = 0;

	dev_dbg(dai->dev, "%s()\n", __func__);

	ret = hdmi_codec_new_stream(substream, dai);
	if (ret)
		return ret;

	if (hcp->hcd.ops->audio_startup) {
		ret = hcp->hcd.ops->audio_startup(dai->dev->parent, hcp->hcd.data);
		if (ret) {
			/* Release the claim taken above */
			mutex_lock(&hcp->current_stream_lock);
			hcp->current_stream = NULL;
			mutex_unlock(&hcp->current_stream_lock);
			return ret;
		}
	}

	if (hcp->hcd.ops->get_eld) {
		ret = hcp->hcd.ops->get_eld(dai->dev->parent, hcp->hcd.data,
					    hcp->eld, sizeof(hcp->eld));

		/* get_eld failure is non-fatal: skip the constraint */
		if (!ret) {
			ret = snd_pcm_hw_constraint_eld(substream->runtime,
							hcp->eld);
			if (ret)
				return ret;
		}
	}
	return 0;
}
/*
 * DAI shutdown: stop the video side's audio path (audio_shutdown is a
 * mandatory op) and release the codec claim taken at startup.
 */
static void hdmi_codec_shutdown(struct snd_pcm_substream *substream,
				struct snd_soc_dai *dai)
{
	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);

	dev_dbg(dai->dev, "%s()\n", __func__);

	/* Shutting down a substream we do not own indicates a bug */
	WARN_ON(hcp->current_stream != substream);

	hcp->hcd.ops->audio_shutdown(dai->dev->parent, hcp->hcd.data);

	mutex_lock(&hcp->current_stream_lock);
	hcp->current_stream = NULL;
	mutex_unlock(&hcp->current_stream_lock);
}
/*
 * DAI hw_params: build IEC958 channel status and a CEA audio
 * infoframe from the PCM parameters and hand both, together with the
 * negotiated DAI format, to the video-side driver's hw_params op.
 */
static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params,
				struct snd_soc_dai *dai)
{
	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
	struct hdmi_codec_params hp = {
		.iec = {
			.status = { 0 },
			.subcode = { 0 },
			.pad = 0,
			.dig_subframe = { 0 },
		}
	};
	int ret;

	dev_dbg(dai->dev, "%s() width %d rate %d channels %d\n", __func__,
		params_width(params), params_rate(params),
		params_channels(params));

	/* Cap valid bits at 24 for wider sample formats */
	if (params_width(params) > 24)
		params->msbits = 24;

	ret = snd_pcm_create_iec958_consumer_hw_params(params, hp.iec.status,
						       sizeof(hp.iec.status));
	if (ret < 0) {
		dev_err(dai->dev, "Creating IEC958 channel status failed %d\n",
			ret);
		return ret;
	}

	ret = hdmi_codec_new_stream(substream, dai);
	if (ret)
		return ret;

	/* "STREAM" values let the sink take these from the audio stream */
	hdmi_audio_infoframe_init(&hp.cea);
	hp.cea.channels = params_channels(params);
	hp.cea.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
	hp.cea.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
	hp.cea.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;

	hp.sample_width = params_width(params);
	hp.sample_rate = params_rate(params);
	hp.channels = params_channels(params);

	return hcp->hcd.ops->hw_params(dai->dev->parent, hcp->hcd.data,
				       &hcp->daifmt[dai->id], &hp);
}
/*
 * DAI set_fmt: translate the ASoC DAI format bits into a
 * hdmi_codec_daifmt and stash it for later use by hw_params.
 * The S/PDIF DAI ignores @fmt entirely and always uses HDMI_SPDIF.
 * Returns -EINVAL for unsupported master or format settings.
 */
static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
			      unsigned int fmt)
{
	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
	struct hdmi_codec_daifmt cf = { 0 };
	int ret = 0;

	dev_dbg(dai->dev, "%s()\n", __func__);

	if (dai->id == DAI_ID_SPDIF) {
		cf.fmt = HDMI_SPDIF;
	} else {
		/* Who drives bit and frame clocks */
		switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
		case SND_SOC_DAIFMT_CBM_CFM:
			cf.bit_clk_master = 1;
			cf.frame_clk_master = 1;
			break;
		case SND_SOC_DAIFMT_CBS_CFM:
			cf.frame_clk_master = 1;
			break;
		case SND_SOC_DAIFMT_CBM_CFS:
			cf.bit_clk_master = 1;
			break;
		case SND_SOC_DAIFMT_CBS_CFS:
			break;
		default:
			return -EINVAL;
		}

		/* Clock polarity inversion */
		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			break;
		case SND_SOC_DAIFMT_NB_IF:
			cf.frame_clk_inv = 1;
			break;
		case SND_SOC_DAIFMT_IB_NF:
			cf.bit_clk_inv = 1;
			break;
		case SND_SOC_DAIFMT_IB_IF:
			cf.frame_clk_inv = 1;
			cf.bit_clk_inv = 1;
			break;
		}

		/* Serial data layout on the bus */
		switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
		case SND_SOC_DAIFMT_I2S:
			cf.fmt = HDMI_I2S;
			break;
		case SND_SOC_DAIFMT_DSP_A:
			cf.fmt = HDMI_DSP_A;
			break;
		case SND_SOC_DAIFMT_DSP_B:
			cf.fmt = HDMI_DSP_B;
			break;
		case SND_SOC_DAIFMT_RIGHT_J:
			cf.fmt = HDMI_RIGHT_J;
			break;
		case SND_SOC_DAIFMT_LEFT_J:
			cf.fmt = HDMI_LEFT_J;
			break;
		case SND_SOC_DAIFMT_AC97:
			cf.fmt = HDMI_AC97;
			break;
		default:
			dev_err(dai->dev, "Invalid DAI interface format\n");
			return -EINVAL;
		}
	}

	hcp->daifmt[dai->id] = cf;

	return ret;
}
/* Forward mute/unmute to the video-side driver; the op is optional. */
static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute)
{
	struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);

	dev_dbg(dai->dev, "%s()\n", __func__);

	if (!hcp->hcd.ops->digital_mute)
		return 0;

	return hcp->hcd.ops->digital_mute(dai->dev->parent, hcp->hcd.data,
					  mute);
}
/* Shared by the I2S and S/PDIF DAIs */
static const struct snd_soc_dai_ops hdmi_dai_ops = {
	.startup	= hdmi_codec_startup,
	.shutdown	= hdmi_codec_shutdown,
	.hw_params	= hdmi_codec_hw_params,
	.set_fmt	= hdmi_codec_set_fmt,
	.digital_mute	= hdmi_codec_digital_mute,
};

/* Sample rates advertised for both DAIs */
#define HDMI_RATES	(SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
			 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |\
			 SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\
			 SNDRV_PCM_RATE_192000)

#define SPDIF_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |\
			 SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE |\
			 SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_3BE |\
			 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE)

/*
 * This list is only for formats allowed on the I2S bus. So there is
 * some formats listed that are not supported by HDMI interface. For
 * instance allowing the 32-bit formats enables 24-precision with CPU
 * DAIs that do not support 24-bit formats. If the extra formats cause
 * problems, we should add the video side driver an option to disable
 * them.
 */
#define I2S_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |\
			 SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE |\
			 SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_3BE |\
			 SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE |\
			 SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE)
/* I2S DAI template; channels_max and name are patched at probe time */
static struct snd_soc_dai_driver hdmi_i2s_dai = {
	.id = DAI_ID_I2S,
	.playback = {
		.stream_name = "Playback",
		.channels_min = 2,
		.channels_max = 8,
		.rates = HDMI_RATES,
		.formats = I2S_FORMATS,
		.sig_bits = 24,	/* hw_params clamps msbits to 24 */
	},
	.ops = &hdmi_dai_ops,
};

/* S/PDIF DAI template; name is patched at probe time */
static const struct snd_soc_dai_driver hdmi_spdif_dai = {
	.id = DAI_ID_SPDIF,
	.playback = {
		.stream_name = "Playback",
		.channels_min = 2,
		.channels_max = 2,
		.rates = HDMI_RATES,
		.formats = SPDIF_FORMATS,
	},
	.ops = &hdmi_dai_ops,
};

/* DAI names handed out in registration order, at most 4 per device */
static char hdmi_dai_name[][DAI_NAME_SIZE] = {
	"hdmi-hifi.0",
	"hdmi-hifi.1",
	"hdmi-hifi.2",
	"hdmi-hifi.3",
};
/*
 * Map a device-tree phandle argument to a DAI name.  Returns -EAGAIN
 * for indices outside the known name table.
 */
static int hdmi_of_xlate_dai_name(struct snd_soc_component *component,
				  struct of_phandle_args *args,
				  const char **dai_name)
{
	int id = args->args[0];

	if (id >= ARRAY_SIZE(hdmi_dai_name))
		return -EAGAIN;

	*dai_name = hdmi_dai_name[id];
	return 0;
}
/*
 * Codec driver description: kcontrols, DAPM topology, and the DT
 * DAI-name translation hook.
 */
static struct snd_soc_codec_driver hdmi_codec = {
	.component_driver = {
		.controls = hdmi_controls,
		.num_controls = ARRAY_SIZE(hdmi_controls),
		.dapm_widgets = hdmi_widgets,
		.num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
		.dapm_routes = hdmi_routes,
		.num_dapm_routes = ARRAY_SIZE(hdmi_routes),
		.of_xlate_dai_name = hdmi_of_xlate_dai_name,
	},
};
/*
 * Probe: register one codec instance with up to two playback DAIs
 * (I2S and/or S/PDIF), as selected by the platform data supplied by
 * the video-side HDMI driver.
 *
 * Returns 0 on success, -EINVAL on bad platform data, -ENOMEM on
 * allocation failure, or the snd_soc_register_codec() error.
 */
static int hdmi_codec_probe(struct platform_device *pdev)
{
	struct hdmi_codec_pdata *hcd = pdev->dev.platform_data;
	struct device *dev = &pdev->dev;
	struct hdmi_codec_priv *hcp;
	struct hdmi_device *hd;
	struct list_head *pos;
	int dai_count, i = 0;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	if (!hcd) {
		dev_err(dev, "%s: No platform data\n", __func__);
		return -EINVAL;
	}

	/* At least one DAI and the mandatory video-side callbacks. */
	dai_count = hcd->i2s + hcd->spdif;
	if (dai_count < 1 || !hcd->ops || !hcd->ops->hw_params ||
	    !hcd->ops->audio_shutdown) {
		dev_err(dev, "%s: Invalid parameters\n", __func__);
		return -EINVAL;
	}

	hcp = devm_kzalloc(dev, sizeof(*hcp), GFP_KERNEL);
	if (!hcp)
		return -ENOMEM;

	/*
	 * One hdmi_device bookkeeping entry per parent HDMI controller;
	 * reuse an existing entry so DAI names stay unique per device.
	 */
	hd = NULL;
	list_for_each(pos, &hdmi_device_list) {
		struct hdmi_device *tmp = pos_to_hdmi_device(pos);

		if (tmp->dev == dev->parent) {
			hd = tmp;
			break;
		}
	}

	if (!hd) {
		hd = devm_kzalloc(dev, sizeof(*hd), GFP_KERNEL);
		if (!hd)
			return -ENOMEM;

		hd->dev = dev->parent;
		list_add_tail(&hd->list, &hdmi_device_list);
	}

	if (hd->cnt >= ARRAY_SIZE(hdmi_dai_name)) {
		dev_err(dev, "too many HDMI codecs are detected\n");
		return -EINVAL;
	}

	hcp->hcd = *hcd;
	mutex_init(&hcp->current_stream_lock);

	/* devm_kcalloc() checks the count * size product for overflow. */
	hcp->daidrv = devm_kcalloc(dev, dai_count, sizeof(*hcp->daidrv),
				   GFP_KERNEL);
	if (!hcp->daidrv)
		return -ENOMEM;

	if (hcd->i2s) {
		hcp->daidrv[i] = hdmi_i2s_dai;
		hcp->daidrv[i].playback.channels_max =
			hcd->max_i2s_channels;
		hcp->daidrv[i].name = hdmi_dai_name[hd->cnt++];
		i++;
	}

	if (hcd->spdif) {
		hcp->daidrv[i] = hdmi_spdif_dai;
		hcp->daidrv[i].name = hdmi_dai_name[hd->cnt++];
	}

	ret = snd_soc_register_codec(dev, &hdmi_codec, hcp->daidrv,
				     dai_count);
	if (ret) {
		dev_err(dev, "%s: snd_soc_register_codec() failed (%d)\n",
			__func__, ret);
		return ret;
	}

	dev_set_drvdata(dev, hcp);
	return 0;
}
/* Unregister the codec registered in probe; devm frees everything else. */
static int hdmi_codec_remove(struct platform_device *pdev)
{
	snd_soc_unregister_codec(&pdev->dev);
	return 0;
}
/* Platform driver glue; matched by name against HDMI_CODEC_DRV_NAME. */
static struct platform_driver hdmi_codec_driver = {
	.driver = {
		.name = HDMI_CODEC_DRV_NAME,
	},
	.probe = hdmi_codec_probe,
	.remove = hdmi_codec_remove,
};
module_platform_driver(hdmi_codec_driver);
MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
MODULE_DESCRIPTION("HDMI Audio Codec Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" HDMI_CODEC_DRV_NAME);
| gpl-2.0 |
SensePlatform/R | src/nmath/rchisq.c | 25 | 1162 | /*
* Mathlib : A C Library of Special Functions
* Copyright (C) 1998 Ross Ihaka
* Copyright (C) 2000 The R Core Team
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, a copy is available at
* https://www.R-project.org/Licenses/
*
* SYNOPSIS
*
* #include <Rmath.h>
* double rchisq(double df);
*
* DESCRIPTION
*
* Random variates from the chi-squared distribution.
*
* NOTES
*
* Calls rgamma to do the real work.
*/
#include "nmath.h"
/*
 * Draw a chi-squared(df) random variate, using the identity
 * chisq(df) == gamma(shape = df/2, scale = 2).  Negative or
 * non-finite df yields NaN (via ML_ERR_return_NAN).
 */
double rchisq(double df)
{
    if (!R_FINITE(df) || df < 0.0) ML_ERR_return_NAN;

    return rgamma(df / 2.0, 2.0);
}
| gpl-2.0 |
openwrt-es/linux | tools/testing/selftests/seccomp/seccomp_bpf.c | 25 | 108243 | // SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
*
* Test code for seccomp bpf.
*/
#define _GNU_SOURCE
#include <sys/types.h>
/*
* glibc 2.26 and later have SIGSYS in siginfo_t. Before that,
* we need to use the kernel's siginfo.h file and trick glibc
* into accepting it.
*/
#if !__GLIBC_PREREQ(2, 26)
# include <asm/siginfo.h>
# define __have_siginfo_t 1
# define __have_sigval_t 1
# define __have_sigevent_t 1
#endif
#include <errno.h>
#include <linux/filter.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <linux/prctl.h>
#include <linux/ptrace.h>
#include <linux/seccomp.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <limits.h>
#include <linux/elf.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <sys/times.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/kcmp.h>
#include <sys/resource.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <poll.h>
#include "../kselftest_harness.h"
#include "../clone3/clone3_selftests.h"
/* Attempt to de-conflict with the selftests tree. */
#ifndef SKIP
#define SKIP(s, ...) XFAIL(s, ##__VA_ARGS__)
#endif
#ifndef PR_SET_PTRACER
# define PR_SET_PTRACER 0x59616d61
#endif
#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
#endif
#ifndef PR_SECCOMP_EXT
#define PR_SECCOMP_EXT 43
#endif
#ifndef SECCOMP_EXT_ACT
#define SECCOMP_EXT_ACT 1
#endif
#ifndef SECCOMP_EXT_ACT_TSYNC
#define SECCOMP_EXT_ACT_TSYNC 1
#endif
#ifndef SECCOMP_MODE_STRICT
#define SECCOMP_MODE_STRICT 1
#endif
#ifndef SECCOMP_MODE_FILTER
#define SECCOMP_MODE_FILTER 2
#endif
#ifndef SECCOMP_RET_ALLOW
struct seccomp_data {
int nr;
__u32 arch;
__u64 instruction_pointer;
__u64 args[6];
};
#endif
#ifndef SECCOMP_RET_KILL_PROCESS
#define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */
#define SECCOMP_RET_KILL_THREAD 0x00000000U /* kill the thread */
#endif
#ifndef SECCOMP_RET_KILL
#define SECCOMP_RET_KILL SECCOMP_RET_KILL_THREAD
#define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */
#define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */
#define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */
#define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */
#endif
#ifndef SECCOMP_RET_LOG
#define SECCOMP_RET_LOG 0x7ffc0000U /* allow after logging */
#endif
#ifndef __NR_seccomp
# if defined(__i386__)
# define __NR_seccomp 354
# elif defined(__x86_64__)
# define __NR_seccomp 317
# elif defined(__arm__)
# define __NR_seccomp 383
# elif defined(__aarch64__)
# define __NR_seccomp 277
# elif defined(__riscv)
# define __NR_seccomp 277
# elif defined(__csky__)
# define __NR_seccomp 277
# elif defined(__hppa__)
# define __NR_seccomp 338
# elif defined(__powerpc__)
# define __NR_seccomp 358
# elif defined(__s390__)
# define __NR_seccomp 348
# elif defined(__xtensa__)
# define __NR_seccomp 337
# elif defined(__sh__)
# define __NR_seccomp 372
# else
# warning "seccomp syscall number unknown for this architecture"
# define __NR_seccomp 0xffff
# endif
#endif
#ifndef SECCOMP_SET_MODE_STRICT
#define SECCOMP_SET_MODE_STRICT 0
#endif
#ifndef SECCOMP_SET_MODE_FILTER
#define SECCOMP_SET_MODE_FILTER 1
#endif
#ifndef SECCOMP_GET_ACTION_AVAIL
#define SECCOMP_GET_ACTION_AVAIL 2
#endif
#ifndef SECCOMP_GET_NOTIF_SIZES
#define SECCOMP_GET_NOTIF_SIZES 3
#endif
#ifndef SECCOMP_FILTER_FLAG_TSYNC
#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
#endif
#ifndef SECCOMP_FILTER_FLAG_LOG
#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
#endif
#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#endif
#ifndef PTRACE_SECCOMP_GET_METADATA
#define PTRACE_SECCOMP_GET_METADATA 0x420d
struct seccomp_metadata {
__u64 filter_off; /* Input: which filter */
__u64 flags; /* Output: filter's flags */
};
#endif
#ifndef SECCOMP_FILTER_FLAG_NEW_LISTENER
#define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3)
#endif
#ifndef SECCOMP_RET_USER_NOTIF
#define SECCOMP_RET_USER_NOTIF 0x7fc00000U
#define SECCOMP_IOC_MAGIC '!'
#define SECCOMP_IO(nr) _IO(SECCOMP_IOC_MAGIC, nr)
#define SECCOMP_IOR(nr, type) _IOR(SECCOMP_IOC_MAGIC, nr, type)
#define SECCOMP_IOW(nr, type) _IOW(SECCOMP_IOC_MAGIC, nr, type)
#define SECCOMP_IOWR(nr, type) _IOWR(SECCOMP_IOC_MAGIC, nr, type)
/* Flags for seccomp notification fd ioctl. */
#define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif)
#define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \
struct seccomp_notif_resp)
#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64)
struct seccomp_notif {
__u64 id;
__u32 pid;
__u32 flags;
struct seccomp_data data;
};
struct seccomp_notif_resp {
__u64 id;
__s64 val;
__s32 error;
__u32 flags;
};
struct seccomp_notif_sizes {
__u16 seccomp_notif;
__u16 seccomp_notif_resp;
__u16 seccomp_data;
};
#endif
#ifndef SECCOMP_IOCTL_NOTIF_ADDFD
/* On success, the return value is the remote process's added fd number */
#define SECCOMP_IOCTL_NOTIF_ADDFD SECCOMP_IOW(3, \
struct seccomp_notif_addfd)
/* valid flags for seccomp_notif_addfd */
#define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */
struct seccomp_notif_addfd {
__u64 id;
__u32 flags;
__u32 srcfd;
__u32 newfd;
__u32 newfd_flags;
};
#endif
struct seccomp_notif_addfd_small {
__u64 id;
char weird[4];
};
#define SECCOMP_IOCTL_NOTIF_ADDFD_SMALL \
SECCOMP_IOW(3, struct seccomp_notif_addfd_small)
struct seccomp_notif_addfd_big {
union {
struct seccomp_notif_addfd addfd;
char buf[sizeof(struct seccomp_notif_addfd) + 8];
};
};
#define SECCOMP_IOCTL_NOTIF_ADDFD_BIG \
SECCOMP_IOWR(3, struct seccomp_notif_addfd_big)
#ifndef PTRACE_EVENTMSG_SYSCALL_ENTRY
#define PTRACE_EVENTMSG_SYSCALL_ENTRY 1
#define PTRACE_EVENTMSG_SYSCALL_EXIT 2
#endif
#ifndef SECCOMP_USER_NOTIF_FLAG_CONTINUE
#define SECCOMP_USER_NOTIF_FLAG_CONTINUE 0x00000001
#endif
#ifndef SECCOMP_FILTER_FLAG_TSYNC_ESRCH
#define SECCOMP_FILTER_FLAG_TSYNC_ESRCH (1UL << 4)
#endif
#ifndef seccomp
/* Fallback seccomp(2) wrapper for libcs that don't provide one. */
int seccomp(unsigned int op, unsigned int flags, void *args)
{
	errno = 0;
	return syscall(__NR_seccomp, op, flags, args);
}
#endif
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
#elif __BYTE_ORDER == __BIG_ENDIAN
#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32))
#else
#error "wut? Unknown __BYTE_ORDER?!"
#endif
#define SIBLING_EXIT_UNKILLED 0xbadbeef
#define SIBLING_EXIT_FAILURE 0xbadface
#define SIBLING_EXIT_NEWPRIVS 0xbadfeed
/*
 * Compare fd1 in pid1 against fd2 in pid2 via kcmp(2).  Returns 0 when
 * both refer to the same open file description; -1 with errno ENOSYS
 * when the kernel lacks kcmp().
 */
static int __filecmp(pid_t pid1, pid_t pid2, int fd1, int fd2)
{
#ifdef __NR_kcmp
	errno = 0;
	return syscall(__NR_kcmp, pid1, pid2, KCMP_FILE, fd1, fd2);
#else
	errno = ENOSYS;
	return -1;
#endif
}
/* Have TH_LOG report actual location filecmp() is used.
 * Treats a missing kcmp() as a (logged) match so tests degrade gracefully. */
#define filecmp(pid1, pid2, fd1, fd2)	({			\
	int _ret;						\
								\
	_ret = __filecmp(pid1, pid2, fd1, fd2);			\
	if (_ret != 0) {					\
		if (_ret < 0 && errno == ENOSYS) {		\
			TH_LOG("kcmp() syscall missing (test is less accurate)");\
			_ret = 0;				\
		}						\
	}							\
	_ret; })
/* Sanity-check kcmp() support; SKIP (not fail) when it is missing. */
TEST(kcmp)
{
	int ret;

	ret = __filecmp(getpid(), getpid(), 1, 1);
	EXPECT_EQ(ret, 0);
	if (ret != 0 && errno == ENOSYS)
		SKIP(return, "Kernel does not support kcmp() (missing CONFIG_KCMP?)");
}
/* Entering strict mode must succeed when CONFIG_SECCOMP is enabled. */
TEST(mode_strict_support)
{
	long ret;

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP");
	}
	/* exit(2) is one of the few syscalls strict mode still allows. */
	syscall(__NR_exit, 0);
}
/* In strict mode any other syscall (here prctl) must deliver SIGKILL. */
TEST_SIGNAL(mode_strict_cannot_call_prctl, SIGKILL)
{
	long ret;

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP");
	}
	syscall(__NR_prctl, PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
		NULL, NULL, NULL);
	EXPECT_FALSE(true) {
		TH_LOG("Unreachable!");
	}
}
/* Note! This doesn't test no new privs behavior */
TEST(no_new_privs_support)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	EXPECT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
}
/* Tests kernel support by checking for a copy_from_user() fault on NULL. */
TEST(mode_filter_support)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
	/* EFAULT proves the filter path exists and tried to copy the prog. */
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EFAULT, errno) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP_FILTER!");
	}
}
/*
 * Installing a filter without no_new_privs requires privilege:
 * EACCES for unprivileged callers, success otherwise.
 */
TEST(mode_filter_without_nnp)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_GET_NO_NEW_PRIVS, 0, NULL, 0, 0);
	ASSERT_LE(0, ret) {
		TH_LOG("Expected 0 or unsupported for NO_NEW_PRIVS");
	}
	errno = 0;
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	/* Succeeds with CAP_SYS_ADMIN, fails without */
	/* TODO(wad) check caps not euid */
	if (geteuid()) {
		EXPECT_EQ(-1, ret);
		EXPECT_EQ(EACCES, errno);
	} else {
		EXPECT_EQ(0, ret);
	}
}
/* Kernel-enforced cap on total attached filter instructions. */
#define MAX_INSNS_PER_PATH 32768
/* A single filter may hold at most BPF_MAXINSNS instructions. */
TEST(filter_size_limits)
{
	int i;
	int count = BPF_MAXINSNS + 1;
	struct sock_filter allow[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter *filter;
	struct sock_fprog prog = { };
	long ret;

	filter = calloc(count, sizeof(*filter));
	ASSERT_NE(NULL, filter);

	for (i = 0; i < count; i++)
		filter[i] = allow[0];

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	prog.filter = filter;
	prog.len = count;

	/* Too many filter instructions in a single filter. */
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_NE(0, ret) {
		TH_LOG("Installing %d insn filter was allowed", prog.len);
	}

	/* One less is okay, though. */
	prog.len -= 1;
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Installing %d insn filter wasn't allowed", prog.len);
	}
}
/* Stacking filters must eventually hit the MAX_INSNS_PER_PATH cap. */
TEST(filter_chain_limits)
{
	int i;
	int count = BPF_MAXINSNS;
	struct sock_filter allow[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter *filter;
	struct sock_fprog prog = { };
	long ret;

	filter = calloc(count, sizeof(*filter));
	ASSERT_NE(NULL, filter);

	for (i = 0; i < count; i++)
		filter[i] = allow[0];

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	prog.filter = filter;
	prog.len = 1;

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	prog.len = count;

	/* Too many total filter instructions. */
	for (i = 0; i < MAX_INSNS_PER_PATH; i++) {
		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
		if (ret != 0)
			break;
	}
	ASSERT_NE(0, ret) {
		TH_LOG("Allowed %d %d-insn filters (total with penalties:%d)",
		       i, count, i * (count + 4));
	}
}
/* Once in filter mode, downgrading to strict mode must be rejected. */
TEST(mode_filter_cannot_move_to_strict)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, 0, 0);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}
/* PR_GET_SECCOMP reports 0 before and 2 (filter mode) after install. */
TEST(mode_filter_get_seccomp)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
	EXPECT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
	EXPECT_EQ(2, ret);
}
/* The trivial allow-everything filter installs cleanly. */
TEST(ALLOW_all)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
}
/* A zero-instruction filter must be rejected with EINVAL. */
TEST(empty_prog)
{
	struct sock_filter filter[] = {
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}
/* SECCOMP_RET_LOG lets syscalls proceed (auditing is not verified here). */
TEST(log_all)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	/* getppid() should succeed and be logged (no check for logging) */
	EXPECT_EQ(parent, syscall(__NR_getppid));
}
/* An unrecognized action value inside the defined range kills via SIGSYS. */
TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, 0x10000000U),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(0, syscall(__NR_getpid)) {
		TH_LOG("getpid() shouldn't ever return");
	}
}
/* return code >= 0x80000000 is unused. */
TEST_SIGNAL(unknown_ret_is_kill_above_allow, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, 0x90000000U),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(0, syscall(__NR_getpid)) {
		TH_LOG("getpid() shouldn't ever return");
	}
}
/* A kill-everything filter terminates the thread with SIGSYS immediately. */
TEST_SIGNAL(KILL_all, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
}
/* Kill only on getpid(); other syscalls (getppid) still work. */
TEST_SIGNAL(KILL_one, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}
/* Kill times(2) only when its first argument matches a specific pointer. */
TEST_SIGNAL(KILL_one_arg_one, SIGSYS)
{
	void *fatal_address;
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_times, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Only both with lower 32-bit for now. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(0)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K,
			(unsigned long)&fatal_address, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();
	struct tms timebuf;
	clock_t clock = times(&timebuf);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_LE(clock, syscall(__NR_times, &timebuf));
	/* times() should never return. */
	EXPECT_EQ(0, syscall(__NR_times, &fatal_address));
}
/* Kill mmap/mmap2 only when the sixth argument is the magic offset. */
TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
{
#ifndef __NR_mmap2
	int sysno = __NR_mmap;
#else
	int sysno = __NR_mmap2;
#endif
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, sysno, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Only both with lower 32-bit for now. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(5)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();
	int fd;
	void *map1, *map2;
	int page_size = sysconf(_SC_PAGESIZE);

	ASSERT_LT(0, page_size);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	fd = open("/dev/zero", O_RDONLY);
	ASSERT_NE(-1, fd);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	map1 = (void *)syscall(sysno,
		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size);
	EXPECT_NE(MAP_FAILED, map1);
	/* mmap2() should never return. */
	map2 = (void *)syscall(sysno,
		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
	EXPECT_EQ(MAP_FAILED, map2);

	/* The test failed, so clean up the resources. */
	munmap(map1, page_size);
	munmap(map2, page_size);
	close(fd);
}
/* This is a thread task to die via seccomp filter violation. */
void *kill_thread(void *data)
{
	bool die = (bool)data;

	if (die) {
		/* prctl is the syscall the installed filters kill on;
		 * reaching the return below means the filter failed. */
		prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
		return (void *)SIBLING_EXIT_FAILURE;
	}

	return (void *)SIBLING_EXIT_UNKILLED;
}
/* How the helper below configures the lethal filter action. */
enum kill_t {
	KILL_THREAD,
	KILL_PROCESS,
	RET_UNKNOWN
};
/* Prepare a thread that will kill itself or both of us. */
void kill_thread_or_group(struct __test_metadata *_metadata,
			  enum kill_t kill_how)
{
	pthread_t thread;
	void *status;
	/* Kill only when calling __NR_prctl. */
	struct sock_filter filter_thread[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog_thread = {
		.len = (unsigned short)ARRAY_SIZE(filter_thread),
		.filter = filter_thread,
	};
	/* RET_UNKNOWN uses a bogus action value to probe default handling. */
	int kill = kill_how == KILL_PROCESS ? SECCOMP_RET_KILL_PROCESS : 0xAAAAAAAAA;
	struct sock_filter filter_process[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, kill),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog_process = {
		.len = (unsigned short)ARRAY_SIZE(filter_process),
		.filter = filter_process,
	};

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0,
			     kill_how == KILL_THREAD ? &prog_thread
						     : &prog_process));

	/*
	 * Add the KILL_THREAD rule again to make sure that the KILL_PROCESS
	 * flag cannot be downgraded by a new filter.
	 */
	if (kill_how == KILL_PROCESS)
		ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog_thread));

	/* Start a thread that will exit immediately. */
	ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)false));
	ASSERT_EQ(0, pthread_join(thread, &status));
	ASSERT_EQ(SIBLING_EXIT_UNKILLED, (unsigned long)status);

	/* Start a thread that will die immediately. */
	ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)true));
	ASSERT_EQ(0, pthread_join(thread, &status));
	ASSERT_NE(SIBLING_EXIT_FAILURE, (unsigned long)status);

	/*
	 * If we get here, only the spawned thread died. Let the parent know
	 * the whole process didn't die (i.e. this thread, the spawner,
	 * stayed running).
	 */
	exit(42);
}
/* KILL_THREAD leaves the spawner alive: child exits with 42. */
TEST(KILL_thread)
{
	int status;
	pid_t child_pid;

	child_pid = fork();
	ASSERT_LE(0, child_pid);
	if (child_pid == 0) {
		kill_thread_or_group(_metadata, KILL_THREAD);
		_exit(38);
	}

	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));

	/* If only the thread was killed, we'll see exit 42. */
	ASSERT_TRUE(WIFEXITED(status));
	ASSERT_EQ(42, WEXITSTATUS(status));
}
/* KILL_PROCESS takes down the whole child: it dies by SIGSYS. */
TEST(KILL_process)
{
	int status;
	pid_t child_pid;

	child_pid = fork();
	ASSERT_LE(0, child_pid);
	if (child_pid == 0) {
		kill_thread_or_group(_metadata, KILL_PROCESS);
		_exit(38);
	}

	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));

	/* If the entire process was killed, we'll see SIGSYS. */
	ASSERT_TRUE(WIFSIGNALED(status));
	ASSERT_EQ(SIGSYS, WTERMSIG(status));
}
/* An unknown action must default to killing the whole process. */
TEST(KILL_unknown)
{
	int status;
	pid_t child_pid;

	child_pid = fork();
	ASSERT_LE(0, child_pid);
	if (child_pid == 0) {
		kill_thread_or_group(_metadata, RET_UNKNOWN);
		_exit(38);
	}

	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));

	/* If the entire process was killed, we'll see SIGSYS. */
	EXPECT_TRUE(WIFSIGNALED(status)) {
		TH_LOG("Unknown SECCOMP_RET is only killing the thread?");
	}
	ASSERT_EQ(SIGSYS, WTERMSIG(status));
}
/* TODO(wad) add 64-bit versus 32-bit arg tests. */
/* Loading past args[5] (syscall_arg(6)) must be rejected at install. */
TEST(arg_out_of_range)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(6)),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}
/*
 * Declare a filter named prog_<name> that fails read(2) with
 * SECCOMP_RET_ERRNO | errno and allows every other syscall.
 */
#define ERRNO_FILTER(name, errno)					\
	struct sock_filter _read_filter_##name[] = {			\
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,				\
			offsetof(struct seccomp_data, nr)),		\
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),	\
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | errno),	\
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),		\
	};								\
	struct sock_fprog prog_##name = {				\
		.len = (unsigned short)ARRAY_SIZE(_read_filter_##name),	\
		.filter = _read_filter_##name,				\
	}
/* Make sure basic errno values are correctly passed through a filter. */
TEST(ERRNO_valid)
{
	ERRNO_FILTER(valid, E2BIG);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_valid);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(0, NULL, 0));
	EXPECT_EQ(E2BIG, errno);
}
/* Make sure an errno of zero is correctly handled by the arch code. */
TEST(ERRNO_zero)
{
	ERRNO_FILTER(zero, 0);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_zero);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* "errno" of 0 is ok. */
	EXPECT_EQ(0, read(0, NULL, 0));
}
/*
 * The SECCOMP_RET_DATA mask is 16 bits wide, but errno is smaller.
 * This tests that the errno value gets capped correctly, fixed by
 * 580c57f10768 ("seccomp: cap SECCOMP_RET_ERRNO data to MAX_ERRNO").
 */
TEST(ERRNO_capped)
{
	ERRNO_FILTER(capped, 4096);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_capped);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(0, NULL, 0));
	EXPECT_EQ(4095, errno);
}
/*
 * Filters are processed in reverse order: last applied is executed first.
 * Since only the SECCOMP_RET_ACTION mask is tested for return values, the
 * SECCOMP_RET_DATA mask results will follow the most recently applied
 * matching filter return (and not the lowest or highest value).
 */
TEST(ERRNO_order)
{
	ERRNO_FILTER(first, 11);
	ERRNO_FILTER(second, 13);
	ERRNO_FILTER(third, 12);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_first);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_second);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_third);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(0, NULL, 0));
	EXPECT_EQ(12, errno);
}
/* Fixture: a filter that TRAPs (raises SIGSYS) on getpid() only. */
FIXTURE(TRAP) {
	struct sock_fprog prog;
};
FIXTURE_SETUP(TRAP)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	/* Heap-copy the stack-local filter so tests can install it later. */
	memset(&self->prog, 0, sizeof(self->prog));
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);
}
/* Release the filter copied in setup; free(NULL) is a no-op, so the
 * pointer needs no guard (setup zeroes the fixture first). */
FIXTURE_TEARDOWN(TRAP)
{
	free(self->prog.filter);
}
/* Default SIGSYS disposition: the TRAP action terminates on getpid(). */
TEST_F_SIGNAL(TRAP, dfl, SIGSYS)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	syscall(__NR_getpid);
}
/* Ensure that SIGSYS overrides SIG_IGN */
TEST_F_SIGNAL(TRAP, ign, SIGSYS)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	signal(SIGSYS, SIG_IGN);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	syscall(__NR_getpid);
}
/* State captured by the SIGSYS handler for later inspection. */
static siginfo_t TRAP_info;
static volatile int TRAP_nr;
/* SIGSYS handler: record the signal number and full siginfo. */
static void TRAP_action(int nr, siginfo_t *info, void *void_context)
{
	memcpy(&TRAP_info, info, sizeof(TRAP_info));
	TRAP_nr = nr;
}
/* A caught SIGSYS must carry the syscall number, arch, and call address. */
TEST_F(TRAP, handler)
{
	int ret, test;
	struct sigaction act;
	sigset_t mask;

	memset(&act, 0, sizeof(act));
	sigemptyset(&mask);
	sigaddset(&mask, SIGSYS);

	act.sa_sigaction = &TRAP_action;
	act.sa_flags = SA_SIGINFO;
	ret = sigaction(SIGSYS, &act, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("sigaction failed");
	}
	ret = sigprocmask(SIG_UNBLOCK, &mask, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("sigprocmask failed");
	}

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	TRAP_nr = 0;
	memset(&TRAP_info, 0, sizeof(TRAP_info));
	/* Expect the registers to be rolled back. (nr = error) may vary
	 * based on arch. */
	ret = syscall(__NR_getpid);
	/* Silence gcc warning about volatile. */
	test = TRAP_nr;
	EXPECT_EQ(SIGSYS, test);
	/* Mirror of the _sigsys siginfo layout, used when glibc lacks
	 * the si_syscall accessors. */
	struct local_sigsys {
		void *_call_addr; /* calling user insn */
		int _syscall;	/* triggering system call number */
		unsigned int _arch;	/* AUDIT_ARCH_* of syscall */
	} *sigsys = (struct local_sigsys *)
#ifdef si_syscall
		&(TRAP_info.si_call_addr);
#else
		&TRAP_info.si_pid;
#endif
	EXPECT_EQ(__NR_getpid, sigsys->_syscall);
	/* Make sure arch is non-zero. */
	EXPECT_NE(0, sigsys->_arch);
	EXPECT_NE(0, (unsigned long)sigsys->_call_addr);
}
/*
 * Fixture: one filter per action, each matching getpid(), used to
 * verify which action wins when multiple filters are stacked.
 */
FIXTURE(precedence) {
	struct sock_fprog allow;
	struct sock_fprog log;
	struct sock_fprog trace;
	struct sock_fprog error;
	struct sock_fprog trap;
	struct sock_fprog kill;
};
FIXTURE_SETUP(precedence)
{
	struct sock_filter allow_insns[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter log_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
	};
	struct sock_filter trace_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE),
	};
	struct sock_filter error_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO),
	};
	struct sock_filter trap_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
	};
	struct sock_filter kill_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
	};

	memset(self, 0, sizeof(*self));
/* Heap-copy each stack-local instruction array into the fixture. */
#define FILTER_ALLOC(_x) \
	self->_x.filter = malloc(sizeof(_x##_insns)); \
	ASSERT_NE(NULL, self->_x.filter); \
	memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \
	self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns)
	FILTER_ALLOC(allow);
	FILTER_ALLOC(log);
	FILTER_ALLOC(trace);
	FILTER_ALLOC(error);
	FILTER_ALLOC(trap);
	FILTER_ALLOC(kill);
}
/* Release all filters allocated in setup.  free(NULL) is a no-op and
 * setup memsets the fixture to zero first, so no guards are needed. */
FIXTURE_TEARDOWN(precedence)
{
#define FILTER_FREE(_x) free(self->_x.filter)
	FILTER_FREE(allow);
	FILTER_FREE(log);
	FILTER_FREE(trace);
	FILTER_FREE(error);
	FILTER_FREE(trap);
	FILTER_FREE(kill);
}
/*
 * With all six filters stacked, getppid() is allowed by every program,
 * so the syscall must still succeed.
 */
TEST_F(precedence, allow_ok)
{
pid_t parent, res = 0;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
ASSERT_EQ(0, ret);
/* Should work just fine. */
res = syscall(__NR_getppid);
EXPECT_EQ(parent, res);
}
/*
 * With every filter installed, getpid() must be killed (SIGSYS is the
 * expected test outcome): SECCOMP_RET_KILL outranks all other actions.
 */
TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS)
{
pid_t parent, res = 0;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
ASSERT_EQ(0, ret);
/* Should work just fine. */
res = syscall(__NR_getppid);
EXPECT_EQ(parent, res);
/* getpid() should never return. */
res = syscall(__NR_getpid);
EXPECT_EQ(0, res);
}
/*
 * Same as kill_is_highest, but with the filters installed in a
 * different order: precedence must not depend on installation order.
 */
TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS)
{
pid_t parent;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
/* getpid() should never return. */
EXPECT_EQ(0, syscall(__NR_getpid));
}
/*
 * Without the kill filter, SECCOMP_RET_TRAP is the strongest remaining
 * action; getpid() must raise SIGSYS.
 */
TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS)
{
pid_t parent;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
/* getpid() should never return. */
EXPECT_EQ(0, syscall(__NR_getpid));
}
/*
 * Same as trap_is_second, but installing the filters in a different
 * order: trap must still win over log/trace/errno.
 */
TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS)
{
pid_t parent;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
/* getpid() should never return. */
EXPECT_EQ(0, syscall(__NR_getpid));
}
/*
 * Without kill/trap filters, SECCOMP_RET_ERRNO wins: getpid() fails
 * with the filter's errno. The program was built with a bare
 * SECCOMP_RET_ERRNO (errno value 0), so the syscall returns 0 here.
 */
TEST_F(precedence, errno_is_third)
{
pid_t parent;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
EXPECT_EQ(0, syscall(__NR_getpid));
}
/*
 * Same as errno_is_third, with a different installation order:
 * SECCOMP_RET_ERRNO must still outrank trace and log.
 */
TEST_F(precedence, errno_is_third_in_any_order)
{
pid_t parent;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
EXPECT_EQ(0, syscall(__NR_getpid));
}
/*
 * With only allow/log/trace installed, SECCOMP_RET_TRACE wins; since
 * no tracer is attached, the traced syscall fails with -1.
 */
TEST_F(precedence, trace_is_fourth)
{
pid_t parent;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
/* No ptracer */
EXPECT_EQ(-1, syscall(__NR_getpid));
}
/*
 * Same as trace_is_fourth, with trace installed first: the order of
 * installation must not change which action wins.
 */
TEST_F(precedence, trace_is_fourth_in_any_order)
{
pid_t parent;
long ret;
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
/* No ptracer */
EXPECT_EQ(-1, syscall(__NR_getpid));
}
/*
 * With only allow and log installed, SECCOMP_RET_LOG wins over ALLOW
 * but still lets the syscall run: getpid() succeeds normally.
 */
TEST_F(precedence, log_is_fifth)
{
pid_t mypid, parent;
long ret;
mypid = getpid();
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
/* Should also work just fine */
EXPECT_EQ(mypid, syscall(__NR_getpid));
}
/*
 * Same as log_is_fifth with the installation order reversed: logging
 * still does not block the syscall.
 */
TEST_F(precedence, log_is_fifth_in_any_order)
{
pid_t mypid, parent;
long ret;
mypid = getpid();
parent = getppid();
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
ASSERT_EQ(0, ret);
/* Should work just fine. */
EXPECT_EQ(parent, syscall(__NR_getppid));
/* Should also work just fine */
EXPECT_EQ(mypid, syscall(__NR_getpid));
}
#ifndef PTRACE_O_TRACESECCOMP
#define PTRACE_O_TRACESECCOMP 0x00000080
#endif
/* Catch the Ubuntu 12.04 value error. */
#if PTRACE_EVENT_SECCOMP != 7
#undef PTRACE_EVENT_SECCOMP
#endif
#ifndef PTRACE_EVENT_SECCOMP
#define PTRACE_EVENT_SECCOMP 7
#endif
#define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
/*
 * Tracer main-loop flag, cleared from the SIGUSR1 handler to request
 * shutdown. An object written by a signal handler and polled from
 * normal control flow must be volatile sig_atomic_t (C11 5.1.2.3,
 * CERT SIG31-C): a plain bool may legally be cached in a register,
 * letting the tracer loop forever. Truth-value reads/writes elsewhere
 * in the file remain compatible.
 */
volatile sig_atomic_t tracer_running;
/* SIGUSR1 handler: ask start_tracer()'s loop to exit. */
void tracer_stop(int sig)
{
tracer_running = false;
}
typedef void tracer_func_t(struct __test_metadata *_metadata,
pid_t tracee, int status, void *args);
/*
 * Body of the forked tracer process: attach to @tracee with ptrace,
 * enable either syscall tracing or seccomp-event tracing, release the
 * tracee via the pipe @fd, then dispatch every stop to @tracer_func
 * until SIGUSR1 clears tracer_running or the tracee dies.
 *
 * @fd:             write end of the synchronization pipe; one byte is
 *                  written once tracing is fully set up.
 * @ptrace_syscall: true  -> PTRACE_O_TRACESYSGOOD + PTRACE_SYSCALL,
 *                  false -> PTRACE_O_TRACESECCOMP + PTRACE_CONT.
 * Never returns normally: exits via syscall(__NR_exit) reporting the
 * harness pass/fail state, or falls off the end after the tracee dies.
 */
void start_tracer(struct __test_metadata *_metadata, int fd, pid_t tracee,
tracer_func_t tracer_func, void *args, bool ptrace_syscall)
{
int ret = -1;
struct sigaction action = {
.sa_handler = tracer_stop,
};
/* Allow external shutdown. */
tracer_running = true;
ASSERT_EQ(0, sigaction(SIGUSR1, &action, NULL));
errno = 0;
/* Retry attach until it succeeds or fails with a hard EINVAL. */
while (ret == -1 && errno != EINVAL)
ret = ptrace(PTRACE_ATTACH, tracee, NULL, 0);
ASSERT_EQ(0, ret) {
kill(tracee, SIGKILL);
}
/* Wait for attach stop */
wait(NULL);
ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, ptrace_syscall ?
PTRACE_O_TRACESYSGOOD :
PTRACE_O_TRACESECCOMP);
ASSERT_EQ(0, ret) {
TH_LOG("Failed to set PTRACE_O_TRACESECCOMP");
kill(tracee, SIGKILL);
}
ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT,
tracee, NULL, 0);
ASSERT_EQ(0, ret);
/* Unblock the tracee */
ASSERT_EQ(1, write(fd, "A", 1));
ASSERT_EQ(0, close(fd));
/* Run until we're shut down. Must assert to stop execution. */
while (tracer_running) {
int status;
if (wait(&status) != tracee)
continue;
if (WIFSIGNALED(status) || WIFEXITED(status))
/* Child is dead. Time to go. */
return;
/* Check if this is a seccomp event. */
ASSERT_EQ(!ptrace_syscall, IS_SECCOMP_EVENT(status));
tracer_func(_metadata, tracee, status, args);
ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT,
tracee, NULL, 0);
ASSERT_EQ(0, ret);
}
/* Directly report the status of our test harness results. */
syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
}
/* Common tracer setup/teardown functions. */
/*
 * Deliberately empty SIGALRM handler: installing it lets an alarm
 * interrupt blocking calls without killing the process.
 */
void cont_handler(int num)
{ }
/*
 * Fork a child that ptrace-attaches back to the calling process and
 * runs @func on every trace stop (see start_tracer()). Blocks on a
 * pipe until the tracer signals that tracing is active.
 *
 * Returns the tracer's pid; pass it to teardown_trace_fixture() to
 * shut the tracer down.
 */
pid_t setup_trace_fixture(struct __test_metadata *_metadata,
tracer_func_t func, void *args, bool ptrace_syscall)
{
char sync;
int pipefd[2];
pid_t tracer_pid;
pid_t tracee = getpid();
/* Setup a pipe for clean synchronization. */
ASSERT_EQ(0, pipe(pipefd));
/* Fork a child which we'll promote to tracer */
tracer_pid = fork();
ASSERT_LE(0, tracer_pid);
signal(SIGALRM, cont_handler);
if (tracer_pid == 0) {
close(pipefd[0]);
start_tracer(_metadata, pipefd[1], tracee, func, args,
ptrace_syscall);
syscall(__NR_exit, 0);
}
close(pipefd[1]);
/* Permit the child to attach (needed under Yama ptrace_scope). */
prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
/* Block until the tracer writes its ready byte. */
read(pipefd[0], &sync, 1);
close(pipefd[0]);
return tracer_pid;
}
/*
 * Ask the tracer started by setup_trace_fixture() to exit (SIGUSR1),
 * reap it, and fold its exit status into this test's pass/fail state.
 * NOTE(review): WEXITSTATUS() is read without a WIFEXITED() check —
 * presumably the tracer always exits normally here; confirm.
 */
void teardown_trace_fixture(struct __test_metadata *_metadata,
pid_t tracer)
{
if (tracer) {
int status;
/*
 * Extract the exit code from the other process and
 * adopt it for ourselves in case its asserts failed.
 */
ASSERT_EQ(0, kill(tracer, SIGUSR1));
ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
if (WEXITSTATUS(status))
_metadata->passed = 0;
}
}
/* "poke" tracer arguments and function. */
struct tracer_args_poke_t {
unsigned long poke_addr;
};
/*
 * Seccomp-event callback: verify the event message is the filter's
 * 0x1001 tag, then write 0x1001 into the tracee at poke_addr so the
 * tracee can observe that the tracer ran.
 */
void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status,
void *args)
{
int ret;
unsigned long msg;
struct tracer_args_poke_t *info = (struct tracer_args_poke_t *)args;
ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
EXPECT_EQ(0, ret);
/* If this fails, don't try to recover. */
ASSERT_EQ(0x1001, msg) {
kill(tracee, SIGKILL);
}
/*
 * Poke in the message.
 * Registers are not touched to try to keep this relatively arch
 * agnostic.
 */
ret = ptrace(PTRACE_POKEDATA, tracee, info->poke_addr, 0x1001);
EXPECT_EQ(0, ret);
}
/*
 * Fixture pairing a TRACE-returning filter with a tracer that pokes
 * 0x1001 into `poked` whenever the filter fires on read(2).
 */
FIXTURE(TRACE_poke) {
struct sock_fprog prog;
pid_t tracer;
long poked;
struct tracer_args_poke_t tracer_args;
};
/*
 * Build a filter that returns SECCOMP_RET_TRACE|0x1001 for read(2)
 * and ALLOW otherwise, then launch the poking tracer aimed at
 * self->poked. The filter itself is installed by each test.
 */
FIXTURE_SETUP(TRACE_poke)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1001),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
self->poked = 0;
memset(&self->prog, 0, sizeof(self->prog));
self->prog.filter = malloc(sizeof(filter));
ASSERT_NE(NULL, self->prog.filter);
memcpy(self->prog.filter, filter, sizeof(filter));
self->prog.len = (unsigned short)ARRAY_SIZE(filter);
/* Set up tracer args. */
self->tracer_args.poke_addr = (unsigned long)&self->poked;
/* Launch tracer. */
self->tracer = setup_trace_fixture(_metadata, tracer_poke,
&self->tracer_args, false);
}
/*
 * Shut down the tracer launched in setup, then release the BPF
 * program. free(NULL) is a no-op, so the previous NULL guard was
 * redundant.
 */
FIXTURE_TEARDOWN(TRACE_poke)
{
teardown_trace_fixture(_metadata, self->tracer);
free(self->prog.filter);
}
/*
 * A read(2) call — even one that fails with a bad fd — must trigger
 * the TRACE filter, letting the tracer poke 0x1001 into `poked`.
 */
TEST_F(TRACE_poke, read_has_side_effects)
{
ssize_t ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
ASSERT_EQ(0, ret);
EXPECT_EQ(0, self->poked);
ret = read(-1, NULL, 0);
EXPECT_EQ(-1, ret);
EXPECT_EQ(0x1001, self->poked);
}
/*
 * A syscall the filter allows (getpid) must not trigger the tracer:
 * `poked` stays zero.
 */
TEST_F(TRACE_poke, getpid_runs_normally)
{
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
ASSERT_EQ(0, ret);
EXPECT_EQ(0, self->poked);
EXPECT_NE(0, syscall(__NR_getpid));
EXPECT_EQ(0, self->poked);
}
#if defined(__x86_64__)
# define ARCH_REGS struct user_regs_struct
# define SYSCALL_NUM(_regs) (_regs).orig_rax
# define SYSCALL_RET(_regs) (_regs).rax
#elif defined(__i386__)
# define ARCH_REGS struct user_regs_struct
# define SYSCALL_NUM(_regs) (_regs).orig_eax
# define SYSCALL_RET(_regs) (_regs).eax
#elif defined(__arm__)
# define ARCH_REGS struct pt_regs
# define SYSCALL_NUM(_regs) (_regs).ARM_r7
# ifndef PTRACE_SET_SYSCALL
# define PTRACE_SET_SYSCALL 23
# endif
# define SYSCALL_NUM_SET(_regs, _nr) \
EXPECT_EQ(0, ptrace(PTRACE_SET_SYSCALL, tracee, NULL, _nr))
# define SYSCALL_RET(_regs) (_regs).ARM_r0
#elif defined(__aarch64__)
# define ARCH_REGS struct user_pt_regs
# define SYSCALL_NUM(_regs) (_regs).regs[8]
# ifndef NT_ARM_SYSTEM_CALL
# define NT_ARM_SYSTEM_CALL 0x404
# endif
# define SYSCALL_NUM_SET(_regs, _nr) \
do { \
struct iovec __v; \
typeof(_nr) __nr = (_nr); \
__v.iov_base = &__nr; \
__v.iov_len = sizeof(__nr); \
EXPECT_EQ(0, ptrace(PTRACE_SETREGSET, tracee, \
NT_ARM_SYSTEM_CALL, &__v)); \
} while (0)
# define SYSCALL_RET(_regs) (_regs).regs[0]
#elif defined(__riscv) && __riscv_xlen == 64
# define ARCH_REGS struct user_regs_struct
# define SYSCALL_NUM(_regs) (_regs).a7
# define SYSCALL_RET(_regs) (_regs).a0
#elif defined(__csky__)
# define ARCH_REGS struct pt_regs
# if defined(__CSKYABIV2__)
# define SYSCALL_NUM(_regs) (_regs).regs[3]
# else
# define SYSCALL_NUM(_regs) (_regs).regs[9]
# endif
# define SYSCALL_RET(_regs) (_regs).a0
#elif defined(__hppa__)
# define ARCH_REGS struct user_regs_struct
# define SYSCALL_NUM(_regs) (_regs).gr[20]
# define SYSCALL_RET(_regs) (_regs).gr[28]
#elif defined(__powerpc__)
# define ARCH_REGS struct pt_regs
# define SYSCALL_NUM(_regs) (_regs).gpr[0]
# define SYSCALL_RET(_regs) (_regs).gpr[3]
# define SYSCALL_RET_SET(_regs, _val) \
do { \
typeof(_val) _result = (_val); \
/* \
* A syscall error is signaled by CR0 SO bit \
* and the code is stored as a positive value. \
*/ \
if (_result < 0) { \
SYSCALL_RET(_regs) = -_result; \
(_regs).ccr |= 0x10000000; \
} else { \
SYSCALL_RET(_regs) = _result; \
(_regs).ccr &= ~0x10000000; \
} \
} while (0)
# define SYSCALL_RET_SET_ON_PTRACE_EXIT
#elif defined(__s390__)
# define ARCH_REGS s390_regs
# define SYSCALL_NUM(_regs) (_regs).gprs[2]
# define SYSCALL_RET_SET(_regs, _val) \
TH_LOG("Can't modify syscall return on this architecture")
#elif defined(__mips__)
# include <asm/unistd_nr_n32.h>
# include <asm/unistd_nr_n64.h>
# include <asm/unistd_nr_o32.h>
# define ARCH_REGS struct pt_regs
# define SYSCALL_NUM(_regs) \
({ \
typeof((_regs).regs[2]) _nr; \
if ((_regs).regs[2] == __NR_O32_Linux) \
_nr = (_regs).regs[4]; \
else \
_nr = (_regs).regs[2]; \
_nr; \
})
# define SYSCALL_NUM_SET(_regs, _nr) \
do { \
if ((_regs).regs[2] == __NR_O32_Linux) \
(_regs).regs[4] = _nr; \
else \
(_regs).regs[2] = _nr; \
} while (0)
# define SYSCALL_RET_SET(_regs, _val) \
TH_LOG("Can't modify syscall return on this architecture")
#elif defined(__xtensa__)
# define ARCH_REGS struct user_pt_regs
# define SYSCALL_NUM(_regs) (_regs).syscall
/*
* On xtensa syscall return value is in the register
* a2 of the current window which is not fixed.
*/
#define SYSCALL_RET(_regs) (_regs).a[(_regs).windowbase * 4 + 2]
#elif defined(__sh__)
# define ARCH_REGS struct pt_regs
# define SYSCALL_NUM(_regs) (_regs).regs[3]
# define SYSCALL_RET(_regs) (_regs).regs[0]
#else
# error "Do not know how to find your architecture's registers and syscalls"
#endif
/*
* Most architectures can change the syscall by just updating the
* associated register. This is the default if not defined above.
*/
#ifndef SYSCALL_NUM_SET
# define SYSCALL_NUM_SET(_regs, _nr) \
do { \
SYSCALL_NUM(_regs) = (_nr); \
} while (0)
#endif
/*
* Most architectures can change the syscall return value by just
* writing to the SYSCALL_RET register. This is the default if not
* defined above. If an architecture cannot set the return value
* (for example when the syscall and return value register is
* shared), report it with TH_LOG() in an arch-specific definition
* of SYSCALL_RET_SET() above, and leave SYSCALL_RET undefined.
*/
#if !defined(SYSCALL_RET) && !defined(SYSCALL_RET_SET)
# error "One of SYSCALL_RET or SYSCALL_RET_SET is needed for this arch"
#endif
#ifndef SYSCALL_RET_SET
# define SYSCALL_RET_SET(_regs, _val) \
do { \
SYSCALL_RET(_regs) = (_val); \
} while (0)
#endif
/* When the syscall return can't be changed, stub out the tests for it. */
#ifndef SYSCALL_RET
# define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action)
#else
# define EXPECT_SYSCALL_RETURN(val, action) \
do { \
errno = 0; \
if (val < 0) { \
EXPECT_EQ(-1, action); \
EXPECT_EQ(-(val), errno); \
} else { \
EXPECT_EQ(val, action); \
} \
} while (0)
#endif
/*
* Some architectures (e.g. powerpc) can only set syscall
* return values on syscall exit during ptrace.
*/
const bool ptrace_entry_set_syscall_nr = true;
const bool ptrace_entry_set_syscall_ret =
#ifndef SYSCALL_RET_SET_ON_PTRACE_EXIT
true;
#else
false;
#endif
/*
* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
* architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux).
*/
#if defined(__x86_64__) || defined(__i386__) || defined(__mips__)
# define ARCH_GETREGS(_regs) ptrace(PTRACE_GETREGS, tracee, 0, &(_regs))
# define ARCH_SETREGS(_regs) ptrace(PTRACE_SETREGS, tracee, 0, &(_regs))
#else
# define ARCH_GETREGS(_regs) ({ \
struct iovec __v; \
__v.iov_base = &(_regs); \
__v.iov_len = sizeof(_regs); \
ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &__v); \
})
# define ARCH_SETREGS(_regs) ({ \
struct iovec __v; \
__v.iov_base = &(_regs); \
__v.iov_len = sizeof(_regs); \
ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &__v); \
})
#endif
/* Architecture-specific syscall fetching routine. */
int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
{
ARCH_REGS regs;
EXPECT_EQ(0, ARCH_GETREGS(regs)) {
return -1;
}
return SYSCALL_NUM(regs);
}
/* Architecture-specific syscall changing routine. */
void __change_syscall(struct __test_metadata *_metadata,
pid_t tracee, long *syscall, long *ret)
{
ARCH_REGS orig, regs;
/* Do not get/set registers if we have nothing to do. */
if (!syscall && !ret)
return;
EXPECT_EQ(0, ARCH_GETREGS(regs)) {
return;
}
orig = regs;
if (syscall)
SYSCALL_NUM_SET(regs, *syscall);
if (ret)
SYSCALL_RET_SET(regs, *ret);
/* Flush any register changes made. */
if (memcmp(&orig, ®s, sizeof(orig)) != 0)
EXPECT_EQ(0, ARCH_SETREGS(regs));
}
/* Change only syscall number. */
void change_syscall_nr(struct __test_metadata *_metadata,
pid_t tracee, long syscall)
{
__change_syscall(_metadata, tracee, &syscall, NULL);
}
/* Change syscall return value (and set syscall number to -1). */
void change_syscall_ret(struct __test_metadata *_metadata,
pid_t tracee, long ret)
{
long syscall = -1;
__change_syscall(_metadata, tracee, &syscall, &ret);
}
/*
 * SECCOMP_RET_TRACE event handler: dispatch on the filter's message
 * tag (0x1002..0x1005) and rewrite the tracee's syscall accordingly —
 * redirect getpid to getppid, fake gettid's return, fail openat with
 * ESRCH, or pass getppid through untouched. Unknown tags kill the
 * tracee.
 */
void tracer_seccomp(struct __test_metadata *_metadata, pid_t tracee,
int status, void *args)
{
int ret;
unsigned long msg;
/* Make sure we got the right message. */
ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
EXPECT_EQ(0, ret);
/* Validate and take action on expected syscalls. */
switch (msg) {
case 0x1002:
/* change getpid to getppid. */
EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
change_syscall_nr(_metadata, tracee, __NR_getppid);
break;
case 0x1003:
/* skip gettid with valid return code. */
EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
change_syscall_ret(_metadata, tracee, 45000);
break;
case 0x1004:
/* skip openat with error. */
EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
change_syscall_ret(_metadata, tracee, -ESRCH);
break;
case 0x1005:
/* do nothing (allow getppid) */
EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
break;
default:
EXPECT_EQ(0, msg) {
TH_LOG("Unknown PTRACE_GETEVENTMSG: 0x%lx", msg);
kill(tracee, SIGKILL);
}
}
}
/*
 * Fixture for syscall-rewriting tests: the filter program, the tracer
 * pid, cached tid/pid/ppid for comparisons, and the syscall number
 * saved at ptrace syscall-entry for use at syscall-exit.
 */
FIXTURE(TRACE_syscall) {
struct sock_fprog prog;
pid_t tracer, mytid, mypid, parent;
long syscall_nr;
};
/*
 * PTRACE_SYSCALL event handler mirroring tracer_seccomp(): rewrite
 * getpid -> getppid, fake gettid's return (45000), and fail openat
 * with -ESRCH. Because some architectures can only set the syscall
 * number at entry and the return value at exit, the work is split
 * across the two stops using the ptrace_entry_set_* policy flags.
 */
void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
int status, void *args)
{
int ret;
unsigned long msg;
/* Toggled on every stop; assumes strict entry/exit alternation. */
static bool entry;
long syscall_nr_val, syscall_ret_val;
long *syscall_nr = NULL, *syscall_ret = NULL;
FIXTURE_DATA(TRACE_syscall) *self = args;
/*
 * The traditional way to tell PTRACE_SYSCALL entry/exit
 * is by counting.
 */
entry = !entry;
/* Make sure we got an appropriate message. */
ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
EXPECT_EQ(0, ret);
EXPECT_EQ(entry ? PTRACE_EVENTMSG_SYSCALL_ENTRY
: PTRACE_EVENTMSG_SYSCALL_EXIT, msg);
/*
 * Some architectures only support setting return values during
 * syscall exit under ptrace, and on exit the syscall number may
 * no longer be available. Therefore, save the initial sycall
 * number here, so it can be examined during both entry and exit
 * phases.
 */
if (entry)
self->syscall_nr = get_syscall(_metadata, tracee);
/*
 * Depending on the architecture's syscall setting abilities, we
 * pick which things to set during this phase (entry or exit).
 */
if (entry == ptrace_entry_set_syscall_nr)
syscall_nr = &syscall_nr_val;
if (entry == ptrace_entry_set_syscall_ret)
syscall_ret = &syscall_ret_val;
/* Now handle the actual rewriting cases. */
switch (self->syscall_nr) {
case __NR_getpid:
syscall_nr_val = __NR_getppid;
/* Never change syscall return for this case. */
syscall_ret = NULL;
break;
case __NR_gettid:
syscall_nr_val = -1;
syscall_ret_val = 45000;
break;
case __NR_openat:
syscall_nr_val = -1;
syscall_ret_val = -ESRCH;
break;
default:
/* Unhandled, do nothing. */
return;
}
__change_syscall(_metadata, tracee, syscall_nr, syscall_ret);
}
FIXTURE_VARIANT(TRACE_syscall) {
/*
 * All of the SECCOMP_RET_TRACE behaviors can be tested with either
 * SECCOMP_RET_TRACE+PTRACE_CONT or plain ptrace()+PTRACE_SYSCALL.
 * This indicates if we should use SECCOMP_RET_TRACE (false), or
 * ptrace (true).
 */
bool use_ptrace;
};
/* Run every TRACE_syscall test via plain ptrace()+PTRACE_SYSCALL. */
FIXTURE_VARIANT_ADD(TRACE_syscall, ptrace) {
.use_ptrace = true,
};
/* Run every TRACE_syscall test via SECCOMP_RET_TRACE+PTRACE_CONT. */
FIXTURE_VARIANT_ADD(TRACE_syscall, seccomp) {
.use_ptrace = false,
};
/*
 * Cache tid/pid/ppid for later comparisons, launch the appropriate
 * tracer (ptrace- or seccomp-based per the variant), and — for the
 * seccomp variant only — install a filter tagging getpid/gettid/
 * openat/getppid with messages 0x1002..0x1005.
 */
FIXTURE_SETUP(TRACE_syscall)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
/* Prepare some testable syscall results. */
self->mytid = syscall(__NR_gettid);
ASSERT_GT(self->mytid, 0);
ASSERT_NE(self->mytid, 1) {
TH_LOG("Running this test as init is not supported. :)");
}
self->mypid = getpid();
ASSERT_GT(self->mypid, 0);
ASSERT_EQ(self->mytid, self->mypid);
self->parent = getppid();
ASSERT_GT(self->parent, 0);
ASSERT_NE(self->parent, self->mypid);
/* Launch tracer. */
self->tracer = setup_trace_fixture(_metadata,
variant->use_ptrace ? tracer_ptrace
: tracer_seccomp,
self, variant->use_ptrace);
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret);
/* The ptrace variant needs no seccomp filter at all. */
if (variant->use_ptrace)
return;
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
ASSERT_EQ(0, ret);
}
/* Stop and reap the tracer launched in fixture setup. */
FIXTURE_TEARDOWN(TRACE_syscall)
{
teardown_trace_fixture(_metadata, self->tracer);
}
TEST(negative_ENOSYS)
{
/*
 * There should be no difference between an "internal" skip
 * and userspace asking for syscall "-1".
 */
errno = 0;
EXPECT_EQ(-1, syscall(-1));
EXPECT_EQ(errno, ENOSYS);
/* And no difference for "still not valid but not -1". */
errno = 0;
EXPECT_EQ(-1, syscall(-101));
EXPECT_EQ(errno, ENOSYS);
}
/* Re-run the bare negative_ENOSYS checks under an active tracer. */
TEST_F(TRACE_syscall, negative_ENOSYS)
{
negative_ENOSYS(_metadata);
}
TEST_F(TRACE_syscall, syscall_allowed)
{
/* getppid works as expected (no changes). */
EXPECT_EQ(self->parent, syscall(__NR_getppid));
EXPECT_NE(self->mypid, syscall(__NR_getppid));
}
TEST_F(TRACE_syscall, syscall_redirected)
{
/* getpid has been redirected to getppid as expected. */
EXPECT_EQ(self->parent, syscall(__NR_getpid));
EXPECT_NE(self->mypid, syscall(__NR_getpid));
}
TEST_F(TRACE_syscall, syscall_errno)
{
/* Tracer should skip the open syscall, resulting in ESRCH. */
EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat));
}
TEST_F(TRACE_syscall, syscall_faked)
{
/* Tracer skips the gettid syscall and store altered return value. */
EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid));
}
/*
 * A filter installed after the tracer must still apply to the syscall
 * the tracer redirects to: getpid becomes getppid, which the new
 * filter rejects with EPERM.
 */
TEST_F(TRACE_syscall, skip_after)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
/* Install additional "errno on getppid" filter. */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
ASSERT_EQ(0, ret);
/* Tracer will redirect getpid to getppid, and we should see EPERM. */
errno = 0;
EXPECT_EQ(-1, syscall(__NR_getpid));
EXPECT_EQ(EPERM, errno);
}
/*
 * Like skip_after, but the late-installed filter kills on getppid:
 * the redirected getpid must die with SIGSYS (the expected signal).
 */
TEST_F_SIGNAL(TRACE_syscall, kill_after, SIGSYS)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
/* Install additional "death on getppid" filter. */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
ASSERT_EQ(0, ret);
/* Tracer will redirect getpid to getppid, and we should die. */
EXPECT_NE(self->mypid, syscall(__NR_getpid));
}
/*
 * Exercise the seccomp(2) entry point: bogus operations and argument
 * combinations must fail with EINVAL/EFAULT, and a well-formed
 * SECCOMP_SET_MODE_FILTER call must succeed.
 *
 * Fix: the final success check tested errno instead of the return
 * value. A successful syscall does not clear errno, which still held
 * EFAULT from the intentionally failing NULL-filter call above, so
 * the check failed spuriously even on kernels that support the call.
 */
TEST(seccomp_syscall)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
/* Reject insane operation. */
ret = seccomp(-1, 0, &prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Did not reject crazy op value!");
}
/* Reject strict with flags or pointer. */
ret = seccomp(SECCOMP_SET_MODE_STRICT, -1, NULL);
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Did not reject mode strict with flags!");
}
ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, &prog);
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Did not reject mode strict with uargs!");
}
/* Reject insane args for filter. */
ret = seccomp(SECCOMP_SET_MODE_FILTER, -1, &prog);
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Did not reject crazy filter flags!");
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, NULL);
EXPECT_EQ(EFAULT, errno) {
TH_LOG("Did not reject NULL filter!");
}
/* Valid filter install must succeed; check ret, not stale errno. */
ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
EXPECT_EQ(0, ret) {
TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER: %s",
strerror(errno));
}
}
/*
 * Once filter mode is active, neither the prctl nor the seccomp entry
 * point may downgrade/switch the process to strict mode.
 */
TEST(seccomp_syscall_mode_lock)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
EXPECT_EQ(0, ret) {
TH_LOG("Could not install filter!");
}
/* Make sure neither entry point will switch to strict. */
ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0);
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Switched to mode strict!");
}
ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL);
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Switched to mode strict!");
}
}
/*
* Test detection of known and unknown filter flags. Userspace needs to be able
* to check if a filter flag is supported by the current kernel and a good way
* of doing that is by attempting to enter filter mode, with the flag bit in
* question set, and a NULL pointer for the _args_ parameter. EFAULT indicates
* that the flag is valid and EINVAL indicates that the flag is invalid.
*/
/*
 * Probe kernel support for each seccomp filter flag by calling
 * SECCOMP_SET_MODE_FILTER with the flag set and a NULL args pointer:
 * EFAULT means the flag is recognized, EINVAL means it is not.
 * Mutually exclusive flags (TSYNC vs NEW_LISTENER) are masked out of
 * the combined test and tried one at a time.
 *
 * Cleanups: loop index is size_t to match ARRAY_SIZE()'s unsigned
 * result (avoids a signed/unsigned comparison), and the stray
 * "bits ++" spacing is normalized.
 */
TEST(detect_seccomp_filter_flags)
{
unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
SECCOMP_FILTER_FLAG_LOG,
SECCOMP_FILTER_FLAG_SPEC_ALLOW,
SECCOMP_FILTER_FLAG_NEW_LISTENER,
SECCOMP_FILTER_FLAG_TSYNC_ESRCH };
unsigned int exclusive[] = {
SECCOMP_FILTER_FLAG_TSYNC,
SECCOMP_FILTER_FLAG_NEW_LISTENER };
unsigned int flag, all_flags, exclusive_mask;
size_t i;
long ret;
/* Test detection of individual known-good filter flags */
for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
int bits = 0;
flag = flags[i];
/* Make sure the flag is a single bit! */
while (flag) {
if (flag & 0x1)
bits++;
flag >>= 1;
}
ASSERT_EQ(1, bits);
flag = flags[i];
ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
EXPECT_EQ(-1, ret);
EXPECT_EQ(EFAULT, errno) {
TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!",
flag);
}
all_flags |= flag;
}
/*
 * Test detection of all known-good filter flags combined. But
 * for the exclusive flags we need to mask them out and try them
 * individually for the "all flags" testing.
 */
exclusive_mask = 0;
for (i = 0; i < ARRAY_SIZE(exclusive); i++)
exclusive_mask |= exclusive[i];
for (i = 0; i < ARRAY_SIZE(exclusive); i++) {
flag = all_flags & ~exclusive_mask;
flag |= exclusive[i];
ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
EXPECT_EQ(-1, ret);
EXPECT_EQ(EFAULT, errno) {
TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
flag);
}
}
/* Test detection of an unknown filter flags, without exclusives. */
flag = -1;
flag &= ~exclusive_mask;
ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
EXPECT_EQ(-1, ret);
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!",
flag);
}
/*
 * Test detection of an unknown filter flag that may simply need to be
 * added to this test
 */
flag = flags[ARRAY_SIZE(flags) - 1] << 1;
ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
EXPECT_EQ(-1, ret);
EXPECT_EQ(EINVAL, errno) {
TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?",
flag);
}
}
/*
 * SECCOMP_FILTER_FLAG_TSYNC must be accepted even on the very first
 * filter install (no other threads or filters present yet).
 */
TEST(TSYNC_first)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
long ret;
ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
ASSERT_EQ(0, ret) {
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
}
EXPECT_EQ(0, ret) {
TH_LOG("Could not install initial filter with TSYNC!");
}
}
#define TSYNC_SIBLINGS 2
/* Per-thread state handed to each TSYNC sibling thread. */
struct tsync_sibling {
pthread_t tid;
/* Kernel thread id (gettid), distinct from the pthread handle. */
pid_t system_tid;
/* Startup semaphore shared with the main thread. */
sem_t *started;
pthread_cond_t *cond;
pthread_mutex_t *mutex;
/* Nonzero: this sibling installs its own divergent filter. */
int diverge;
int num_waits;
/* Filter this sibling applies (root or apply program). */
struct sock_fprog *prog;
struct __test_metadata *metadata;
};
/*
 * To avoid joining joined threads (which is not allowed by Bionic),
 * make sure we both successfully join and clear the tid to skip a
 * later join attempt during fixture teardown. Any remaining threads
 * will be directly killed during teardown.
 */
/* Join @tid and clear it on success; log (but do not fail) on error. */
#define PTHREAD_JOIN(tid, status) \
	do { \
		int _rc = pthread_join(tid, status); \
		if (_rc) { \
			TH_LOG("pthread_join of tid %u failed: %d\n", \
				(unsigned int)tid, _rc); \
		} else { \
			tid = 0; \
		} \
	} while (0)
/* Shared state for all TSYNC test cases. */
FIXTURE(TSYNC) {
	struct sock_fprog root_prog, apply_prog;	/* base and TSYNC'd filters */
	struct tsync_sibling sibling[TSYNC_SIBLINGS];
	sem_t started;		/* counted up as each sibling starts */
	pthread_cond_t cond;	/* broadcast to release the siblings */
	pthread_mutex_t mutex;	/* guards cond and sibling num_waits */
	int sibling_count;	/* number of siblings observed started */
};
/*
 * Build the shared filter programs and sibling descriptors.
 * root_prog allows everything; apply_prog kills read() and allows the
 * rest.  Both are heap copies so each test run owns its memory.
 */
FIXTURE_SETUP(TSYNC)
{
	struct sock_filter root_filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter apply_filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	memset(&self->root_prog, 0, sizeof(self->root_prog));
	memset(&self->apply_prog, 0, sizeof(self->apply_prog));
	memset(&self->sibling, 0, sizeof(self->sibling));
	/* Copy the stack-local filters into heap-backed programs. */
	self->root_prog.filter = malloc(sizeof(root_filter));
	ASSERT_NE(NULL, self->root_prog.filter);
	memcpy(self->root_prog.filter, &root_filter, sizeof(root_filter));
	self->root_prog.len = (unsigned short)ARRAY_SIZE(root_filter);
	self->apply_prog.filter = malloc(sizeof(apply_filter));
	ASSERT_NE(NULL, self->apply_prog.filter);
	memcpy(self->apply_prog.filter, &apply_filter, sizeof(apply_filter));
	self->apply_prog.len = (unsigned short)ARRAY_SIZE(apply_filter);

	self->sibling_count = 0;
	pthread_mutex_init(&self->mutex, NULL);
	pthread_cond_init(&self->cond, NULL);
	sem_init(&self->started, 0, 0);
	/* Both siblings share the fixture's sync primitives and expect
	 * one condition-variable wakeup by default. */
	self->sibling[0].tid = 0;
	self->sibling[0].cond = &self->cond;
	self->sibling[0].started = &self->started;
	self->sibling[0].mutex = &self->mutex;
	self->sibling[0].diverge = 0;
	self->sibling[0].num_waits = 1;
	self->sibling[0].prog = &self->root_prog;
	self->sibling[0].metadata = _metadata;
	self->sibling[1].tid = 0;
	self->sibling[1].cond = &self->cond;
	self->sibling[1].started = &self->started;
	self->sibling[1].mutex = &self->mutex;
	self->sibling[1].diverge = 0;
	self->sibling[1].prog = &self->root_prog;
	self->sibling[1].num_waits = 1;
	self->sibling[1].metadata = _metadata;
}
/*
 * Free the filter copies, forcibly stop any sibling that was never
 * joined (tid still non-zero), and destroy the sync primitives.
 */
FIXTURE_TEARDOWN(TSYNC)
{
	int sib = 0;

	if (self->root_prog.filter)
		free(self->root_prog.filter);
	if (self->apply_prog.filter)
		free(self->apply_prog.filter);

	for ( ; sib < self->sibling_count; ++sib) {
		struct tsync_sibling *s = &self->sibling[sib];

		/* tid == 0 means PTHREAD_JOIN already reaped it. */
		if (!s->tid)
			continue;
		/*
		 * If a thread is still running, it may be stuck, so hit
		 * it over the head really hard.
		 */
		pthread_kill(s->tid, 9);
	}
	pthread_mutex_destroy(&self->mutex);
	pthread_cond_destroy(&self->cond);
	sem_destroy(&self->started);
}
/*
 * Thread body used by the TSYNC tests.
 *
 * Records the kernel TID, optionally installs the sibling's own filter
 * (to make its filter tree diverge), signals the parent that it has
 * started, then sleeps on the condition variable until released.  On
 * wakeup it reports whether NO_NEW_PRIVS was synchronized to it and
 * finally issues a read() that a TSYNC'd kill filter would trap.
 */
void *tsync_sibling(void *data)
{
	struct tsync_sibling *sib = data;
	long rc = 0;

	sib->system_tid = syscall(__NR_gettid);

	pthread_mutex_lock(sib->mutex);
	if (sib->diverge) {
		/* Just re-apply the root prog to fork the tree */
		rc = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
			   sib->prog, 0, 0);
	}
	sem_post(sib->started);
	/* Return outside of started so parent notices failures. */
	if (rc != 0) {
		pthread_mutex_unlock(sib->mutex);
		return (void *)SIBLING_EXIT_FAILURE;
	}
	/* Consume the requested number of wakeups before continuing. */
	for (;;) {
		pthread_cond_wait(sib->cond, sib->mutex);
		sib->num_waits--;
		if (!sib->num_waits)
			break;
	}
	pthread_mutex_unlock(sib->mutex);

	if (!prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0))
		return (void *)SIBLING_EXIT_NEWPRIVS;
	/* A TSYNC'd kill-on-read filter would terminate us here. */
	read(0, NULL, 0);
	return (void *)SIBLING_EXIT_UNKILLED;
}
/* Launch @sibling's thread; its tid is recorded for later join/kill. */
void tsync_start_sibling(struct tsync_sibling *sibling)
{
	pthread_create(&sibling->tid, NULL, tsync_sibling, sibling);
}
/*
 * Install a filter that makes prctl() fail with EINVAL, then ask
 * sibling 0 to diverge (which requires prctl).  The diverging sibling
 * must report SIBLING_EXIT_FAILURE; the other must run to completion.
 */
TEST_F(TSYNC, siblings_fail_prctl)
{
	long ret;
	void *status;
	/* Erroring out on prctl(2). */
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EINVAL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
	/* Check prctl failure detection by requesting sib 0 diverge. */
	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	ASSERT_EQ(0, ret) {
		TH_LOG("setting filter failed");
	}

	self->sibling[0].diverge = 1;
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);

	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	/* Signal the threads to clean up*/
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure diverging sibling failed to call prctl. */
	PTHREAD_JOIN(self->sibling[0].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status);
	PTHREAD_JOIN(self->sibling[1].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
}
/*
 * Install a base filter in the ancestor, start two sibling threads,
 * then TSYNC a kill-on-read filter across all of them.  Both siblings
 * must be killed by the synchronized filter; a pthread exit status of
 * 0 here means "killed" since all SIBLING_EXIT_* values are non-zero.
 */
TEST_F(TSYNC, two_siblings_with_ancestor)
{
	long ret;
	void *status;

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
	}
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);

	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
		      &self->apply_prog);
	ASSERT_EQ(0, ret) {
		/* Message fix: this log fires only when install failed. */
		TH_LOG("Could not install filter on all threads!");
	}
	/* Tell the siblings to test the policy */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);
	/* Ensure they are both killed and don't exit cleanly. */
	PTHREAD_JOIN(self->sibling[0].tid, &status);
	EXPECT_EQ(0x0, (long)status);
	PTHREAD_JOIN(self->sibling[1].tid, &status);
	EXPECT_EQ(0x0, (long)status);
}
/*
 * Without any seccomp policy or NO_NEW_PRIVS set, both siblings should
 * notice that NO_NEW_PRIVS is absent and exit with SIBLING_EXIT_NEWPRIVS.
 */
TEST_F(TSYNC, two_sibling_want_nnp)
{
	void *status;

	/* start siblings before any prctl() operations */
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);
	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	/* Tell the siblings to test no policy */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure they are both upset about lacking nnp. */
	PTHREAD_JOIN(self->sibling[0].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
	PTHREAD_JOIN(self->sibling[1].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status);
}
/*
 * Start the siblings before the ancestor has any filter, then TSYNC
 * the kill-on-read filter onto all of them.  TSYNC must succeed even
 * when the siblings had no prior filter, and both must then be killed
 * by the policy (exit status 0 here means "killed").
 */
TEST_F(TSYNC, two_siblings_with_no_filter)
{
	long ret;
	void *status;

	/* start siblings before any prctl() operations */
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);
	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
		      &self->apply_prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	ASSERT_EQ(0, ret) {
		/* Message fix: this log fires only when install failed. */
		TH_LOG("Could not install filter on all threads!");
	}

	/* Tell the siblings to test the policy */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure they are both killed and don't exit cleanly. */
	PTHREAD_JOIN(self->sibling[0].tid, &status);
	EXPECT_EQ(0x0, (long)status);
	PTHREAD_JOIN(self->sibling[1].tid, &status);
	EXPECT_EQ(0x0, (long)status);
}
/*
 * When one sibling has a diverged filter tree, TSYNC must fail and
 * return that sibling's kernel TID.  Both siblings survive.
 */
TEST_F(TSYNC, two_siblings_with_one_divergence)
{
	long ret;
	void *status;

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
	}
	/* Sibling 0 re-applies its own filter, diverging its tree. */
	self->sibling[0].diverge = 1;
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);

	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
		      &self->apply_prog);
	/* Without TSYNC_ESRCH, failure reports the offending TID. */
	ASSERT_EQ(self->sibling[0].system_tid, ret) {
		TH_LOG("Did not fail on diverged sibling.");
	}

	/* Wake the threads */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure they are both unkilled. */
	PTHREAD_JOIN(self->sibling[0].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
	PTHREAD_JOIN(self->sibling[1].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
}
/*
 * Like two_siblings_with_one_divergence, but with
 * SECCOMP_FILTER_FLAG_TSYNC_ESRCH set, so the failure is reported as
 * -1/ESRCH instead of leaking the diverged sibling's TID.
 */
TEST_F(TSYNC, two_siblings_with_one_divergence_no_tid_in_err)
{
	long ret, flags;
	void *status;

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
	}
	self->sibling[0].diverge = 1;
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);

	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	/* Redundant '\' line splice removed: the statement continues
	 * across lines on its own inside an expression. */
	flags = SECCOMP_FILTER_FLAG_TSYNC |
		SECCOMP_FILTER_FLAG_TSYNC_ESRCH;
	ret = seccomp(SECCOMP_SET_MODE_FILTER, flags, &self->apply_prog);
	ASSERT_EQ(ESRCH, errno) {
		TH_LOG("Did not return ESRCH for diverged sibling.");
	}
	ASSERT_EQ(-1, ret) {
		TH_LOG("Did not fail on diverged sibling.");
	}

	/* Wake the threads */
	pthread_mutex_lock(&self->mutex);
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);

	/* Ensure they are both unkilled. */
	PTHREAD_JOIN(self->sibling[0].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
	PTHREAD_JOIN(self->sibling[1].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
}
/*
 * Exercise TSYNC when the siblings predate the ancestor's filter:
 * sibling 0 diverges with its own filter, sibling 1 has no filter at
 * all.  The first TSYNC must fail naming sibling 0; after that sibling
 * exits (and is reaped), TSYNC must succeed for the remainder.
 */
TEST_F(TSYNC, two_siblings_not_under_filter)
{
	long ret, sib;
	void *status;
	struct timespec delay = { .tv_nsec = 100000000 };

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
	/*
	 * Sibling 0 will have its own seccomp policy
	 * and Sibling 1 will not be under seccomp at
	 * all. Sibling 1 will enter seccomp and 0
	 * will cause failure.
	 */
	self->sibling[0].diverge = 1;
	tsync_start_sibling(&self->sibling[0]);
	tsync_start_sibling(&self->sibling[1]);

	while (self->sibling_count < TSYNC_SIBLINGS) {
		sem_wait(&self->started);
		self->sibling_count++;
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
		      &self->apply_prog);
	ASSERT_EQ(ret, self->sibling[0].system_tid) {
		TH_LOG("Did not fail on diverged sibling.");
	}
	sib = 1;
	if (ret == self->sibling[0].system_tid)
		sib = 0;

	pthread_mutex_lock(&self->mutex);

	/* Increment the other siblings num_waits so we can clean up
	 * the one we just saw.
	 */
	self->sibling[!sib].num_waits += 1;

	/* Signal the thread to clean up*/
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);
	PTHREAD_JOIN(self->sibling[sib].tid, &status);
	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);
	/* Poll for actual task death. pthread_join doesn't guarantee it. */
	while (!kill(self->sibling[sib].system_tid, 0))
		nanosleep(&delay, NULL);
	/* Switch to the remaining sibling */
	sib = !sib;

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
		      &self->apply_prog);
	/* Stray ';' after the ASSERT block removed. */
	ASSERT_EQ(0, ret) {
		TH_LOG("Expected the remaining sibling to sync");
	}

	pthread_mutex_lock(&self->mutex);

	/* If remaining sibling didn't have a chance to wake up during
	 * the first broadcast, manually reduce the num_waits now.
	 */
	if (self->sibling[sib].num_waits > 1)
		self->sibling[sib].num_waits = 1;
	ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) {
		TH_LOG("cond broadcast non-zero");
	}
	pthread_mutex_unlock(&self->mutex);
	PTHREAD_JOIN(self->sibling[sib].tid, &status);
	EXPECT_EQ(0, (long)status);
	/* Poll for actual task death. pthread_join doesn't guarantee it. */
	while (!kill(self->sibling[sib].system_tid, 0))
		nanosleep(&delay, NULL);

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
		      &self->apply_prog);
	ASSERT_EQ(0, ret);	/* just us chickens */
}
/* Make sure restarted syscalls are seen directly as "restart_syscall". */
TEST(syscall_restart)
{
	long ret;
	unsigned long msg;
	pid_t child_pid;
	int pipefd[2];
	int status;
	siginfo_t info = { };
	/*
	 * Jump targets count down to the two trailing RET_TRACE stmts:
	 * nanosleep/clock_nanosleep trace with 0x100, restart_syscall
	 * with 0x200; read/exit/sigreturn and write are allowed; all
	 * other syscalls are killed.
	 */
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
#ifdef __NR_sigreturn
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_sigreturn, 7, 0),
#endif
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 6, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit, 5, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_rt_sigreturn, 4, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_nanosleep, 5, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_clock_nanosleep, 4, 0),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_restart_syscall, 4, 0),
		/* Allow __NR_write for easy logging. */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_write, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		/* The nanosleep jump target. */
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x100),
		/* The restart_syscall jump target. */
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x200),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
#if defined(__arm__)
	struct utsname utsbuf;
#endif

	ASSERT_EQ(0, pipe(pipefd));

	child_pid = fork();
	ASSERT_LE(0, child_pid);
	if (child_pid == 0) {
		/* Child uses EXPECT not ASSERT to deliver status correctly. */
		char buf = ' ';
		struct timespec timeout = { };

		/* Attach parent as tracer and stop. */
		EXPECT_EQ(0, ptrace(PTRACE_TRACEME));
		EXPECT_EQ(0, raise(SIGSTOP));

		EXPECT_EQ(0, close(pipefd[1]));

		EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
			TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
		}
		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
		EXPECT_EQ(0, ret) {
			TH_LOG("Failed to install filter!");
		}
		EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
			TH_LOG("Failed to read() sync from parent");
		}
		EXPECT_EQ('.', buf) {
			TH_LOG("Failed to get sync data from read()");
		}

		/* Start nanosleep to be interrupted. */
		timeout.tv_sec = 1;
		errno = 0;
		EXPECT_EQ(0, nanosleep(&timeout, NULL)) {
			TH_LOG("Call to nanosleep() failed (errno %d)", errno);
		}

		/* Read final sync from parent. */
		EXPECT_EQ(1, read(pipefd[0], &buf, 1)) {
			TH_LOG("Failed final read() from parent");
		}
		EXPECT_EQ('!', buf) {
			TH_LOG("Failed to get final data from read()");
		}

		/* Directly report the status of our test harness results. */
		syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS
						     : EXIT_FAILURE);
	}
	EXPECT_EQ(0, close(pipefd[0]));

	/* Attach to child, setup options, and release. */
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(0, ptrace(PTRACE_SETOPTIONS, child_pid, NULL,
			    PTRACE_O_TRACESECCOMP));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(1, write(pipefd[1], ".", 1));

	/* Wait for nanosleep() to start. */
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
	ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
	ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
	/* 0x100 is the RET_TRACE data for the nanosleep target. */
	ASSERT_EQ(0x100, msg);
	ret = get_syscall(_metadata, child_pid);
	EXPECT_TRUE(ret == __NR_nanosleep || ret == __NR_clock_nanosleep);

	/* Might as well check siginfo for sanity while we're here. */
	ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
	ASSERT_EQ(SIGTRAP, info.si_signo);
	ASSERT_EQ(SIGTRAP | (PTRACE_EVENT_SECCOMP << 8), info.si_code);
	EXPECT_EQ(0, info.si_errno);
	EXPECT_EQ(getuid(), info.si_uid);
	/* Verify signal delivery came from child (seccomp-triggered). */
	EXPECT_EQ(child_pid, info.si_pid);

	/* Interrupt nanosleep with SIGSTOP (which we'll need to handle). */
	ASSERT_EQ(0, kill(child_pid, SIGSTOP));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGSTOP, WSTOPSIG(status));
	ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
	/*
	 * There is no siginfo on SIGSTOP any more, so we can't verify
	 * signal delivery came from parent now (getpid() == info.si_pid).
	 * https://lkml.kernel.org/r/CAGXu5jJaZAOzP1qFz66tYrtbuywqb+UN2SOA1VLHpCCOiYvYeg@mail.gmail.com
	 * At least verify the SIGSTOP via PTRACE_GETSIGINFO.
	 */
	EXPECT_EQ(SIGSTOP, info.si_signo);

	/* Restart nanosleep with SIGCONT, which triggers restart_syscall. */
	ASSERT_EQ(0, kill(child_pid, SIGCONT));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGCONT, WSTOPSIG(status));
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));

	/* Wait for restart_syscall() to start. */
	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	ASSERT_EQ(true, WIFSTOPPED(status));
	ASSERT_EQ(SIGTRAP, WSTOPSIG(status));
	ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16));
	ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg));
	/* 0x200 is the RET_TRACE data for the restart_syscall target. */
	ASSERT_EQ(0x200, msg);
	ret = get_syscall(_metadata, child_pid);
#if defined(__arm__)
	/*
	 * FIXME:
	 * - native ARM registers do NOT expose true syscall.
	 * - compat ARM registers on ARM64 DO expose true syscall.
	 */
	ASSERT_EQ(0, uname(&utsbuf));
	if (strncmp(utsbuf.machine, "arm", 3) == 0) {
		EXPECT_EQ(__NR_nanosleep, ret);
	} else
#endif
	{
		EXPECT_EQ(__NR_restart_syscall, ret);
	}

	/* Write again to end test. */
	ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0));
	ASSERT_EQ(1, write(pipefd[1], "!", 1));
	EXPECT_EQ(0, close(pipefd[1]));

	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
	if (WIFSIGNALED(status) || WEXITSTATUS(status))
		_metadata->passed = 0;
}
/*
 * Exercise SECCOMP_FILTER_FLAG_LOG: rejected in strict mode, accepted
 * for filters, and still enforcing RET_KILL.  The test expects to die
 * with SIGSYS on the final getpid() (TEST_SIGNAL).
 */
TEST_SIGNAL(filter_flag_log, SIGSYS)
{
	struct sock_filter allow_filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	/* Kill getpid(), allow everything else. */
	struct sock_filter kill_filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog allow_prog = {
		.len = (unsigned short)ARRAY_SIZE(allow_filter),
		.filter = allow_filter,
	};
	struct sock_fprog kill_prog = {
		.len = (unsigned short)ARRAY_SIZE(kill_filter),
		.filter = kill_filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	/* Verify that the FILTER_FLAG_LOG flag isn't accepted in strict mode */
	ret = seccomp(SECCOMP_SET_MODE_STRICT, SECCOMP_FILTER_FLAG_LOG,
		      &allow_prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	EXPECT_NE(0, ret) {
		TH_LOG("Kernel accepted FILTER_FLAG_LOG flag in strict mode!");
	}
	EXPECT_EQ(EINVAL, errno) {
		TH_LOG("Kernel returned unexpected errno for FILTER_FLAG_LOG flag in strict mode!");
	}

	/* Verify that a simple, permissive filter can be added with no flags */
	ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &allow_prog);
	EXPECT_EQ(0, ret);

	/* See if the same filter can be added with the FILTER_FLAG_LOG flag */
	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG,
		      &allow_prog);
	ASSERT_NE(EINVAL, errno) {
		TH_LOG("Kernel does not support the FILTER_FLAG_LOG flag!");
	}
	EXPECT_EQ(0, ret);

	/* Ensure that the kill filter works with the FILTER_FLAG_LOG flag */
	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG,
		      &kill_prog);
	EXPECT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}
/*
 * SECCOMP_GET_ACTION_AVAIL must report every known return action as
 * available and reject an unknown action value with EOPNOTSUPP.
 */
TEST(get_action_avail)
{
	__u32 actions[] = { SECCOMP_RET_KILL_THREAD, SECCOMP_RET_TRAP,
			    SECCOMP_RET_ERRNO, SECCOMP_RET_TRACE,
			    SECCOMP_RET_LOG,   SECCOMP_RET_ALLOW };
	__u32 unknown_action = 0x10000000U;
	int i;
	long ret;

	ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[0]);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	ASSERT_NE(EINVAL, errno) {
		TH_LOG("Kernel does not support SECCOMP_GET_ACTION_AVAIL operation!");
	}
	EXPECT_EQ(ret, 0);

	for (i = 0; i < ARRAY_SIZE(actions); i++) {
		ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[i]);
		EXPECT_EQ(ret, 0) {
			TH_LOG("Expected action (0x%X) not available!",
			       actions[i]);
		}
	}

	/* Check that an unknown action is handled properly (EOPNOTSUPP) */
	ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &unknown_action);
	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno, EOPNOTSUPP);
}
/*
 * PTRACE_SECCOMP_GET_METADATA must report each installed filter's
 * flags by filter_off index.  A child installs one FLAG_LOG filter and
 * one plain filter; the parent attaches and reads both back.
 * Requires real root; skipped otherwise.
 */
TEST(get_metadata)
{
	pid_t pid;
	int pipefd[2];
	char buf;
	struct seccomp_metadata md;
	long ret;

	/* Only real root can get metadata. */
	if (geteuid()) {
		SKIP(return, "get_metadata requires real root");
		return;
	}

	ASSERT_EQ(0, pipe(pipefd));

	pid = fork();
	ASSERT_GE(pid, 0);
	if (pid == 0) {
		struct sock_filter filter[] = {
			BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		};
		struct sock_fprog prog = {
			.len = (unsigned short)ARRAY_SIZE(filter),
			.filter = filter,
		};

		/* one with log, one without */
		EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER,
				     SECCOMP_FILTER_FLAG_LOG, &prog));
		EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog));
		EXPECT_EQ(0, close(pipefd[0]));
		/* Tell the parent the filters are installed, then park. */
		ASSERT_EQ(1, write(pipefd[1], "1", 1));
		ASSERT_EQ(0, close(pipefd[1]));

		while (1)
			sleep(100);
	}

	ASSERT_EQ(0, close(pipefd[1]));
	ASSERT_EQ(1, read(pipefd[0], &buf, 1));

	ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid));
	ASSERT_EQ(pid, waitpid(pid, NULL, 0));

	/* Past here must not use ASSERT or child process is never killed. */
	/* filter_off 0 is the most recently installed (plain) filter. */
	md.filter_off = 0;
	errno = 0;
	ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);
	EXPECT_EQ(sizeof(md), ret) {
		if (errno == EINVAL)
			SKIP(goto skip, "Kernel does not support PTRACE_SECCOMP_GET_METADATA (missing CONFIG_CHECKPOINT_RESTORE?)");
	}

	EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG);
	EXPECT_EQ(md.filter_off, 0);

	md.filter_off = 1;
	ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md);
	EXPECT_EQ(sizeof(md), ret);
	EXPECT_EQ(md.flags, 0);
	EXPECT_EQ(md.filter_off, 1);

skip:
	/* Always reap the parked child. */
	ASSERT_EQ(0, kill(pid, SIGKILL));
}
static int user_notif_syscall(int nr, unsigned int flags)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
offsetof(struct seccomp_data, nr)),
BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, nr, 0, 1),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_USER_NOTIF),
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};
struct sock_fprog prog = {
.len = (unsigned short)ARRAY_SIZE(filter),
.filter = filter,
};
return seccomp(SECCOMP_SET_MODE_FILTER, flags, &prog);
}
/* Sentinel value the supervisor injects as the faked syscall result. */
#define USER_NOTIF_MAGIC INT_MAX
/*
 * End-to-end smoke test of the user-notification machinery: ENOSYS
 * without a listener, EBUSY on a second listener, RECV/SEND round trip,
 * and rejection of garbage request/response fields.
 */
TEST(user_notification_basic)
{
	pid_t pid;
	long ret;
	int status, listener;
	struct seccomp_notif req = {};
	struct seccomp_notif_resp resp = {};
	struct pollfd pollfd;
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	pid = fork();
	ASSERT_GE(pid, 0);

	/* Check that we get -ENOSYS with no listener attached */
	if (pid == 0) {
		if (user_notif_syscall(__NR_getppid, 0) < 0)
			exit(1);
		ret = syscall(__NR_getppid);
		exit(ret >= 0 || errno != ENOSYS);
	}

	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));

	/* Add some no-op filters for grins. */
	EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
	EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
	EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);
	EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0);

	/* Check that the basic notification machinery works */
	listener = user_notif_syscall(__NR_getppid,
				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
	ASSERT_GE(listener, 0);

	/* Installing a second listener in the chain should EBUSY */
	EXPECT_EQ(user_notif_syscall(__NR_getppid,
				     SECCOMP_FILTER_FLAG_NEW_LISTENER),
		  -1);
	EXPECT_EQ(errno, EBUSY);

	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		/* Child's getppid() is trapped; supervisor fakes result. */
		ret = syscall(__NR_getppid);
		exit(ret != USER_NOTIF_MAGIC);
	}

	pollfd.fd = listener;
	pollfd.events = POLLIN | POLLOUT;

	/* POLLIN: a notification is waiting to be received. */
	EXPECT_GT(poll(&pollfd, 1, -1), 0);
	EXPECT_EQ(pollfd.revents, POLLIN);

	/* Test that we can't pass garbage to the kernel. */
	memset(&req, 0, sizeof(req));
	req.pid = -1;
	errno = 0;
	ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);

	if (ret) {
		req.pid = 0;
		EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
	}

	pollfd.fd = listener;
	pollfd.events = POLLIN | POLLOUT;

	/* POLLOUT: the received notification awaits a response. */
	EXPECT_GT(poll(&pollfd, 1, -1), 0);
	EXPECT_EQ(pollfd.revents, POLLOUT);

	EXPECT_EQ(req.data.nr, __NR_getppid);

	resp.id = req.id;
	resp.error = 0;
	resp.val = USER_NOTIF_MAGIC;

	/* check that we make sure flags == 0 */
	resp.flags = 1;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
	EXPECT_EQ(errno, EINVAL);

	resp.flags = 0;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);

	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));
}
/*
 * NEW_LISTENER and TSYNC used to be mutually exclusive (EINVAL);
 * with TSYNC_ESRCH also set the combination must be accepted.
 */
TEST(user_notification_with_tsync)
{
	int ret;
	unsigned int flags;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	/* these were exclusive */
	flags = SECCOMP_FILTER_FLAG_NEW_LISTENER |
		SECCOMP_FILTER_FLAG_TSYNC;
	ASSERT_EQ(-1, user_notif_syscall(__NR_getppid, flags));
	ASSERT_EQ(EINVAL, errno);

	/* but now they're not */
	flags |= SECCOMP_FILTER_FLAG_TSYNC_ESRCH;
	ret = user_notif_syscall(__NR_getppid, flags);
	/*
	 * Fix: validate the listener fd before closing it.  The old
	 * order called close(ret) first, which would close(-1) on
	 * failure and mask the real result.
	 */
	ASSERT_LE(0, ret);
	close(ret);
}
/*
 * Kill the notifying task while its notification is outstanding: the
 * notification id must become invalid and a late SEND must fail with
 * ENOENT.
 */
TEST(user_notification_kill_in_middle)
{
	pid_t pid;
	long ret;
	int listener;
	struct seccomp_notif req = {};
	struct seccomp_notif_resp resp = {};

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	listener = user_notif_syscall(__NR_getppid,
				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
	ASSERT_GE(listener, 0);

	/*
	 * Check that nothing bad happens when we kill the task in the middle
	 * of a syscall.
	 */
	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		ret = syscall(__NR_getppid);
		exit(ret != USER_NOTIF_MAGIC);
	}

	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
	/* Valid while the notifying task is alive... */
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), 0);

	EXPECT_EQ(kill(pid, SIGKILL), 0);
	EXPECT_EQ(waitpid(pid, NULL, 0), pid);

	/* ...and invalid once it has been killed. */
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), -1);

	resp.id = req.id;
	ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp);
	EXPECT_EQ(ret, -1);
	EXPECT_EQ(errno, ENOENT);
}
/* fd the SIGUSR1 handler writes to; assigned by the task under test. */
static int handled = -1;

/* Confirm signal delivery by emitting one byte on the "handled" fd. */
static void signal_handler(int signal)
{
	ssize_t n = write(handled, "c", 1);

	if (n != 1)
		perror("write from signal");
}
/*
 * Deliver a signal to a task blocked in user notification: the pending
 * notification must be aborted (SEND -> ENOENT), a fresh notification
 * must arrive for the retried syscall, and an injected -ERESTARTSYS
 * must propagate back to the caller as errno 512.
 */
TEST(user_notification_signal)
{
	pid_t pid;
	long ret;
	int status, listener, sk_pair[2];
	struct seccomp_notif req = {};
	struct seccomp_notif_resp resp = {};
	char c;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0);

	listener = user_notif_syscall(__NR_gettid,
				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
	ASSERT_GE(listener, 0);

	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		close(sk_pair[0]);
		/* Route the handler's confirmation byte to the parent. */
		handled = sk_pair[1];
		if (signal(SIGUSR1, signal_handler) == SIG_ERR) {
			perror("signal");
			exit(1);
		}
		/*
		 * ERESTARTSYS behavior is a bit hard to test, because we need
		 * to rely on a signal that has not yet been handled. Let's at
		 * least check that the error code gets propagated through, and
		 * hope that it doesn't break when there is actually a signal :)
		 */
		ret = syscall(__NR_gettid);
		exit(!(ret == -1 && errno == 512));
	}

	close(sk_pair[1]);

	memset(&req, 0, sizeof(req));
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);

	EXPECT_EQ(kill(pid, SIGUSR1), 0);

	/*
	 * Make sure the signal really is delivered, which means we're not
	 * stuck in the user notification code any more and the notification
	 * should be dead.
	 */
	EXPECT_EQ(read(sk_pair[0], &c, 1), 1);

	resp.id = req.id;
	resp.error = -EPERM;
	resp.val = 0;

	/* The aborted notification can no longer be answered. */
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
	EXPECT_EQ(errno, ENOENT);

	memset(&req, 0, sizeof(req));
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);

	resp.id = req.id;
	resp.error = -512; /* -ERESTARTSYS */
	resp.val = 0;

	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);

	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));
}
/*
 * When every copy of the listener fd is closed, a filtered syscall
 * must fail with ENOSYS instead of blocking forever.
 */
TEST(user_notification_closed_listener)
{
	pid_t pid;
	long ret;
	int status, listener;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	listener = user_notif_syscall(__NR_getppid,
				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
	ASSERT_GE(listener, 0);

	/*
	 * Check that we get an ENOSYS when the listener is closed.
	 */
	pid = fork();
	ASSERT_GE(pid, 0);
	if (pid == 0) {
		close(listener);
		ret = syscall(__NR_getppid);
		/*
		 * Fix: success means "failed with ENOSYS".  The old
		 * "ret != -1 && errno != ENOSYS" could exit 0 (pass)
		 * even when the syscall succeeded or failed with the
		 * wrong errno.
		 */
		exit(!(ret == -1 && errno == ENOSYS));
	}

	close(listener);

	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));
}
/*
 * Check that a pid in a child namespace still shows up as valid in ours.
 */
TEST(user_notification_child_pid_ns)
{
	pid_t pid;
	int status, listener;
	struct seccomp_notif req = {};
	struct seccomp_notif_resp resp = {};

	/* New user+pid ns so the forked child lands in a child pidns. */
	ASSERT_EQ(unshare(CLONE_NEWUSER | CLONE_NEWPID), 0) {
		if (errno == EINVAL)
			SKIP(return, "kernel missing CLONE_NEWUSER support");
	};

	listener = user_notif_syscall(__NR_getppid,
				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
	ASSERT_GE(listener, 0);

	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0)
		exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);

	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
	/* The notifying task's pid must be visible from our namespace. */
	EXPECT_EQ(req.pid, pid);

	resp.id = req.id;
	resp.error = 0;
	resp.val = USER_NOTIF_MAGIC;

	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);

	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));
	close(listener);
}
/*
 * Check that a pid in a sibling (i.e. unrelated) namespace shows up as 0, i.e.
 * invalid.
 */
TEST(user_notification_sibling_pid_ns)
{
	pid_t pid, pid2;
	int status, listener;
	struct seccomp_notif req = {};
	struct seccomp_notif_resp resp = {};

	ASSERT_EQ(prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0), 0) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	listener = user_notif_syscall(__NR_getppid,
				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
	ASSERT_GE(listener, 0);

	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		/* First child: new pidns; grandchild does the notifying. */
		ASSERT_EQ(unshare(CLONE_NEWPID), 0);

		pid2 = fork();
		ASSERT_GE(pid2, 0);

		if (pid2 == 0)
			exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);

		EXPECT_EQ(waitpid(pid2, &status, 0), pid2);
		EXPECT_EQ(true, WIFEXITED(status));
		EXPECT_EQ(0, WEXITSTATUS(status));
		exit(WEXITSTATUS(status));
	}

	/* Create the sibling ns, and sibling in it. */
	ASSERT_EQ(unshare(CLONE_NEWPID), 0) {
		if (errno == EPERM)
			SKIP(return, "CLONE_NEWPID requires CAP_SYS_ADMIN");
	}
	ASSERT_EQ(errno, 0);

	pid2 = fork();
	ASSERT_GE(pid2, 0);

	if (pid2 == 0) {
		/* Supervisor runs in the sibling pidns. */
		ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
		/*
		 * The pid should be 0, i.e. the task is in some namespace that
		 * we can't "see".
		 */
		EXPECT_EQ(req.pid, 0);

		resp.id = req.id;
		resp.error = 0;
		resp.val = USER_NOTIF_MAGIC;

		ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
		exit(0);
	}

	close(listener);

	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));

	EXPECT_EQ(waitpid(pid2, &status, 0), pid2);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));
}
/*
 * A faulting SECCOMP_IOCTL_NOTIF_RECV (NULL buffer) must return EFAULT
 * without consuming the pending notification.
 */
TEST(user_notification_fault_recv)
{
	pid_t pid;
	int status, listener;
	struct seccomp_notif req = {};
	struct seccomp_notif_resp resp = {};

	ASSERT_EQ(unshare(CLONE_NEWUSER), 0);

	listener = user_notif_syscall(__NR_getppid,
				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
	ASSERT_GE(listener, 0);

	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0)
		exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);

	/* Do a bad recv() */
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, NULL), -1);
	EXPECT_EQ(errno, EFAULT);

	/* We should still be able to receive this notification, though. */
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
	EXPECT_EQ(req.pid, pid);

	resp.id = req.id;
	resp.error = 0;
	resp.val = USER_NOTIF_MAGIC;

	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);

	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));
}
/*
 * SECCOMP_GET_NOTIF_SIZES must report the same structure sizes the kernel
 * headers we compiled against declare, so RECV/SEND buffers match.
 */
TEST(seccomp_get_notif_sizes)
{
	struct seccomp_notif_sizes sizes;

	ASSERT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0);
	EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif));
	EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp));
}
/*
 * Exercise SECCOMP_USER_NOTIF_FLAG_CONTINUE: the supervisor lets the
 * trapped dup() proceed natively, and the child verifies (via filecmp)
 * that the dup'd fd really refers to the same file.  Also checks the
 * listener's poll() readiness transitions around RECV.
 */
TEST(user_notification_continue)
{
	pid_t pid;
	long ret;
	int status, listener;
	struct seccomp_notif req = {};
	struct seccomp_notif_resp resp = {};
	struct pollfd pollfd;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	listener = user_notif_syscall(__NR_dup, SECCOMP_FILTER_FLAG_NEW_LISTENER);
	ASSERT_GE(listener, 0);

	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		int dup_fd, pipe_fds[2];
		pid_t self;

		ASSERT_GE(pipe(pipe_fds), 0);

		/* This dup() traps to the supervisor, which must CONTINUE it. */
		dup_fd = dup(pipe_fds[0]);
		ASSERT_GE(dup_fd, 0);
		EXPECT_NE(pipe_fds[0], dup_fd);

		self = getpid();
		ASSERT_EQ(filecmp(self, self, pipe_fds[0], dup_fd), 0);
		exit(0);
	}

	/* Before RECV the listener is readable... */
	pollfd.fd = listener;
	pollfd.events = POLLIN | POLLOUT;

	EXPECT_GT(poll(&pollfd, 1, -1), 0);
	EXPECT_EQ(pollfd.revents, POLLIN);

	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);

	/* ...and after RECV it is writable (a response is expected). */
	pollfd.fd = listener;
	pollfd.events = POLLIN | POLLOUT;

	EXPECT_GT(poll(&pollfd, 1, -1), 0);
	EXPECT_EQ(pollfd.revents, POLLOUT);

	EXPECT_EQ(req.data.nr, __NR_dup);

	resp.id = req.id;
	resp.flags = SECCOMP_USER_NOTIF_FLAG_CONTINUE;

	/*
	 * Verify that setting SECCOMP_USER_NOTIF_FLAG_CONTINUE enforces other
	 * args be set to 0.
	 */
	resp.error = 0;
	resp.val = USER_NOTIF_MAGIC;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
	EXPECT_EQ(errno, EINVAL);

	resp.error = USER_NOTIF_MAGIC;
	resp.val = 0;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1);
	EXPECT_EQ(errno, EINVAL);

	resp.error = 0;
	resp.val = 0;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0) {
		if (errno == EINVAL)
			SKIP(goto skip, "Kernel does not support SECCOMP_USER_NOTIF_FLAG_CONTINUE");
	}

skip:
	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status)) {
		/* Child exits 2 when kcmp() is unavailable (see filecmp). */
		if (WEXITSTATUS(status) == 2) {
			SKIP(return, "Kernel does not support kcmp() syscall");
			return;
		}
	}
}
/*
 * When the last task using a user-notification filter exits, the listener
 * fd must be notified with POLLHUP.  CLONE_FILES lets the child install
 * its listener directly into our fd table (as fd 200).
 */
TEST(user_notification_filter_empty)
{
	pid_t pid;
	long ret;
	int status;
	struct pollfd pollfd;
	struct __clone_args args = {
		.flags = CLONE_FILES,
		.exit_signal = SIGCHLD,
	};

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	pid = sys_clone3(&args, sizeof(args));
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		int listener;

		listener = user_notif_syscall(__NR_mknodat, SECCOMP_FILTER_FLAG_NEW_LISTENER);
		if (listener < 0)
			_exit(EXIT_FAILURE);

		/* Park the listener at a known fd shared with the parent. */
		if (dup2(listener, 200) != 200)
			_exit(EXIT_FAILURE);

		close(listener);

		_exit(EXIT_SUCCESS);
	}

	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));

	/*
	 * The seccomp filter has become unused so we should be notified once
	 * the kernel gets around to cleaning up task struct.
	 */
	pollfd.fd = 200;
	pollfd.events = POLLHUP;

	EXPECT_GT(poll(&pollfd, 1, 2000), 0);
	EXPECT_GT((pollfd.revents & POLLHUP) ?: 0, 0);
}
/* Trivial pthread body: does nothing and terminates immediately. */
static void *do_thread(void *arg)
{
	(void)arg;	/* unused */
	return NULL;
}
/*
 * Same POLLHUP-on-filter-unused check as user_notification_filter_empty,
 * but the filter-owning task also spawns children and pthreads first, to
 * make sure their exit paths do not keep the filter "in use".
 */
TEST(user_notification_filter_empty_threaded)
{
	pid_t pid;
	long ret;
	int status;
	struct pollfd pollfd;
	struct __clone_args args = {
		.flags = CLONE_FILES,
		.exit_signal = SIGCHLD,
	};

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	pid = sys_clone3(&args, sizeof(args));
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		pid_t pid1, pid2;
		int listener, status;
		pthread_t thread;

		listener = user_notif_syscall(__NR_dup, SECCOMP_FILTER_FLAG_NEW_LISTENER);
		if (listener < 0)
			_exit(EXIT_FAILURE);

		/* Share the listener with the parent as fd 200. */
		if (dup2(listener, 200) != 200)
			_exit(EXIT_FAILURE);
		close(listener);

		/* Two forked children that exit immediately... */
		pid1 = fork();
		if (pid1 < 0)
			_exit(EXIT_FAILURE);

		if (pid1 == 0)
			_exit(EXIT_SUCCESS);

		pid2 = fork();
		if (pid2 < 0)
			_exit(EXIT_FAILURE);

		if (pid2 == 0)
			_exit(EXIT_SUCCESS);

		/* ...and two short-lived threads, joined back-to-back. */
		if (pthread_create(&thread, NULL, do_thread, NULL) ||
		    pthread_join(thread, NULL))
			_exit(EXIT_FAILURE);

		if (pthread_create(&thread, NULL, do_thread, NULL) ||
		    pthread_join(thread, NULL))
			_exit(EXIT_FAILURE);

		if (waitpid(pid1, &status, 0) != pid1 || !WIFEXITED(status) ||
		    WEXITSTATUS(status))
			_exit(EXIT_FAILURE);

		if (waitpid(pid2, &status, 0) != pid2 || !WIFEXITED(status) ||
		    WEXITSTATUS(status))
			_exit(EXIT_FAILURE);

		exit(EXIT_SUCCESS);
	}

	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));

	/*
	 * The seccomp filter has become unused so we should be notified once
	 * the kernel gets around to cleaning up task struct.
	 */
	pollfd.fd = 200;
	pollfd.events = POLLHUP;

	EXPECT_GT(poll(&pollfd, 1, 2000), 0);
	EXPECT_GT((pollfd.revents & POLLHUP) ?: 0, 0);
}
/*
 * Exercise SECCOMP_IOCTL_NOTIF_ADDFD: flag/size validation, installing an
 * fd into the tracee (arbitrary and SETFD-pinned), and ADDFD against a
 * notification id that is pending but not yet received (EINPROGRESS).
 */
TEST(user_notification_addfd)
{
	pid_t pid;
	long ret;
	int status, listener, memfd, fd;
	struct seccomp_notif_addfd addfd = {};
	struct seccomp_notif_addfd_small small = {};
	struct seccomp_notif_addfd_big big = {};
	struct seccomp_notif req = {};
	struct seccomp_notif_resp resp = {};
	/* 100 ms */
	struct timespec delay = { .tv_nsec = 100000000 };

	memfd = memfd_create("test", 0);
	ASSERT_GE(memfd, 0);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	/* Check that the basic notification machinery works */
	listener = user_notif_syscall(__NR_getppid,
				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
	ASSERT_GE(listener, 0);

	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		/*
		 * The child triggers two notifications: the first is consumed
		 * below; the second is the target of the ADDFD-on-pending-id
		 * check at the end of this test.
		 */
		if (syscall(__NR_getppid) != USER_NOTIF_MAGIC)
			exit(1);
		exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
	}

	ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);

	addfd.srcfd = memfd;
	addfd.newfd = 0;
	addfd.id = req.id;
	addfd.flags = 0x0;

	/* Verify bad newfd_flags cannot be set */
	addfd.newfd_flags = ~O_CLOEXEC;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
	EXPECT_EQ(errno, EINVAL);
	addfd.newfd_flags = O_CLOEXEC;

	/* Verify bad flags cannot be set */
	addfd.flags = 0xff;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
	EXPECT_EQ(errno, EINVAL);
	addfd.flags = 0;

	/* Verify that remote_fd cannot be set without setting flags */
	addfd.newfd = 1;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
	EXPECT_EQ(errno, EINVAL);
	addfd.newfd = 0;

	/* Verify small size cannot be set */
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_SMALL, &small), -1);
	EXPECT_EQ(errno, EINVAL);

	/* Verify we can't send bits filled in unknown buffer area */
	memset(&big, 0xAA, sizeof(big));
	big.addfd = addfd;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_BIG, &big), -1);
	EXPECT_EQ(errno, E2BIG);

	/* Verify we can set an arbitrary remote fd */
	fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
	EXPECT_GE(fd, 0);
	EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0);

	/* Verify we can set an arbitrary remote fd with large size */
	memset(&big, 0x0, sizeof(big));
	big.addfd = addfd;
	fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_BIG, &big);
	EXPECT_GE(fd, 0);

	/* Verify we can set a specific remote fd */
	addfd.newfd = 42;
	addfd.flags = SECCOMP_ADDFD_FLAG_SETFD;
	fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
	EXPECT_EQ(fd, 42);
	EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0);

	/* Resume syscall */
	resp.id = req.id;
	resp.error = 0;
	resp.val = USER_NOTIF_MAGIC;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);

	/*
	 * This sets the ID of the ADD FD to the last request plus 1. The
	 * notification ID increments 1 per notification.
	 */
	addfd.id = req.id + 1;

	/*
	 * Spin until the child's second syscall generates the notification.
	 * Before it exists the ioctl fails with a different errno; once the
	 * notification is pending (but not yet RECV'd) ADDFD fails with
	 * EINPROGRESS, which is our cue to go receive it.
	 *
	 * Fixed: errno is always positive, so the previous comparison
	 * against -EINPROGRESS could never match, and because the ioctl
	 * returns -1 here, the old "!= -1 &&" condition made the loop exit
	 * immediately without ever waiting.
	 */
	while (ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd) == -1 &&
	       errno != EINPROGRESS)
		nanosleep(&delay, NULL);

	memset(&req, 0, sizeof(req));
	ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
	ASSERT_EQ(addfd.id, req.id);

	resp.id = req.id;
	resp.error = 0;
	resp.val = USER_NOTIF_MAGIC;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);

	/* Wait for child to finish. */
	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));

	close(memfd);
}
/*
 * ADDFD must honor the target's file limits: with the tracee's
 * RLIMIT_NOFILE forced to 0, an ordinary install fails with EMFILE and a
 * SETFD install above the limit fails with EBADF.
 */
TEST(user_notification_addfd_rlimit)
{
	pid_t pid;
	long ret;
	int status, listener, memfd;
	struct seccomp_notif_addfd addfd = {};
	struct seccomp_notif req = {};
	struct seccomp_notif_resp resp = {};
	const struct rlimit lim = {
		.rlim_cur = 0,
		.rlim_max = 0,
	};

	memfd = memfd_create("test", 0);
	ASSERT_GE(memfd, 0);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	/* Check that the basic notification machinery works */
	listener = user_notif_syscall(__NR_getppid,
				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
	ASSERT_GE(listener, 0);

	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0)
		exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);

	ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);

	/* Drop the tracee's fd limit to zero while it is blocked. */
	ASSERT_EQ(prlimit(pid, RLIMIT_NOFILE, &lim, NULL), 0);

	addfd.srcfd = memfd;
	addfd.newfd_flags = O_CLOEXEC;
	addfd.newfd = 0;
	addfd.id = req.id;
	addfd.flags = 0;

	/* Should probably spot check /proc/sys/fs/file-nr */
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
	EXPECT_EQ(errno, EMFILE);

	addfd.newfd = 100;
	addfd.flags = SECCOMP_ADDFD_FLAG_SETFD;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
	EXPECT_EQ(errno, EBADF);

	resp.id = req.id;
	resp.error = 0;
	resp.val = USER_NOTIF_MAGIC;

	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);

	/* Wait for child to finish. */
	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));

	close(memfd);
}
/*
* TODO:
* - expand NNP testing
* - better arch-specific TRACE and TRAP handlers.
* - endianness checking when appropriate
* - 64-bit arg prodding
* - arch value testing (x86 modes especially)
* - verify that FILTER_FLAG_LOG filters generate log messages
* - verify that RET_LOG generates log messages
*/
TEST_HARNESS_MAIN
| gpl-2.0 |
Laniax/TC-Zombie-Mod | src/server/game/Movement/MovementGenerators/IdleMovementGenerator.cpp | 25 | 3064 | /*
* Copyright (C) 2008-2011 TrinityCore <http://www.trinitycore.org/>
* Copyright (C) 2005-2009 MaNGOS <http://getmangos.com/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "IdleMovementGenerator.h"
#include "CreatureAI.h"
#include "Creature.h"
IdleMovementGenerator si_idleMovement;
// StopMoving is needed to make unit stop if its last movement generator expires
// But it should not be sent otherwise, as that would generate many redundant packets
// Entering idle movement: delegate to Reset(), which stops the unit if needed.
void IdleMovementGenerator::Initialize(Unit &owner)
{
    Reset(owner);
}
// Stop the unit, but only when it is actually moving, to avoid sending
// unnecessary movement packets.
void
IdleMovementGenerator::Reset(Unit& owner)
{
    if (owner.HasUnitState(UNIT_STAT_MOVE))
        owner.StopMoving();
}
// Prepare a unit for in-place rotation: halt movement, face the current
// victim (if any), flag the rotating state and break off the attack.
void RotateMovementGenerator::Initialize(Unit& owner)
{
    if (owner.HasUnitState(UNIT_STAT_MOVE))
        owner.StopMoving();

    // Face the victim once before the rotation starts ticking.
    if (owner.getVictim())
        owner.SetInFront(owner.getVictim());

    owner.AddUnitState(UNIT_STAT_ROTATING);

    owner.AttackStop();
}
// Advance the rotation: turn the unit by an angular step proportional to the
// elapsed time (one full circle per m_maxDuration ms), normalize the
// orientation into [0, 2*pi) and count down the remaining duration.
// Returns false once the rotation time is exhausted.
bool RotateMovementGenerator::Update(Unit& owner, const uint32 diff)
{
    float const fullCircle = static_cast<float>(M_PI * 2);
    float orientation = owner.GetOrientation();

    if (m_direction == ROTATE_DIRECTION_LEFT)
    {
        orientation += (float)diff * fullCircle / m_maxDuration;
        while (orientation >= fullCircle)
            orientation -= fullCircle;
    }
    else
    {
        orientation -= (float)diff * fullCircle / m_maxDuration;
        while (orientation < 0)
            orientation += fullCircle;
    }

    owner.SetOrientation(orientation);
    owner.SendMovementFlagUpdate(); // this is a hack. we do not have anything correct to send in the beginning

    if (m_duration <= diff)
        return false;

    m_duration -= diff;
    return true;
}
// Tear down the rotation: clear the state flag and, for creatures, notify
// the AI that the rotate motion has completed.
void RotateMovementGenerator::Finalize(Unit &unit)
{
    unit.ClearUnitState(UNIT_STAT_ROTATING);
    if (unit.GetTypeId() == TYPEID_UNIT)
        unit.ToCreature()->AI()->MovementInform(ROTATE_MOTION_TYPE, 0);
}
// Mark the unit as distracted for the duration of this generator.
void
DistractMovementGenerator::Initialize(Unit& owner)
{
    owner.AddUnitState(UNIT_STAT_DISTRACTED);
}
// Clear the distracted flag when the generator ends.
void
DistractMovementGenerator::Finalize(Unit& owner)
{
    owner.ClearUnitState(UNIT_STAT_DISTRACTED);
}
// Count the distraction timer down by the elapsed time; report expiry
// (return false) once more time has passed than remains on the timer.
bool
DistractMovementGenerator::Update(Unit& /*owner*/, const uint32 elapsed)
{
    bool const stillDistracted = (elapsed <= m_timer);
    if (stillDistracted)
        m_timer -= elapsed;
    return stillDistracted;
}
// Like the base Finalize, but also restores aggressive reactions on the
// creature once the assistance distraction is over.
void
AssistanceDistractMovementGenerator::Finalize(Unit &unit)
{
    unit.ClearUnitState(UNIT_STAT_DISTRACTED);
    unit.ToCreature()->SetReactState(REACT_AGGRESSIVE);
}
| gpl-2.0 |
rlnelson-git/linux-nvme | kernel/rcu/tiny.c | 281 | 10721 | /*
* Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright IBM Corporation, 2008
*
* Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU
*/
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/ftrace_event.h>
#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */
#include "rcu.h"
/* Forward declarations for tiny_plugin.h. */
struct rcu_ctrlblk;
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *rcu),
struct rcu_ctrlblk *rcp);
static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
#include "tiny_plugin.h"
/*
 * Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c.
 * @newval: nesting value to adopt.  Non-zero means we are still nested in
 * non-idle code, so only the counter is updated; zero means we really are
 * entering the extended quiescent state.
 */
static void rcu_idle_enter_common(long long newval)
{
	if (newval) {
		/* Still nested: just record the new depth and return. */
		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
					    rcu_dynticks_nesting, newval));
		rcu_dynticks_nesting = newval;
		return;
	}
	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
				    rcu_dynticks_nesting, newval));
	if (!is_idle_task(current)) {
		/* Diagnostic path: entering idle from a non-idle task. */
		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
					    rcu_dynticks_nesting, newval));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
	/* Keep the compiler from reordering the QS report past the update. */
	barrier();
	rcu_dynticks_nesting = newval;
}
/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 * Must be called with interrupts enabled; disables them across the update.
 */
void rcu_idle_enter(void)
{
	unsigned long flags;
	long long newval;

	local_irq_save(flags);
	/* Entering idle with a zero task-nesting count is a usage error. */
	WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
	if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
	    DYNTICK_TASK_NEST_VALUE)
		newval = 0;	/* outermost level: really go idle */
	else
		newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
	rcu_idle_enter_common(newval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
/*
 * Exit an interrupt handler towards idle.  Decrements the dynticks
 * nesting count by one; a result of zero means the CPU goes idle.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long newval;

	local_irq_save(flags);
	newval = rcu_dynticks_nesting - 1;
	/* More irq exits than entries would drive the count negative. */
	WARN_ON_ONCE(newval < 0);
	rcu_idle_enter_common(newval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_exit);
/*
 * Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c.
 * @oldval: the nesting value before the caller's increment.  Non-zero means
 * we were never fully idle, so only tracing is needed; zero means we are
 * leaving the extended quiescent state.
 */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		/* We were still nested; nothing to do beyond tracing. */
		RCU_TRACE(trace_rcu_dyntick(TPS("++="),
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
	if (!is_idle_task(current)) {
		/* Diagnostic path: exiting idle from a non-idle task. */
		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}
/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 * Restores the task-level nesting value; interrupts are disabled across
 * the update.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
	if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
		rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	else
		rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);
/*
 * Enter an interrupt handler, moving away from idle.  Increments the
 * dynticks nesting count; a previous value of zero means we are leaving
 * the extended quiescent state.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting++;
	/* The increment must not wrap/land on zero. */
	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_enter);
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

/*
 * Test whether RCU thinks that the current CPU is idle.
 * Non-zero means RCU is watching (not in the extended quiescent state).
 */
bool notrace __rcu_is_watching(void)
{
	return rcu_dynticks_nesting;
}
EXPORT_SYMBOL(__rcu_is_watching);

#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count, we must be running at the first interrupt
 * level.  (A nesting value of 1 is the first irq on top of idle.)
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	return rcu_dynticks_nesting <= 1;
}
/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Also irqs are disabled to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 * Returns 1 if callbacks became ready to invoke (donetail advanced),
 * 0 otherwise.
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	RCU_TRACE(reset_cpu_stall_ticks(rcp));
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		/* Everything queued so far is now past a grace period. */
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}
/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 * Raises RCU_SOFTIRQ when either flavor has callbacks ready to invoke.
 */
void rcu_sched_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}
/*
 * Record an rcu_bh quiescent state; raises RCU_SOFTIRQ if that readied
 * any rcu_bh callbacks.
 */
void rcu_bh_qs(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 *
 * User mode or idle counts as a full quiescent state; interrupting
 * non-softirq kernel code still counts as an rcu_bh quiescent state.
 */
void rcu_check_callbacks(int cpu, int user)
{
	RCU_TRACE(check_cpu_stalls());
	if (user || rcu_is_cpu_rrupt_from_idle())
		rcu_sched_qs(cpu);
	else if (!in_softirq())
		rcu_bh_qs(cpu);
}
/*
 * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure
 * whose grace period has elapsed.  Detaches the done sublist under
 * irq-off protection, then invokes each callback with BH disabled.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	const char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      !!ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      false));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	/* If the whole list was done, reset both tails to the list head. */
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name,
				      cb_count, 0, need_resched(),
				      is_idle_task(current),
				      false));
}
/* RCU_SOFTIRQ handler: run ready callbacks for both RCU flavors. */
static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
}
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	/* Catch illegal use from inside a read-side critical section. */
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_sched() in RCU read-side critical section");
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
/*
 * Helper function for call_rcu() and call_rcu_bh(): initialize @head and
 * append it to @rcp's callback list under irq-off protection.
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}
/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
/* Boot-time initialization: register the RCU softirq handler. */
void rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}
| gpl-2.0 |
h8rift/android_kernel_htc_msm8960-evita-1_85 | drivers/input/keyboard/pmic8xxx-keypad.c | 281 | 20119 | /* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/mfd/pm8xxx/core.h>
#include <linux/mfd/pm8xxx/gpio.h>
#include <linux/input/pmic8xxx-keypad.h>
#define PM8XXX_MAX_ROWS 18
#define PM8XXX_MAX_COLS 8
#define PM8XXX_ROW_SHIFT 3
#define PM8XXX_MATRIX_MAX_SIZE (PM8XXX_MAX_ROWS * PM8XXX_MAX_COLS)
#define PM8XXX_MIN_ROWS 5
#define PM8XXX_MIN_COLS 5
#define MAX_SCAN_DELAY 128
#define MIN_SCAN_DELAY 1
/* in nanoseconds */
#define MAX_ROW_HOLD_DELAY 122000
#define MIN_ROW_HOLD_DELAY 30500
#define MAX_DEBOUNCE_TIME 20
#define MIN_DEBOUNCE_TIME 5
#define KEYP_CTRL 0x148
#define KEYP_CTRL_EVNTS BIT(0)
#define KEYP_CTRL_EVNTS_MASK 0x3
#define KEYP_CTRL_SCAN_COLS_SHIFT 5
#define KEYP_CTRL_SCAN_COLS_MIN 5
#define KEYP_CTRL_SCAN_COLS_BITS 0x3
#define KEYP_CTRL_SCAN_ROWS_SHIFT 2
#define KEYP_CTRL_SCAN_ROWS_MIN 5
#define KEYP_CTRL_SCAN_ROWS_BITS 0x7
#define KEYP_CTRL_KEYP_EN BIT(7)
#define KEYP_SCAN 0x149
#define KEYP_SCAN_READ_STATE BIT(0)
#define KEYP_SCAN_DBOUNCE_SHIFT 1
#define KEYP_SCAN_PAUSE_SHIFT 3
#define KEYP_SCAN_ROW_HOLD_SHIFT 6
#define KEYP_TEST 0x14A
#define KEYP_TEST_CLEAR_RECENT_SCAN BIT(6)
#define KEYP_TEST_CLEAR_OLD_SCAN BIT(5)
#define KEYP_TEST_READ_RESET BIT(4)
#define KEYP_TEST_DTEST_EN BIT(3)
#define KEYP_TEST_ABORT_READ BIT(0)
#define KEYP_TEST_DBG_SELECT_SHIFT 1
/* bits of these registers represent
* '0' for key press
* '1' for key release
*/
#define KEYP_RECENT_DATA 0x14B
#define KEYP_OLD_DATA 0x14C
#define KEYP_CLOCK_FREQ 32768
/**
 * struct pmic8xxx_kp - internal keypad data structure
 * @pdata: keypad platform data pointer
 * @input: input device pointer for keypad
 * @key_sense_irq: key press/release irq number
 * @key_stuck_irq: key stuck notification irq number
 * @keycodes: array to hold the key codes
 * @dev: parent device pointer
 * @keystate: present key press/release state (one column bitmask per row)
 * @stuckstate: snapshot of key state used by the key-stuck irq handler
 * @ctrl_reg: cached KEYP_CTRL register value
 */
struct pmic8xxx_kp {
	const struct pm8xxx_keypad_platform_data *pdata;
	struct input_dev *input;
	int key_sense_irq;
	int key_stuck_irq;

	unsigned short keycodes[PM8XXX_MATRIX_MAX_SIZE];

	struct device *dev;
	u16 keystate[PM8XXX_MAX_ROWS];
	u16 stuckstate[PM8XXX_MAX_ROWS];

	u8 ctrl_reg;
};
/* Write one byte to a keypad register; warn (but don't fail hard) on error. */
static int pmic8xxx_kp_write_u8(struct pmic8xxx_kp *kp,
		u8 data, u16 reg)
{
	int ret = pm8xxx_writeb(kp->dev->parent, reg, data);

	if (ret < 0)
		dev_warn(kp->dev, "Error writing pmic8xxx: %X - ret %X\n",
				reg, ret);

	return ret;
}
/* Read @num_bytes consecutive keypad registers into @data; warn on error. */
static int pmic8xxx_kp_read(struct pmic8xxx_kp *kp,
				 u8 *data, u16 reg, unsigned num_bytes)
{
	int ret = pm8xxx_read_buf(kp->dev->parent, reg, data, num_bytes);

	if (ret < 0)
		dev_warn(kp->dev, "Error reading pmic8xxx: %X - ret %X\n",
				reg, ret);

	return ret;
}
/*
 * Read a single keypad register byte.
 * NOTE(review): pmic8xxx_kp_read() already warns on failure, so an error
 * here is logged twice -- presumably harmless, but worth confirming.
 */
static int pmic8xxx_kp_read_u8(struct pmic8xxx_kp *kp,
		u8 *data, u16 reg)
{
	int rc;

	rc = pmic8xxx_kp_read(kp, data, reg, 1);
	if (rc < 0)
		dev_warn(kp->dev, "Error reading pmic8xxx: %X - ret %X\n",
				reg, rc);
	return rc;
}
/*
 * Convert a raw column data byte into the row's key-state bitmask,
 * keeping only the columns that exist on this board.  A raw value of
 * 0x00 (all bits low = every key in the row pressed) is flagged with a
 * marker bit one past the last real column.
 *
 * NOTE(review): the return type is u8, so with num_cols == 8
 * (PM8XXX_MAX_COLS) the all-pressed marker (1 << 8) truncates to 0 --
 * confirm whether 8-column configurations rely on this sentinel.
 */
static u8 pmic8xxx_col_state(struct pmic8xxx_kp *kp, u8 col)
{
	/* all keys pressed on that particular row? */
	if (col == 0x00)
		return 1 << kp->pdata->num_cols;
	else
		return col & ((1 << kp->pdata->num_cols) - 1);
}
/*
 * Synchronous read protocol for RevB0 onwards:
 *
 * 1. Write '1' to ReadState bit in KEYP_SCAN register
 * 2. Wait 2*32KHz clocks, so that HW can successfully enter read mode
 *    synchronously
 * 3. Read rows in old array first if events are more than one
 * 4. Read rows in recent array
 * 5. Wait 4*32KHz clocks
 * 6. Write '0' to ReadState bit of KEYP_SCAN register so that hw can
 *    synchronously exit read mode.
 */
static int pmic8xxx_chk_sync_read(struct pmic8xxx_kp *kp)
{
	int rc;
	u8 scan_val;

	rc = pmic8xxx_kp_read_u8(kp, &scan_val, KEYP_SCAN);
	if (rc < 0) {
		dev_err(kp->dev, "Error reading KEYP_SCAN reg, rc=%d\n", rc);
		return rc;
	}

	/* Set the ReadState bit (step 1 above). */
	scan_val |= 0x1;
	rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
	if (rc < 0) {
		dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);
		return rc;
	}

	/* 2 * 32KHz clocks */
	udelay((2 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);

	return rc;
}
/*
 * Read @read_rows row-data bytes starting at @data_reg and convert each
 * into a column bitmask stored in @state (only the first num_rows entries
 * are filled in).
 */
static int pmic8xxx_kp_read_data(struct pmic8xxx_kp *kp, u16 *state,
					u16 data_reg, int read_rows)
{
	int rc, row;
	u8 new_data[PM8XXX_MAX_ROWS];

	rc = pmic8xxx_kp_read(kp, new_data, data_reg, read_rows);
	if (rc)
		return rc;

	for (row = 0; row < kp->pdata->num_rows; row++) {
		dev_dbg(kp->dev, "new_data[%d] = %d\n", row,
					new_data[row]);
		state[row] = pmic8xxx_col_state(kp, new_data[row]);
	}

	return rc;
}
/*
 * Read the full key matrix using the synchronous read protocol (see
 * pmic8xxx_chk_sync_read above): enter read mode, read the OLD snapshot
 * (if @old_state is non-NULL) then the RECENT snapshot, wait, and clear
 * the ReadState bit to exit read mode.
 */
static int pmic8xxx_kp_read_matrix(struct pmic8xxx_kp *kp, u16 *new_state,
					 u16 *old_state)
{
	int rc, read_rows;
	u8 scan_val;

	/* Hardware always exposes at least PM8XXX_MIN_ROWS rows. */
	if (kp->pdata->num_rows < PM8XXX_MIN_ROWS)
		read_rows = PM8XXX_MIN_ROWS;
	else
		read_rows = kp->pdata->num_rows;

	pmic8xxx_chk_sync_read(kp);

	if (old_state) {
		rc = pmic8xxx_kp_read_data(kp, old_state, KEYP_OLD_DATA,
						read_rows);
		if (rc < 0) {
			dev_err(kp->dev,
				"Error reading KEYP_OLD_DATA, rc=%d\n", rc);
			return rc;
		}
	}

	rc = pmic8xxx_kp_read_data(kp, new_state, KEYP_RECENT_DATA,
					 read_rows);
	if (rc < 0) {
		dev_err(kp->dev,
			"Error reading KEYP_RECENT_DATA, rc=%d\n", rc);
		return rc;
	}

	/* 4 * 32KHz clocks */
	udelay((4 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);

	/* Clear the ReadState bit to let hardware leave read mode. */
	rc = pmic8xxx_kp_read_u8(kp, &scan_val, KEYP_SCAN);
	if (rc < 0) {
		dev_err(kp->dev, "Error reading KEYP_SCAN reg, rc=%d\n", rc);
		return rc;
	}

	scan_val &= 0xFE;
	rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
	if (rc < 0)
		dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);

	return rc;
}
/*
 * Diff @new_state against @old_state and report every changed key to the
 * input layer.  Register bits are '0' for pressed keys, hence the
 * negation when deciding press vs release.
 */
static void __pmic8xxx_kp_scan_matrix(struct pmic8xxx_kp *kp, u16 *new_state,
					 u16 *old_state)
{
	int row, col, code;

	for (row = 0; row < kp->pdata->num_rows; row++) {
		int bits_changed = new_state[row] ^ old_state[row];

		if (!bits_changed)
			continue;

		for (col = 0; col < kp->pdata->num_cols; col++) {
			if (!(bits_changed & (1 << col)))
				continue;

			dev_dbg(kp->dev, "key [%d:%d] %s\n", row, col,
					!(new_state[row] & (1 << col)) ?
					"pressed" : "released");

			code = MATRIX_SCAN_CODE(row, col, PM8XXX_ROW_SHIFT);

			input_event(kp->input, EV_MSC, MSC_SCAN, code);
			input_report_key(kp->input,
					kp->keycodes[code],
					!(new_state[row] & (1 << col)));

			input_sync(kp->input);
		}
	}
}
/*
 * Detect ghost keys: if two rows each have more than one key pressed and
 * those rows share a pressed column, the matrix reading is ambiguous
 * (classic keyboard-matrix ghosting) and the scan should be discarded.
 */
static bool pmic8xxx_detect_ghost_keys(struct pmic8xxx_kp *kp, u16 *new_state)
{
	int row, found_first = -1;
	u16 check, row_state;

	check = 0;
	for (row = 0; row < kp->pdata->num_rows; row++) {
		/* Invert so a set bit means "pressed". */
		row_state = (~new_state[row]) &
				 ((1 << kp->pdata->num_cols) - 1);

		/* Only rows with multiple pressed keys can ghost. */
		if (hweight16(row_state) > 1) {
			if (found_first == -1)
				found_first = row;
			if (check & row_state) {
				dev_dbg(kp->dev, "detected ghost key on row[%d]"
						 " and row[%d]\n", found_first, row);
				return true;
			}
		}
		check |= row_state;
	}
	return false;
}
/*
 * Process key events according to the hardware event counter in @events:
 * 0x1 = one new event (only the recent data register is valid),
 * 0x3 = two events - eventcounter is gray-coded (old + recent both valid),
 * 0x2 = more than two events occurred and some were lost; we still
 *       recover whatever the old + recent snapshots show.
 *
 * The 0x2 and 0x3 paths were previously duplicated verbatim; they are now
 * merged with an explicit fall-through, with identical behavior.
 */
static int pmic8xxx_kp_scan_matrix(struct pmic8xxx_kp *kp, unsigned int events)
{
	u16 new_state[PM8XXX_MAX_ROWS];
	u16 old_state[PM8XXX_MAX_ROWS];
	int rc;

	switch (events) {
	case 0x1:
		rc = pmic8xxx_kp_read_matrix(kp, new_state, NULL);
		if (rc < 0)
			return rc;

		/* detecting ghost key is not an error */
		if (pmic8xxx_detect_ghost_keys(kp, new_state))
			return 0;
		__pmic8xxx_kp_scan_matrix(kp, new_state, kp->keystate);
		memcpy(kp->keystate, new_state, sizeof(new_state));
		break;
	case 0x2:
		dev_dbg(kp->dev, "Some key events were lost\n");
		/* fall through - process old + recent data as for 0x3 */
	case 0x3:
		rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state);
		if (rc < 0)
			return rc;

		/* Replay the intermediate state, then the latest one. */
		__pmic8xxx_kp_scan_matrix(kp, old_state, kp->keystate);
		__pmic8xxx_kp_scan_matrix(kp, new_state, old_state);
		memcpy(kp->keystate, new_state, sizeof(new_state));
		break;
	default:
		rc = -EINVAL;
	}
	return rc;
}
/*
 * NOTE: We read the recent and old data registers unconditionally whenever
 * the key-stuck interrupt fires, because the event counter is not updated
 * for this interrupt: a stuck key is not considered a key state change.
 *
 * We do not reuse the old data register contents after reading them,
 * because a key that was pressed before the key became stuck still has
 * its pressed status stored there, and would be misreported as the
 * stuck key.
 */
/*
 * Key-stuck interrupt handler: re-read the matrix and report any changes
 * against the dedicated stuck-key shadow state (see the NOTE above the
 * function for why the old data register contents are discarded).
 */
static irqreturn_t pmic8xxx_kp_stuck_irq(int irq, void *data)
{
	u16 new_state[PM8XXX_MAX_ROWS];
	u16 old_state[PM8XXX_MAX_ROWS];
	int rc;
	struct pmic8xxx_kp *kp = data;

	rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state);
	if (rc < 0) {
		dev_err(kp->dev, "failed to read keypad matrix\n");
		return IRQ_HANDLED;
	}

	/* diff against stuckstate, not keystate, to avoid double reporting */
	__pmic8xxx_kp_scan_matrix(kp, new_state, kp->stuckstate);

	return IRQ_HANDLED;
}
/*
 * Key-sense interrupt handler: read the gray-coded event count from the
 * control register and process that many key state changes.
 */
static irqreturn_t pmic8xxx_kp_irq(int irq, void *data)
{
	struct pmic8xxx_kp *kp = data;
	u8 ctrl_val, events;
	int rc;

	rc = pmic8xxx_kp_read(kp, &ctrl_val, KEYP_CTRL, 1);
	if (rc < 0) {
		dev_err(kp->dev, "failed to read keyp_ctrl register\n");
		return IRQ_HANDLED;
	}

	events = ctrl_val & KEYP_CTRL_EVNTS_MASK;

	rc = pmic8xxx_kp_scan_matrix(kp, events);
	if (rc < 0)
		dev_err(kp->dev, "failed to scan matrix\n");

	return IRQ_HANDLED;
}
/*
 * Program the controller's CTRL and SCAN registers from platform data:
 * row/column counts, debounce time, scan pause and row-hold time.
 * Returns 0 on success or a negative error code.
 */
static int __devinit pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
{
	int bits, rc, cycles;
	u8 scan_val = 0, ctrl_val = 0;
	/* Hardware encodes the row count non-linearly; map count -> field. */
	static const u8 row_bits[] = {
		0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7,
	};

	/* Find column bits */
	if (kp->pdata->num_cols < KEYP_CTRL_SCAN_COLS_MIN)
		bits = 0;
	else
		bits = kp->pdata->num_cols - KEYP_CTRL_SCAN_COLS_MIN;
	ctrl_val = (bits & KEYP_CTRL_SCAN_COLS_BITS) <<
		KEYP_CTRL_SCAN_COLS_SHIFT;

	/* Find row bits */
	if (kp->pdata->num_rows < KEYP_CTRL_SCAN_ROWS_MIN)
		bits = 0;
	else
		bits = row_bits[kp->pdata->num_rows - KEYP_CTRL_SCAN_ROWS_MIN];

	ctrl_val |= (bits << KEYP_CTRL_SCAN_ROWS_SHIFT);

	rc = pmic8xxx_kp_write_u8(kp, ctrl_val, KEYP_CTRL);
	if (rc < 0) {
		dev_err(kp->dev, "Error writing KEYP_CTRL reg, rc=%d\n", rc);
		return rc;
	}

	/* Debounce is configured in 5 ms steps (validated in probe). */
	bits = (kp->pdata->debounce_ms / 5) - 1;

	scan_val |= (bits << KEYP_SCAN_DBOUNCE_SHIFT);

	/* Scan pause field takes log2 of the (power-of-two) delay in ms. */
	bits = fls(kp->pdata->scan_delay_ms) - 1;
	scan_val |= (bits << KEYP_SCAN_PAUSE_SHIFT);

	/* Row hold time is a multiple of 32KHz cycles. */
	cycles = (kp->pdata->row_hold_ns * KEYP_CLOCK_FREQ) / NSEC_PER_SEC;

	scan_val |= (cycles << KEYP_SCAN_ROW_HOLD_SHIFT);

	rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
	if (rc)
		dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);

	return rc;
}
/*
 * Apply the same PMIC GPIO configuration to @num_gpios consecutive GPIOs
 * starting at @gpio_start (used for the keypad drive and sense lines).
 *
 * Returns 0 on success, -EINVAL for bad arguments, or the error from
 * pm8xxx_gpio_config() for the first GPIO that fails.
 *
 * Fix: the split error-message literal was missing a space, producing
 * "pm8xxx_gpio_config():for PM GPIO ..." in the log.
 */
static int __devinit pmic8xxx_kp_config_gpio(int gpio_start, int num_gpios,
			struct pmic8xxx_kp *kp, struct pm_gpio *gpio_config)
{
	int rc, i;

	if (gpio_start < 0 || num_gpios < 0)
		return -EINVAL;

	for (i = 0; i < num_gpios; i++) {
		rc = pm8xxx_gpio_config(gpio_start + i, gpio_config);
		if (rc) {
			dev_err(kp->dev, "%s: FAIL pm8xxx_gpio_config() "
					"for PM GPIO [%d] rc=%d.\n",
					__func__, gpio_start + i, rc);
			return rc;
		}
	}

	return 0;
}
/*
 * Set the keypad-enable bit in the cached control register and write it
 * back to the hardware.  Returns 0 on success or a negative error code.
 */
static int pmic8xxx_kp_enable(struct pmic8xxx_kp *kp)
{
	int rc;

	kp->ctrl_reg |= KEYP_CTRL_KEYP_EN;

	rc = pmic8xxx_kp_write_u8(kp, kp->ctrl_reg, KEYP_CTRL);
	if (rc < 0)
		dev_err(kp->dev, "Error writing KEYP_CTRL reg, rc=%d\n", rc);

	return rc;
}
static int pmic8xxx_kp_disable(struct pmic8xxx_kp *kp)
{
int rc;
kp->ctrl_reg &= ~KEYP_CTRL_KEYP_EN;
rc = pmic8xxx_kp_write_u8(kp, kp->ctrl_reg, KEYP_CTRL);
if (rc < 0)
return rc;
return rc;
}
/* Input core "open" callback: power up the keypad controller. */
static int pmic8xxx_kp_open(struct input_dev *dev)
{
	return pmic8xxx_kp_enable(input_get_drvdata(dev));
}
/* Input core "close" callback: power down the keypad controller. */
static void pmic8xxx_kp_close(struct input_dev *dev)
{
	pmic8xxx_kp_disable(input_get_drvdata(dev));
}
/*
* keypad controller should be initialized in the following sequence
* only, otherwise it might get into FSM stuck state.
*
* - Initialize keypad control parameters, like no. of rows, columns,
* timing values etc.,
* - configure rows and column gpios pull up/down.
* - set irq edge type.
* - enable the keypad controller.
*/
/*
 * Probe: validate platform data, allocate driver and input devices, program
 * the controller, configure the row/column PMIC GPIOs, request the sense and
 * stuck IRQs, then register the input device.  The ordering follows the
 * sequence documented above to avoid wedging the controller FSM.
 */
static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
{
	const struct pm8xxx_keypad_platform_data *pdata =
					dev_get_platdata(&pdev->dev);
	const struct matrix_keymap_data *keymap_data;
	struct pmic8xxx_kp *kp;
	int rc;
	u8 ctrl_val;

	/* Row (drive) lines: open-drain outputs on alternate function 1. */
	struct pm_gpio kypd_drv = {
		.direction	= PM_GPIO_DIR_OUT,
		.output_buffer	= PM_GPIO_OUT_BUF_OPEN_DRAIN,
		.output_value	= 0,
		.pull		= PM_GPIO_PULL_NO,
		.vin_sel	= PM_GPIO_VIN_S4,
		.out_strength	= PM_GPIO_STRENGTH_LOW,
		.function	= PM_GPIO_FUNC_1,
		.inv_int_pol	= 1,
	};

	/* Column (sense) lines: pulled-up inputs. */
	struct pm_gpio kypd_sns = {
		.direction	= PM_GPIO_DIR_IN,
		.pull		= PM_GPIO_PULL_UP_31P5,
		.vin_sel	= PM_GPIO_VIN_S4,
		.out_strength	= PM_GPIO_STRENGTH_NO,
		.function	= PM_GPIO_FUNC_NORMAL,
		.inv_int_pol	= 1,
	};

	/* Reject matrix geometries the hardware cannot scan. */
	if (!pdata || !pdata->num_cols || !pdata->num_rows ||
		pdata->num_cols > PM8XXX_MAX_COLS ||
		pdata->num_rows > PM8XXX_MAX_ROWS ||
		pdata->num_cols < PM8XXX_MIN_COLS) {
		dev_err(&pdev->dev, "invalid platform data\n");
		return -EINVAL;
	}

	/* Scan delay must be a power of two (encoded as log2 in KEYP_SCAN). */
	if (!pdata->scan_delay_ms ||
		pdata->scan_delay_ms > MAX_SCAN_DELAY ||
		pdata->scan_delay_ms < MIN_SCAN_DELAY ||
		!is_power_of_2(pdata->scan_delay_ms)) {
		dev_err(&pdev->dev, "invalid keypad scan time supplied\n");
		return -EINVAL;
	}

	/* Row hold time must be a multiple of the 32 kHz clock period. */
	if (!pdata->row_hold_ns ||
		pdata->row_hold_ns > MAX_ROW_HOLD_DELAY ||
		pdata->row_hold_ns < MIN_ROW_HOLD_DELAY ||
		((pdata->row_hold_ns % MIN_ROW_HOLD_DELAY) != 0)) {
		dev_err(&pdev->dev, "invalid keypad row hold time supplied\n");
		return -EINVAL;
	}

	/* Debounce time is programmed in 5 ms steps. */
	if (!pdata->debounce_ms ||
		((pdata->debounce_ms % 5) != 0) ||
		pdata->debounce_ms > MAX_DEBOUNCE_TIME ||
		pdata->debounce_ms < MIN_DEBOUNCE_TIME) {
		dev_err(&pdev->dev, "invalid debounce time supplied\n");
		return -EINVAL;
	}

	keymap_data = pdata->keymap_data;
	if (!keymap_data) {
		dev_err(&pdev->dev, "no keymap data supplied\n");
		return -EINVAL;
	}

	kp = kzalloc(sizeof(*kp), GFP_KERNEL);
	if (!kp)
		return -ENOMEM;

	platform_set_drvdata(pdev, kp);

	kp->pdata	= pdata;
	kp->dev		= &pdev->dev;

	kp->input = input_allocate_device();
	if (!kp->input) {
		dev_err(&pdev->dev, "unable to allocate input device\n");
		rc = -ENOMEM;
		goto err_alloc_device;
	}

	/* IRQ 0 = key sense (state change), IRQ 1 = key stuck. */
	kp->key_sense_irq = platform_get_irq(pdev, 0);
	if (kp->key_sense_irq < 0) {
		dev_err(&pdev->dev, "unable to get keypad sense irq\n");
		rc = -ENXIO;
		goto err_get_irq;
	}

	kp->key_stuck_irq = platform_get_irq(pdev, 1);
	if (kp->key_stuck_irq < 0) {
		dev_err(&pdev->dev, "unable to get keypad stuck irq\n");
		rc = -ENXIO;
		goto err_get_irq;
	}

	kp->input->name = pdata->input_name ? : "PMIC8XXX keypad";
	kp->input->phys = pdata->input_phys_device ? : "pmic8xxx_keypad/input0";

	kp->input->dev.parent	= &pdev->dev;

	kp->input->id.bustype	= BUS_I2C;
	kp->input->id.version	= 0x0001;
	kp->input->id.product	= 0x0001;
	kp->input->id.vendor	= 0x0001;

	kp->input->evbit[0]	= BIT_MASK(EV_KEY);

	if (pdata->rep)
		__set_bit(EV_REP, kp->input->evbit);

	kp->input->keycode	= kp->keycodes;
	kp->input->keycodemax	= PM8XXX_MATRIX_MAX_SIZE;
	kp->input->keycodesize	= sizeof(kp->keycodes);
	kp->input->open		= pmic8xxx_kp_open;
	kp->input->close	= pmic8xxx_kp_close;

	matrix_keypad_build_keymap(keymap_data, PM8XXX_ROW_SHIFT,
					kp->input->keycode, kp->input->keybit);

	input_set_capability(kp->input, EV_MSC, MSC_SCAN);
	input_set_drvdata(kp->input, kp);

	/* initialize keypad state; all bits set == no keys pressed */
	memset(kp->keystate, 0xff, sizeof(kp->keystate));
	memset(kp->stuckstate, 0xff, sizeof(kp->stuckstate));

	/* Controller must be programmed before GPIOs/IRQs (FSM ordering). */
	rc = pmic8xxx_kpd_init(kp);
	if (rc < 0) {
		dev_err(&pdev->dev, "unable to initialize keypad controller\n");
		goto err_get_irq;
	}

	rc = pmic8xxx_kp_config_gpio(pdata->cols_gpio_start,
					pdata->num_cols, kp, &kypd_sns);
	if (rc < 0) {
		dev_err(&pdev->dev, "unable to configure keypad sense lines\n");
		goto err_gpio_config;
	}

	rc = pmic8xxx_kp_config_gpio(pdata->rows_gpio_start,
					pdata->num_rows, kp, &kypd_drv);
	if (rc < 0) {
		dev_err(&pdev->dev, "unable to configure keypad drive lines\n");
		goto err_gpio_config;
	}

	rc = request_any_context_irq(kp->key_sense_irq, pmic8xxx_kp_irq,
				 IRQF_TRIGGER_RISING, "pmic-keypad", kp);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to request keypad sense irq\n");
		goto err_get_irq;
	}

	rc = request_any_context_irq(kp->key_stuck_irq, pmic8xxx_kp_stuck_irq,
				 IRQF_TRIGGER_RISING, "pmic-keypad-stuck", kp);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to request keypad stuck irq\n");
		goto err_req_stuck_irq;
	}

	/* Cache KEYP_CTRL so enable/disable only flip the enable bit. */
	rc = pmic8xxx_kp_read_u8(kp, &ctrl_val, KEYP_CTRL);
	if (rc < 0) {
		dev_err(&pdev->dev, "failed to read KEYP_CTRL register\n");
		goto err_pmic_reg_read;
	}

	kp->ctrl_reg = ctrl_val;

	rc = input_register_device(kp->input);
	if (rc < 0) {
		dev_err(&pdev->dev, "unable to register keypad input device\n");
		goto err_pmic_reg_read;
	}

	device_init_wakeup(&pdev->dev, pdata->wakeup);

	return 0;

err_pmic_reg_read:
	free_irq(kp->key_stuck_irq, kp);
err_req_stuck_irq:
	free_irq(kp->key_sense_irq, kp);
err_gpio_config:
err_get_irq:
	input_free_device(kp->input);
err_alloc_device:
	platform_set_drvdata(pdev, NULL);
	kfree(kp);
	return rc;
}
/* Tear down everything probe set up, in reverse order. */
static int __devexit pmic8xxx_kp_remove(struct platform_device *pdev)
{
	struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);

	device_init_wakeup(&pdev->dev, 0);
	free_irq(kp->key_stuck_irq, kp);
	free_irq(kp->key_sense_irq, kp);
	/* unregister also frees the input device */
	input_unregister_device(kp->input);
	kfree(kp);

	platform_set_drvdata(pdev, NULL);
	return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
 * Suspend: if the keypad is a wakeup source, arm the sense IRQ for wake;
 * otherwise disable the controller while no one can use it.
 */
static int pmic8xxx_kp_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
	struct input_dev *input_dev = kp->input;

	if (device_may_wakeup(dev)) {
		enable_irq_wake(kp->key_sense_irq);
	} else {
		/* input_dev->mutex serializes against open/close */
		mutex_lock(&input_dev->mutex);

		if (input_dev->users)
			pmic8xxx_kp_disable(kp);

		mutex_unlock(&input_dev->mutex);
	}

	return 0;
}
/* Resume: mirror of pmic8xxx_kp_suspend(). */
static int pmic8xxx_kp_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
	struct input_dev *input_dev = kp->input;

	if (device_may_wakeup(dev)) {
		disable_irq_wake(kp->key_sense_irq);
	} else {
		mutex_lock(&input_dev->mutex);

		if (input_dev->users)
			pmic8xxx_kp_enable(kp);

		mutex_unlock(&input_dev->mutex);
	}

	return 0;
}
#endif
/* PM callbacks are compiled in only under CONFIG_PM_SLEEP (see #ifdef). */
static SIMPLE_DEV_PM_OPS(pm8xxx_kp_pm_ops,
			 pmic8xxx_kp_suspend, pmic8xxx_kp_resume);

static struct platform_driver pmic8xxx_kp_driver = {
	.probe		= pmic8xxx_kp_probe,
	.remove		= __devexit_p(pmic8xxx_kp_remove),
	.driver		= {
		.name = PM8XXX_KEYPAD_DEV_NAME,
		.owner = THIS_MODULE,
		.pm = &pm8xxx_kp_pm_ops,
	},
};
/*
 * module_platform_driver() expands to exactly the register/unregister
 * __init/__exit boilerplate that was previously open-coded here.
 */
module_platform_driver(pmic8xxx_kp_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PMIC8XXX keypad driver");
MODULE_VERSION("1.0");
MODULE_ALIAS("platform:pmic8xxx_keypad");
MODULE_AUTHOR("Trilok Soni <tsoni@codeaurora.org>");
| gpl-2.0 |
minipli/linux-grsec | arch/mips/ralink/rt288x.c | 281 | 3543 | /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Parts of this file are based on Ralink's 2.6.21 BSP
*
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
* Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt288x.h>
#include <asm/mach-ralink/pinmux.h>
#include "common.h"
/* Pinmux functions: FUNC(name, mux value, first pin, number of pins). */
static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) };
static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
static struct rt2880_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) };

/* Pin groups on RT2880, each gated by a bit in the GPIO mode register. */
static struct rt2880_pmx_group rt2880_pinmux_data_act[] = {
	GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C),
	GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI),
	GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0),
	GRP("jtag", jtag_func, 1, RT2880_GPIO_MODE_JTAG),
	GRP("mdio", mdio_func, 1, RT2880_GPIO_MODE_MDIO),
	GRP("sdram", sdram_func, 1, RT2880_GPIO_MODE_SDRAM),
	GRP("pci", pci_func, 1, RT2880_GPIO_MODE_PCI),
	{ 0 }
};
/*
 * Derive the CPU clock from the bootstrap bits in SYSTEM_CONFIG and
 * register the fixed-rate clocks for the on-SoC peripherals (most run
 * at cpu_rate / 2; the WMAC has a fixed 40 MHz reference).
 */
void __init ralink_clk_init(void)
{
	unsigned long cpu_rate, wmac_rate = 40000000;
	u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
	t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK);

	/*
	 * NOTE(review): cpu_rate is only assigned inside the switch;
	 * presumably SYSTEM_CONFIG_CPUCLK_MASK restricts t to these four
	 * values, otherwise cpu_rate would be used uninitialized below -
	 * TODO confirm against the mask definition.
	 */
	switch (t) {
	case SYSTEM_CONFIG_CPUCLK_250:
		cpu_rate = 250000000;
		break;
	case SYSTEM_CONFIG_CPUCLK_266:
		cpu_rate = 266666667;
		break;
	case SYSTEM_CONFIG_CPUCLK_280:
		cpu_rate = 280000000;
		break;
	case SYSTEM_CONFIG_CPUCLK_300:
		cpu_rate = 300000000;
		break;
	}

	ralink_clk_add("cpu", cpu_rate);
	ralink_clk_add("300100.timer", cpu_rate / 2);
	ralink_clk_add("300120.watchdog", cpu_rate / 2);
	ralink_clk_add("300500.uart", cpu_rate / 2);
	ralink_clk_add("300900.i2c", cpu_rate / 2);
	ralink_clk_add("300c00.uartlite", cpu_rate / 2);
	ralink_clk_add("400000.ethernet", cpu_rate / 2);
	ralink_clk_add("480000.wmac", wmac_rate);
}
/* Map the sysctl and memory-controller register blocks; fatal if missing. */
void __init ralink_of_remap(void)
{
	rt_sysc_membase = plat_of_remap_node("ralink,rt2880-sysc");
	rt_memc_membase = plat_of_remap_node("ralink,rt2880-memc");

	if (!rt_sysc_membase || !rt_memc_membase)
		panic("Failed to remap core resources");
}
/*
 * Identify the SoC from the chip name/ID registers and fill in the
 * soc_info used by the rest of the ralink platform code.
 */
void prom_soc_init(struct ralink_soc_info *soc_info)
{
	void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE);
	const char *name;
	u32 n0;
	u32 n1;
	u32 id;

	n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
	n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
	id = __raw_readl(sysc + SYSC_REG_CHIP_ID);

	if (n0 == RT2880_CHIP_NAME0 && n1 == RT2880_CHIP_NAME1) {
		/*
		 * NOTE(review): "r2880" looks like a typo for "rt2880";
		 * do not change it without auditing device trees and code
		 * that match this exact compatible string.
		 */
		soc_info->compatible = "ralink,r2880-soc";
		name = "RT2880";
	} else {
		panic("rt288x: unknown SoC, n0:%08x n1:%08x", n0, n1);
	}

	snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
		"Ralink %s id:%u rev:%u",
		name,
		(id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
		(id & CHIP_ID_REV_MASK));

	soc_info->mem_base = RT2880_SDRAM_BASE;
	soc_info->mem_size_min = RT2880_MEM_SIZE_MIN;
	soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;

	rt2880_pinmux_data = rt2880_pinmux_data_act;
	ralink_soc = RT2880_SOC;
}
| gpl-2.0 |
AppliedMicro/ENGLinuxLatest | drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 537 | 98743 | /*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#include "bna.h"
#include "bfi.h"
/* IB */
/*
 * Cache the interrupt coalescing timeout and precompute the doorbell
 * acknowledge value that embeds it.
 */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}
/* RXF */
/* Mark every VLAN filter block and the strip setting as needing replay. */
#define bna_rxf_vlan_cfg_soft_reset(rxf)				\
do {									\
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
	(rxf)->vlan_strip_pending = true;				\
} while (0)

/* If RSS is enabled, mark RIT, config and status as needing replay. */
#define bna_rxf_rss_cfg_soft_reset(rxf)					\
do {									\
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
				BNA_RSS_F_CFG_PENDING |			\
				BNA_RSS_F_STATUS_PENDING);		\
} while (0)
/* Forward declarations for the RXF configuration apply/reset helpers. */
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);

/* RXF state machine states (entry function + event handler per state). */
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
			enum bna_rxf_event);
static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
call_rxf_stop_cbfn(rxf);
}
/*
 * "stopped" state event handler.  START moves to paused or cfg_wait
 * depending on the pause flag; other events are acknowledged in place.
 */
static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		if (rxf->flags & BNA_RXF_F_PAUSED) {
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
			call_rxf_start_cbfn(rxf);
		} else
			bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_STOP:
		call_rxf_stop_cbfn(rxf);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_pause_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		call_rxf_resume_cbfn(rxf);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
call_rxf_pause_cbfn(rxf);
}
/* "paused" state event handler: RESUME re-enters configuration. */
static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		/* config is recorded; applied on resume */
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Entering "cfg_wait": kick off the next pending config command, if any. */
static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}
/*
 * "cfg_wait": a firmware config command is outstanding.  Each FW_RESP
 * posts the next pending update until none remain.
 */
static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		/* must still drain the outstanding response */
		bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
		break;

	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_start_cbfn(rxf);
		call_rxf_cam_fltr_cbfn(rxf);
		call_rxf_resume_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		/* No-op */
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_start_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_cfg_apply(rxf)) {
			/* No more pending config updates */
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
call_rxf_start_cbfn(rxf);
call_rxf_cam_fltr_cbfn(rxf);
call_rxf_resume_cbfn(rxf);
}
/* "started" state event handler: CONFIG re-enters cfg_wait. */
static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		/* fltr_clear returns non-zero when a FW command was posted */
		if (!bna_rxf_fltr_clear(rxf))
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		else
			bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
}
/*
 * "fltr_clr_wait": CAM filter clear commands are in flight ahead of a
 * pause.  Each FW_RESP posts the next clear until none remain.
 */
static void
bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_pause_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_fltr_clear(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}
/*
 * "last_resp_wait": a stop arrived while a config command was in flight;
 * wait for its response (or a failure), then reset and stop.
 */
static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_FW_RESP:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
/* Post a unicast MAC add/delete/set request (@req_type) to the firmware. */
static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bfi_enet_h2i_msgs req_type)
{
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* Post a multicast MAC add request; the handle arrives in the response. */
static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* Post a multicast MAC delete request for a previously returned @handle. */
static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* Enable or disable the multicast (allmulti) filter in firmware. */
static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* Enable or disable promiscuous mode in firmware. */
static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/*
 * Program one block of the VLAN filter table into firmware.  When VLAN
 * filtering is disabled, every VLAN in the block is allowed (all ones).
 */
static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
	int i;
	int j;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
	req->block_idx = block_idx;
	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
		/* j indexes the 32-bit words of the full filter table */
		j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
	}
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* Push the current VLAN-strip setting to firmware. */
static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->vlan_strip_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* Send the RSS indirection table (RIT) to firmware. */
static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
	req->size = htons(rxf->rit_size);
	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rit_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* Send the RSS hash type, mask and Toeplitz key to firmware. */
static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
	int i;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
	req->cfg.type = rxf->rss_cfg.hash_type;
	req->cfg.mask = rxf->rss_cfg.hash_mask;
	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
		req->cfg.key[i] =
			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* Push the current RSS enable/disable status to firmware. */
static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->rss_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/*
 * Look up a multicast MAC already added to the CAM.  It is searched in the
 * active list first, then in the pending-delete list.  Returns NULL when
 * the address is in neither list.
 */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
	struct list_head *pos;

	list_for_each(pos, &rxf->mcast_active_q) {
		struct bna_mac *entry = (struct bna_mac *)pos;

		if (BNA_MAC_IS_EQUAL(&entry->addr, mac_addr))
			return entry;
	}

	list_for_each(pos, &rxf->mcast_pending_del_q) {
		struct bna_mac *entry = (struct bna_mac *)pos;

		if (BNA_MAC_IS_EQUAL(&entry->addr, mac_addr))
			return entry;
	}

	return NULL;
}
/* Find the mcam handle object tracking firmware handle @handle, or NULL. */
static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
	struct list_head *pos;

	list_for_each(pos, &rxf->mcast_handle_q) {
		struct bna_mcam_handle *entry =
			(struct bna_mcam_handle *)pos;

		if (entry->handle == handle)
			return entry;
	}

	return NULL;
}
/*
 * Attach the firmware @handle to the multicast MAC @mac_addr, creating
 * (or refcounting) the shared mcam handle object.
 *
 * NOTE(review): neither bna_rxf_mcmac_get() nor
 * bna_mcam_mod_handle_get() results are NULL-checked before dereference;
 * presumably callers guarantee the MAC exists and a handle is available -
 * TODO confirm.
 */
static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
	struct bna_mac *mcmac;
	struct bna_mcam_handle *mchandle;

	mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
	mchandle = bna_rxf_mchandle_get(rxf, handle);
	if (mchandle == NULL) {
		mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
		mchandle->handle = handle;
		mchandle->refcnt = 0;
		list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
	}
	mchandle->refcnt++;
	mcmac->handle = mchandle;
}
/*
 * Drop @mac's reference on its firmware handle.  When the refcount hits
 * zero the handle is released; under BNA_HARD_CLEANUP a firmware delete
 * is also posted.  Returns 1 if a firmware command was posted, else 0.
 */
static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bna_cleanup_type cleanup)
{
	struct bna_mcam_handle *mchandle;
	int ret = 0;

	mchandle = mac->handle;
	if (mchandle == NULL)
		return ret;

	mchandle->refcnt--;
	if (mchandle->refcnt == 0) {
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_del_req(rxf, mchandle->handle);
			ret = 1;
		}
		list_del(&mchandle->qe);
		bfa_q_qe_init(&mchandle->qe);
		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
	}
	mac->handle = NULL;

	return ret;
}
/*
 * Apply at most one pending multicast change: deletions first (to keep
 * the CAM entry count bounded), then one addition.  Returns 1 if a
 * firmware command was posted, 0 when nothing is pending.
 */
static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;
	int ret;

	/* First delete multicast entries to maintain the count */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
		if (ret)
			return ret;
	}

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		bna_bfi_mcast_add_req(rxf, mac);
		return 1;
	}

	return 0;
}
/*
 * If any VLAN filter block is pending, program the lowest-numbered one
 * and clear its pending bit.  Returns 1 when a firmware command was
 * posted, 0 when nothing is pending.
 */
static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
	int block_idx;

	if (!rxf->vlan_pending_bitmask)
		return 0;

	/* index of the lowest set bit == first dirty block */
	for (block_idx = 0;
	     !(rxf->vlan_pending_bitmask & (1 << block_idx));
	     block_idx++)
		;

	rxf->vlan_pending_bitmask &= ~(1 << block_idx);
	bna_bfi_rx_vlan_filter_set(rxf, block_idx);

	return 1;
}
/*
 * Reset multicast config: discard pending deletions and move active
 * entries back to the pending-add list so they are replayed later.
 * Returns 1 if a firmware command was posted (hard cleanup), else 0.
 */
static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;
	int ret;

	/* Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
		if (ret)
			return ret;
	}

	/* Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
		mac = (struct bna_mac *)qe;
		if (bna_rxf_mcast_del(rxf, mac, cleanup))
			return 1;
	}

	return 0;
}
/*
 * Apply at most one pending RSS update, in fixed order: indirection
 * table, then hash config, then enable status.  Returns 1 if a firmware
 * command was posted, else 0.
 */
static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->rss_pending) {
		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
			bna_bfi_rit_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
			bna_bfi_rss_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
			bna_bfi_rss_enable(rxf);
			return 1;
		}
	}

	return 0;
}
/*
 * Push at most one pending configuration update to the firmware, trying
 * each category in fixed priority order.  Returns 1 when a command was
 * posted (more work may remain), 0 when fully applied.
 */
static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
	/* each helper returns non-zero after posting one FW command */
	return (bna_rxf_ucast_cfg_apply(rxf) ||
		bna_rxf_mcast_cfg_apply(rxf) ||
		bna_rxf_promisc_cfg_apply(rxf) ||
		bna_rxf_allmulti_cfg_apply(rxf) ||
		bna_rxf_vlan_cfg_apply(rxf) ||
		bna_rxf_vlan_strip_cfg_apply(rxf) ||
		bna_rxf_rss_cfg_apply(rxf)) ? 1 : 0;
}
/*
 * Hard-clear CAM filter state ahead of a pause, one firmware command at
 * a time.  Returns 1 when a command was posted, 0 when nothing remains.
 * (Only software reset.)
 */
static int
bna_rxf_fltr_clear(struct bna_rxf *rxf)
{
	return (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP) ||
		bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP) ||
		bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP) ||
		bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP)) ? 1 : 0;
}
/*
 * Soft-reset all RXF config state so it will be replayed to the firmware
 * on the next start; no firmware commands are posted.
 */
static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
	bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_vlan_cfg_soft_reset(rxf);
	bna_rxf_rss_cfg_soft_reset(rxf);
}
/*
 * Fill the RSS indirection table with the CQ ids of the Rx paths, one
 * entry per path, in rxp_q order.
 */
static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
	struct bna_rx *rx = rxf->rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	int offset = 0;

	rxf->rit_size = rit_size;
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxf->rit[offset] = rxp->cq.ccb->id;
		offset++;
	}
}
/* Generic firmware config response: advance the RXF state machine. */
void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}
/*
 * Unicast-set response: on firmware error drop the cached "active set"
 * flag so the address will be retried, then advance the state machine.
 */
void
bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	if (rsp->error) {
		/* Clear ucast from cache */
		rxf->ucast_active_set = 0;
	}

	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}
/*
 * Firmware response to a mcast add request: record the firmware handle
 * for the MAC that was just added, then advance the FSM.
 */
void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;
	struct bfi_enet_mcast_add_rsp *rsp =
		container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);

	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
			ntohs(rsp->handle));
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}
/*
 * Initialize the receive function (filters, RIT, RSS, VLAN state) of a
 * newly created Rx from the caller-supplied configuration, and place
 * its FSM in the stopped state.
 */
static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config,
		struct bna_res_info *res_info)
{
	rxf->rx = rx;

	/* Unicast/multicast CAM bookkeeping starts empty */
	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_pending_mac = NULL;
	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);
	INIT_LIST_HEAD(&rxf->mcast_handle_q);

	if (q_config->paused)
		rxf->flags |= BNA_RXF_F_PAUSED;

	/* RIT memory was reserved via the resource info block */
	rxf->rit = (u8 *)
		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
	bna_rit_init(rxf, q_config->num_paths);

	rxf->rss_status = q_config->rss_status;
	if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->rss_cfg = q_config->rss_config;
		/* Schedule full RSS programming on first cfg apply */
		rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
		rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
		rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
	rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

	rxf->vlan_strip_status = q_config->vlan_strip_status;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}
/*
 * Tear down an RxF: return all pending CAM entries to their free
 * pools, release any module-wide promisc/default-mode claim held by
 * this Rx, and clear remaining pending state.
 */
static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna_mac *mac;

	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
	}

	if (rxf->ucast_pending_mac) {
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
			rxf->ucast_pending_mac);
		rxf->ucast_pending_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
	}

	rxf->rxmode_pending = 0;
	rxf->rxmode_pending_bitmask = 0;

	/* Release the device-wide promisc/default-mode ownership if held */
	if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
		rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
	if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
		rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

	rxf->rss_pending = 0;
	rxf->vlan_strip_pending = false;
	rxf->flags = 0;
	rxf->rx = NULL;
}
/* Forward RxF-started completion into the owning Rx FSM. */
static void
bna_rx_cb_rxf_started(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}

/* Start the receive function; completion is reported via the Rx FSM. */
static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_START);
}
/* Forward RxF-stopped completion into the owning Rx FSM. */
static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}

/* Stop the receive function; completion is reported via the Rx FSM. */
static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}
/* Propagate an IOC/enet failure into the RxF state machine. */
static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}
/*
 * Set the default unicast MAC address of @rx. The address is staged in
 * ucast_pending_mac and applied by the RxF FSM; @cbfn is invoked when
 * the configuration completes. Returns BNA_CB_UCAST_CAM_FULL if no CAM
 * entry could be allocated.
 */
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;

	/* Allocate a staging entry only on first use; later calls reuse it */
	if (rxf->ucast_pending_mac == NULL) {
		rxf->ucast_pending_mac =
			bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
		if (rxf->ucast_pending_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
	}

	memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
	rxf->ucast_pending_set = 1;
	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}
/*
 * Add one multicast MAC address to @rx. Duplicates (already active or
 * already queued) succeed immediately with the callback invoked inline.
 * Returns BNA_CB_MCAST_LIST_FULL if no CAM entry is available.
 */
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct bna_mac *mac;

	/* Check if already added or pending addition */
	if (bna_mac_find(&rxf->mcast_active_q, addr) ||
		bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
		if (cbfn)
			cbfn(rx->bna->bnad, rx);
		return BNA_CB_SUCCESS;
	}

	mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}
/*
 * Replace the whole unicast address list of @rx with @uclist (@count
 * entries, ETH_ALEN bytes each). Previously active entries are staged
 * for deletion; the new list is staged for addition; the FSM applies
 * both. All-or-nothing: on allocation failure nothing is queued and
 * BNA_CB_UCAST_CAM_FULL is returned.
 */
enum bna_cb_status
bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
		     void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		/* NOTE(review): del_q get is not NULL-checked - presumably
		 * del_q is provisioned to cover all active entries; verify.
		 */
		del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = uclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);
		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
	}

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	/* Roll back the partially built list before reporting failure */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	return BNA_CB_UCAST_CAM_FULL;
}
/*
 * Replace the whole multicast address list of @rx with @mclist (@count
 * entries). Mirrors bna_rx_ucast_listset() but also detaches the
 * firmware handle (mac->handle = NULL) from entries being deleted.
 * All-or-nothing: on allocation failure nothing is queued and
 * BNA_CB_MCAST_LIST_FULL is returned.
 */
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
		     void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
		/* del_mac inherits the fw handle via the memcpy below */
		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);
		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	/* Roll back the partially built list before reporting failure */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;
}
/*
 * Remove every multicast address from @rx: drop all entries still
 * waiting to be added, and schedule every active entry for firmware
 * deletion. @cbfn is invoked when hardware has been updated, or
 * immediately when nothing was active.
 */
void
bna_rx_mcast_delall(struct bna_rx *rx,
		    void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac, *del_mac;
	int need_hw_config = 0;

	/* Purge all entries from pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
	}

	/* Schedule all entries in active_q for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
		need_hw_config = 1;
	}

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
		return;
	}

	/* Nothing was active: complete synchronously */
	if (cbfn)
		(*cbfn)(rx->bna->bnad, rx);
}
/*
 * Add @vlan_id to the VLAN filter table of @rx. When hardware VLAN
 * filtering is enabled, the 512-id block containing the id is marked
 * dirty and the RxF FSM is kicked to push it to the device.
 */
void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int word = vlan_id >> BFI_VLAN_WORD_SHIFT;
	int mask = 1 << (vlan_id & BFI_VLAN_WORD_MASK);

	rxf->vlan_filter_table[word] |= mask;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |=
			1 << (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
/*
 * Remove @vlan_id from the VLAN filter table of @rx. When hardware
 * VLAN filtering is enabled, the 512-id block containing the id is
 * marked dirty and the RxF FSM is kicked to push it to the device.
 */
void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int word = vlan_id >> BFI_VLAN_WORD_SHIFT;
	int mask = 1 << (vlan_id & BFI_VLAN_WORD_MASK);

	rxf->vlan_filter_table[word] &= ~mask;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |=
			1 << (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
/*
 * Push at most one pending unicast CAM change to firmware, in order:
 * deletions, then the default-MAC set, then additions. Returns 1 if a
 * command was posted, 0 if the ucast config is fully applied.
 */
static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
		bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
		return 1;
	}

	/* Set default unicast MAC */
	if (rxf->ucast_pending_set) {
		rxf->ucast_pending_set = 0;
		memcpy(rxf->ucast_active_mac.addr,
			rxf->ucast_pending_mac->addr, ETH_ALEN);
		rxf->ucast_active_set = 1;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
			BFI_ENET_H2I_MAC_UCAST_SET_REQ);
		return 1;
	}

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
		return 1;
	}

	return 0;
}
/*
 * Reset the unicast configuration. Soft cleanup just recycles pending
 * deletes and re-queues active entries for later re-add; hard cleanup
 * additionally posts one firmware delete/clear per call. Returns 1 if
 * a firmware command was posted, 0 when the reset is complete.
 */
static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		if (cleanup == BNA_SOFT_CLEANUP)
			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
					    mac);
		else {
			bna_bfi_ucast_req(rxf, mac,
					  BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
					    mac);
			return 1;
		}
	}

	/* Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
		if (cleanup == BNA_HARD_CLEANUP) {
			mac = (struct bna_mac *)qe;
			bna_bfi_ucast_req(rxf, mac,
					  BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			return 1;
		}
	}

	/* The default MAC moves back to pending; hard cleanup clears it */
	if (rxf->ucast_active_set) {
		rxf->ucast_pending_set = 1;
		rxf->ucast_active_set = 0;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
					  BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
			return 1;
		}
	}

	return 0;
}
/*
 * Apply a pending promiscuous-mode change: move it from pending to
 * active and post the enable/disable request to firmware. Returns 1
 * if a command was posted, 0 if nothing was pending.
 */
static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
			      rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
				      rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		/* Device-wide promisc claim released on disable */
		bna->promisc_rid = BFI_INVALID_RID;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}
/*
 * Reset promiscuous-mode state: cancel a pending disable, and move an
 * active enable back to pending so it is re-applied on restart. Hard
 * cleanup also posts the disable to firmware. Returns 1 if a command
 * was posted, 0 otherwise.
 */
static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna *bna = rxf->rx->bna;

	/* Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	/* Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	return 0;
}
/*
 * Apply a pending allmulti change. Note the inverted firmware
 * semantics: enabling allmulti DISABLES the mcast filter, and
 * disabling allmulti re-ENABLES it. Returns 1 if a command was
 * posted, 0 if nothing was pending.
 */
static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
				       rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	}

	return 0;
}
/*
 * Reset allmulti state: cancel a pending disable, and move an active
 * enable back to pending for re-apply on restart. Hard cleanup also
 * re-enables the mcast filter in firmware. Returns 1 if a command was
 * posted, 0 otherwise.
 */
static int
bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	/* Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	/* Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	return 0;
}
/*
 * Request promiscuous mode. Returns 1 if a new pending enable was
 * scheduled (caller should kick the FSM), 0 if it was already enabled,
 * already pending, or merely cancelled a pending disable.
 */
static int
bna_rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_enable(rxf->rxmode_pending,
			      rxf->rxmode_pending_bitmask) ||
	    (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_promisc_disable(rxf->rxmode_pending,
				      rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		promisc_enable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask);
		/* Only one Rx may own promisc mode device-wide */
		bna->promisc_rid = rxf->rx->rid;
		ret = 1;
	}

	return ret;
}
/*
 * Request promiscuous mode off. Returns 1 if a new pending disable
 * was scheduled, 0 if it was already off, already pending, or merely
 * cancelled a pending enable.
 */
static int
bna_rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_disable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask) ||
	    (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_promisc_enable(rxf->rxmode_pending,
				     rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		promisc_inactive(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		bna->promisc_rid = BFI_INVALID_RID;
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Schedule disable */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
/*
 * Request allmulti mode. Returns 1 if a new pending enable was
 * scheduled, 0 if already enabled/pending or if it merely cancelled a
 * pending disable.
 */
static int
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_enable(rxf->rxmode_pending,
			       rxf->rxmode_pending_bitmask) ||
	    (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_allmulti_disable(rxf->rxmode_pending,
				       rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
/*
 * Request allmulti mode off. Returns 1 if a new pending disable was
 * scheduled, 0 if already off/pending or if it merely cancelled a
 * pending enable.
 */
static int
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
	    (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_allmulti_enable(rxf->rxmode_pending,
				      rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		allmulti_inactive(rxf->rxmode_pending,
				  rxf->rxmode_pending_bitmask);
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* Schedule disable */
		allmulti_disable(rxf->rxmode_pending,
				 rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
/*
 * Push a pending VLAN-strip setting to firmware. Returns 1 if a
 * command was posted, 0 if nothing was pending.
 */
static int
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->vlan_strip_pending) {
		rxf->vlan_strip_pending = false;
		bna_bfi_vlan_strip_enable(rxf);
		return 1;
	}
	return 0;
}
/* RX */

/* Number of RxQs needed: one per path for SINGLE, two otherwise */
#define BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

/* Round a byte size up to a whole number of pages */
#define	SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

/* Invoke and clear the one-shot Rx stop-complete callback */
#define call_rx_stop_cbfn(rx)						\
do {									\
	if ((rx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_rx *);			\
		void *cbarg;						\
		cbfn = (rx)->stop_cbfn;					\
		cbarg = (rx)->stop_cbarg;				\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
		cbfn(cbarg, rx);					\
	}								\
} while (0)

/* Notify bnad that the Rx datapath is stalled (persistent callback) */
#define call_rx_stall_cbfn(rx)						\
do {									\
	if ((rx)->rx_stall_cbfn)					\
		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));		\
} while (0)

/* Fill a BFI queue descriptor from a queue page table (QPT) */
#define bfi_enet_datapath_q_init(bfi_q, bna_qpt)			\
do {									\
	struct bna_dma_addr cur_q_addr =				\
		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));	\
	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;	\
	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;	\
	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;		\
	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;		\
	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count);		\
	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
} while (0)
/* Forward declarations for helpers used by the Rx state machine */
static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);

/* Rx state machine: declares <state>_entry() and <state>() handlers */
bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
	struct bna_rx, enum bna_rx_event);
/* stopped: idle state; completes any pending stop callback on entry */
static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
	call_rx_stop_cbfn(rx);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;
	case RX_E_STOP:
		/* Already stopped; just acknowledge the caller */
		call_rx_stop_cbfn(rx);
		break;
	case RX_E_FAIL:
		/* no-op */
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}
/* start_wait: enet-start command posted to firmware on entry */
static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
	bna_bfi_rx_enet_start(rx);
}

/* stop_wait: waiting for firmware to acknowledge enet stop */
static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}
static void
bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		/* Firmware side is down; hand datapath cleanup to bnad */
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;
	case RX_E_STARTED:
		/* Late start completion crossed our stop; stop again */
		bna_rx_enet_stop(rx);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}
/* start_wait: waiting for firmware to acknowledge enet start */
static void bna_rx_sm_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
		break;
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;
	case RX_E_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}
/* rxf_start_wait: datapath is up; post buffers then start the RxF */
static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	rx->rx_post_cbfn(rx->bna->bnad, rx);
	bna_rxf_start(&rx->rxf);
}

/* rxf_stop_wait: waiting for the RxF to finish stopping */
static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}
static void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;
	case RX_E_RXF_STARTED:
		/* RxF start completed after stop was requested; stop it now */
		bna_rxf_stop(&rx->rxf);
		break;
	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		call_rx_stall_cbfn(rx);
		bna_rx_enet_stop(rx);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}
/* start_stop_wait: stop requested while the start ack is outstanding */
static void
bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;
	case RX_E_STARTED:
		/* Start completed; now issue the deferred stop */
		bna_rx_enet_stop(rx);
		break;
	default:
		bfa_sm_fault(event);
	}
}
/* started: enable interrupt blocks on all paths and notify ethport */
static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;
	int is_regular = (rx->type == BNA_RX_T_REGULAR);

	/* Start IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
	}

	bna_ethport_cb_rx_started(&rx->bna->ethport);
}
static void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_stop(&rx->rxf);
		break;
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}
static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;
	case RX_E_RXF_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_started);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}
/* cleanup_wait: waiting for bnad to finish datapath cleanup */
static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;
	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}
/* failed: parked after an IOC failure until restart, stop, or cleanup */
static void
bna_rx_sm_failed_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		/* Let the pending datapath cleanup finish before restarting */
		bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
		break;
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;
	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;
	case RX_E_FAIL:
	case RX_E_RXF_STARTED:
	case RX_E_RXF_STOPPED:
		/* Already failed; ignore */
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}
/* quiesce_wait: restart requested; wait for cleanup before starting */
static void
bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		break;
	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;
	default:
		bfa_sm_fault(event);
		break;
	}
}
/*
 * Build and post the BFI_ENET_H2I_RX_CFG_SET_REQ that creates the Rx
 * datapath in firmware: one queue-set (small/large RxQ + CQ + IB) per
 * path, followed by interrupt-block and HDS/VLAN-strip settings.
 */
static void
bna_bfi_rx_enet_start(struct bna_rx *rx)
{
	struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));

	cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
	cfg_req->num_queue_sets = rx->num_paths;
	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;

		GET_RXQS(rxp, q0, q1);
		switch (rxp->type) {
		case BNA_RXP_SLR:
		case BNA_RXP_HDS:
			/* Small RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
						&q1->qpt);
			cfg_req->q_cfg[i].qs.rx_buffer_size =
				htons((u16)q1->buffer_size);
			/* Fall through */
		case BNA_RXP_SINGLE:
			/* Large/Single RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
						&q0->qpt);
			if (q0->multi_buffer)
				/* multi-buffer is enabled by allocating
				 * a new rx with new set of resources.
				 * q0->buffer_size should be initialized to
				 * fragment size.
				 */
				cfg_req->rx_cfg.multi_buffer =
					BNA_STATUS_T_ENABLED;
			else
				q0->buffer_size =
					bna_enet_mtu_get(&rx->bna->enet);
			cfg_req->q_cfg[i].ql.rx_buffer_size =
				htons((u16)q0->buffer_size);
			break;
		default:
			BUG_ON(1);
		}

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
					&rxp->cq.qpt);

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			rxp->cq.ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			rxp->cq.ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)rxp->cq.ib.intr_vector);
	}

	/* IB settings taken from the last path's CQ IB (uniform per Rx) */
	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED :
				BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)rxp->cq.ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)rxp->cq.ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;

	switch (rxp->type) {
	case BNA_RXP_SLR:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
		break;

	case BNA_RXP_HDS:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
		cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
		cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
		/* NOTE(review): max_header_size is set from forced_offset,
		 * not from a dedicated max-header field - confirm this is
		 * intentional for the HDS firmware interface.
		 */
		cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
		break;

	case BNA_RXP_SINGLE:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
		break;

	default:
		BUG_ON(1);
	}
	cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;

	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}
/* Post BFI_ENET_H2I_RX_CFG_CLR_REQ to tear down the Rx in firmware. */
static void
bna_bfi_rx_enet_stop(struct bna_rx *rx)
{
	struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}
/* Stop all interrupt blocks, then tell firmware to tear down the Rx. */
static void
bna_rx_enet_stop(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	/* Stop IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_stop(rx->bna, &rxp->cq.ib);
	}

	bna_bfi_rx_enet_stop(rx);
}
/*
 * Check whether the Rx module has enough free Rx, path, and queue
 * objects to build an Rx with the given configuration. Single-queue
 * paths need one RxQ per path; all other path types need two.
 * Returns 1 if the resources are available, 0 otherwise.
 */
static int
bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
{
	int rxq_needed;

	if (!rx_mod->rx_free_count || !rx_mod->rxp_free_count ||
	    !rx_mod->rxq_free_count)
		return 0;

	rxq_needed = (rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
		rx_cfg->num_paths : 2 * rx_cfg->num_paths;

	if (rx_mod->rxp_free_count < rx_cfg->num_paths)
		return 0;
	if (rx_mod->rxq_free_count < rxq_needed)
		return 0;

	return 1;
}
/* Take one RxQ from the module free pool. Caller must have verified
 * availability (bna_rx_res_check); the dequeue is not NULL-checked. */
static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
	struct bna_rxq *rxq = NULL;
	struct list_head	*qe = NULL;

	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
	rx_mod->rxq_free_count--;
	rxq = (struct bna_rxq *)qe;
	bfa_q_qe_init(&rxq->qe);

	return rxq;
}

/* Return an RxQ to the module free pool. */
static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
	bfa_q_qe_init(&rxq->qe);
	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
	rx_mod->rxq_free_count++;
}

/* Take one Rx path from the module free pool (not NULL-checked). */
static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
	struct list_head	*qe = NULL;
	struct bna_rxp *rxp = NULL;

	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
	rx_mod->rxp_free_count--;
	rxp = (struct bna_rxp *)qe;
	bfa_q_qe_init(&rxp->qe);

	return rxp;
}

/* Return an Rx path to the module free pool. */
static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
	bfa_q_qe_init(&rxp->qe);
	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
	rx_mod->rxp_free_count++;
}
/*
 * Take an Rx object from the free pool and move it to the active list.
 * Regular Rx comes off the head and loopback off the tail, so the two
 * types are handed out from opposite ends of the rid-ordered pool.
 */
static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct list_head *node = NULL;
	struct bna_rx *rx;

	if (type == BNA_RX_T_REGULAR)
		bfa_q_deq(&rx_mod->rx_free_q, &node);
	else
		bfa_q_deq_tail(&rx_mod->rx_free_q, &node);
	rx_mod->rx_free_count--;

	rx = (struct bna_rx *)node;
	bfa_q_qe_init(&rx->qe);
	list_add_tail(&rx->qe, &rx_mod->rx_active_q);
	rx->type = type;

	return rx;
}
/*
 * Return an Rx to the free pool, inserting it in ascending rid order
 * so that bna_rx_get() keeps handing out the lowest rid first.
 */
static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
	struct list_head *prev_qe = NULL;
	struct list_head *qe;

	bfa_q_qe_init(&rx->qe);

	/* Find the last entry with a smaller rid */
	list_for_each(qe, &rx_mod->rx_free_q) {
		if (((struct bna_rx *)qe)->rid < rx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
	} else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
		/* This is the last entry */
		list_add_tail(&rx->qe, &rx_mod->rx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&rx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &rx->qe;
		bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
	}

	rx_mod->rx_free_count++;
}
/*
 * Wire the one or two RxQs into the union member matching the path
 * type. @q1 is unused for single-queue paths; unknown types are a
 * silent no-op.
 */
static void
bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
		struct bna_rxq *q1)
{
	if (rxp->type == BNA_RXP_SINGLE) {
		rxp->rxq.single.only = q0;
		rxp->rxq.single.reserved = NULL;
	} else if (rxp->type == BNA_RXP_SLR) {
		rxp->rxq.slr.large = q0;
		rxp->rxq.slr.small = q1;
	} else if (rxp->type == BNA_RXP_HDS) {
		rxp->rxq.hds.data = q0;
		rxp->rxq.hds.hdr = q1;
	}
}
/*
 * Set up the queue page table (QPT) of an RxQ: record the hardware and
 * kernel-virtual QPT pointers, then fill one DMA address entry (and
 * the matching software shadow pointer) per page of queue memory.
 */
static void
bna_rxq_qpt_setup(struct bna_rxq *rxq,
		struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int	i;

	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
	rxq->qpt.page_count = page_count;
	rxq->qpt.page_size = page_size;

	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
	rxq->rcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxq->qpt.page_count; i++) {
		rxq->rcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
/*
 * Set up the completion queue's page table for an Rx path; identical
 * in structure to bna_rxq_qpt_setup() but targets rxp->cq.
 */
static void
bna_rxp_cqpt_setup(struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int	i;

	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
	rxp->cq.qpt.page_count = page_count;
	rxp->cq.qpt.page_size = page_size;

	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
	rxp->cq.ccb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
		rxp->cq.ccb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
{
	/* One more rx has finished stopping; drop the wait counter. */
	struct bna_rx_mod *rx_mod = arg;

	bfa_wc_down(&rx_mod->rx_stop_wc);
}
static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
	struct bna_rx_mod *rx_mod = arg;

	/* Every rx has stopped: notify the enet layer exactly once. */
	if (rx_mod->stop_cbfn != NULL)
		rx_mod->stop_cbfn(&rx_mod->bna->enet);
	rx_mod->stop_cbfn = NULL;
}
static void
bna_rx_start(struct bna_rx *rx)
{
	/* Record that the enet is up; only kick the FSM if rx is enabled. */
	rx->rx_flags |= BNA_RX_F_ENET_STARTED;
	if (!(rx->rx_flags & BNA_RX_F_ENABLED))
		return;
	bfa_fsm_send_event(rx, RX_E_START);
}
static void
bna_rx_stop(struct bna_rx *rx)
{
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;

	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped) {
		/* Already stopped: report completion synchronously. */
		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
	} else {
		/* Arrange for the FSM to call us back once it stops. */
		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
		rx->stop_cbarg = &rx->bna->rx_mod;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}
static void
bna_rx_fail(struct bna_rx *rx)
{
	/*
	 * The underlying enet has failed: clear the started flag and push
	 * the rx state machine down its failure path.
	 */
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	bfa_fsm_send_event(rx, RX_E_FAIL);
}
void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct list_head *ent;
	struct bna_rx *rx;

	rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
	if (type == BNA_RX_T_LOOPBACK)
		rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;

	/* Start every active rx of the requested type. */
	list_for_each(ent, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)ent;
		if (rx->type == type)
			bna_rx_start(rx);
	}
}
void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct list_head *ent;
	struct bna_rx *rx;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;

	/*
	 * A wait counter ensures the "all stopped" callback only fires
	 * after every matching rx has completed its stop sequence.
	 */
	bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);

	list_for_each(ent, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)ent;
		if (rx->type != type)
			continue;
		bfa_wc_up(&rx_mod->rx_stop_wc);
		bna_rx_stop(rx);
	}

	bfa_wc_wait(&rx_mod->rx_stop_wc);
}
void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
	struct list_head *ent;
	struct bna_rx *rx;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	/* Propagate the failure to every active rx, regardless of type. */
	list_for_each(ent, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)ent;
		bna_rx_fail(rx);
	}
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
			struct bna_res_info *res_info)
{
	int i;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *rxq;

	rx_mod->bna = bna;
	rx_mod->flags = 0;

	/* Backing arrays for rx / rx-path / rxq objects come from res_info. */
	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxq = (struct bna_rxq *)
		res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	/* Initialize the queues */
	INIT_LIST_HEAD(&rx_mod->rx_free_q);
	rx_mod->rx_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
	rx_mod->rxq_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
	rx_mod->rxp_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rx_active_q);

	/* Seed the rx free list; rid is simply the array index. */
	for (i = 0; i < bna->ioceth.attr.num_rxp; i++) {
		rx = &rx_mod->rx[i];
		bfa_q_qe_init(&rx->qe);
		INIT_LIST_HEAD(&rx->rxp_q);
		rx->bna = NULL;
		rx->rid = i;
		rx->stop_cbfn = NULL;
		rx->stop_cbarg = NULL;
		list_add_tail(&rx->qe, &rx_mod->rx_free_q);
		rx_mod->rx_free_count++;
	}

	/* Seed the rx-path free list. */
	for (i = 0; i < bna->ioceth.attr.num_rxp; i++) {
		rxp = &rx_mod->rxp[i];
		bfa_q_qe_init(&rxp->qe);
		list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
		rx_mod->rxp_free_count++;
	}

	/* Seed the rxq free list: up to two queues per path. */
	for (i = 0; i < (bna->ioceth.attr.num_rxp * 2); i++) {
		rxq = &rx_mod->rxq[i];
		bfa_q_qe_init(&rxq->qe);
		list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
		rx_mod->rxq_free_count++;
	}
}
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
	/*
	 * Tear down the rx module. The previous version walked rx_free_q,
	 * rxp_free_q and rxq_free_q purely to count their entries and then
	 * discarded the counts - leftover debug code with no side effects -
	 * so those traversals are removed. All object memory is owned by
	 * res_info (see bna_rx_mod_init()), so nothing is freed here; we
	 * only sever the back-pointer to the bna instance.
	 */
	rx_mod->bna = NULL;
}
/*
 * Handle the firmware's response to an rx-config (enet start) request:
 * copy the response out of the message queue, program the CQ/RxQ doorbell
 * addresses and hardware queue ids it carries, reset all producer/consumer
 * indexes, and advance the rx state machine.
 */
void
bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_rx_cfg_rsp));
	rx->hw_id = cfg_rsp->hw_id;

	/* One q_handles[] entry per rx path, walked in rxp_q order. */
	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;
		GET_RXQS(rxp, q0, q1);

		/* Setup doorbells: offsets are relative to the PCI BAR. */
		rxp->cq.ccb->i_dbell->doorbell_addr =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
		q0->rcb->q_dbell =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].ql_dbell);
		q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
		/* q1 exists only for SLR/HDS path types. */
		if (q1) {
			q1->rcb->q_dbell =
				rx->bna->pcidev.pci_bar_kva
				+ ntohl(cfg_rsp->q_handles[i].qs_dbell);
			q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
		}

		/* Initialize producer/consumer indexes */
		(*rxp->cq.ccb->hw_producer_index) = 0;
		rxp->cq.ccb->producer_index = 0;
		q0->rcb->producer_index = q0->rcb->consumer_index = 0;
		if (q1)
			q1->rcb->producer_index = q1->rcb->consumer_index = 0;
	}

	bfa_fsm_send_event(rx, RX_E_STARTED);
}
/* Firmware acknowledged the rx stop request; advance the state machine. */
void
bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rx, RX_E_STOPPED);
}
void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
u32 cq_size, hq_size, dq_size;
u32 cpage_count, hpage_count, dpage_count;
struct bna_mem_info *mem_info;
u32 cq_depth;
u32 hq_depth;
u32 dq_depth;
dq_depth = q_cfg->q0_depth;
hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
cq_depth = dq_depth + hq_depth;
BNA_TO_POWER_OF_2_HIGH(cq_depth);
cq_size = cq_depth * BFI_CQ_WI_SIZE;
cq_size = ALIGN(cq_size, PAGE_SIZE);
cpage_count = SIZE_TO_PAGES(cq_size);
BNA_TO_POWER_OF_2_HIGH(dq_depth);
dq_size = dq_depth * BFI_RXQ_WI_SIZE;
dq_size = ALIGN(dq_size, PAGE_SIZE);
dpage_count = SIZE_TO_PAGES(dq_size);
if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
BNA_TO_POWER_OF_2_HIGH(hq_depth);
hq_size = hq_depth * BFI_RXQ_WI_SIZE;
hq_size = ALIGN(hq_size, PAGE_SIZE);
hpage_count = SIZE_TO_PAGES(hq_size);
} else
hpage_count = 0;
res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = sizeof(struct bna_ccb);
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = sizeof(struct bna_rcb);
mem_info->num = BNA_GET_RXQS(q_cfg);
res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = cpage_count * sizeof(void *);
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = PAGE_SIZE * cpage_count;
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = dpage_count * sizeof(void *);
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = PAGE_SIZE * dpage_count;
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = hpage_count * sizeof(void *);
mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = PAGE_SIZE * hpage_count;
mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = BFI_IBIDX_SIZE;
mem_info->num = q_cfg->num_paths;
res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = BFI_ENET_RSS_RIT_MAX;
mem_info->num = 1;
res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}
/*
 * Allocate and wire up a complete rx object from the pre-reserved
 * resources in @res_info (see bna_rx_res_req()): one rx, @rx_cfg->num_paths
 * rx paths, and one or two rx queues per path. Returns the rx in the
 * stopped FSM state, or NULL if the free pools cannot satisfy @rx_cfg.
 * Callbacks in @rx_cbfn are copied; rx_cleanup_cbfn/rx_post_cbfn are
 * mandatory, the rest may be NULL.
 */
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
		struct bna_rx_config *rx_cfg,
		const struct bna_rx_event_cbfn *rx_cbfn,
		struct bna_res_info *res_info,
		void *priv)
{
	struct bna_rx_mod *rx_mod = &bna->rx_mod;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *q0;
	struct bna_rxq *q1;
	struct bna_intr_info *intr_info;
	struct bna_mem_descr *hqunmap_mem;
	struct bna_mem_descr *dqunmap_mem;
	struct bna_mem_descr *ccb_mem;
	struct bna_mem_descr *rcb_mem;
	struct bna_mem_descr *cqpt_mem;
	struct bna_mem_descr *cswqpt_mem;
	struct bna_mem_descr *cpage_mem;
	struct bna_mem_descr *hqpt_mem;
	struct bna_mem_descr *dqpt_mem;
	struct bna_mem_descr *hsqpt_mem;
	struct bna_mem_descr *dsqpt_mem;
	struct bna_mem_descr *hpage_mem;
	struct bna_mem_descr *dpage_mem;
	u32 dpage_count, hpage_count;
	u32 hq_idx, dq_idx, rcb_idx;
	u32 cq_depth, i;
	u32 page_count;

	if (!bna_rx_res_check(rx_mod, rx_cfg))
		return NULL;

	/* Resolve all resource descriptors carved out by bna_rx_res_req(). */
	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
	dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
	hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

	/* Page counts were page-aligned by bna_rx_res_req(). */
	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
			PAGE_SIZE;
	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
			PAGE_SIZE;
	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
			PAGE_SIZE;

	rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
	rx->bna = bna;
	rx->rx_flags = 0;
	INIT_LIST_HEAD(&rx->rxp_q);
	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
	rx->priv = priv;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	/*
	 * If the enet is already up, inherit the started flag - but only
	 * when the rx type matches the enet's current (loopback or regular)
	 * mode.
	 */
	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_ENET_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		}
	}

	rx->num_paths = rx_cfg->num_paths;
	for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
			i < rx->num_paths; i++) {
		rxp = bna_rxp_get(rx_mod);
		list_add_tail(&rxp->qe, &rx->rxp_q);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;
		rxp->cq.rx = rx;

		q0 = bna_rxq_get(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = bna_rxq_get(rx_mod);

		/* With a single vector, all paths share it. */
		if (1 == intr_info->num)
			rxp->vector = intr_info->idl[0].vector;
		else
			rxp->vector = intr_info->idl[i].vector;

		/* Setup IB */

		rxp->cq.ib.ib_seg_host_addr.lsb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		rxp->cq.ib.ib_seg_host_addr.msb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		rxp->cq.ib.ib_seg_host_addr_kva =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		rxp->cq.ib.intr_type = intr_info->intr_type;
		if (intr_info->intr_type == BNA_INTR_T_MSIX)
			rxp->cq.ib.intr_vector = rxp->vector;
		else
			/* INTx: the vector becomes a bitmask position. */
			rxp->cq.ib.intr_vector = (1 << rxp->vector);
		rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
		rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
		rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;

		bna_rxp_add_rxqs(rxp, q0, q1);

		/* Setup large Q */

		q0->rx = rx;
		q0->rxp = rxp;

		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
		rcb_idx++; dq_idx++;
		q0->rcb->q_depth = rx_cfg->q0_depth;
		q0->q_depth = rx_cfg->q0_depth;
		q0->multi_buffer = rx_cfg->q0_multi_buf;
		q0->buffer_size = rx_cfg->q0_buf_size;
		q0->num_vecs = rx_cfg->q0_num_vecs;
		q0->rcb->rxq = q0;
		q0->rcb->bnad = bna->bnad;
		q0->rcb->id = 0;
		q0->rx_packets = q0->rx_bytes = 0;
		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;

		bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);

		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		/* Setup small Q */

		if (q1) {
			q1->rx = rx;
			q1->rxp = rxp;

			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
			rcb_idx++; hq_idx++;
			q1->rcb->q_depth = rx_cfg->q1_depth;
			q1->q_depth = rx_cfg->q1_depth;
			q1->multi_buffer = BNA_STATUS_T_DISABLED;
			q1->num_vecs = 1;
			q1->rcb->rxq = q1;
			q1->rcb->bnad = bna->bnad;
			q1->rcb->id = 1;
			/* HDS paths size the header queue by forced_offset. */
			q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
					rx_cfg->hds_config.forced_offset
					: rx_cfg->q1_buf_size;
			q1->rx_packets = q1->rx_bytes = 0;
			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;

			bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[i]);

			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}

		/* Setup CQ */

		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		cq_depth = rx_cfg->q0_depth +
			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
			0 : rx_cfg->q1_depth);
		/* if multi-buffer is enabled sum of q0_depth
		 * and q1_depth need not be a power of 2
		 */
		BNA_TO_POWER_OF_2_HIGH(cq_depth);
		rxp->cq.ccb->q_depth = cq_depth;
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		q0->rcb->ccb = rxp->cq.ccb;
		if (q1) {
			rxp->cq.ccb->rcb[1] = q1->rcb;
			q1->rcb->ccb = rxp->cq.ccb;
		}
		rxp->cq.ccb->hw_producer_index =
			(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
		rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
		rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
		rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib.coalescing_timeo;
		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->id = i;

		bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);

		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
	}

	rx->hds_cfg = rx_cfg->hds_config;

	bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	rx_mod->rid_mask |= (1 << rx->rid);

	return rx;
}
/*
 * Undo bna_rx_create(): uninit the rx function, invoke the destroy
 * callbacks for every RCB/CCB, return all rxq/rxp/rx objects to the
 * module free pools and clear the rid from the in-use mask. The caller
 * must have stopped the rx first (FSM in stopped state - not verified
 * here).
 */
void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	/* Drain the path list, releasing each path's queue(s) first. */
	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		q0->rcb = NULL;
		q0->rxp = NULL;
		q0->rx = NULL;
		bna_rxq_put(rx_mod, q0);

		if (q1) {
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			q1->rcb = NULL;
			q1->rxp = NULL;
			q1->rx = NULL;
			bna_rxq_put(rx_mod, q1);
		}

		/* slr overlays the other rxq union members, so this
		 * clears the queue pointers for every path type. */
		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;

		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		rxp->cq.ccb = NULL;
		rxp->rx = NULL;
		bna_rxp_put(rx_mod, rxp);
	}

	/* Unlink the rx from the active list, if present. */
	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	rx_mod->rid_mask &= ~(1 << rx->rid);

	rx->bna = NULL;
	rx->priv = NULL;
	bna_rx_put(rx_mod, rx);
}
void
bna_rx_enable(struct bna_rx *rx)
{
if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
return;
rx->rx_flags |= BNA_RX_F_ENABLED;
if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
bfa_fsm_send_event(rx, RX_E_START);
}
void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w must not be touched: report the stop as done now. */
		(*cbfn)(rx->bna->bnad, rx);
		return;
	}

	/* Hard cleanup: let the FSM stop the rx and call us back. */
	rx->stop_cbfn = cbfn;
	rx->stop_cbarg = rx->bna->bnad;
	rx->rx_flags &= ~BNA_RX_F_ENABLED;
	bfa_fsm_send_event(rx, RX_E_STOP);
}
/* Caller's cleanup has finished; let the rx state machine proceed. */
void
bna_rx_cleanup_complete(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}
void
bna_rx_vlan_strip_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	/* Nothing to do if stripping is already on. */
	if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED)
		return;

	rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
	rxf->vlan_strip_pending = true;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
void
bna_rx_vlan_strip_disable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	/* Nothing to do if stripping is already off. */
	if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED)
		return;

	rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
	rxf->vlan_strip_pending = true;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
/*
 * Apply rx-mode changes (promiscuous / default-mode / allmulti) named in
 * @bitmask with new values from @new_mode. Promiscuous and default mode
 * are mutually exclusive system-wide (at most one rid may own each), so
 * conflicting requests fail with BNA_CB_FAIL before any state changes.
 * If a h/w reconfiguration is needed, @cbfn is called on completion;
 * otherwise it is called synchronously (if non-NULL).
 */
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
			(rx->bna->promisc_rid != rxf->rx->rid))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->default_mode_rid != BFI_INVALID_RID)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
			(rx->bna->default_mode_rid != rxf->rx->rid)) {
				goto err_return;
		}

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->promisc_rid != BFI_INVALID_RID)
			goto err_return;
	}

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		if (bna_rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (bna_rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	} else if (cbfn)
		/* No h/w change required: complete immediately. */
		(*cbfn)(rx->bna->bnad, rx);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	/* Nothing to do if filtering is already on. */
	if (rxf->vlan_filter_status != BNA_STATUS_T_DISABLED)
		return;

	/* Mark the entire VLAN table as pending and reconfigure. */
	rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);
}
void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct list_head *ent;
	struct bna_rxp *rxp;

	/* Propagate the new timeout to every path's CCB and its IB. */
	list_for_each(ent, &rx->rxp_q) {
		rxp = (struct bna_rxp *)ent;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
	}
}
void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int load, bias;

	/* Copy the caller-supplied DIM coalescing table into the rx module. */
	for (load = 0; load < BNA_LOAD_T_MAX; load++)
		for (bias = 0; bias < BNA_BIAS_T_MAX; bias++)
			bna->rx_mod.dim_vector[load][bias] =
				vector[load][bias];
}
void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	/*
	 * Dynamic interrupt moderation: classify the packet rate seen since
	 * the last update into a load level, pick a bias from the
	 * small/large packet mix, and apply the corresponding coalescing
	 * timeout from the DIM table.
	 */
	static const struct {
		u32 limit;	/* rates strictly below this ... */
		u32 load;	/* ... map to this load level */
	} load_map[] = {
		{ BNA_PKT_RATE_10K, BNA_LOAD_T_LOW_4 },
		{ BNA_PKT_RATE_20K, BNA_LOAD_T_LOW_3 },
		{ BNA_PKT_RATE_30K, BNA_LOAD_T_LOW_2 },
		{ BNA_PKT_RATE_40K, BNA_LOAD_T_LOW_1 },
		{ BNA_PKT_RATE_50K, BNA_LOAD_T_HIGH_1 },
		{ BNA_PKT_RATE_60K, BNA_LOAD_T_HIGH_2 },
		{ BNA_PKT_RATE_80K, BNA_LOAD_T_HIGH_3 },
	};
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;
	u32 i;

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;
	if (small_rt == 0 && large_rt == 0)
		return;

	pkt_rt = small_rt + large_rt;

	/* Anything at or above the last threshold is maximum load. */
	load = BNA_LOAD_T_HIGH_4;
	for (i = 0; i < sizeof(load_map) / sizeof(load_map[0]); i++) {
		if (pkt_rt < load_map[i].limit) {
			load = load_map[i].load;
			break;
		}
	}

	/* Bias 0 when small packets dominate more than 2:1, else 1. */
	bias = (small_rt > (large_rt << 1)) ? 0 : 1;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
}
/*
 * Default DIM table of coalescing timeouts, indexed [load][bias]; consumed
 * via rx_mod.dim_vector in bna_rx_dim_update(). Rows correspond to the
 * BNA_LOAD_T_* levels, columns to the two bias values (0 = small-packet
 * dominated, 1 = otherwise).
 */
const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	{12, 12},
	{6, 10},
	{5, 10},
	{4, 8},
	{3, 6},
	{3, 6},
	{2, 4},
	{1, 2},
};
/* TX */

/*
 * Invoke and clear tx->stop_cbfn exactly once, if set. The callback and
 * its argument are copied to locals and cleared before the call, so the
 * callback may safely re-arm or free the tx.
 */
#define call_tx_stop_cbfn(tx) \
do { \
	if ((tx)->stop_cbfn) { \
		void (*cbfn)(void *, struct bna_tx *);	\
		void *cbarg; \
		cbfn = (tx)->stop_cbfn; \
		cbarg = (tx)->stop_cbarg; \
		(tx)->stop_cbfn = NULL; \
		(tx)->stop_cbarg = NULL; \
		cbfn(cbarg, (tx)); \
	} \
} while (0)
/*
 * Invoke and clear tx->prio_change_cbfn exactly once, if set; cleared
 * before the call so the callback may safely request another change.
 */
#define call_tx_prio_change_cbfn(tx) \
do { \
	if ((tx)->prio_change_cbfn) { \
		void (*cbfn)(struct bnad *, struct bna_tx *);	\
		cbfn = (tx)->prio_change_cbfn; \
		(tx)->prio_change_cbfn = NULL; \
		cbfn((tx)->bna->bnad, (tx)); \
	} \
} while (0)
/* Forward declarations for helpers used by the tx state machine below. */
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);

/* Events delivered to the tx state machine. */
enum bna_tx_event {
	TX_E_START = 1,
	TX_E_STOP = 2,
	TX_E_FAIL = 3,
	TX_E_STARTED = 4,
	TX_E_STOPPED = 5,
	TX_E_PRIO_CHANGE = 6,
	TX_E_CLEANUP_DONE = 7,
	TX_E_BW_UPDATE = 8,
};
/*
 * Declare the tx state machine states; each expands to the _entry and
 * event-handler prototypes defined below.
 */
bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
			enum bna_tx_event);
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	/* Deliver any pending stop completion now that tx is stopped. */
	call_tx_stop_cbfn(tx);
}
/* Stopped state: tx idle; only START moves it forward. */
static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_STOP:
		/* Already stopped: just complete the caller's stop. */
		call_tx_stop_cbfn(tx);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_PRIO_CHANGE:
		/* Nothing running: acknowledge the change immediately. */
		call_tx_prio_change_cbfn(tx);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_start_wait_entry(struct bna_tx *tx)
{
	/* Ask the firmware to configure and start the tx queues. */
	bna_bfi_tx_enet_start(tx);
}
/* Waiting for the firmware's start response. */
static void
bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		/* Pending prio/bw changes are moot once we stop. */
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_STARTED:
		/* A prio/bw change arrived while starting: restart cycle. */
		if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
			tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
				BNA_TX_F_BW_UPDATED);
			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		} else
			bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_PRIO_CHANGE:
		/* Latch for processing after the start completes. */
		tx->flags |=  BNA_TX_F_PRIO_CHANGED;
		break;

	case TX_E_BW_UPDATE:
		tx->flags |= BNA_TX_F_BW_UPDATED;
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head		 *qe;
	int is_regular = (tx->type == BNA_TX_T_REGULAR);

	/* Apply priorities and start the interrupt block on every txq. */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb->priority = txq->priority;
		/* Start IB */
		bna_ib_start(tx->bna, &txq->ib, is_regular);
	}
	/* Let the driver resume posting transmits. */
	tx->tx_resume_cbfn(tx->bna->bnad, tx);
}
/* Running state: datapath active. */
static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		/* Stall the driver before telling the firmware to stop. */
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		bna_tx_enet_stop(tx);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* Both require a stop/restart cycle to take effect. */
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
{
	/* Intentionally empty: waiting for STOPPED/FAIL from firmware. */
}
/* Waiting for the firmware to acknowledge the stop. */
static void
bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STARTED:
		/**
		 * We are here due to start_wait -> stop_wait transition on
		 * TX_E_STOP event
		 */
		bna_tx_enet_stop(tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
{
	/* Intentionally empty: waiting for TX_E_CLEANUP_DONE. */
}
/* Waiting for the driver to finish cleaning up the tx queues. */
static void
bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	/* Stall transmits, then stop the tx so priorities can be changed. */
	tx->tx_stall_cbfn(tx->bna->bnad, tx);
	bna_tx_enet_stop(tx);
}
/* Stopping the tx to apply a priority or bandwidth change. */
static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		/* Caller wants a real stop: abandon the restart cycle. */
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		call_tx_prio_change_cbfn(tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
	/* Acknowledge the priority change, then clean up for restart. */
	call_tx_prio_change_cbfn(tx);
	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}
/* Cleaning up before restarting with the new priority/bandwidth. */
static void
bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		/* Cleanup done: restart the tx with the new settings. */
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_failed_entry(struct bna_tx *tx)
{
	/* Intentionally empty: cleanup was triggered on entry's cause. */
}
/* Failed state: enet went down while the tx was active. */
static void
bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		/* Must wait for the pending cleanup before restarting. */
		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
		break;

	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
{
	/* Intentionally empty: waiting for cleanup to finish. */
}
/* A restart was requested while still cleaning up after a failure. */
static void
bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_CLEANUP_DONE:
		/* Cleanup complete: proceed with the deferred start. */
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
/*
 * Build and post the firmware command that configures and starts all of
 * this tx object's queues: per-queue page tables, priorities and IB
 * addresses, plus shared IB and vlan configuration.
 */
static void
bna_bfi_tx_enet_start(struct bna_tx *tx)
{
	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
	struct bna_txq *txq = NULL;
	struct list_head		 *qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));

	cfg_req->num_queues = tx->num_txq;
	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq;
		i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
		cfg_req->q_cfg[i].q.priority = txq->priority;

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			txq->ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			txq->ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)txq->ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
	/*
	 * NOTE(review): txq still points at the last queue from the loop;
	 * the shared IB config below assumes num_txq >= 1 and that all
	 * queues use the same IB settings - confirm against callers.
	 */
	cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED :
				BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)txq->ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)txq->ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;

	cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
	cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
/* Post the firmware command that tears down this tx's queue config. */
static void
bna_bfi_tx_enet_stop(struct bna_tx *tx)
{
	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
static void
bna_tx_enet_stop(struct bna_tx *tx)
{
	struct list_head *ent;
	struct bna_txq *txq;

	/* Quiesce every queue's interrupt block, then tell the firmware. */
	list_for_each(ent, &tx->txq_q) {
		txq = (struct bna_txq *)ent;
		bna_ib_stop(tx->bna, &txq->ib);
	}

	bna_bfi_tx_enet_stop(tx);
}
/*
 * Populate a TxQ's queue page table (QPT).
 * @qpt_mem:   DMA-able memory holding the hardware page-address table.
 * @swqpt_mem: kernel-virtual shadow table of per-page KVAs.
 * @page_mem:  one contiguous KVA/DMA region backing all queue pages.
 */
static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
	txq->tcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	/* Walk the contiguous region one PAGE_SIZE at a time, recording each
	 * page's KVA in the shadow table and its DMA address in the hardware
	 * QPT. */
	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
/* Take a Tx object off the free list.  Regular Tx objects are dequeued
 * from the head and loopback objects from the tail, so the two types
 * consume rids from opposite ends of the pool.  Returns NULL when the
 * pool is exhausted.
 */
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct list_head *qe = NULL;
	struct bna_tx *tx = NULL;

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	if (type == BNA_TX_T_REGULAR) {
		bfa_q_deq(&tx_mod->tx_free_q, &qe);
	} else {
		bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
	}
	tx = (struct bna_tx *)qe;
	bfa_q_qe_init(&tx->qe);
	tx->type = type;

	return tx;
}
/* Return a Tx object (and all its TxQs) to the module's free pools.
 * The Tx is re-inserted into tx_free_q sorted ascending by rid, so that
 * bna_tx_get() hands out the lowest free rid first.
 */
static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct list_head *prev_qe;
	struct list_head *qe;

	/* Detach and recycle every TxQ owned by this Tx. */
	while (!list_empty(&tx->txq_q)) {
		bfa_q_deq(&tx->txq_q, &txq);
		bfa_q_qe_init(&txq->qe);
		txq->tcb = NULL;
		txq->tx = NULL;
		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	/* Unlink from the active list if present. */
	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			bfa_q_qe_init(&tx->qe);
			break;
		}
	}

	tx->bna = NULL;
	tx->priv = NULL;

	/* Find the last entry with a smaller rid; insert after it. */
	prev_qe = NULL;
	list_for_each(qe, &tx_mod->tx_free_q) {
		if (((struct bna_tx *)qe)->rid < tx->rid)
			prev_qe = qe;
		else {
			break;
		}
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
	} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
		/* This is the last entry */
		list_add_tail(&tx->qe, &tx_mod->tx_free_q);
	} else {
		/* Somewhere in the middle: splice in by hand. */
		bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&tx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &tx->qe;
		bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
	}
}
/* Mark the enet layer as started for this Tx; kick the FSM only if the
 * Tx has also been enabled from above.
 */
static void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_ENET_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}
/* Stop a Tx on behalf of the Tx module: arrange for the module's
 * stopped-callback to fire when the FSM reaches stopped.
 */
static void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}
/* Propagate an enet-level failure into this Tx object's FSM. */
static void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}
/* Handle the firmware response to a TX_CFG_SET request: copy the
 * response out of the message queue, program each TxQ's doorbell
 * pointers from the returned handles, reset producer/consumer indexes
 * and advance the FSM.
 */
void
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_tx_cfg_rsp));

	tx->hw_id = cfg_rsp->hw_id;

	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		/* Setup doorbells */
		txq->tcb->i_dbell->doorbell_addr =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		txq->tcb->q_dbell =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
		txq->hw_id = cfg_rsp->q_handles[i].hw_qid;

		/* Initialize producer/consumer indexes */
		(*txq->tcb->hw_consumer_index) = 0;
		txq->tcb->producer_index = txq->tcb->consumer_index = 0;
	}

	bfa_fsm_send_event(tx, TX_E_STARTED);
}
/* Firmware acknowledged TX_CFG_CLR; let the FSM finish stopping. */
void
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(tx, TX_E_STOPPED);
}
/* Fan a bandwidth-update async event out to every active Tx FSM. */
void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
	struct list_head *pos;
	struct bna_tx *tx;

	list_for_each(pos, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)pos;
		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
	}
}
/* Fill in @res_info with the memory and interrupt resources needed for
 * @num_txq transmit queues of depth @txq_depth: per-queue TCBs, the
 * hardware and shadow queue page tables, the queue pages themselves,
 * IB index segments, and one MSI-X vector per queue.
 */
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	/* Round the queue byte size up to whole pages. */
	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * page_count;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
/* Create and initialize a Tx object from pre-allocated resources.
 * @res_info must have been populated per bna_tx_res_req(); @intr_info
 * must supply either one shared vector or one vector per TxQ.
 * Returns NULL on resource exhaustion (everything taken is returned to
 * the pools via bna_tx_free()).  The returned Tx starts in the stopped
 * FSM state.
 */
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		const struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	int page_count;
	int i;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
			PAGE_SIZE;

	/**
	 * Get resources
	 */

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
	if (!tx)
		return NULL;
	tx->bna = bna;
	tx->priv = priv;

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->tx = tx;
	}

	/*
	 * Initialize
	 */

	/* Tx */

	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->num_txq = tx_cfg->num_txq;

	/* Inherit the "enet started" state appropriate to our type. */
	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_ENET_LOOPBACK))
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		}
	}

	/* TxQ */

	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */
		txq->ib.ib_seg_host_addr.lsb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		txq->ib.ib_seg_host_addr.msb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		txq->ib.ib_seg_host_addr_kva =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		txq->ib.intr_type = intr_info->intr_type;
		/* One shared vector, or one vector per queue. */
		txq->ib.intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		if (intr_info->intr_type == BNA_INTR_T_INTX)
			txq->ib.intr_vector = (1 << txq->ib.intr_vector);
		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
		txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;

		/* TCB */

		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		txq->tcb->hw_consumer_index =
			(u32 *)txq->ib.ib_seg_host_addr_kva;
		txq->tcb->i_dbell = &txq->ib.door_bell;
		txq->tcb->intr_type = txq->ib.intr_type;
		txq->tcb->intr_vector = txq->ib.intr_vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				  res_u.mem_info.mdl[i]);

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
			txq->priority = txq->tcb->id;
		else
			txq->priority = tx_mod->default_prio;

		i++;
	}

	tx->txf_vlan_id = 0;

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	tx_mod->rid_mask |= (1 << tx->rid);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}
/* Tear down a Tx object: give bnad a chance to destroy each TCB, clear
 * the rid from the module's in-use mask, then recycle the Tx.
 */
void
bna_tx_destroy(struct bna_tx *tx)
{
	struct list_head *pos;
	struct bna_txq *q;

	list_for_each(pos, &tx->txq_q) {
		q = (struct bna_txq *)pos;
		if (tx->tcb_destroy_cbfn)
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, q->tcb);
	}

	tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
	bna_tx_free(tx);
}
/* Enable a Tx from above.  Only valid while the FSM is stopped; the
 * FSM is kicked only if the enet layer has already started us.
 */
void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_ENET_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}
/* Disable a Tx.  BNA_SOFT_CLEANUP completes synchronously via @cbfn;
 * otherwise @cbfn is stashed and invoked when the FSM reaches stopped.
 */
void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}
/* bnad signals that its cleanup of this Tx has finished. */
void
bna_tx_cleanup_complete(struct bna_tx *tx)
{
	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}
/* Per-Tx stop completion: decrement the module's wait counter. */
static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}
/* All Tx objects have stopped: notify enet (if anyone is waiting) and
 * disarm the callback so it fires at most once.
 */
static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->enet);
	tx_mod->stop_cbfn = NULL;
}
/* Initialize the Tx module: point the tx/txq arrays at the KVA blocks
 * supplied in @res_info and thread every element onto its free list.
 */
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	/* rid == array index, so the free list starts rid-sorted. */
	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
		tx_mod->tx[i].rid = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
	tx_mod->default_prio = 0;
	tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
	tx_mod->iscsi_prio = -1;
}
/* Uninitialize the Tx module.  The free-list walks only count entries
 * (the counts are currently unused; kept as a sanity-check hook).
 */
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}
/* enet has started (in @type mode): mark the module started and kick
 * every active Tx object of the matching type.
 */
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct list_head *pos;
	struct bna_tx *tx;

	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(pos, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)pos;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}
/* enet is stopping: stop every active Tx of @type and use a wait
 * counter so bna_tx_mod_cb_tx_stopped_all() fires once the last one
 * has stopped.
 */
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;

	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type) {
			bfa_wc_up(&tx_mod->tx_stop_wc);
			bna_tx_stop(tx);
		}
	}

	/* Drop the initial reference; completes now if nothing was up. */
	bfa_wc_wait(&tx_mod->tx_stop_wc);
}
/* enet failed: clear the module's started/loopback state and push a
 * FAIL event into every active Tx FSM.
 */
void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct list_head *pos;
	struct bna_tx *tx;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(pos, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)pos;
		bna_tx_fail(tx);
	}
}
/* Apply a new interrupt-coalescing timeout to every TxQ's IB. */
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct list_head *pos;
	struct bna_txq *q;

	list_for_each(pos, &tx->txq_q) {
		q = (struct bna_txq *)pos;
		bna_ib_coalescing_timeo_set(&q->ib, coalescing_timeo);
	}
}
| gpl-2.0 |
teleofis/OpenWRT | DLpatch/linux-3.18.29/drivers/staging/lustre/lnet/lnet/config.c | 537 | 26757 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
* GPL HEADER END
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"
/* Variable-length text buffer used while parsing networks/routes. */
typedef struct {			/* tmp struct for parsing routes */
	struct list_head ltb_list;	/* stash on lists */
	int ltb_size;			/* allocated size */
	char ltb_text[0];		/* text buffer */
} lnet_text_buf_t;

static int lnet_tbnob;			/* track text buf allocation */
#define LNET_MAX_TEXTBUF_NOB	(64<<10)	/* bound allocation */
#define LNET_SINGLE_TEXTBUF_NOB	(4<<10)
/* Print a parse-error pointer diagram for module parameter @name:
 * dots up to @offset, then a run of @width dashes under the bad token.
 * Static buffers are safe: parsing is single threaded.
 */
static void
lnet_syntax(char *name, char *str, int offset, int width)
{
	static char dots[LNET_SINGLE_TEXTBUF_NOB];
	static char dashes[LNET_SINGLE_TEXTBUF_NOB];

	memset(dots, '.', sizeof(dots));
	dots[sizeof(dots)-1] = 0;
	memset(dashes, '-', sizeof(dashes));
	dashes[sizeof(dashes)-1] = 0;

	LCONSOLE_ERROR_MSG(0x10f, "Error parsing '%s=\"%s\"'\n", name, str);
	LCONSOLE_ERROR_MSG(0x110, "here...........%.*s..%.*s|%.*s|\n",
			   (int)strlen(name), dots, offset, dots,
			   (width < 1) ? 0 : width - 1, dashes);
}
/* Return non-zero if @c separates commands: newline, CR or ';'. */
static int
lnet_issep(char c)
{
	return c == '\n' || c == '\r' || c == ';';
}
/* Return 1 if @net is not yet configured on @nilist, 0 if a NI for it
 * already exists.
 */
static int
lnet_net_unique(__u32 net, struct list_head *nilist)
{
	struct list_head *pos;
	lnet_ni_t *ni;

	list_for_each(pos, nilist) {
		ni = list_entry(pos, lnet_ni_t, ni_list);
		if (LNET_NIDNET(ni->ni_nid) == net)
			return 0;
	}

	return 1;
}
/* Free a network interface and its per-CPT tables.  Partially
 * constructed NIs (from a failed lnet_ni_alloc()) are handled: each
 * member is freed only if it was allocated.
 */
void
lnet_ni_free(struct lnet_ni *ni)
{
	if (ni->ni_refs != NULL)
		cfs_percpt_free(ni->ni_refs);

	if (ni->ni_tx_queues != NULL)
		cfs_percpt_free(ni->ni_tx_queues);

	if (ni->ni_cpts != NULL)
		cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts);

	LIBCFS_FREE(ni, sizeof(*ni));
}
/* Allocate and initialize a NI for @net and append it to @nilist.
 * @el optionally restricts the NI to a subset of CPTs; NULL means all.
 * Returns NULL on duplicate network or allocation failure (anything
 * partially built is released via lnet_ni_free()).
 */
static lnet_ni_t *
lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist)
{
	struct lnet_tx_queue *tq;
	struct lnet_ni *ni;
	int rc;
	int i;

	if (!lnet_net_unique(net, nilist)) {
		LCONSOLE_ERROR_MSG(0x111, "Duplicate network specified: %s\n",
				   libcfs_net2str(net));
		return NULL;
	}

	LIBCFS_ALLOC(ni, sizeof(*ni));
	if (ni == NULL) {
		CERROR("Out of memory creating network %s\n",
		       libcfs_net2str(net));
		return NULL;
	}

	spin_lock_init(&ni->ni_lock);
	INIT_LIST_HEAD(&ni->ni_cptlist);
	ni->ni_refs = cfs_percpt_alloc(lnet_cpt_table(),
				       sizeof(*ni->ni_refs[0]));
	if (ni->ni_refs == NULL)
		goto failed;

	ni->ni_tx_queues = cfs_percpt_alloc(lnet_cpt_table(),
					    sizeof(*ni->ni_tx_queues[0]));
	if (ni->ni_tx_queues == NULL)
		goto failed;

	cfs_percpt_for_each(tq, i, ni->ni_tx_queues)
		INIT_LIST_HEAD(&tq->tq_delayed);

	if (el == NULL) {
		ni->ni_cpts = NULL;
		ni->ni_ncpts = LNET_CPT_NUMBER;
	} else {
		rc = cfs_expr_list_values(el, LNET_CPT_NUMBER, &ni->ni_cpts);
		if (rc <= 0) {
			CERROR("Failed to set CPTs for NI %s: %d\n",
			       libcfs_net2str(net), rc);
			goto failed;
		}

		LASSERT(rc <= LNET_CPT_NUMBER);
		if (rc == LNET_CPT_NUMBER) {
			/* all CPTs selected: a table is redundant */
			LIBCFS_FREE(ni->ni_cpts, rc * sizeof(ni->ni_cpts[0]));
			ni->ni_cpts = NULL;
		}

		ni->ni_ncpts = rc;
	}

	/* LND will fill in the address part of the NID */
	ni->ni_nid = LNET_MKNID(net, 0);
	ni->ni_last_alive = get_seconds();
	list_add_tail(&ni->ni_list, nilist);
	return ni;
 failed:
	lnet_ni_free(ni);
	return NULL;
}
/* Parse the "networks" module parameter into NIs appended to @nilist.
 * Grammar (comma-separated entries):
 *   <net>[(<iface>[,<iface>...])][ [<cpt-list>] ]
 * e.g. "tcp0(eth0,eth1),o2ib0(ib0)[0,1]".  The loopback NI is always
 * added first.  The string is copied into a token buffer (kept in
 * the_lnet for the NI interface names to point into) and parsed
 * destructively.  Returns 0 on success or -errno; on failure all NIs
 * built so far are freed.
 */
int
lnet_parse_networks(struct list_head *nilist, char *networks)
{
	struct cfs_expr_list *el = NULL;
	int tokensize = strlen(networks) + 1;
	char *tokens;
	char *str;
	char *tmp;
	struct lnet_ni *ni;
	__u32 net;
	int nnets = 0;

	if (strlen(networks) > LNET_SINGLE_TEXTBUF_NOB) {
		/* _WAY_ conservative */
		LCONSOLE_ERROR_MSG(0x112,
				   "Can't parse networks: string too long\n");
		return -EINVAL;
	}

	LIBCFS_ALLOC(tokens, tokensize);
	if (tokens == NULL) {
		CERROR("Can't allocate net tokens\n");
		return -ENOMEM;
	}

	the_lnet.ln_network_tokens = tokens;
	the_lnet.ln_network_tokens_nob = tokensize;
	memcpy(tokens, networks, tokensize);
	str = tmp = tokens;

	/* Add in the loopback network */
	ni = lnet_ni_alloc(LNET_MKNET(LOLND, 0), NULL, nilist);
	if (ni == NULL)
		goto failed;

	while (str != NULL && *str != 0) {
		char *comma = strchr(str, ',');
		char *bracket = strchr(str, '(');
		char *square = strchr(str, '[');
		char *iface;
		int niface;
		int rc;

		/* NB we don't check interface conflicts here; it's the LNDs
		 * responsibility (if it cares at all) */

		if (square != NULL && (comma == NULL || square < comma)) {
			/* i.e: o2ib0(ib0)[1,2], number between square
			 * brackets are CPTs this NI needs to be bond */
			if (bracket != NULL && bracket > square) {
				tmp = square;
				goto failed_syntax;
			}

			tmp = strchr(square, ']');
			if (tmp == NULL) {
				tmp = square;
				goto failed_syntax;
			}

			rc = cfs_expr_list_parse(square, tmp - square + 1,
						 0, LNET_CPT_NUMBER - 1, &el);
			if (rc != 0) {
				tmp = square;
				goto failed_syntax;
			}

			/* blank out the CPT expression so the remaining
			 * parsing doesn't trip over it */
			while (square <= tmp)
				*square++ = ' ';
		}

		if (bracket == NULL ||
		    (comma != NULL && comma < bracket)) {

			/* no interface list specified */

			if (comma != NULL)
				*comma++ = 0;
			net = libcfs_str2net(cfs_trimwhite(str));

			if (net == LNET_NIDNET(LNET_NID_ANY)) {
				LCONSOLE_ERROR_MSG(0x113,
						   "Unrecognised network type\n");
				tmp = str;
				goto failed_syntax;
			}

			if (LNET_NETTYP(net) != LOLND && /* LO is implicit */
			    lnet_ni_alloc(net, el, nilist) == NULL)
				goto failed;

			if (el != NULL) {
				cfs_expr_list_free(el);
				el = NULL;
			}

			str = comma;
			continue;
		}

		*bracket = 0;
		net = libcfs_str2net(cfs_trimwhite(str));
		if (net == LNET_NIDNET(LNET_NID_ANY)) {
			tmp = str;
			goto failed_syntax;
		}

		nnets++;
		ni = lnet_ni_alloc(net, el, nilist);
		if (ni == NULL)
			goto failed;

		if (el != NULL) {
			cfs_expr_list_free(el);
			el = NULL;
		}

		niface = 0;
		iface = bracket + 1;

		bracket = strchr(iface, ')');
		if (bracket == NULL) {
			tmp = iface;
			goto failed_syntax;
		}

		*bracket = 0;
		do {
			comma = strchr(iface, ',');
			if (comma != NULL)
				*comma++ = 0;

			iface = cfs_trimwhite(iface);
			if (*iface == 0) {
				tmp = iface;
				goto failed_syntax;
			}

			if (niface == LNET_MAX_INTERFACES) {
				LCONSOLE_ERROR_MSG(0x115,
						   "Too many interfaces for net %s\n",
						   libcfs_net2str(net));
				goto failed;
			}

			/* interface names point into the token buffer,
			 * which stays allocated in the_lnet */
			ni->ni_interfaces[niface++] = iface;
			iface = comma;
		} while (iface != NULL);

		str = bracket + 1;
		comma = strchr(bracket + 1, ',');
		if (comma != NULL) {
			*comma = 0;
			str = cfs_trimwhite(str);
			if (*str != 0) {
				tmp = str;
				goto failed_syntax;
			}
			str = comma + 1;
			continue;
		}

		str = cfs_trimwhite(str);
		if (*str != 0) {
			tmp = str;
			goto failed_syntax;
		}
	}

	LASSERT(!list_empty(nilist));
	return 0;

 failed_syntax:
	lnet_syntax("networks", networks, (int)(tmp - tokens), strlen(tmp));
 failed:
	while (!list_empty(nilist)) {
		ni = list_entry(nilist->next, lnet_ni_t, ni_list);

		list_del(&ni->ni_list);
		lnet_ni_free(ni);
	}

	if (el != NULL)
		cfs_expr_list_free(el);

	LIBCFS_FREE(tokens, tokensize);
	the_lnet.ln_network_tokens = NULL;

	return -EINVAL;
}
/* Allocate a text buffer that can hold @str_len characters plus the
 * terminating NUL.  Global allocation is tracked in lnet_tbnob and
 * bounded by LNET_MAX_TEXTBUF_NOB; single buffers are bounded by
 * LNET_SINGLE_TEXTBUF_NOB.  Returns NULL on any limit or alloc failure.
 */
static lnet_text_buf_t *
lnet_new_text_buf(int str_len)
{
	lnet_text_buf_t *ltb;
	int nob;

	/* NB allocate space for the terminating 0 */
	nob = offsetof(lnet_text_buf_t, ltb_text[str_len + 1]);
	if (nob > LNET_SINGLE_TEXTBUF_NOB) {
		/* _way_ conservative for "route net gateway..." */
		CERROR("text buffer too big\n");
		return NULL;
	}

	if (lnet_tbnob + nob > LNET_MAX_TEXTBUF_NOB) {
		CERROR("Too many text buffers\n");
		return NULL;
	}

	LIBCFS_ALLOC(ltb, nob);
	if (ltb == NULL)
		return NULL;

	ltb->ltb_size = nob;
	ltb->ltb_text[0] = 0;
	lnet_tbnob += nob;
	return ltb;
}
/* Release one text buffer and its share of the global accounting. */
static void
lnet_free_text_buf(lnet_text_buf_t *ltb)
{
	lnet_tbnob -= ltb->ltb_size;
	LIBCFS_FREE(ltb, ltb->ltb_size);
}
static void
lnet_free_text_bufs(struct list_head *tbs)
{
lnet_text_buf_t *ltb;
while (!list_empty(tbs)) {
ltb = list_entry(tbs->next, lnet_text_buf_t, ltb_list);
list_del(<b->ltb_list);
lnet_free_text_buf(ltb);
}
}
/* Split @str into separate commands at newline/CR/';' boundaries and
 * append one text buffer per non-empty command to @tbs.  '#' starts a
 * comment that runs to the next separator.  Internal whitespace is
 * normalised to single spaces.  Returns 0 on success, -1 on allocation
 * failure (nothing is added to @tbs in that case).
 */
static int
lnet_str2tbs_sep(struct list_head *tbs, char *str)
{
	struct list_head pending;
	char *sep;
	int nob;
	int i;
	lnet_text_buf_t *ltb;

	INIT_LIST_HEAD(&pending);

	/* Split 'str' into separate commands */
	for (;;) {
		/* skip leading whitespace */
		while (isspace(*str))
			str++;

		/* scan for separator or comment */
		for (sep = str; *sep != 0; sep++)
			if (lnet_issep(*sep) || *sep == '#')
				break;

		nob = (int)(sep - str);
		if (nob > 0) {
			ltb = lnet_new_text_buf(nob);
			if (ltb == NULL) {
				lnet_free_text_bufs(&pending);
				return -1;
			}

			for (i = 0; i < nob; i++)
				if (isspace(str[i]))
					ltb->ltb_text[i] = ' ';
				else
					ltb->ltb_text[i] = str[i];

			ltb->ltb_text[nob] = 0;

			list_add_tail(&ltb->ltb_list, &pending);
		}

		if (*sep == '#') {
			/* scan for separator */
			do {
				sep++;
			} while (*sep != 0 && !lnet_issep(*sep));
		}

		if (*sep == 0)
			break;

		str = sep + 1;
	}

	list_splice(&pending, tbs->prev);
	return 0;
}
/* Build one expansion of a bracketed pattern: everything in @str before
 * @sep1 ('['), then @item, then everything after @sep2 (']'), appended
 * as a new text buffer on @list.  Returns 0 or -ENOMEM.
 */
static int
lnet_expand1tb(struct list_head *list,
	       char *str, char *sep1, char *sep2,
	       char *item, int itemlen)
{
	int len1 = (int)(sep1 - str);
	int len2 = strlen(sep2 + 1);
	lnet_text_buf_t *ltb;

	LASSERT(*sep1 == '[');
	LASSERT(*sep2 == ']');

	ltb = lnet_new_text_buf(len1 + itemlen + len2);
	if (ltb == NULL)
		return -ENOMEM;

	memcpy(ltb->ltb_text, str, len1);
	memcpy(&ltb->ltb_text[len1], item, itemlen);
	memcpy(&ltb->ltb_text[len1+itemlen], sep2 + 1, len2);
	ltb->ltb_text[len1 + itemlen + len2] = 0;

	list_add_tail(&ltb->ltb_list, list);
	return 0;
}
/* Expand the first bracketed expression in @str (e.g. "o2ib[1-4/2]")
 * into one text buffer per expansion, appended to @tbs.  Items inside
 * the brackets are either plain strings or "lo-hi[/stride]" ranges.
 *
 * Returns 1 if an expansion happened, 0 if there was nothing to
 * expand, and -1 on syntax/allocation error (any partial expansion is
 * freed).
 *
 * Fix: a parsed stride of 0 (e.g. "[1-4/0]") previously reached
 * "(hi - lo) % stride", dividing by zero — undefined behavior in C.
 * The guard now rejects stride <= 0 instead of only stride < 0.
 */
static int
lnet_str2tbs_expand(struct list_head *tbs, char *str)
{
	char num[16];
	struct list_head pending;
	char *sep;
	char *sep2;
	char *parsed;
	char *enditem;
	int lo;
	int hi;
	int stride;
	int i;
	int nob;
	int scanned;

	INIT_LIST_HEAD(&pending);

	sep = strchr(str, '[');
	if (sep == NULL)			/* nothing to expand */
		return 0;

	sep2 = strchr(sep, ']');
	if (sep2 == NULL)
		goto failed;

	for (parsed = sep; parsed < sep2; parsed = enditem) {
		enditem = ++parsed;
		while (enditem < sep2 && *enditem != ',')
			enditem++;

		if (enditem == parsed)		/* no empty items */
			goto failed;

		if (sscanf(parsed, "%d-%d/%d%n", &lo, &hi, &stride,
			   &scanned) < 3) {
			if (sscanf(parsed, "%d-%d%n", &lo, &hi, &scanned) < 2) {
				/* simple string enumeration */
				if (lnet_expand1tb(&pending, str, sep, sep2,
						   parsed,
						   (int)(enditem - parsed)) != 0)
					goto failed;
				continue;
			}

			stride = 1;
		}

		/* range expansion */

		if (enditem != parsed + scanned) /* no trailing junk */
			goto failed;

		/* stride must be strictly positive: stride == 0 would
		 * divide by zero below */
		if (hi < 0 || lo < 0 || stride <= 0 || hi < lo ||
		    (hi - lo) % stride != 0)
			goto failed;

		for (i = lo; i <= hi; i += stride) {
			snprintf(num, sizeof(num), "%d", i);
			nob = strlen(num);
			if (nob + 1 == sizeof(num))
				goto failed;

			if (lnet_expand1tb(&pending, str, sep, sep2,
					   num, nob) != 0)
				goto failed;
		}
	}

	list_splice(&pending, tbs->prev);
	return 1;

 failed:
	lnet_free_text_bufs(&pending);
	return -1;
}
/* Parse @str as a route hop count.  Succeeds (non-zero) only when the
 * entire string is one unsigned integer in the range 1..255; the value
 * is stored in *@hops.
 */
static int
lnet_parse_hops(char *str, unsigned int *hops)
{
	int want = strlen(str);
	int got = want;

	if (sscanf(str, "%u%n", hops, &got) < 1)
		return 0;

	/* whole token consumed, value within (0, 256) */
	return got == want && *hops > 0 && *hops < 256;
}
#define LNET_PRIORITY_SEPARATOR (':')

/* Parse an optional ":<priority>" suffix off a gateway token.  With no
 * separator, *@priority is set to 0.  On success the separator is
 * overwritten with NUL so the remaining text is a bare NID.  On a
 * malformed priority, *@token is advanced to point at the bad suffix
 * (for the caller's error report) and -1 is returned.
 */
static int
lnet_parse_priority(char *str, unsigned int *priority, char **token)
{
	int   nob;
	char *sep;
	int   len;

	sep = strchr(str, LNET_PRIORITY_SEPARATOR);
	if (sep == NULL) {
		*priority = 0;
		return 0;
	}
	len = strlen(sep + 1);

	if ((sscanf((sep+1), "%u%n", priority, &nob) < 1) || (len != nob)) {
		/* Update the caller's token pointer so it treats the found
		   priority as the token to report in the error message. */
		*token += sep - str + 1;
		return -1;
	}

	CDEBUG(D_NET, "gateway %s, priority %d, nob %d\n", str, *priority, nob);

	/*
	 * Change priority separator to \0 to be able to parse NID
	 */
	*sep = '\0';
	return 0;
}
/* Parse one route command of the form
 *   <net-expr>... [<hops>] <gateway-expr>[:<priority>]...
 * Bracketed expressions in nets/gateways are expanded in place on the
 * 'nets'/'gateways' lists.  A route is added for every (net, gateway)
 * pair; a gateway that is a local NID instead sets *@im_a_router.
 * Returns 0 on success, -1 on any error.
 */
static int
lnet_parse_route(char *str, int *im_a_router)
{
	/* static scratch buffer OK (single threaded) */
	static char cmd[LNET_SINGLE_TEXTBUF_NOB];

	struct list_head nets;
	struct list_head gateways;
	struct list_head *tmp1;
	struct list_head *tmp2;
	__u32 net;
	lnet_nid_t nid;
	lnet_text_buf_t *ltb;
	int rc;
	char *sep;
	char *token = str;
	int ntokens = 0;
	int myrc = -1;
	unsigned int hops;
	int got_hops = 0;
	unsigned int priority = 0;

	INIT_LIST_HEAD(&gateways);
	INIT_LIST_HEAD(&nets);

	/* save a copy of the string for error messages */
	strncpy(cmd, str, sizeof(cmd) - 1);
	cmd[sizeof(cmd) - 1] = 0;

	sep = str;
	for (;;) {
		/* scan for token start */
		while (isspace(*sep))
			sep++;
		if (*sep == 0) {
			/* need at least one net and one gateway (and the
			 * hop token if one was given) */
			if (ntokens < (got_hops ? 3 : 2))
				goto token_error;
			break;
		}

		ntokens++;
		token = sep++;

		/* scan for token end */
		while (*sep != 0 && !isspace(*sep))
			sep++;
		if (*sep != 0)
			*sep++ = 0;

		if (ntokens == 1) {
			tmp2 = &nets;		/* expanding nets */
		} else if (ntokens == 2 &&
			   lnet_parse_hops(token, &hops)) {
			got_hops = 1;		/* got a hop count */
			continue;
		} else {
			tmp2 = &gateways;	/* expanding gateways */
		}

		ltb = lnet_new_text_buf(strlen(token));
		if (ltb == NULL)
			goto out;

		strcpy(ltb->ltb_text, token);
		tmp1 = &ltb->ltb_list;
		list_add_tail(tmp1, tmp2);

		/* expand bracketed expressions until none remain; each
		 * fully-expanded entry is validated as a net or NID */
		while (tmp1 != tmp2) {
			ltb = list_entry(tmp1, lnet_text_buf_t, ltb_list);

			rc = lnet_str2tbs_expand(tmp1->next, ltb->ltb_text);
			if (rc < 0)
				goto token_error;

			tmp1 = tmp1->next;

			if (rc > 0) {		/* expanded! */
				list_del(&ltb->ltb_list);
				lnet_free_text_buf(ltb);
				continue;
			}

			if (ntokens == 1) {
				net = libcfs_str2net(ltb->ltb_text);
				if (net == LNET_NIDNET(LNET_NID_ANY) ||
				    LNET_NETTYP(net) == LOLND)
					goto token_error;
			} else {
				rc = lnet_parse_priority(ltb->ltb_text,
							 &priority, &token);
				if (rc < 0)
					goto token_error;

				nid = libcfs_str2nid(ltb->ltb_text);
				if (nid == LNET_NID_ANY ||
				    LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
					goto token_error;
			}
		}
	}

	if (!got_hops)
		hops = 1;

	LASSERT(!list_empty(&nets));
	LASSERT(!list_empty(&gateways));

	/* cross product: one route per (net, gateway) pair */
	list_for_each(tmp1, &nets) {
		ltb = list_entry(tmp1, lnet_text_buf_t, ltb_list);
		net = libcfs_str2net(ltb->ltb_text);
		LASSERT(net != LNET_NIDNET(LNET_NID_ANY));

		list_for_each(tmp2, &gateways) {
			ltb = list_entry(tmp2, lnet_text_buf_t, ltb_list);
			nid = libcfs_str2nid(ltb->ltb_text);
			LASSERT(nid != LNET_NID_ANY);

			if (lnet_islocalnid(nid)) {
				*im_a_router = 1;
				continue;
			}

			rc = lnet_add_route(net, hops, nid, priority);
			if (rc != 0) {
				CERROR("Can't create route to %s via %s\n",
				       libcfs_net2str(net),
				       libcfs_nid2str(nid));
				goto out;
			}
		}
	}

	myrc = 0;
	goto out;

 token_error:
	lnet_syntax("routes", cmd, (int)(token - str), strlen(token));
 out:
	lnet_free_text_bufs(&nets);
	lnet_free_text_bufs(&gateways);
	return myrc;
}
/* Parse every route command on @tbs, freeing each buffer as it is
 * consumed.  On the first parse failure the remaining buffers are
 * freed too and -EINVAL is returned.
 */
static int
lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router)
{
	lnet_text_buf_t *ltb;

	while (!list_empty(tbs)) {
		ltb = list_entry(tbs->next, lnet_text_buf_t, ltb_list);

		if (lnet_parse_route(ltb->ltb_text, im_a_router) < 0) {
			lnet_free_text_bufs(tbs);
			return -EINVAL;
		}

		list_del(&ltb->ltb_list);
		lnet_free_text_buf(ltb);
	}

	return 0;
}
/* Parse the "routes" module parameter: split it into individual route
 * commands, then hand each one to lnet_parse_route().  *@im_a_router
 * is set if any gateway turns out to be a local NID.
 */
int
lnet_parse_routes(char *routes, int *im_a_router)
{
	struct list_head tbs;
	int rc;

	*im_a_router = 0;
	INIT_LIST_HEAD(&tbs);

	if (lnet_str2tbs_sep(&tbs, routes) < 0) {
		CERROR("Error parsing routes\n");
		rc = -EINVAL;
	} else {
		rc = lnet_parse_route_tbs(&tbs, im_a_router);
	}

	/* every text buffer must have been released by now */
	LASSERT(lnet_tbnob == 0);
	return rc;
}
/* Match an ip2nets IP-pattern token against the local addresses in
 * @ipaddrs.  Returns >0 if any address matches, 0 if none do, or the
 * negative error from cfs_ip_addr_parse() on a bad pattern.
 */
static int
lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip)
{
	LIST_HEAD(list);
	int rc;
	int i;

	rc = cfs_ip_addr_parse(token, len, &list);
	if (rc != 0)
		return rc;

	for (rc = i = 0; !rc && i < nip; i++)
		rc = cfs_ip_addr_match(ipaddrs[i], &list);

	cfs_ip_addr_free(&list);

	return rc;
}
/* Evaluate one ip2nets entry ("<net-spec> <ip-pattern>...") against the
 * local IP addresses.  The first token is the network spec; each later
 * token is an IP pattern.  If any pattern matches, @net_entry is
 * overwritten in place with just the net spec and 1 is returned;
 * returns 0 on no match, or a negative error on a bad pattern.
 */
static int
lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip)
{
	static char tokens[LNET_SINGLE_TEXTBUF_NOB];

	int matched = 0;
	int ntokens = 0;
	int len;
	char *net = NULL;
	char *sep;
	char *token;
	int rc;

	LASSERT(strlen(net_entry) < sizeof(tokens));

	/* work on a copy of the string */
	strcpy(tokens, net_entry);
	sep = tokens;
	for (;;) {
		/* scan for token start */
		while (isspace(*sep))
			sep++;
		if (*sep == 0)
			break;

		token = sep++;

		/* scan for token end */
		while (*sep != 0 && !isspace(*sep))
			sep++;
		if (*sep != 0)
			*sep++ = 0;

		if (ntokens++ == 0) {
			net = token;	/* first token is the net spec */
			continue;
		}

		len = strlen(token);

		rc = lnet_match_network_token(token, len, ipaddrs, nip);
		if (rc < 0) {
			lnet_syntax("ip2nets", net_entry,
				    (int)(token - tokens), len);
			return rc;
		}

		matched |= (rc != 0);
	}

	if (!matched)
		return 0;

	strcpy(net_entry, net);		/* replace with matched net */
	return 1;
}
/* Convert a net spec (possibly with a trailing "(iface,...)" list) to
 * its network number.  The '(' is NUL'd only for the duration of the
 * conversion and then restored, so @netspec is unchanged on return.
 */
static __u32
lnet_netspec2net(char *netspec)
{
	__u32 net;
	char *paren = strchr(netspec, '(');

	if (paren != NULL)
		*paren = 0;		/* hide the interface list */

	net = libcfs_str2net(netspec);

	if (paren != NULL)
		*paren = '(';		/* restore the original string */

	return net;
}
/* Split a comma-separated list of net specs (the single entry on
 * @nets) into one text buffer per spec, validating each and rejecting
 * duplicate networks.  @source is the pristine copy used only for
 * error offsets.  Returns 0 or a negative error.
 */
static int
lnet_splitnets(char *source, struct list_head *nets)
{
	int offset = 0;
	int offset2;
	int len;
	lnet_text_buf_t *tb;
	lnet_text_buf_t *tb2;
	struct list_head *t;
	char *sep;
	char *bracket;
	__u32 net;

	LASSERT(!list_empty(nets));
	LASSERT(nets->next == nets->prev);	/* single entry */

	tb = list_entry(nets->next, lnet_text_buf_t, ltb_list);

	for (;;) {
		sep = strchr(tb->ltb_text, ',');
		bracket = strchr(tb->ltb_text, '(');

		if (sep != NULL &&
		    bracket != NULL &&
		    bracket < sep) {
			/* netspec lists interfaces... */

			offset2 = offset + (int)(bracket - tb->ltb_text);
			len = strlen(bracket);

			bracket = strchr(bracket + 1, ')');

			/* ')' must exist and be followed by ',' or EOS */
			if (bracket == NULL ||
			    !(bracket[1] == ',' || bracket[1] == 0)) {
				lnet_syntax("ip2nets", source, offset2, len);
				return -EINVAL;
			}

			sep = (bracket[1] == 0) ? NULL : bracket + 1;
		}

		if (sep != NULL)
			*sep++ = 0;

		net = lnet_netspec2net(tb->ltb_text);
		if (net == LNET_NIDNET(LNET_NID_ANY)) {
			lnet_syntax("ip2nets", source, offset,
				    strlen(tb->ltb_text));
			return -EINVAL;
		}

		/* reject a net that already appeared in this entry */
		list_for_each(t, nets) {
			tb2 = list_entry(t, lnet_text_buf_t, ltb_list);

			if (tb2 == tb)
				continue;

			if (net == lnet_netspec2net(tb2->ltb_text)) {
				/* duplicate network */
				lnet_syntax("ip2nets", source, offset,
					    strlen(tb->ltb_text));
				return -EINVAL;
			}
		}

		if (sep == NULL)
			return 0;

		offset += (int)(sep - tb->ltb_text);
		tb2 = lnet_new_text_buf(strlen(sep));
		if (tb2 == NULL)
			return -ENOMEM;

		strcpy(tb2->ltb_text, sep);
		list_add_tail(&tb2->ltb_list, nets);

		tb = tb2;
	}
}
/* Evaluate the whole ip2nets string against the local IP addresses in
 * @ipaddrs and build a "networks" string from the entries that match.
 * Duplicate networks across entries are dropped (first match wins).
 * On success *@networksp points at a static buffer and the number of
 * matched entries is returned; a negative error otherwise.
 */
static int
lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
{
	static char networks[LNET_SINGLE_TEXTBUF_NOB];
	static char source[LNET_SINGLE_TEXTBUF_NOB];

	struct list_head raw_entries;
	struct list_head matched_nets;
	struct list_head current_nets;
	struct list_head *t;
	struct list_head *t2;
	lnet_text_buf_t *tb;
	lnet_text_buf_t *tb2;
	__u32 net1;
	__u32 net2;
	int len;
	int count;
	int dup;
	int rc;

	INIT_LIST_HEAD(&raw_entries);
	if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) {
		CERROR("Error parsing ip2nets\n");
		LASSERT(lnet_tbnob == 0);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&matched_nets);
	INIT_LIST_HEAD(&current_nets);
	networks[0] = 0;
	count = 0;
	len = 0;
	rc = 0;

	while (!list_empty(&raw_entries)) {
		tb = list_entry(raw_entries.next, lnet_text_buf_t,
				ltb_list);

		/* keep a pristine copy for error offsets */
		strncpy(source, tb->ltb_text, sizeof(source)-1);
		source[sizeof(source)-1] = 0;

		/* replace ltb_text with the network(s) add on match */
		rc = lnet_match_network_tokens(tb->ltb_text, ipaddrs, nip);
		if (rc < 0)
			break;

		list_del(&tb->ltb_list);

		if (rc == 0) {	/* no match */
			lnet_free_text_buf(tb);
			continue;
		}

		/* split into separate networks */
		INIT_LIST_HEAD(&current_nets);
		list_add(&tb->ltb_list, &current_nets);
		rc = lnet_splitnets(source, &current_nets);
		if (rc < 0)
			break;

		/* drop this entry if any of its nets already matched */
		dup = 0;
		list_for_each(t, &current_nets) {
			tb = list_entry(t, lnet_text_buf_t, ltb_list);
			net1 = lnet_netspec2net(tb->ltb_text);
			LASSERT(net1 != LNET_NIDNET(LNET_NID_ANY));

			list_for_each(t2, &matched_nets) {
				tb2 = list_entry(t2, lnet_text_buf_t,
						 ltb_list);
				net2 = lnet_netspec2net(tb2->ltb_text);
				LASSERT(net2 != LNET_NIDNET(LNET_NID_ANY));

				if (net1 == net2) {
					dup = 1;
					break;
				}
			}

			if (dup)
				break;
		}

		if (dup) {
			lnet_free_text_bufs(&current_nets);
			continue;
		}

		/* accumulate into the output string */
		list_for_each_safe(t, t2, &current_nets) {
			tb = list_entry(t, lnet_text_buf_t, ltb_list);

			list_del(&tb->ltb_list);
			list_add_tail(&tb->ltb_list, &matched_nets);

			len += snprintf(networks + len, sizeof(networks) - len,
					"%s%s", (len == 0) ? "" : ",",
					tb->ltb_text);

			if (len >= sizeof(networks)) {
				CERROR("Too many matched networks\n");
				rc = -E2BIG;
				goto out;
			}
		}

		count++;
	}

 out:
	lnet_free_text_bufs(&raw_entries);
	lnet_free_text_bufs(&matched_nets);
	lnet_free_text_bufs(&current_nets);
	LASSERT(lnet_tbnob == 0);

	if (rc < 0)
		return rc;

	*networksp = networks;
	return count;
}
/* Free an IP address array obtained from lnet_ipaddr_enumerate() */
static void
lnet_ipaddr_free_enumeration(__u32 *ipaddrs, int nip)
{
	LIBCFS_FREE(ipaddrs, nip * sizeof(*ipaddrs));
}
/*
 * Enumerate the IPv4 addresses of all local interfaces that are up,
 * skipping loopback.
 *
 * On success returns the number of addresses found (> 0) and sets
 * *@ipaddrsp to an array the caller must release with
 * lnet_ipaddr_free_enumeration().  Returns 0 (with *@ipaddrsp untouched)
 * when no usable interface exists, or a negative errno on failure.
 *
 * Change: dropped the dead "rc = nip;" store in the shrink path — rc is
 * never read after that point; the function returns nip directly.
 */
static int
lnet_ipaddr_enumerate(__u32 **ipaddrsp)
{
	int up;
	__u32 netmask;
	__u32 *ipaddrs;
	__u32 *ipaddrs2;
	int nip;
	char **ifnames;
	int nif = libcfs_ipif_enumerate(&ifnames);
	int i;
	int rc;

	if (nif <= 0)
		return nif;

	LIBCFS_ALLOC(ipaddrs, nif * sizeof(*ipaddrs));
	if (ipaddrs == NULL) {
		CERROR("Can't allocate ipaddrs[%d]\n", nif);
		libcfs_ipif_free_enumeration(ifnames, nif);
		return -ENOMEM;
	}

	for (i = nip = 0; i < nif; i++) {
		if (!strcmp(ifnames[i], "lo"))	/* skip loopback */
			continue;

		rc = libcfs_ipif_query(ifnames[i], &up,
				       &ipaddrs[nip], &netmask);
		if (rc != 0) {
			CWARN("Can't query interface %s: %d\n",
			      ifnames[i], rc);
			continue;
		}

		if (!up) {
			CWARN("Ignoring interface %s: it's down\n",
			      ifnames[i]);
			continue;
		}

		nip++;
	}

	libcfs_ipif_free_enumeration(ifnames, nif);

	if (nip == nif) {
		/* every interface was usable: hand over the array as-is */
		*ipaddrsp = ipaddrs;
	} else {
		if (nip > 0) {
			/* shrink to exactly nip entries */
			LIBCFS_ALLOC(ipaddrs2, nip * sizeof(*ipaddrs2));
			if (ipaddrs2 == NULL) {
				CERROR("Can't allocate ipaddrs[%d]\n", nip);
				nip = -ENOMEM;
			} else {
				memcpy(ipaddrs2, ipaddrs,
				       nip * sizeof(*ipaddrs));
				*ipaddrsp = ipaddrs2;
			}
		}
		lnet_ipaddr_free_enumeration(ipaddrs, nif);
	}
	return nip;
}
/*
 * Parse an "ip2nets" specification against the local IP configuration.
 * On success *@networksp points at the matched "networks" string (a
 * static buffer owned by lnet_match_networks(); do not free) and 0 is
 * returned.  Returns a negative errno when enumeration fails, nothing
 * matches, or the spec is malformed.
 */
int
lnet_parse_ip2nets(char **networksp, char *ip2nets)
{
	__u32 *ipaddrs = NULL;
	int nip = lnet_ipaddr_enumerate(&ipaddrs);
	int rc;

	if (nip < 0) {
		LCONSOLE_ERROR_MSG(0x117,
				   "Error %d enumerating local IP interfaces for ip2nets to match\n",
				   nip);
		return nip;
	}

	if (nip == 0) {
		LCONSOLE_ERROR_MSG(0x118,
				   "No local IP interfaces for ip2nets to match\n");
		return -ENOENT;
	}

	rc = lnet_match_networks(networksp, ip2nets, ipaddrs, nip);
	lnet_ipaddr_free_enumeration(ipaddrs, nip);

	if (rc < 0) {
		LCONSOLE_ERROR_MSG(0x119, "Error %d parsing ip2nets\n", rc);
		return rc;
	}

	if (rc == 0) {
		/* matched nothing at all */
		LCONSOLE_ERROR_MSG(0x11a,
				   "ip2nets does not match any local IP interfaces\n");
		return -ENOENT;
	}

	return 0;
}
/*
 * Fill in the address part of @ni's NID with the IPv4 address of either
 * the interface explicitly configured in ni_interfaces[0] or, failing
 * that, the first non-loopback interface that is up.
 *
 * Returns 0 on success; -EPERM if more than one interface was configured
 * or the configured one cannot be queried; -ENETDOWN if the configured
 * interface is down; -ENOENT if no usable interface can be found.
 */
int
lnet_set_ip_niaddr(lnet_ni_t *ni)
{
	__u32 net = LNET_NIDNET(ni->ni_nid);
	char **names;
	int n;
	__u32 ip;
	__u32 netmask;
	int up;
	int i;
	int rc;

	/* Convenience for LNDs that use the IP address of a local interface as
	 * the local address part of their NID */
	if (ni->ni_interfaces[0] != NULL) {

		CLASSERT(LNET_MAX_INTERFACES > 1);

		/* only a single interface is supported here */
		if (ni->ni_interfaces[1] != NULL) {
			CERROR("Net %s doesn't support multiple interfaces\n",
			       libcfs_net2str(net));
			return -EPERM;
		}

		rc = libcfs_ipif_query(ni->ni_interfaces[0],
				       &up, &ip, &netmask);
		if (rc != 0) {
			CERROR("Net %s can't query interface %s: %d\n",
			       libcfs_net2str(net), ni->ni_interfaces[0], rc);
			return -EPERM;
		}

		if (!up) {
			CERROR("Net %s can't use interface %s: it's down\n",
			       libcfs_net2str(net), ni->ni_interfaces[0]);
			return -ENETDOWN;
		}

		ni->ni_nid = LNET_MKNID(net, ip);
		return 0;
	}

	/* no interface configured: scan all local interfaces */
	n = libcfs_ipif_enumerate(&names);
	if (n <= 0) {
		CERROR("Net %s can't enumerate interfaces: %d\n",
		       libcfs_net2str(net), n);
		return 0;
	}

	for (i = 0; i < n; i++) {
		if (!strcmp(names[i], "lo")) /* skip the loopback IF */
			continue;

		rc = libcfs_ipif_query(names[i], &up, &ip, &netmask);

		if (rc != 0) {
			CWARN("Net %s can't query interface %s: %d\n",
			      libcfs_net2str(net), names[i], rc);
			continue;
		}

		if (!up) {
			CWARN("Net %s ignoring interface %s (down)\n",
			      libcfs_net2str(net), names[i]);
			continue;
		}

		/* first usable interface wins */
		libcfs_ipif_free_enumeration(names, n);
		ni->ni_nid = LNET_MKNID(net, ip);
		return 0;
	}

	CERROR("Net %s can't find any interfaces\n", libcfs_net2str(net));
	libcfs_ipif_free_enumeration(names, n);
	return -ENOENT;
}
EXPORT_SYMBOL(lnet_set_ip_niaddr);
| gpl-2.0 |
MinimalOS-AOSP/kernel_huawei_angler | drivers/pinctrl/pinctrl-coh901.c | 2073 | 23572 | /*
* U300 GPIO module.
*
* Copyright (C) 2007-2012 ST-Ericsson AB
* License terms: GNU General Public License (GPL) version 2
* COH 901 571/3 - Used in DB3210 (U365 2.0) and DB3350 (U335 1.0)
* Author: Linus Walleij <linus.walleij@linaro.org>
* Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_data/pinctrl-coh901.h>
#include "pinctrl-coh901.h"
#define U300_GPIO_PORT_STRIDE (0x30)
/*
* Control Register 32bit (R/W)
* bit 15-9 (mask 0x0000FE00) contains the number of cores. 8*cores
* gives the number of GPIO pins.
* bit 8-2 (mask 0x000001FC) contains the core version ID.
*/
#define U300_GPIO_CR (0x00)
#define U300_GPIO_CR_SYNC_SEL_ENABLE (0x00000002UL)
#define U300_GPIO_CR_BLOCK_CLKRQ_ENABLE (0x00000001UL)
#define U300_GPIO_PXPDIR (0x04)
#define U300_GPIO_PXPDOR (0x08)
#define U300_GPIO_PXPCR (0x0C)
#define U300_GPIO_PXPCR_ALL_PINS_MODE_MASK (0x0000FFFFUL)
#define U300_GPIO_PXPCR_PIN_MODE_MASK (0x00000003UL)
#define U300_GPIO_PXPCR_PIN_MODE_SHIFT (0x00000002UL)
#define U300_GPIO_PXPCR_PIN_MODE_INPUT (0x00000000UL)
#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL (0x00000001UL)
#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_DRAIN (0x00000002UL)
#define U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_SOURCE (0x00000003UL)
#define U300_GPIO_PXPER (0x10)
#define U300_GPIO_PXPER_ALL_PULL_UP_DISABLE_MASK (0x000000FFUL)
#define U300_GPIO_PXPER_PULL_UP_DISABLE (0x00000001UL)
#define U300_GPIO_PXIEV (0x14)
#define U300_GPIO_PXIEN (0x18)
#define U300_GPIO_PXIFR (0x1C)
#define U300_GPIO_PXICR (0x20)
#define U300_GPIO_PXICR_ALL_IRQ_CONFIG_MASK (0x000000FFUL)
#define U300_GPIO_PXICR_IRQ_CONFIG_MASK (0x00000001UL)
#define U300_GPIO_PXICR_IRQ_CONFIG_FALLING_EDGE (0x00000000UL)
#define U300_GPIO_PXICR_IRQ_CONFIG_RISING_EDGE (0x00000001UL)
/* 8 bits per port, no version has more than 7 ports */
#define U300_GPIO_PINS_PER_PORT 8
#define U300_GPIO_MAX (U300_GPIO_PINS_PER_PORT * 7)
/* State for one COH 901 571/3 GPIO block instance */
struct u300_gpio {
	struct gpio_chip chip;		/* embedded gpiolib chip */
	struct list_head port_list;	/* list of u300_gpio_port */
	struct clk *clk;		/* block clock */
	void __iomem *base;		/* register base */
	struct device *dev;
	u32 stride;			/* per-port register stride */
	/* Register offsets */
	u32 pcr;			/* pin control (mode) */
	u32 dor;			/* data output */
	u32 dir;			/* data input */
	u32 per;			/* pull-up enable */
	u32 icr;			/* IRQ edge configuration */
	u32 ien;			/* IRQ enable */
	u32 iev;			/* IRQ event */
};

/* One 8-pin port of the GPIO block with its own chained IRQ */
struct u300_gpio_port {
	struct list_head node;		/* entry in u300_gpio.port_list */
	struct u300_gpio *gpio;		/* parent block */
	char name[8];			/* "gpio%d" */
	struct irq_domain *domain;	/* per-port linear IRQ domain */
	int irq;			/* upstream (chained) IRQ */
	int number;			/* port index */
	u8 toggle_edge_mode;		/* pins emulating both-edge IRQs */
};
/*
* Macro to expand to read a specific register found in the "gpio"
* struct. It requires the struct u300_gpio *gpio variable to exist in
* its context. It calculates the port offset from the given pin
* offset, muliplies by the port stride and adds the register offset
* so it provides a pointer to the desired register.
*/
#define U300_PIN_REG(pin, reg) \
(gpio->base + (pin >> 3) * gpio->stride + gpio->reg)
/*
* Provides a bitmask for a specific gpio pin inside an 8-bit GPIO
* register.
*/
#define U300_PIN_BIT(pin) \
(1 << (pin & 0x07))
struct u300_gpio_confdata {
u16 bias_mode;
bool output;
int outval;
};
/* BS335 has seven ports of 8 bits each = GPIO pins 0..55 */
#define BS335_GPIO_NUM_PORTS 7
#define U300_FLOATING_INPUT { \
.bias_mode = PIN_CONFIG_BIAS_HIGH_IMPEDANCE, \
.output = false, \
}
#define U300_PULL_UP_INPUT { \
.bias_mode = PIN_CONFIG_BIAS_PULL_UP, \
.output = false, \
}
#define U300_OUTPUT_LOW { \
.output = true, \
.outval = 0, \
}
#define U300_OUTPUT_HIGH { \
.output = true, \
.outval = 1, \
}
/* Initial configuration */
static const struct __initconst u300_gpio_confdata
bs335_gpio_config[BS335_GPIO_NUM_PORTS][U300_GPIO_PINS_PER_PORT] = {
/* Port 0, pins 0-7 */
{
U300_FLOATING_INPUT,
U300_OUTPUT_HIGH,
U300_FLOATING_INPUT,
U300_OUTPUT_LOW,
U300_OUTPUT_LOW,
U300_OUTPUT_LOW,
U300_OUTPUT_LOW,
U300_OUTPUT_LOW,
},
/* Port 1, pins 0-7 */
{
U300_OUTPUT_LOW,
U300_OUTPUT_LOW,
U300_OUTPUT_LOW,
U300_PULL_UP_INPUT,
U300_FLOATING_INPUT,
U300_OUTPUT_HIGH,
U300_OUTPUT_LOW,
U300_OUTPUT_LOW,
},
/* Port 2, pins 0-7 */
{
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_OUTPUT_LOW,
U300_PULL_UP_INPUT,
U300_OUTPUT_LOW,
U300_PULL_UP_INPUT,
},
/* Port 3, pins 0-7 */
{
U300_PULL_UP_INPUT,
U300_OUTPUT_LOW,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
},
/* Port 4, pins 0-7 */
{
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
},
/* Port 5, pins 0-7 */
{
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
},
/* Port 6, pind 0-7 */
{
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
U300_FLOATING_INPUT,
}
};
/**
 * to_u300_gpio() - get the pointer to u300_gpio
 * @chip: the gpio chip member of the structure u300_gpio
 *
 * The gpio_chip is embedded in struct u300_gpio, so container_of()
 * recovers the enclosing driver state.
 */
static inline struct u300_gpio *to_u300_gpio(struct gpio_chip *chip)
{
	return container_of(chip, struct u300_gpio, chip);
}
/* gpio_chip .request hook: hand the pin over to the pin controller */
static int u300_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	/*
	 * Map back to global GPIO space and request muxing, the direction
	 * parameter does not matter for this controller.
	 */
	int gpio = chip->base + offset;

	return pinctrl_request_gpio(gpio);
}
/* gpio_chip .free hook: release the pin back to the pin controller */
static void u300_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	int gpio = chip->base + offset;

	pinctrl_free_gpio(gpio);
}
/*
 * gpio_chip .get hook: read the input register bit for this pin.
 * Returns zero when low, non-zero (the pin's bitmask) when high.
 */
static int u300_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct u300_gpio *gpio = to_u300_gpio(chip);

	return readl(U300_PIN_REG(offset, dir)) & U300_PIN_BIT(offset);
}
/*
 * gpio_chip .set hook: read-modify-write the output register bit.
 * IRQs are disabled around the RMW to keep it atomic on this UP SoC.
 */
static void u300_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct u300_gpio *gpio = to_u300_gpio(chip);
	unsigned long flags;
	u32 val;

	local_irq_save(flags);

	val = readl(U300_PIN_REG(offset, dor));
	if (value)
		writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, dor));
	else
		writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, dor));

	local_irq_restore(flags);
}
/*
 * gpio_chip .direction_input hook: clear the pin's 2-bit mode field in
 * the pin control register (mode 0 == input).
 */
static int u300_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	struct u300_gpio *gpio = to_u300_gpio(chip);
	unsigned long flags;
	u32 val;

	local_irq_save(flags);

	val = readl(U300_PIN_REG(offset, pcr));
	/* Mask out this pin, note 2 bits per setting */
	val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK << ((offset & 0x07) << 1));
	writel(val, U300_PIN_REG(offset, pcr));

	local_irq_restore(flags);
	return 0;
}
/*
 * gpio_chip .direction_output hook: switch the pin to an output mode
 * (push/pull by default, but only if no output drive mode was already
 * configured) and drive the requested value.
 */
static int u300_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
				      int value)
{
	struct u300_gpio *gpio = to_u300_gpio(chip);
	unsigned long flags;
	u32 oldmode;
	u32 val;

	local_irq_save(flags);

	val = readl(U300_PIN_REG(offset, pcr));
	/*
	 * Drive mode must be set by the special mode set function, set
	 * push/pull mode by default if no mode has been selected.
	 */
	oldmode = val & (U300_GPIO_PXPCR_PIN_MODE_MASK <<
			 ((offset & 0x07) << 1));
	/* mode = 0 means input, else some mode is already set */
	if (oldmode == 0) {
		val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK <<
			 ((offset & 0x07) << 1));
		val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
			<< ((offset & 0x07) << 1));
		writel(val, U300_PIN_REG(offset, pcr));
	}
	u300_gpio_set(chip, offset, value);

	local_irq_restore(flags);
	return 0;
}
static int u300_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct u300_gpio *gpio = to_u300_gpio(chip);
int portno = offset >> 3;
struct u300_gpio_port *port = NULL;
struct list_head *p;
int retirq;
bool found = false;
list_for_each(p, &gpio->port_list) {
port = list_entry(p, struct u300_gpio_port, node);
if (port->number == portno) {
found = true;
break;
}
}
if (!found) {
dev_err(gpio->dev, "could not locate port for GPIO %d IRQ\n",
offset);
return -EINVAL;
}
/*
* The local hwirqs on the port are the lower three bits, there
* are exactly 8 IRQs per port since they are 8-bit
*/
retirq = irq_find_mapping(port->domain, (offset & 0x7));
dev_dbg(gpio->dev, "request IRQ for GPIO %d, return %d from port %d\n",
offset, retirq, port->number);
return retirq;
}
/*
 * Read back the pin configuration for the generic pinconf parameter in
 * *@config.  Returning -EINVAL means "supported but not active for this
 * pin"; -ENOTSUPP means the parameter is not handled at all.
 */
int u300_gpio_config_get(struct gpio_chip *chip,
			 unsigned offset,
			 unsigned long *config)
{
	struct u300_gpio *gpio = to_u300_gpio(chip);
	enum pin_config_param param = (enum pin_config_param) *config;
	bool biasmode;
	u32 drmode;

	/* One bit per pin, clamp to bool range */
	biasmode = !!(readl(U300_PIN_REG(offset, per)) & U300_PIN_BIT(offset));

	/* Mask out the two bits for this pin and shift to bits 0,1 */
	drmode = readl(U300_PIN_REG(offset, pcr));
	drmode &= (U300_GPIO_PXPCR_PIN_MODE_MASK << ((offset & 0x07) << 1));
	drmode >>= ((offset & 0x07) << 1);

	switch (param) {
	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
		/* bias bit set == pull-up disabled == high impedance */
		*config = 0;
		if (biasmode)
			return 0;
		else
			return -EINVAL;
		break;
	case PIN_CONFIG_BIAS_PULL_UP:
		*config = 0;
		if (!biasmode)
			return 0;
		else
			return -EINVAL;
		break;
	case PIN_CONFIG_DRIVE_PUSH_PULL:
		*config = 0;
		if (drmode == U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL)
			return 0;
		else
			return -EINVAL;
		break;
	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
		*config = 0;
		if (drmode == U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_DRAIN)
			return 0;
		else
			return -EINVAL;
		break;
	case PIN_CONFIG_DRIVE_OPEN_SOURCE:
		*config = 0;
		if (drmode == U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_SOURCE)
			return 0;
		else
			return -EINVAL;
		break;
	default:
		break;
	}
	return -ENOTSUPP;
}
/*
 * Apply a generic pinconf parameter to one pin: bias (pull-up on/off,
 * one bit per pin in PER) or output drive mode (2 bits per pin in PCR).
 * Returns 0 on success, -EINVAL for unsupported parameters.  IRQs are
 * disabled around the read-modify-write sequences.
 */
int u300_gpio_config_set(struct gpio_chip *chip, unsigned offset,
			 enum pin_config_param param)
{
	struct u300_gpio *gpio = to_u300_gpio(chip);
	unsigned long flags;
	u32 val;

	local_irq_save(flags);
	switch (param) {
	case PIN_CONFIG_BIAS_DISABLE:
	case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
		/* setting the bit disables the pull-up */
		val = readl(U300_PIN_REG(offset, per));
		writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, per));
		break;
	case PIN_CONFIG_BIAS_PULL_UP:
		val = readl(U300_PIN_REG(offset, per));
		writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, per));
		break;
	case PIN_CONFIG_DRIVE_PUSH_PULL:
		val = readl(U300_PIN_REG(offset, pcr));
		val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
			 << ((offset & 0x07) << 1));
		val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_PUSH_PULL
			<< ((offset & 0x07) << 1));
		writel(val, U300_PIN_REG(offset, pcr));
		break;
	case PIN_CONFIG_DRIVE_OPEN_DRAIN:
		val = readl(U300_PIN_REG(offset, pcr));
		val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
			 << ((offset & 0x07) << 1));
		val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_DRAIN
			<< ((offset & 0x07) << 1));
		writel(val, U300_PIN_REG(offset, pcr));
		break;
	case PIN_CONFIG_DRIVE_OPEN_SOURCE:
		val = readl(U300_PIN_REG(offset, pcr));
		val &= ~(U300_GPIO_PXPCR_PIN_MODE_MASK
			 << ((offset & 0x07) << 1));
		val |= (U300_GPIO_PXPCR_PIN_MODE_OUTPUT_OPEN_SOURCE
			<< ((offset & 0x07) << 1));
		writel(val, U300_PIN_REG(offset, pcr));
		break;
	default:
		local_irq_restore(flags);
		dev_err(gpio->dev, "illegal configuration requested\n");
		return -EINVAL;
	}
	local_irq_restore(flags);
	return 0;
}
/* Template gpio_chip; copied into each u300_gpio instance at probe */
static struct gpio_chip u300_gpio_chip = {
	.label			= "u300-gpio-chip",
	.owner			= THIS_MODULE,
	.request		= u300_gpio_request,
	.free			= u300_gpio_free,
	.get			= u300_gpio_get,
	.set			= u300_gpio_set,
	.direction_input	= u300_gpio_direction_input,
	.direction_output	= u300_gpio_direction_output,
	.to_irq			= u300_gpio_to_irq,
};
/*
 * Emulate both-edge triggering: the hardware can only trigger on one
 * edge at a time, so arm the ICR for the edge opposite to the pin's
 * current level.
 */
static void u300_toggle_trigger(struct u300_gpio *gpio, unsigned offset)
{
	u32 val;

	val = readl(U300_PIN_REG(offset, icr));
	/* Set mode depending on state */
	if (u300_gpio_get(&gpio->chip, offset)) {
		/* High now, let's trigger on falling edge next then */
		writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
		dev_dbg(gpio->dev, "next IRQ on falling edge on pin %d\n",
			offset);
	} else {
		/* Low now, let's trigger on rising edge next then */
		writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
		dev_dbg(gpio->dev, "next IRQ on rising edge on pin %d\n",
			offset);
	}
}
/*
 * irq_chip .irq_set_type hook: program rising/falling edge triggering,
 * or mark the pin for software both-edge emulation (see
 * u300_toggle_trigger()).
 *
 * NOTE(review): trigger types other than rising/falling (e.g. level)
 * fall through and return 0 without configuring anything — presumably
 * intentional for this edge-only hardware, but worth confirming.
 */
static int u300_gpio_irq_type(struct irq_data *d, unsigned trigger)
{
	struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
	struct u300_gpio *gpio = port->gpio;
	int offset = (port->number << 3) + d->hwirq;
	u32 val;

	if ((trigger & IRQF_TRIGGER_RISING) &&
	    (trigger & IRQF_TRIGGER_FALLING)) {
		/*
		 * The GPIO block can only trigger on falling OR rising edges,
		 * not both. So we need to toggle the mode whenever the pin
		 * goes from one state to the other with a special state flag
		 */
		dev_dbg(gpio->dev,
			"trigger on both rising and falling edge on pin %d\n",
			offset);
		port->toggle_edge_mode |= U300_PIN_BIT(offset);
		u300_toggle_trigger(gpio, offset);
	} else if (trigger & IRQF_TRIGGER_RISING) {
		dev_dbg(gpio->dev, "trigger on rising edge on pin %d\n",
			offset);
		val = readl(U300_PIN_REG(offset, icr));
		writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
		port->toggle_edge_mode &= ~U300_PIN_BIT(offset);
	} else if (trigger & IRQF_TRIGGER_FALLING) {
		dev_dbg(gpio->dev, "trigger on falling edge on pin %d\n",
			offset);
		val = readl(U300_PIN_REG(offset, icr));
		writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, icr));
		port->toggle_edge_mode &= ~U300_PIN_BIT(offset);
	}

	return 0;
}
/* irq_chip .irq_enable hook: set the pin's bit in the IRQ enable reg */
static void u300_gpio_irq_enable(struct irq_data *d)
{
	struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
	struct u300_gpio *gpio = port->gpio;
	int offset = (port->number << 3) + d->hwirq;
	u32 val;
	unsigned long flags;

	dev_dbg(gpio->dev, "enable IRQ for hwirq %lu on port %s, offset %d\n",
		 d->hwirq, port->name, offset);
	local_irq_save(flags);
	val = readl(U300_PIN_REG(offset, ien));
	writel(val | U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
	local_irq_restore(flags);
}
/* irq_chip .irq_disable hook: clear the pin's bit in the IRQ enable reg */
static void u300_gpio_irq_disable(struct irq_data *d)
{
	struct u300_gpio_port *port = irq_data_get_irq_chip_data(d);
	struct u300_gpio *gpio = port->gpio;
	int offset = (port->number << 3) + d->hwirq;
	u32 val;
	unsigned long flags;

	local_irq_save(flags);
	val = readl(U300_PIN_REG(offset, ien));
	writel(val & ~U300_PIN_BIT(offset), U300_PIN_REG(offset, ien));
	local_irq_restore(flags);
}
/* Per-pin irq_chip; ack/mask of the parent IRQ is done in the chained
 * handler, so only enable/disable/set_type are needed here */
static struct irq_chip u300_gpio_irqchip = {
	.name			= "u300-gpio-irqchip",
	.irq_enable		= u300_gpio_irq_enable,
	.irq_disable		= u300_gpio_irq_disable,
	.irq_set_type		= u300_gpio_irq_type,
};
/*
 * Chained handler for one port's upstream IRQ: ack the parent, read and
 * clear the port's 8-bit event register, then dispatch each set bit to
 * its mapped per-pin IRQ.  Pins in both-edge emulation mode get their
 * trigger edge flipped after each event.
 */
static void u300_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct u300_gpio_port *port = irq_get_handler_data(irq);
	struct u300_gpio *gpio = port->gpio;
	int pinoffset = port->number << 3; /* get the right stride */
	unsigned long val;

	desc->irq_data.chip->irq_ack(&desc->irq_data);
	/* Read event register */
	val = readl(U300_PIN_REG(pinoffset, iev));
	/* Mask relevant bits */
	val &= 0xFFU; /* 8 bits per port */
	/* ACK IRQ (clear event) */
	writel(val, U300_PIN_REG(pinoffset, iev));

	/* Call IRQ handler */
	if (val != 0) {
		int irqoffset;

		for_each_set_bit(irqoffset, &val, U300_GPIO_PINS_PER_PORT) {
			int pin_irq = irq_find_mapping(port->domain, irqoffset);
			int offset = pinoffset + irqoffset;

			dev_dbg(gpio->dev, "GPIO IRQ %d on pin %d\n",
				pin_irq, offset);
			generic_handle_irq(pin_irq);
			/*
			 * Triggering IRQ on both rising and falling edge
			 * needs mockery
			 */
			if (port->toggle_edge_mode & U300_PIN_BIT(offset))
				u300_toggle_trigger(gpio, offset);
		}
	}

	desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
/*
 * Apply one pin's board-default configuration (direction, drive mode or
 * bias, and initial output value) from a u300_gpio_confdata entry.
 */
static void __init u300_gpio_init_pin(struct u300_gpio *gpio,
				      int offset,
				      const struct u300_gpio_confdata *conf)
{
	/* Set mode: input or output */
	if (conf->output) {
		u300_gpio_direction_output(&gpio->chip, offset, conf->outval);

		/* Deactivate bias mode for output */
		u300_gpio_config_set(&gpio->chip, offset,
				     PIN_CONFIG_BIAS_HIGH_IMPEDANCE);

		/* Set drive mode for output */
		u300_gpio_config_set(&gpio->chip, offset,
				     PIN_CONFIG_DRIVE_PUSH_PULL);

		dev_dbg(gpio->dev, "set up pin %d as output, value: %d\n",
			offset, conf->outval);
	} else {
		u300_gpio_direction_input(&gpio->chip, offset);

		/* Always set output low on input pins */
		u300_gpio_set(&gpio->chip, offset, 0);

		/* Set bias mode for input */
		u300_gpio_config_set(&gpio->chip, offset, conf->bias_mode);

		dev_dbg(gpio->dev, "set up pin %d as input, bias: %04x\n",
			offset, conf->bias_mode);
	}
}
/*
 * Walk every port/pin of the COH 901 571/3 block and apply the
 * board-default configuration table to each pin.
 */
static void __init u300_gpio_init_coh901571(struct u300_gpio *gpio,
					    struct u300_gpio_platform *plat)
{
	int port, pin;

	/* Write default config and values to all pins */
	for (port = 0; port < plat->ports; port++)
		for (pin = 0; pin < U300_GPIO_PINS_PER_PORT; pin++)
			u300_gpio_init_pin(gpio,
					   port * U300_GPIO_PINS_PER_PORT + pin,
					   &bs335_gpio_config[port][pin]);
}
/*
 * Tear down all ports: unlink each from the port list, dispose of its
 * IRQ domain (if created) and free it.  Used on probe failure and on
 * driver removal.
 */
static inline void u300_gpio_free_ports(struct u300_gpio *gpio)
{
	struct u300_gpio_port *port;
	struct list_head *p, *n;

	list_for_each_safe(p, n, &gpio->port_list) {
		port = list_entry(p, struct u300_gpio_port, node);
		list_del(&port->node);
		if (port->domain)
			irq_domain_remove(port->domain);
		kfree(port);
	}
}
/*
 * Here we map a GPIO in the local gpio_chip pin space to a pin in
 * the local pinctrl pin space. The pin controller used is
 * pinctrl-u300.
 */
struct coh901_pinpair {
	unsigned int offset;	/* GPIO offset in this chip */
	unsigned int pin_base;	/* corresponding pinctrl-u300 pin number */
};

#define COH901_PINRANGE(a, b) { .offset = a, .pin_base = b }

/* Each entry maps a single GPIO to a single pinctrl pin (range of 1) */
static struct coh901_pinpair coh901_pintable[] = {
	COH901_PINRANGE(10, 426),
	COH901_PINRANGE(11, 180),
	COH901_PINRANGE(12, 165), /* MS/MMC card insertion */
	COH901_PINRANGE(13, 179),
	COH901_PINRANGE(14, 178),
	COH901_PINRANGE(16, 194),
	COH901_PINRANGE(17, 193),
	COH901_PINRANGE(18, 192),
	COH901_PINRANGE(19, 191),
	COH901_PINRANGE(20, 186),
	COH901_PINRANGE(21, 185),
	COH901_PINRANGE(22, 184),
	COH901_PINRANGE(23, 183),
	COH901_PINRANGE(24, 182),
	COH901_PINRANGE(25, 181),
};
/*
 * Probe: map registers, enable the clock, apply board defaults, create
 * one IRQ domain + chained handler per 8-pin port, register the
 * gpio_chip and finally attach the pinctrl GPIO ranges.  Teardown on
 * failure is via the cascaded error labels at the bottom.
 */
static int __init u300_gpio_probe(struct platform_device *pdev)
{
	struct u300_gpio_platform *plat = dev_get_platdata(&pdev->dev);
	struct u300_gpio *gpio;
	struct resource *memres;
	int err = 0;
	int portno;
	u32 val;
	u32 ifr;
	int i;

	gpio = devm_kzalloc(&pdev->dev, sizeof(struct u300_gpio), GFP_KERNEL);
	if (gpio == NULL)
		return -ENOMEM;

	/* instantiate the template chip for this device */
	gpio->chip = u300_gpio_chip;
	gpio->chip.ngpio = plat->ports * U300_GPIO_PINS_PER_PORT;
	gpio->chip.dev = &pdev->dev;
	gpio->chip.base = plat->gpio_base;
	gpio->dev = &pdev->dev;

	memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	gpio->base = devm_ioremap_resource(&pdev->dev, memres);
	if (IS_ERR(gpio->base))
		return PTR_ERR(gpio->base);

	gpio->clk = devm_clk_get(gpio->dev, NULL);
	if (IS_ERR(gpio->clk)) {
		err = PTR_ERR(gpio->clk);
		dev_err(gpio->dev, "could not get GPIO clock\n");
		return err;
	}

	err = clk_prepare_enable(gpio->clk);
	if (err) {
		dev_err(gpio->dev, "could not enable GPIO clock\n");
		return err;
	}

	dev_info(gpio->dev,
		 "initializing GPIO Controller COH 901 571/3\n");

	/* register layout for the COH 901 571/3 variant */
	gpio->stride = U300_GPIO_PORT_STRIDE;
	gpio->pcr = U300_GPIO_PXPCR;
	gpio->dor = U300_GPIO_PXPDOR;
	gpio->dir = U300_GPIO_PXPDIR;
	gpio->per = U300_GPIO_PXPER;
	gpio->icr = U300_GPIO_PXICR;
	gpio->ien = U300_GPIO_PXIEN;
	gpio->iev = U300_GPIO_PXIEV;
	ifr = U300_GPIO_PXIFR;

	/* the CR register encodes core version and pin count */
	val = readl(gpio->base + U300_GPIO_CR);
	dev_info(gpio->dev, "COH901571/3 block version: %d, " \
		 "number of cores: %d totalling %d pins\n",
		 ((val & 0x000001FC) >> 2),
		 ((val & 0x0000FE00) >> 9),
		 ((val & 0x0000FE00) >> 9) * 8);
	writel(U300_GPIO_CR_BLOCK_CLKRQ_ENABLE,
	       gpio->base + U300_GPIO_CR);
	u300_gpio_init_coh901571(gpio, plat);

	/* Add each port with its IRQ separately */
	INIT_LIST_HEAD(&gpio->port_list);
	for (portno = 0 ; portno < plat->ports; portno++) {
		struct u300_gpio_port *port =
			kmalloc(sizeof(struct u300_gpio_port), GFP_KERNEL);

		if (!port) {
			dev_err(gpio->dev, "out of memory\n");
			err = -ENOMEM;
			goto err_no_port;
		}

		snprintf(port->name, 8, "gpio%d", portno);
		port->number = portno;
		port->gpio = gpio;

		port->irq = platform_get_irq_byname(pdev,
						    port->name);

		dev_dbg(gpio->dev, "register IRQ %d for port %s\n", port->irq,
			port->name);

		port->domain = irq_domain_add_linear(pdev->dev.of_node,
						     U300_GPIO_PINS_PER_PORT,
						     &irq_domain_simple_ops,
						     port);
		if (!port->domain) {
			err = -ENOMEM;
			goto err_no_domain;
		}

		irq_set_chained_handler(port->irq, u300_gpio_irq_handler);
		irq_set_handler_data(port->irq, port);

		/* For each GPIO pin set the unique IRQ handler */
		for (i = 0; i < U300_GPIO_PINS_PER_PORT; i++) {
			int irqno = irq_create_mapping(port->domain, i);

			dev_dbg(gpio->dev, "GPIO%d on port %s gets IRQ %d\n",
				gpio->chip.base + (port->number << 3) + i,
				port->name, irqno);
			irq_set_chip_and_handler(irqno, &u300_gpio_irqchip,
						 handle_simple_irq);
			set_irq_flags(irqno, IRQF_VALID);
			irq_set_chip_data(irqno, port);
		}

		/* Turns off irq force (test register) for this port */
		writel(0x0, gpio->base + portno * gpio->stride + ifr);

		list_add_tail(&port->node, &gpio->port_list);
	}
	dev_dbg(gpio->dev, "initialized %d GPIO ports\n", portno);

	err = gpiochip_add(&gpio->chip);
	if (err) {
		dev_err(gpio->dev, "unable to add gpiochip: %d\n", err);
		goto err_no_chip;
	}

	/*
	 * Add pinctrl pin ranges, the pin controller must be registered
	 * at this point
	 */
	for (i = 0; i < ARRAY_SIZE(coh901_pintable); i++) {
		struct coh901_pinpair *p = &coh901_pintable[i];

		err = gpiochip_add_pin_range(&gpio->chip, "pinctrl-u300",
					     p->offset, p->pin_base, 1);
		if (err)
			goto err_no_range;
	}

	platform_set_drvdata(pdev, gpio);

	return 0;

err_no_range:
	if (gpiochip_remove(&gpio->chip))
		dev_err(&pdev->dev, "failed to remove gpio chip\n");
err_no_chip:
err_no_domain:
err_no_port:
	u300_gpio_free_ports(gpio);
	clk_disable_unprepare(gpio->clk);
	dev_err(&pdev->dev, "module ERROR:%d\n", err);
	return err;
}
/*
 * Driver removal: disable the block, unregister the gpio_chip, free all
 * ports and their IRQ domains, and stop the clock.
 */
static int __exit u300_gpio_remove(struct platform_device *pdev)
{
	struct u300_gpio *gpio = platform_get_drvdata(pdev);
	int err;

	/* Turn off the GPIO block */
	writel(0x00000000U, gpio->base + U300_GPIO_CR);

	err = gpiochip_remove(&gpio->chip);
	if (err < 0) {
		dev_err(gpio->dev, "unable to remove gpiochip: %d\n", err);
		return err;
	}
	u300_gpio_free_ports(gpio);
	clk_disable_unprepare(gpio->clk);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
/* Probe is registered via platform_driver_probe() below, so only
 * .remove appears here */
static struct platform_driver u300_gpio_driver = {
	.driver = {
		.name = "u300-gpio",
	},
	.remove = __exit_p(u300_gpio_remove),
};

/* Register the driver and bind any already-present device */
static int __init u300_gpio_init(void)
{
	return platform_driver_probe(&u300_gpio_driver, u300_gpio_probe);
}

static void __exit u300_gpio_exit(void)
{
	platform_driver_unregister(&u300_gpio_driver);
}
arch_initcall(u300_gpio_init);
module_exit(u300_gpio_exit);
MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("ST-Ericsson AB COH 901 335/COH 901 571/3 GPIO driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
os2sd/android_kernel_lge_msm7x27-3.0.x | arch/mips/netlogic/xlr/setup.c | 2585 | 5248 | /*
* Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
* reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the NetLogic
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/serial_8250.h>
#include <linux/pm.h>
#include <asm/reboot.h>
#include <asm/time.h>
#include <asm/bootinfo.h>
#include <asm/smp-ops.h>
#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/psb-bootinfo.h>
#include <asm/netlogic/xlr/xlr.h>
#include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h>
#include <asm/netlogic/xlr/gpio.h>
unsigned long netlogic_io_base = (unsigned long)(DEFAULT_NETLOGIC_IO_BASE);
unsigned long nlm_common_ebase = 0x0;
struct psb_info nlm_prom_info;
/*
 * Register UART 0 of the XLR as an early (boot) console via the 8250
 * core, using the PIC clock rate and memory-mapped 32-bit register
 * access through the SoC-specific in/out accessors.
 */
static void nlm_early_serial_setup(void)
{
	struct uart_port s;
	nlm_reg_t *uart_base;

	uart_base = netlogic_io_mmio(NETLOGIC_IO_UART_0_OFFSET);
	memset(&s, 0, sizeof(s));
	s.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
	s.iotype = UPIO_MEM32;
	s.regshift = 2;		/* 32-bit register spacing */
	s.irq = PIC_UART_0_IRQ;
	s.uartclk = PIC_CLKS_PER_SEC;
	s.serial_in = nlm_xlr_uart_in;
	s.serial_out = nlm_xlr_uart_out;
	s.mapbase = (unsigned long)uart_base;
	s.membase = (unsigned char __iomem *)uart_base;
	early_serial_setup(&s);
}
/*
 * Common restart/halt/power-off handler: request a chip-level software
 * reset through the GPIO block, then idle forever in case the reset
 * does not take effect immediately.
 */
static void nlm_linux_exit(void)
{
	nlm_reg_t *mmio;

	mmio = netlogic_io_mmio(NETLOGIC_IO_GPIO_OFFSET);
	/* trigger a chip reset by writing 1 to GPIO_SWRESET_REG */
	netlogic_write_reg(mmio, NETLOGIC_GPIO_SWRESET_REG, 1);
	for ( ; ; )
		cpu_wait();
}
/* Platform setup: route all shutdown paths to the chip reset helper */
void __init plat_mem_setup(void)
{
	panic_timeout	= 5;	/* auto-reboot 5s after a panic */
	_machine_restart = (void (*)(char *))nlm_linux_exit;
	_machine_halt	= nlm_linux_exit;
	pm_power_off	= nlm_linux_exit;
}
/* Board identification string shown in /proc/cpuinfo */
const char *get_system_type(void)
{
	static const char sysname[] = "Netlogic XLR/XLS Series";

	return sysname;
}
/* No firmware memory to reclaim on this platform */
void __init prom_free_prom_memory(void)
{
	/* Nothing yet */
}
/*
 * Build the kernel command line in arcs_cmdline[] from the bootloader's
 * argv (passed as an array of 32-bit pointers, NULL-terminated), then
 * append default "console=" and (with initrd) "rdinit=" options when the
 * bootloader did not provide them.  Arguments that would overflow the
 * buffer are silently dropped; overflowing on the defaults panics.
 */
static void build_arcs_cmdline(int *argv)
{
	int i, remain, len;
	char *arg;

	remain = sizeof(arcs_cmdline) - 1;	/* keep room for NUL */
	arcs_cmdline[0] = '\0';
	for (i = 0; argv[i] != 0; i++) {
		arg = (char *)(long)argv[i];
		len = strlen(arg);
		if (len + 1 > remain)	/* +1 for the separating space */
			break;
		strcat(arcs_cmdline, arg);
		strcat(arcs_cmdline, " ");
		remain -= len + 1;
	}

	/* Add the default options here */
	if ((strstr(arcs_cmdline, "console=")) == NULL) {
		arg = "console=ttyS0,38400 ";
		len = strlen(arg);
		if (len > remain)
			goto fail;
		strcat(arcs_cmdline, arg);
		remain -= len;
	}
#ifdef CONFIG_BLK_DEV_INITRD
	if ((strstr(arcs_cmdline, "rdinit=")) == NULL) {
		arg = "rdinit=/sbin/init ";
		len = strlen(arg);
		if (len > remain)
			goto fail;
		strcat(arcs_cmdline, arg);
		remain -= len;
	}
#endif
	return;
fail:
	panic("Cannot add %s, command line too big!", arg);
}
/*
 * Register the RAM regions reported by the bootloader memory map,
 * trimming each region slightly so prefetching cannot walk past its
 * end, and working around the bootloader's own region 0 size.
 */
static void prom_add_memory(void)
{
	struct nlm_boot_mem_map *bootm;
	u64 start, size;
	u64 pref_backup = 512;	/* avoid pref walking beyond end */
	int i;

	bootm = (void *)(long)nlm_prom_info.psb_mem_map;
	for (i = 0; i < bootm->nr_map; i++) {
		if (bootm->map[i].type != BOOT_MEM_RAM)
			continue;
		start = bootm->map[i].addr;
		size   = bootm->map[i].size;

		/* Work around for using bootloader mem */
		if (i == 0 && start == 0 && size == 0x0c000000)
			size = 0x0ff00000;

		add_memory_region(start, size - pref_backup, BOOT_MEM_RAM);
	}
}
/*
 * Firmware entry glue: recover the 32-bit firmware pointers from
 * fw_arg1..3, copy the PSB info block, then bring up the early console,
 * command line, exception base and memory map.
 */
void __init prom_init(void)
{
	int *argv, *envp;		/* passed as 32 bit ptrs */
	struct psb_info *prom_infop;

	/* truncate to 32 bit and sign extend all args */
	argv = (int *)(long)(int)fw_arg1;
	envp = (int *)(long)(int)fw_arg2;
	prom_infop = (struct psb_info *)(long)(int)fw_arg3;
	/* take a private copy of the firmware info block */
	nlm_prom_info = *prom_infop;
	nlm_early_serial_setup();
	build_arcs_cmdline(argv);
	/* mask off the low 4KB to get the exception base page */
	nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
	prom_add_memory();
#ifdef CONFIG_SMP
	nlm_wakeup_secondary_cpus(nlm_prom_info.online_cpu_map);
	register_smp_ops(&nlm_smp_ops);
#endif
}
| gpl-2.0 |
htc-mirror/holiday-ics-crc-3.0.16-25afef7 | fs/ext3/xattr_security.c | 2841 | 1953 | /*
* linux/fs/ext3/xattr_security.c
* Handler for storing security labels as extended attributes.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/ext3_jbd.h>
#include <linux/ext3_fs.h>
#include <linux/security.h>
#include "xattr.h"
/*
 * List callback for the security namespace: emit the prefixed attribute
 * name into @list when it fits, and always return the space required
 * (prefix + name + NUL) so callers can size their buffers.
 */
static size_t
ext3_xattr_security_list(struct dentry *dentry, char *list, size_t list_size,
			 const char *name, size_t name_len, int type)
{
	const size_t plen = XATTR_SECURITY_PREFIX_LEN;
	const size_t needed = plen + name_len + 1;	/* prefix + name + NUL */

	if (list && needed <= list_size) {
		memcpy(list, XATTR_SECURITY_PREFIX, plen);
		memcpy(list + plen, name, name_len);
		list[plen + name_len] = '\0';
	}
	return needed;
}
/*
 * Get callback for the security namespace: reject an empty attribute
 * name, then forward to the generic ext3 xattr lookup.
 */
static int
ext3_xattr_security_get(struct dentry *dentry, const char *name,
			void *buffer, size_t size, int type)
{
	if (name[0] == '\0')
		return -EINVAL;
	return ext3_xattr_get(dentry->d_inode, EXT3_XATTR_INDEX_SECURITY,
			      name, buffer, size);
}
/*
 * Set callback for the security namespace: reject an empty attribute
 * name, then forward to the generic ext3 xattr writer.
 */
static int
ext3_xattr_security_set(struct dentry *dentry, const char *name,
			const void *value, size_t size, int flags, int type)
{
	if (name[0] == '\0')
		return -EINVAL;
	return ext3_xattr_set(dentry->d_inode, EXT3_XATTR_INDEX_SECURITY,
			      name, value, size, flags);
}
/*
 * Initialise the security xattr of a newly created inode from the LSM.
 * Returns 0 when the LSM has no label for this inode (-EOPNOTSUPP is
 * not an error), otherwise the result of storing the label.
 */
int
ext3_init_security(handle_t *handle, struct inode *inode, struct inode *dir,
		   const struct qstr *qstr)
{
	void *value;
	char *name;
	size_t len;
	int err;

	err = security_inode_init_security(inode, dir, qstr,
					   &name, &value, &len);
	if (err == -EOPNOTSUPP)
		return 0;
	if (err)
		return err;
	/* name/value are allocated by the LSM; we own them from here. */
	err = ext3_xattr_set_handle(handle, inode, EXT3_XATTR_INDEX_SECURITY,
				    name, value, len, 0);
	kfree(name);
	kfree(value);
	return err;
}
/* Dispatch table entry for "security.*" extended attributes. */
const struct xattr_handler ext3_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.list	= ext3_xattr_security_list,
	.get	= ext3_xattr_security_get,
	.set	= ext3_xattr_security_set,
};
| gpl-2.0 |
Tesla-Redux-Devices/JuiceD-N6-Kernel | drivers/media/pci/bt8xx/dst_ca.c | 2841 | 21571 | /*
CA-driver for TwinHan DST Frontend/Card
Copyright (C) 2004, 2005 Manu Abraham (manu@kromtek.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/dvb/ca.h>
#include "dvbdev.h"
#include "dvb_frontend.h"
#include "dst_ca.h"
#include "dst_common.h"
/* Message levels used as the 'y' argument of dprintk(). */
#define DST_CA_ERROR 0
#define DST_CA_NOTICE 1
#define DST_CA_INFO 2
#define DST_CA_DEBUG 3
/*
 * dprintk(x, y, z, fmt, ...) -- conditional log helper.
 *   x: current verbosity (the 'verbose' module parameter)
 *   y: level of this message
 *   z: nonzero -> prefix with a kernel log level and __func__,
 *      zero    -> raw printk of the format only
 * In both branches the message is emitted only when x > y.
 */
#define dprintk(x, y, z, format, arg...) do { \
	if (z) { \
		if ((x > DST_CA_ERROR) && (x > y)) \
			printk(KERN_ERR "%s: " format "\n", __func__ , ##arg); \
		else if ((x > DST_CA_NOTICE) && (x > y)) \
			printk(KERN_NOTICE "%s: " format "\n", __func__ , ##arg); \
		else if ((x > DST_CA_INFO) && (x > y)) \
			printk(KERN_INFO "%s: " format "\n", __func__ , ##arg); \
		else if ((x > DST_CA_DEBUG) && (x > y)) \
			printk(KERN_DEBUG "%s: " format "\n", __func__ , ##arg); \
	} else { \
		if (x > y) \
			printk(format, ## arg); \
	} \
} while(0)
/* Serialises all CA device ioctl handling. */
static DEFINE_MUTEX(dst_ca_mutex);
/* Verbosity threshold for dprintk(); initialised to 5 (most verbose). */
static unsigned int verbose = 5;
module_param(verbose, int, 0644);
/* Fix: the description claimed "default is 1" but the initializer is 5. */
MODULE_PARM_DESC(verbose, "verbose startup messages, default is 5 (yes)");
/*
 * Stub: descrambler slot configuration is not implemented yet.
 * Always reports "operation not supported" to the caller.
 */
static int ca_set_slot_descr(void)
{
	/* We could make this more graceful ? */
	return -EOPNOTSUPP;
}
/*
 * Stub: PID setting is not implemented yet.
 * Always reports "operation not supported" to the caller.
 */
static int ca_set_pid(void)
{
	/* We could make this more graceful ? */
	return -EOPNOTSUPP;
}
/*
 * Serialise a 24-bit command tag (big-endian) followed by a one-byte
 * length into the first four bytes of @data.
 */
static void put_command_and_length(u8 *data, int command, int length)
{
	int i;

	for (i = 0; i < 3; i++)
		data[i] = (command >> (8 * (2 - i))) & 0xff;
	data[3] = length;
}
/*
 * Append the DST checksum byte immediately after the @len payload
 * bytes of @buf (buffer must have room for len + 1 bytes).
 */
static void put_checksum(u8 *buf, int len)
{
	dprintk(verbose, DST_CA_DEBUG, 1, " Computing string checksum.");
	dprintk(verbose, DST_CA_DEBUG, 1, " -> string length : 0x%02x", len);
	buf[len] = dst_check_sum(buf, len);
	dprintk(verbose, DST_CA_DEBUG, 1, " -> checksum : 0x%02x", buf[len]);
}
/*
 * Run one CI transaction against the DST hardware under dst_mutex:
 * write @len bytes of @data, consume the ACK byte, and -- when @read is
 * nonzero -- wait for the 8820 and pull a 128-byte reply into
 * @ca_string.  Any failure runs dst_error_recovery() where applicable
 * and returns -EIO.
 */
static int dst_ci_command(struct dst_state* state, u8 * data, u8 *ca_string, u8 len, int read)
{
	u8 reply;

	mutex_lock(&state->dst_mutex);
	dst_comm_init(state);
	msleep(65);	/* settle delay before talking to the device -- TODO confirm why 65ms */
	if (write_dst(state, data, len)) {
		dprintk(verbose, DST_CA_INFO, 1, " Write not successful, trying to recover");
		dst_error_recovery(state);
		goto error;
	}
	if ((dst_pio_disable(state)) < 0) {
		dprintk(verbose, DST_CA_ERROR, 1, " DST PIO disable failed.");
		goto error;
	}
	if (read_dst(state, &reply, GET_ACK) < 0) {
		dprintk(verbose, DST_CA_INFO, 1, " Read not successful, trying to recover");
		dst_error_recovery(state);
		goto error;
	}
	if (read) {
		if (! dst_wait_dst_ready(state, LONG_DELAY)) {
			dprintk(verbose, DST_CA_NOTICE, 1, " 8820 not ready");
			goto error;
		}
		/* fixed-size reply for now */
		if (read_dst(state, ca_string, 128) < 0) {	/* Try to make this dynamic */
			dprintk(verbose, DST_CA_INFO, 1, " Read not successful, trying to recover");
			dst_error_recovery(state);
			goto error;
		}
	}
	mutex_unlock(&state->dst_mutex);
	return 0;
error:
	mutex_unlock(&state->dst_mutex);
	return -EIO;
}
/*
 * Issue a CI command with up to RETRIES attempts, recovering the bus
 * between attempts.  Returns 0 on the first success, -1 once all
 * attempts are exhausted.
 */
static int dst_put_ci(struct dst_state *state, u8 *data, int len, u8 *ca_string, int read)
{
	u8 attempt;

	for (attempt = 0; attempt < RETRIES; attempt++) {
		dprintk(verbose, DST_CA_NOTICE, 1, " Put Command");
		if (!dst_ci_command(state, data, ca_string, len, read))
			return 0;
		/* transfer failed: recover the bus and retry */
		dst_error_recovery(state);
	}
	return -1;
}
/*
 * Query the CAM application info and rewrite the raw DST reply held in
 * state->messages, in place, into an EN50221 CA_APP_INFO object:
 * [0..3] tag + length, [4..8] type/manufacturer/code, [9] string
 * length, [10..] string.
 */
static int ca_get_app_info(struct dst_state *state)
{
	int length, str_length;
	static u8 command[8] = {0x07, 0x40, 0x01, 0x00, 0x01, 0x00, 0x00, 0xff};

	put_checksum(&command[0], command[0]);
	if ((dst_put_ci(state, command, sizeof(command), state->messages, GET_REPLY)) < 0) {
		dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !");
		return -1;
	}
	dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !");
	dprintk(verbose, DST_CA_INFO, 1, " ================================ CI Module Application Info ======================================");
	dprintk(verbose, DST_CA_INFO, 1, " Application Type=[%d], Application Vendor=[%d], Vendor Code=[%d]\n%s: Application info=[%s]",
		state->messages[7], (state->messages[8] << 8) | state->messages[9],
		(state->messages[10] << 8) | state->messages[11], __func__, (char *)(&state->messages[12]));
	dprintk(verbose, DST_CA_INFO, 1, " ==================================================================================================");
	// Transform dst message to correct application_info message
	length = state->messages[5];
	/* 6 bytes of fixed fields precede the string in the raw reply */
	str_length = length - 6;
	if (str_length < 0) {
		str_length = 0;
		dprintk(verbose, DST_CA_ERROR, 1, "Invalid string length returned in ca_get_app_info(). Recovering.");
	}
	// First, the command and length fields
	put_command_and_length(&state->messages[0], CA_APP_INFO, length);
	// Copy application_type, application_manufacturer and manufacturer_code
	memmove(&state->messages[4], &state->messages[7], 5);
	// Set string length and copy string
	state->messages[9] = str_length;
	memmove(&state->messages[10], &state->messages[12], str_length);
	return 0;
}
/*
 * Query the CAM's supported CA system ids and compact the raw DST reply
 * in state->messages, in place, into an EN50221 CA_INFO object: a
 * tag/length header followed by num_ids 16-bit system ids.
 */
static int ca_get_ca_info(struct dst_state *state)
{
	int srcPtr, dstPtr, i, num_ids;
	static u8 slot_command[8] = {0x07, 0x40, 0x00, 0x00, 0x02, 0x00, 0x00, 0xff};
	/* byte offsets in the raw reply / the rewritten message */
	const int in_system_id_pos = 8, out_system_id_pos = 4, in_num_ids_pos = 7;

	put_checksum(&slot_command[0], slot_command[0]);
	if ((dst_put_ci(state, slot_command, sizeof (slot_command), state->messages, GET_REPLY)) < 0) {
		dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !");
		return -1;
	}
	dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !");
	// Print raw data
	dprintk(verbose, DST_CA_INFO, 0, " DST data = [");
	for (i = 0; i < state->messages[0] + 1; i++) {
		dprintk(verbose, DST_CA_INFO, 0, " 0x%02x", state->messages[i]);
	}
	dprintk(verbose, DST_CA_INFO, 0, "]\n");
	// Set the command and length of the output
	num_ids = state->messages[in_num_ids_pos];
	/* clamp to keep the rewrite inside state->messages */
	if (num_ids >= 100) {
		num_ids = 100;
		dprintk(verbose, DST_CA_ERROR, 1, "Invalid number of ids (>100). Recovering.");
	}
	put_command_and_length(&state->messages[0], CA_INFO, num_ids * 2);
	dprintk(verbose, DST_CA_INFO, 0, " CA_INFO = [");
	srcPtr = in_system_id_pos;
	dstPtr = out_system_id_pos;
	for(i = 0; i < num_ids; i++) {
		dprintk(verbose, DST_CA_INFO, 0, " 0x%02x%02x", state->messages[srcPtr + 0], state->messages[srcPtr + 1]);
		// Append to output
		state->messages[dstPtr + 0] = state->messages[srcPtr + 0];
		state->messages[dstPtr + 1] = state->messages[srcPtr + 1];
		srcPtr += 2;
		dstPtr += 2;
	}
	dprintk(verbose, DST_CA_INFO, 0, "]\n");
	return 0;
}
/*
 * CA_GET_CAP handler: query slot capabilities from the hardware and
 * copy a filled struct ca_caps back to userspace.  Only the
 * descrambler count comes from the reply (byte 7); the remaining
 * fields are hard-wired to a single CI slot.
 */
static int ca_get_slot_caps(struct dst_state *state, struct ca_caps *p_ca_caps, void __user *arg)
{
	int i;
	u8 slot_cap[256];
	static u8 slot_command[8] = {0x07, 0x40, 0x02, 0x00, 0x02, 0x00, 0x00, 0xff};

	put_checksum(&slot_command[0], slot_command[0]);
	if ((dst_put_ci(state, slot_command, sizeof (slot_command), slot_cap, GET_REPLY)) < 0) {
		dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !");
		return -1;
	}
	dprintk(verbose, DST_CA_NOTICE, 1, " -->dst_put_ci SUCCESS !");
	/* Will implement the rest soon */
	dprintk(verbose, DST_CA_INFO, 1, " Slot cap = [%d]", slot_cap[7]);
	dprintk(verbose, DST_CA_INFO, 0, "===================================\n");
	for (i = 0; i < slot_cap[0] + 1; i++)
		dprintk(verbose, DST_CA_INFO, 0, " %d", slot_cap[i]);
	dprintk(verbose, DST_CA_INFO, 0, "\n");
	p_ca_caps->slot_num = 1;
	p_ca_caps->slot_type = 1;
	p_ca_caps->descr_num = slot_cap[7];
	p_ca_caps->descr_type = 1;
	if (copy_to_user(arg, p_ca_caps, sizeof (struct ca_caps)))
		return -EFAULT;
	return 0;
}
/* Need some more work: descrambler description query not implemented. */
static int ca_get_slot_descr(struct dst_state *state, struct ca_msg *p_ca_message, void __user *arg)
{
	return -EOPNOTSUPP;
}
/*
 * CA_GET_SLOT_INFO handler: query slot state from the hardware and copy
 * a filled struct ca_slot_info back to userspace.  Bits in reply
 * byte 4 encode module state: 0x80 -> present, 0x40 -> ready.
 */
static int ca_get_slot_info(struct dst_state *state, struct ca_slot_info *p_ca_slot_info, void __user *arg)
{
	int i;
	static u8 slot_command[8] = {0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff};
	u8 *slot_info = state->messages;

	/* command[0] is 0x00 here, so the payload length (7) is explicit */
	put_checksum(&slot_command[0], 7);
	if ((dst_put_ci(state, slot_command, sizeof (slot_command), slot_info, GET_REPLY)) < 0) {
		dprintk(verbose, DST_CA_ERROR, 1, " -->dst_put_ci FAILED !");
		return -1;
	}
	dprintk(verbose, DST_CA_INFO, 1, " -->dst_put_ci SUCCESS !");
	/* Will implement the rest soon */
	dprintk(verbose, DST_CA_INFO, 1, " Slot info = [%d]", slot_info[3]);
	dprintk(verbose, DST_CA_INFO, 0, "===================================\n");
	for (i = 0; i < 8; i++)
		dprintk(verbose, DST_CA_INFO, 0, " %d", slot_info[i]);
	dprintk(verbose, DST_CA_INFO, 0, "\n");
	if (slot_info[4] & 0x80) {
		p_ca_slot_info->flags = CA_CI_MODULE_PRESENT;
		p_ca_slot_info->num = 1;
		p_ca_slot_info->type = CA_CI;
	} else if (slot_info[4] & 0x40) {
		p_ca_slot_info->flags = CA_CI_MODULE_READY;
		p_ca_slot_info->num = 1;
		p_ca_slot_info->type = CA_CI;
	} else
		p_ca_slot_info->flags = 0;
	if (copy_to_user(arg, p_ca_slot_info, sizeof (struct ca_slot_info)))
		return -EFAULT;
	return 0;
}
/*
 * CA_GET_MSG handler: read the user's message, decode its 24-bit
 * EN50221 tag, and for CA_APP_INFO / CA_INFO copy the reply previously
 * staged in state->messages back to userspace.
 */
static int ca_get_message(struct dst_state *state, struct ca_msg *p_ca_message, void __user *arg)
{
	u32 tag = 0;
	u8 idx;

	if (copy_from_user(p_ca_message, arg, sizeof (struct ca_msg)))
		return -EFAULT;
	if (p_ca_message->msg) {
		dprintk(verbose, DST_CA_NOTICE, 1, " Message = [%*ph]",
			3, p_ca_message->msg);
		/* assemble the 24-bit tag, big-endian */
		for (idx = 0; idx < 3; idx++) {
			tag |= p_ca_message->msg[idx];
			if (idx < 2)
				tag <<= 8;
		}
		dprintk(verbose, DST_CA_NOTICE, 1, " Command=[0x%x]", tag);
		switch (tag) {
		case CA_APP_INFO:
		case CA_INFO:
			/* both replies are staged in state->messages */
			memcpy(p_ca_message->msg, state->messages, 128);
			if (copy_to_user(arg, p_ca_message, sizeof (struct ca_msg)))
				return -EFAULT;
			break;
		}
	}
	return 0;
}
/*
 * Build the hardware command header in @hw_buffer for a CA PMT of
 * @length payload bytes.  Session-capable hardware only needs the
 * session id copied; other boards get a full 7-byte DST header plus
 * the payload.  Returns -1 when the payload exceeds the 247-byte
 * limit of the non-session format, 0 otherwise.
 */
static int handle_dst_tag(struct dst_state *state, struct ca_msg *p_ca_message, struct ca_msg *hw_buffer, u32 length)
{
	if (state->dst_hw_cap & DST_TYPE_HAS_SESSION) {
		hw_buffer->msg[2] = p_ca_message->msg[1];	/* MSB */
		hw_buffer->msg[3] = p_ca_message->msg[2];	/* LSB */
	} else {
		if (length > 247) {
			dprintk(verbose, DST_CA_ERROR, 1, " Message too long ! *** Bailing Out *** !");
			return -1;
		}
		hw_buffer->msg[0] = (length & 0xff) + 7;	/* total = payload + header */
		hw_buffer->msg[1] = 0x40;
		hw_buffer->msg[2] = 0x03;
		hw_buffer->msg[3] = 0x00;
		hw_buffer->msg[4] = 0x03;
		hw_buffer->msg[5] = length & 0xff;
		hw_buffer->msg[6] = 0x00;
		/*
		 * Need to compute length for EN50221 section 8.3.2, for the time being
		 * assuming 8.3.2 is not applicable
		 */
		memcpy(&hw_buffer->msg[7], &p_ca_message->msg[4], length);
	}
	return 0;
}
/*
 * Push a prepared buffer to the 8820 controller.  On failure the DST
 * state machine is reset via rdc_reset_state() and -1 is returned.
 */
static int write_to_8820(struct dst_state *state, struct ca_msg *hw_buffer, u8 length, u8 reply)
{
	if ((dst_put_ci(state, hw_buffer->msg, length, hw_buffer->msg, reply)) >= 0) {
		dprintk(verbose, DST_CA_NOTICE, 1, " DST-CI Command success.");
		return 0;
	}
	dprintk(verbose, DST_CA_ERROR, 1, " DST-CI Command failed.");
	dprintk(verbose, DST_CA_NOTICE, 1, " Resetting DST.");
	rdc_reset_state(state);
	return -1;
}
/*
 * Decode an ASN.1/BER length field.  Short form (< 0x80): the byte is
 * the length.  Long form: the low seven bits give the count of length
 * bytes that follow, big-endian.
 */
static u32 asn_1_decode(u8 *asn_1_array)
{
	u32 decoded = 0;
	u8 first, n_bytes, idx;

	first = asn_1_array[0];
	dprintk(verbose, DST_CA_DEBUG, 1, " Length field=[%02x]", first);
	if (first < 0x80) {
		/* short form */
		decoded = first & 0x7f;
		dprintk(verbose, DST_CA_DEBUG, 1, " Length=[%02x]\n", decoded);
		return decoded;
	}
	/* long form */
	n_bytes = first & 0x7f;
	for (idx = 0; idx < n_bytes; idx++) {
		decoded = (decoded << 8) + asn_1_array[idx + 1];
		dprintk(verbose, DST_CA_DEBUG, 1, " Length=[%04x]", decoded);
	}
	return decoded;
}
/* Hex-dump msg[offset..length) at debug verbosity.  Always returns 0. */
static int debug_string(u8 *msg, u32 length, u32 offset)
{
	u32 pos;

	dprintk(verbose, DST_CA_DEBUG, 0, " String=[ ");
	for (pos = offset; pos < length; pos++)
		dprintk(verbose, DST_CA_DEBUG, 0, "%02x ", msg[pos]);
	dprintk(verbose, DST_CA_DEBUG, 0, "]\n");
	return 0;
}
/*
 * Send a CA PMT object to the CAM: decode the ASN.1 length, build the
 * hardware frame via handle_dst_tag(), checksum it and push it to the
 * 8820.  Returns 0 on success, -1 on failure.
 *
 * Fix: the return values of handle_dst_tag() and write_to_8820() were
 * previously discarded, so an over-long message (handle_dst_tag leaves
 * the buffer unfilled and returns -1) was still checksummed and sent,
 * and transfer failures were reported as success.  Both errors are now
 * propagated; all callers already check for a negative return.
 */
static int ca_set_pmt(struct dst_state *state, struct ca_msg *p_ca_message, struct ca_msg *hw_buffer, u8 reply, u8 query)
{
	u32 length = 0;
	u8 tag_length = 8;

	length = asn_1_decode(&p_ca_message->msg[3]);
	dprintk(verbose, DST_CA_DEBUG, 1, " CA Message length=[%d]", length);
	debug_string(&p_ca_message->msg[4], length, 0);	/* length is excluding tag & length */
	memset(hw_buffer->msg, '\0', length);
	if (handle_dst_tag(state, p_ca_message, hw_buffer, length) < 0)
		return -1;
	put_checksum(hw_buffer->msg, hw_buffer->msg[0]);
	debug_string(hw_buffer->msg, (length + tag_length), 0);	/* tags too */
	if (write_to_8820(state, hw_buffer, (length + tag_length), reply) < 0)
		return -1;
	return 0;
}
/* Board supports CA PMT reply ? */
/*
 * Send a CA PMT, choosing the reply/no-reply variant depending on
 * whether the board can produce a CA PMT reply.  The capability probe
 * is not implemented yet: ca_pmt_reply_test is hard-wired to 0, so
 * only the NO_REPLY path currently runs.
 */
static int dst_check_ca_pmt(struct dst_state *state, struct ca_msg *p_ca_message, struct ca_msg *hw_buffer)
{
	int ca_pmt_reply_test = 0;

	/* Do test board */
	/* Not there yet but soon */

	/* CA PMT Reply capable */
	if (ca_pmt_reply_test) {
		if ((ca_set_pmt(state, p_ca_message, hw_buffer, 1, GET_REPLY)) < 0) {
			dprintk(verbose, DST_CA_ERROR, 1, " ca_set_pmt.. failed !");
			return -1;
		}
		/* Process CA PMT Reply */
		/* will implement soon */
		dprintk(verbose, DST_CA_ERROR, 1, " Not there yet");
	}
	/* CA PMT Reply not capable */
	if (!ca_pmt_reply_test) {
		if ((ca_set_pmt(state, p_ca_message, hw_buffer, 0, NO_REPLY)) < 0) {
			dprintk(verbose, DST_CA_ERROR, 1, " ca_set_pmt.. failed !");
			return -1;
		}
		dprintk(verbose, DST_CA_NOTICE, 1, " ca_set_pmt.. success !");
		/* put a dummy message */
	}
	return 0;
}
/*
 * CA_SEND_MSG handler: copy the message from userspace, decode its
 * 24-bit EN50221 tag and dispatch to the matching helper.  A scratch
 * ca_msg is allocated for the hardware frame and freed on all paths.
 * Returns 0 on success, -ENOMEM/-EFAULT/-1 on failure.
 */
static int ca_send_message(struct dst_state *state, struct ca_msg *p_ca_message, void __user *arg)
{
	int i = 0;
	u32 command = 0;
	struct ca_msg *hw_buffer;
	int result = 0;

	if ((hw_buffer = kmalloc(sizeof (struct ca_msg), GFP_KERNEL)) == NULL) {
		dprintk(verbose, DST_CA_ERROR, 1, " Memory allocation failure");
		return -ENOMEM;
	}
	dprintk(verbose, DST_CA_DEBUG, 1, " ");
	if (copy_from_user(p_ca_message, arg, sizeof (struct ca_msg))) {
		result = -EFAULT;
		goto free_mem_and_exit;
	}
	if (p_ca_message->msg) {
		/* EN50221 tag */
		command = 0;
		/* assemble the 24-bit tag from the first three bytes, big-endian */
		for (i = 0; i < 3; i++) {
			command = command | p_ca_message->msg[i];
			if (i < 2)
				command = command << 8;
		}
		dprintk(verbose, DST_CA_DEBUG, 1, " Command=[0x%x]\n", command);
		switch (command) {
		case CA_PMT:
			dprintk(verbose, DST_CA_DEBUG, 1, "Command = SEND_CA_PMT");
			if ((ca_set_pmt(state, p_ca_message, hw_buffer, 0, 0)) < 0) {	// code simplification started
				dprintk(verbose, DST_CA_ERROR, 1, " -->CA_PMT Failed !");
				result = -1;
				goto free_mem_and_exit;
			}
			dprintk(verbose, DST_CA_INFO, 1, " -->CA_PMT Success !");
			break;
		case CA_PMT_REPLY:
			dprintk(verbose, DST_CA_INFO, 1, "Command = CA_PMT_REPLY");
			/* Have to handle the 2 basic types of cards here */
			if ((dst_check_ca_pmt(state, p_ca_message, hw_buffer)) < 0) {
				dprintk(verbose, DST_CA_ERROR, 1, " -->CA_PMT_REPLY Failed !");
				result = -1;
				goto free_mem_and_exit;
			}
			dprintk(verbose, DST_CA_INFO, 1, " -->CA_PMT_REPLY Success !");
			break;
		case CA_APP_INFO_ENQUIRY:		// only for debugging
			dprintk(verbose, DST_CA_INFO, 1, " Getting Cam Application information");
			if ((ca_get_app_info(state)) < 0) {
				dprintk(verbose, DST_CA_ERROR, 1, " -->CA_APP_INFO_ENQUIRY Failed !");
				result = -1;
				goto free_mem_and_exit;
			}
			dprintk(verbose, DST_CA_INFO, 1, " -->CA_APP_INFO_ENQUIRY Success !");
			break;
		case CA_INFO_ENQUIRY:
			dprintk(verbose, DST_CA_INFO, 1, " Getting CA Information");
			if ((ca_get_ca_info(state)) < 0) {
				dprintk(verbose, DST_CA_ERROR, 1, " -->CA_INFO_ENQUIRY Failed !");
				result = -1;
				goto free_mem_and_exit;
			}
			dprintk(verbose, DST_CA_INFO, 1, " -->CA_INFO_ENQUIRY Success !");
			break;
		}
	}
free_mem_and_exit:
	kfree (hw_buffer);
	return result;
}
/*
 * Dispatch the standard DVB CA ioctls under dst_ca_mutex.  Scratch
 * buffers for the three message/info structures are allocated up front
 * and freed on all exit paths.
 *
 * Fix: the CA_SET_PID case was missing a 'break' after its success
 * path, so control fell through to 'default:' and overwrote a
 * successful result with -EOPNOTSUPP.
 */
static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioctl_arg)
{
	struct dvb_device *dvbdev;
	struct dst_state *state;
	struct ca_slot_info *p_ca_slot_info;
	struct ca_caps *p_ca_caps;
	struct ca_msg *p_ca_message;
	void __user *arg = (void __user *)ioctl_arg;
	int result = 0;

	mutex_lock(&dst_ca_mutex);
	dvbdev = file->private_data;
	state = (struct dst_state *)dvbdev->priv;
	p_ca_message = kmalloc(sizeof (struct ca_msg), GFP_KERNEL);
	p_ca_slot_info = kmalloc(sizeof (struct ca_slot_info), GFP_KERNEL);
	p_ca_caps = kmalloc(sizeof (struct ca_caps), GFP_KERNEL);
	if (!p_ca_message || !p_ca_slot_info || !p_ca_caps) {
		dprintk(verbose, DST_CA_ERROR, 1, " Memory allocation failure");
		result = -ENOMEM;
		goto free_mem_and_exit;
	}
	/* We have now only the standard ioctl's, the driver is upposed to handle internals. */
	switch (cmd) {
	case CA_SEND_MSG:
		dprintk(verbose, DST_CA_INFO, 1, " Sending message");
		if ((ca_send_message(state, p_ca_message, arg)) < 0) {
			dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SEND_MSG Failed !");
			result = -1;
			goto free_mem_and_exit;
		}
		break;
	case CA_GET_MSG:
		dprintk(verbose, DST_CA_INFO, 1, " Getting message");
		if ((ca_get_message(state, p_ca_message, arg)) < 0) {
			dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_MSG Failed !");
			result = -1;
			goto free_mem_and_exit;
		}
		dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_MSG Success !");
		break;
	case CA_RESET:
		dprintk(verbose, DST_CA_ERROR, 1, " Resetting DST");
		dst_error_bailout(state);
		msleep(4000);
		break;
	case CA_GET_SLOT_INFO:
		dprintk(verbose, DST_CA_INFO, 1, " Getting Slot info");
		if ((ca_get_slot_info(state, p_ca_slot_info, arg)) < 0) {
			dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_SLOT_INFO Failed !");
			result = -1;
			goto free_mem_and_exit;
		}
		dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_SLOT_INFO Success !");
		break;
	case CA_GET_CAP:
		dprintk(verbose, DST_CA_INFO, 1, " Getting Slot capabilities");
		if ((ca_get_slot_caps(state, p_ca_caps, arg)) < 0) {
			dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_CAP Failed !");
			result = -1;
			goto free_mem_and_exit;
		}
		dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_CAP Success !");
		break;
	case CA_GET_DESCR_INFO:
		dprintk(verbose, DST_CA_INFO, 1, " Getting descrambler description");
		if ((ca_get_slot_descr(state, p_ca_message, arg)) < 0) {
			dprintk(verbose, DST_CA_ERROR, 1, " -->CA_GET_DESCR_INFO Failed !");
			result = -1;
			goto free_mem_and_exit;
		}
		dprintk(verbose, DST_CA_INFO, 1, " -->CA_GET_DESCR_INFO Success !");
		break;
	case CA_SET_DESCR:
		dprintk(verbose, DST_CA_INFO, 1, " Setting descrambler");
		if ((ca_set_slot_descr()) < 0) {
			dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SET_DESCR Failed !");
			result = -1;
			goto free_mem_and_exit;
		}
		dprintk(verbose, DST_CA_INFO, 1, " -->CA_SET_DESCR Success !");
		break;
	case CA_SET_PID:
		dprintk(verbose, DST_CA_INFO, 1, " Setting PID");
		if ((ca_set_pid()) < 0) {
			dprintk(verbose, DST_CA_ERROR, 1, " -->CA_SET_PID Failed !");
			result = -1;
			goto free_mem_and_exit;
		}
		dprintk(verbose, DST_CA_INFO, 1, " -->CA_SET_PID Success !");
		break;	/* fix: previously fell through to default */
	default:
		result = -EOPNOTSUPP;
	}
free_mem_and_exit:
	kfree (p_ca_message);
	kfree (p_ca_slot_info);
	kfree (p_ca_caps);
	mutex_unlock(&dst_ca_mutex);
	return result;
}
/*
 * Open handler: only takes a module reference; always succeeds.
 * NOTE(review): the try_module_get() return value is ignored, so open
 * cannot fail even if the reference could not be taken -- confirm this
 * is acceptable (dst_ca_fops also sets .owner = THIS_MODULE).
 */
static int dst_ca_open(struct inode *inode, struct file *file)
{
	dprintk(verbose, DST_CA_DEBUG, 1, " Device opened [%p] ", file);
	try_module_get(THIS_MODULE);
	return 0;
}
/* Release handler: drop the module reference taken in dst_ca_open(). */
static int dst_ca_release(struct inode *inode, struct file *file)
{
	dprintk(verbose, DST_CA_DEBUG, 1, " Device closed.");
	module_put(THIS_MODULE);
	return 0;
}
/*
 * Read entry point: nothing is ever produced here (the data path goes
 * through the ioctl interface), so reads always return zero bytes.
 */
static ssize_t dst_ca_read(struct file *file, char __user *buffer, size_t length, loff_t *offset)
{
	dprintk(verbose, DST_CA_DEBUG, 1, " Device read.");
	return 0;
}
/*
 * Write entry point: data is accepted and discarded (the data path goes
 * through the ioctl interface).
 * NOTE(review): returning 0 from write() can make naive callers loop
 * forever -- confirm userspace only uses the ioctl interface.
 */
static ssize_t dst_ca_write(struct file *file, const char __user *buffer, size_t length, loff_t *offset)
{
	dprintk(verbose, DST_CA_DEBUG, 1, " Device write.");
	return 0;
}
/* File operations for the CA character device. */
static const struct file_operations dst_ca_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = dst_ca_ioctl,
	.open = dst_ca_open,
	.release = dst_ca_release,
	.read = dst_ca_read,
	.write = dst_ca_write,
	.llseek = noop_llseek,
};

/* Template passed to dvb_register_device(); .priv is filled per-card. */
static struct dvb_device dvbdev_ca = {
	.priv = NULL,
	.users = 1,
	.readers = 1,
	.writers = 1,
	.fops = &dst_ca_fops
};
/*
 * Register the DST CA device on @dvb_adapter and remember it in
 * @dst->dst_ca.  Returns the new dvb_device, or NULL on failure.
 */
struct dvb_device *dst_ca_attach(struct dst_state *dst, struct dvb_adapter *dvb_adapter)
{
	struct dvb_device *dvbdev;

	dprintk(verbose, DST_CA_ERROR, 1, "registering DST-CA device");
	if (dvb_register_device(dvb_adapter, &dvbdev, &dvbdev_ca, dst, DVB_DEVICE_CA) != 0)
		return NULL;
	dst->dst_ca = dvbdev;
	return dst->dst_ca;
}
EXPORT_SYMBOL(dst_ca_attach);
MODULE_DESCRIPTION("DST DVB-S/T/C Combo CA driver");
MODULE_AUTHOR("Manu Abraham");
MODULE_LICENSE("GPL");
| gpl-2.0 |
sac23/Sacs_Stock_Kernel | arch/arm/mach-omap2/board-omap4panda.c | 4633 | 16061 | /*
* Board support file for OMAP4430 based PandaBoard.
*
* Copyright (C) 2010 Texas Instruments
*
* Author: David Anders <x0132446@ti.com>
*
* Based on mach-omap2/board-4430sdp.c
*
* Author: Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* Based on mach-omap2/board-3430sdp.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/leds.h>
#include <linux/gpio.h>
#include <linux/usb/otg.h>
#include <linux/i2c/twl.h>
#include <linux/mfd/twl6040.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>
#include <linux/wl12xx.h>
#include <linux/platform_data/omap-abe-twl6040.h>
#include <mach/hardware.h>
#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <video/omapdss.h>
#include <plat/board.h>
#include "common.h"
#include <plat/usb.h>
#include <plat/mmc.h>
#include <video/omap-panel-dvi.h>
#include "hsmmc.h"
#include "control.h"
#include "mux.h"
#include "common-board-devices.h"
/* GPIO line assignments for the on-board USB hub and WLAN module. */
#define GPIO_HUB_POWER 1
#define GPIO_HUB_NRESET 62
#define GPIO_WIFI_PMENA 43
#define GPIO_WIFI_IRQ 53
#define HDMI_GPIO_CT_CP_HPD 60	/* HPD mode enable/disable */
#define HDMI_GPIO_LS_OE 41	/* Level shifter for HDMI */
#define HDMI_GPIO_HPD 63	/* Hotplug detect */
/* wl127x BT, FM, GPS connectivity chip */
static int wl1271_gpios[] = {46, -1, -1};
static struct platform_device wl1271_device = {
	.name	= "kim",
	.id	= -1,
	.dev	= {
		.platform_data = &wl1271_gpios,
	},
};
/* Two board status LEDs, driven by the generic leds-gpio driver. */
static struct gpio_led gpio_leds[] = {
	{
		.name			= "pandaboard::status1",
		.default_trigger	= "heartbeat",
		.gpio			= 7,
	},
	{
		.name			= "pandaboard::status2",
		.default_trigger	= "mmc0",
		.gpio			= 8,
	},
};
static struct gpio_led_platform_data gpio_led_info = {
	.leds		= gpio_leds,
	.num_leds	= ARRAY_SIZE(gpio_leds),
};
static struct platform_device leds_gpio = {
	.name	= "leds-gpio",
	.id	= -1,
	.dev	= {
		.platform_data = &gpio_led_info,
	},
};
/* Audio routing for the OMAP ABE / TWL6040 machine driver. */
static struct omap_abe_twl6040_data panda_abe_audio_data = {
	/* Audio out */
	.has_hs = ABE_TWL6040_LEFT | ABE_TWL6040_RIGHT,
	/* HandsFree through expansion connector */
	.has_hf = ABE_TWL6040_LEFT | ABE_TWL6040_RIGHT,
	/* PandaBoard: FM TX, PandaBoardES: can be connected to audio out */
	.has_aux = ABE_TWL6040_LEFT | ABE_TWL6040_RIGHT,
	/* PandaBoard: FM RX, PandaBoardES: audio in */
	.has_afm = ABE_TWL6040_LEFT | ABE_TWL6040_RIGHT,
	/* No jack detection. */
	.jack_detection = 0,
	/* MCLK input is 38.4MHz */
	.mclk_freq = 38400000,
};
static struct platform_device panda_abe_audio = {
	.name	= "omap-abe-twl6040",
	.id	= -1,
	.dev	= {
		.platform_data = &panda_abe_audio_data,
	},
};
static struct platform_device btwilink_device = {
	.name	= "btwilink",
	.id	= -1,
};
/* Devices registered unconditionally at board init. */
static struct platform_device *panda_devices[] __initdata = {
	&leds_gpio,
	&wl1271_device,
	&panda_abe_audio,
	&btwilink_device,
};
/* Only EHCI port 1 is wired (PHY mode); no PHY reset GPIOs are used. */
static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
	.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
	.port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED,
	.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
	.phy_reset = false,
	.reset_gpio_port[0] = -EINVAL,
	.reset_gpio_port[1] = -EINVAL,
	.reset_gpio_port[2] = -EINVAL
};
/* Hub power and reset lines, both driven low (off/in-reset) at request. */
static struct gpio panda_ehci_gpios[] __initdata = {
	{ GPIO_HUB_POWER,	GPIOF_OUT_INIT_LOW,	"hub_power" },
	{ GPIO_HUB_NRESET,	GPIOF_OUT_INIT_LOW,	"hub_nreset" },
};
/*
 * Bring up the USB hub behind EHCI port 1: enable the 19.2 MHz PHY
 * reference clock, request the hub GPIOs with power off and reset
 * asserted, initialise the EHCI core, then release reset and power on.
 */
static void __init omap4_ehci_init(void)
{
	int ret;
	struct clk *phy_ref_clk;

	/* FREF_CLK3 provides the 19.2 MHz reference clock to the PHY */
	phy_ref_clk = clk_get(NULL, "auxclk3_ck");
	if (IS_ERR(phy_ref_clk)) {
		pr_err("Cannot request auxclk3\n");
		return;
	}
	clk_set_rate(phy_ref_clk, 19200000);
	clk_enable(phy_ref_clk);
	/* disable the power to the usb hub prior to init and reset phy+hub */
	ret = gpio_request_array(panda_ehci_gpios,
				 ARRAY_SIZE(panda_ehci_gpios));
	if (ret) {
		/*
		 * NOTE(review): phy_ref_clk stays enabled and is never
		 * clk_put() on this error path -- confirm intentional.
		 */
		pr_err("Unable to initialize EHCI power/reset\n");
		return;
	}
	gpio_export(GPIO_HUB_POWER, 0);
	gpio_export(GPIO_HUB_NRESET, 0);
	gpio_set_value(GPIO_HUB_NRESET, 1);	/* release hub reset */
	usbhs_init(&usbhs_bdata);
	/* enable power to hub */
	gpio_set_value(GPIO_HUB_POWER, 1);
}
/* MUSB OTG controller configuration (UTMI interface, 100 mA). */
static struct omap_musb_board_data musb_board_data = {
	.interface_type	= MUSB_INTERFACE_UTMI,
	.mode		= MUSB_OTG,
	.power		= 100,
};
/* MMC1: 4/8-bit slot; MMC5: wl1271 SDIO, non-removable, 1.65-1.95V. */
static struct omap2_hsmmc_info mmc[] = {
	{
		.mmc		= 1,
		.caps		= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
		.gpio_wp	= -EINVAL,
		.gpio_cd	= -EINVAL,
	},
	{
		.name		= "wl1271",
		.mmc		= 5,
		.caps		= MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
		.gpio_wp	= -EINVAL,
		.gpio_cd	= -EINVAL,
		.ocr_mask	= MMC_VDD_165_195,
		.nonremovable	= true,
	},
	{}	/* Terminator */
};
/* The fixed WLAN regulator supplies vmmc of the fifth MMC controller. */
static struct regulator_consumer_supply omap4_panda_vmmc5_supply[] = {
	REGULATOR_SUPPLY("vmmc", "omap_hsmmc.4"),
};
static struct regulator_init_data panda_vmmc5 = {
	.constraints = {
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies	= ARRAY_SIZE(omap4_panda_vmmc5_supply),
	.consumer_supplies	= omap4_panda_vmmc5_supply,
};
/* GPIO-gated fixed 1.8V supply feeding the wl1271 module. */
static struct fixed_voltage_config panda_vwlan = {
	.supply_name		= "vwl1271",
	.microvolts		= 1800000,	/* 1.8V */
	.gpio			= GPIO_WIFI_PMENA,
	.startup_delay		= 70000,	/* 70msec */
	.enable_high		= 1,
	.enabled_at_boot	= 0,
	.init_data		= &panda_vmmc5,
};
static struct platform_device omap_vwlan_device = {
	.name	= "reg-fixed-voltage",
	.id	= 1,
	.dev	= {
		.platform_data = &panda_vwlan,
	},
};
struct wl12xx_platform_data omap_panda_wlan_data __initdata = {
	/* PANDA ref clock is 38.4 MHz */
	.board_ref_clock = 2,
};
/*
 * Per-controller late init: for MMC1 (pdev id 0), route card detect
 * through the TWL6030 PMIC.  Returns 0 on success, a negative errno if
 * platform data is missing or card-detect setup fails.
 */
static int omap4_twl6030_hsmmc_late_init(struct device *dev)
{
	struct platform_device *pdev = container_of(dev,
				struct platform_device, dev);
	struct omap_mmc_platform_data *pdata = dev->platform_data;
	int cd_irq;

	if (!pdata) {
		dev_err(dev, "%s: NULL platform data\n", __func__);
		return -EINVAL;
	}
	/* Setting MMC1 Card detect Irq */
	if (pdev->id == 0) {
		cd_irq = twl6030_mmc_card_detect_config();
		if (cd_irq < 0) {
			dev_err(dev, "%s: Error card detect config(%d)\n",
				__func__, cd_irq);
			return cd_irq;
		}
		pdata->slots[0].card_detect = twl6030_mmc_card_detect;
	}
	return 0;
}
/* Install the late-init hook into one controller's platform data. */
static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev)
{
	struct omap_mmc_platform_data *mmc_pdata;

	/* dev can be null if CONFIG_MMC_OMAP_HS is not set */
	if (!dev) {
		pr_err("Failed omap4_twl6030_hsmmc_set_late_init\n");
		return;
	}
	mmc_pdata = dev->platform_data;
	mmc_pdata->init = omap4_twl6030_hsmmc_late_init;
}
/*
 * Register all MMC controllers and hook the PMIC-aware late-init
 * callback into each one.  Always returns 0.
 */
static int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers)
{
	struct omap2_hsmmc_info *ctrl;

	omap_hsmmc_init(controllers);
	for (ctrl = controllers; ctrl->mmc; ctrl++)
		omap4_twl6030_hsmmc_set_late_init(&ctrl->pdev->dev);
	return 0;
}
/* TWL6040 volume ramp step sizes for headset and handsfree outputs. */
static struct twl6040_codec_data twl6040_codec = {
	/* single-step ramp for headset and handsfree */
	.hs_left_step	= 0x0f,
	.hs_right_step	= 0x0f,
	.hf_left_step	= 0x1d,
	.hf_right_step	= 0x1d,
};
static struct twl6040_platform_data twl6040_data = {
	.codec		= &twl6040_codec,
	.audpwron_gpio	= 127,
	.irq_base	= TWL6040_CODEC_IRQ_BASE,
};
/* Panda board uses the common PMIC configuration */
static struct twl4030_platform_data omap4_panda_twldata;
/*
 * Display monitor features are burnt in their EEPROM as EDID data. The EEPROM
 * is connected as I2C slave device, and can be accessed at address 0x50
 */
static struct i2c_board_info __initdata panda_i2c_eeprom[] = {
	{
		I2C_BOARD_INFO("eeprom", 0x50),
	},
};
/*
 * Register the four I2C buses: bus 1 carries the TWL6030 PMIC and
 * TWL6040 audio (via omap4_pmic_init); buses 2-4 are generic, with the
 * DVI EDID EEPROM on bus 3.  Always returns 0.
 */
static int __init omap4_panda_i2c_init(void)
{
	omap4_pmic_get_config(&omap4_panda_twldata, TWL_COMMON_PDATA_USB,
			TWL_COMMON_REGULATOR_VDAC |
			TWL_COMMON_REGULATOR_VAUX2 |
			TWL_COMMON_REGULATOR_VAUX3 |
			TWL_COMMON_REGULATOR_VMMC |
			TWL_COMMON_REGULATOR_VPP |
			TWL_COMMON_REGULATOR_VANA |
			TWL_COMMON_REGULATOR_VCXIO |
			TWL_COMMON_REGULATOR_VUSB |
			TWL_COMMON_REGULATOR_CLK32KG);
	omap4_pmic_init("twl6030", &omap4_panda_twldata,
			&twl6040_data, OMAP44XX_IRQ_SYS_2N);
	omap_register_i2c_bus(2, 400, NULL, 0);
	/*
	 * Bus 3 is attached to the DVI port where devices like the pico DLP
	 * projector don't work reliably with 400kHz
	 */
	omap_register_i2c_bus(3, 100, panda_i2c_eeprom,
			ARRAY_SIZE(panda_i2c_eeprom));
	omap_register_i2c_bus(4, 400, NULL, 0);
	return 0;
}
#ifdef CONFIG_OMAP_MUX
/*
 * Board pin-mux table: routes the WLAN (SDIO on MMC5 + control GPIOs),
 * the TFP410 DVI power-down GPIO, and the full dispc2 (second display
 * controller) parallel bus out to the pads. Terminated by
 * OMAP_MUX_TERMINATOR; consumed once at init by omap4_mux_init().
 */
static struct omap_board_mux board_mux[] __initdata = {
	/* WLAN IRQ - GPIO 53 */
	OMAP4_MUX(GPMC_NCS3, OMAP_MUX_MODE3 | OMAP_PIN_INPUT),
	/* WLAN POWER ENABLE - GPIO 43 */
	OMAP4_MUX(GPMC_A19, OMAP_MUX_MODE3 | OMAP_PIN_OUTPUT),
	/* WLAN SDIO: MMC5 CMD */
	OMAP4_MUX(SDMMC5_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
	/* WLAN SDIO: MMC5 CLK */
	OMAP4_MUX(SDMMC5_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
	/* WLAN SDIO: MMC5 DAT[0-3] */
	OMAP4_MUX(SDMMC5_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
	OMAP4_MUX(SDMMC5_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
	OMAP4_MUX(SDMMC5_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
	OMAP4_MUX(SDMMC5_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
	/* gpio 0 - TFP410 PD */
	OMAP4_MUX(KPD_COL1, OMAP_PIN_OUTPUT | OMAP_MUX_MODE3),
	/* dispc2_data23 */
	OMAP4_MUX(USBB2_ULPITLL_STP, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data22 */
	OMAP4_MUX(USBB2_ULPITLL_DIR, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data21 */
	OMAP4_MUX(USBB2_ULPITLL_NXT, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data20 */
	OMAP4_MUX(USBB2_ULPITLL_DAT0, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data19 */
	OMAP4_MUX(USBB2_ULPITLL_DAT1, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data18 */
	OMAP4_MUX(USBB2_ULPITLL_DAT2, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data15 */
	OMAP4_MUX(USBB2_ULPITLL_DAT3, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data14 */
	OMAP4_MUX(USBB2_ULPITLL_DAT4, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data13 */
	OMAP4_MUX(USBB2_ULPITLL_DAT5, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data12 */
	OMAP4_MUX(USBB2_ULPITLL_DAT6, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data11 */
	OMAP4_MUX(USBB2_ULPITLL_DAT7, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data10 */
	OMAP4_MUX(DPM_EMU3, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data9 */
	OMAP4_MUX(DPM_EMU4, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data16 */
	OMAP4_MUX(DPM_EMU5, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data17 */
	OMAP4_MUX(DPM_EMU6, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_hsync */
	OMAP4_MUX(DPM_EMU7, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_pclk */
	OMAP4_MUX(DPM_EMU8, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_vsync */
	OMAP4_MUX(DPM_EMU9, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_de */
	OMAP4_MUX(DPM_EMU10, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data8 */
	OMAP4_MUX(DPM_EMU11, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data7 */
	OMAP4_MUX(DPM_EMU12, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data6 */
	OMAP4_MUX(DPM_EMU13, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data5 */
	OMAP4_MUX(DPM_EMU14, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data4 */
	OMAP4_MUX(DPM_EMU15, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data3 */
	OMAP4_MUX(DPM_EMU16, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data2 */
	OMAP4_MUX(DPM_EMU17, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data1 */
	OMAP4_MUX(DPM_EMU18, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	/* dispc2_data0 */
	OMAP4_MUX(DPM_EMU19, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
	{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#else
/* Without CONFIG_OMAP_MUX, omap4_mux_init() accepts a NULL table. */
#define board_mux	NULL
#endif
/* Display DVI */
#define PANDA_DVI_TFP410_POWER_DOWN_GPIO 0
/* Enable DVI output by de-asserting the TFP410 power-down line. */
static int omap4_panda_enable_dvi(struct omap_dss_device *dssdev)
{
	int gpio = dssdev->reset_gpio;

	gpio_set_value(gpio, 1);
	return 0;
}
/* Disable DVI output by asserting the TFP410 power-down line. */
static void omap4_panda_disable_dvi(struct omap_dss_device *dssdev)
{
	int gpio = dssdev->reset_gpio;

	gpio_set_value(gpio, 0);
}
/* Using generic display panel */
/* Using generic display panel */
/*
 * Panel-level hooks for the generic DVI panel driver; EDID is read over
 * I2C bus 3 (see omap4_panda_i2c_init()).
 */
static struct panel_dvi_platform_data omap4_dvi_panel = {
	.platform_enable	= omap4_panda_enable_dvi,
	.platform_disable	= omap4_panda_disable_dvi,
	.i2c_bus_num = 3,
};

/*
 * DSS device for the DVI output: 24-bit parallel (DPI) data on the
 * second LCD channel, reset/power-down via the TFP410 GPIO.
 */
struct omap_dss_device omap4_panda_dvi_device = {
	.type			= OMAP_DISPLAY_TYPE_DPI,
	.name			= "dvi",
	.driver_name		= "dvi",
	.data			= &omap4_dvi_panel,
	.phy.dpi.data_lines	= 24,
	.reset_gpio		= PANDA_DVI_TFP410_POWER_DOWN_GPIO,
	.channel		= OMAP_DSS_CHANNEL_LCD2,
};
/*
 * Claim the TFP410 power-down GPIO and drive it low (DVI off) at boot.
 * Returns 0 on success or the gpio_request_one() error code.
 */
int __init omap4_panda_dvi_init(void)
{
	int status;

	/* Requesting TFP410 DVI GPIO and disabling it, at bootup */
	status = gpio_request_one(omap4_panda_dvi_device.reset_gpio,
				  GPIOF_OUT_INIT_LOW, "DVI PD");
	if (status)
		pr_err("Failed to get DVI powerdown GPIO\n");

	return status;
}
/*
 * HDMI control GPIOs, requested/released as a group by the HDMI panel
 * enable/disable callbacks: CT_CP_HPD and LS_OE are driven high on
 * enable, HPD is a plain input (hot-plug detect).
 */
static struct gpio panda_hdmi_gpios[] = {
	{ HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" },
	{ HDMI_GPIO_LS_OE,	GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" },
	{ HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" },
};
/*
 * Claim and configure all HDMI control GPIOs in one shot.
 * Returns 0 on success or the gpio_request_array() error code.
 */
static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev)
{
	int ret;

	ret = gpio_request_array(panda_hdmi_gpios,
				 ARRAY_SIZE(panda_hdmi_gpios));
	if (ret)
		pr_err("Cannot request HDMI GPIOs\n");

	return ret;
}
/* Release the HDMI control GPIOs claimed at enable time. */
static void omap4_panda_panel_disable_hdmi(struct omap_dss_device *dssdev)
{
	gpio_free_array(panda_hdmi_gpios, ARRAY_SIZE(panda_hdmi_gpios));
}
/* HDMI hot-plug-detect GPIO, consumed by the hdmi_panel driver. */
static struct omap_dss_hdmi_data omap4_panda_hdmi_data = {
	.hpd_gpio = HDMI_GPIO_HPD,
};

/* DSS device for the HDMI output on the DIGIT channel. */
static struct omap_dss_device  omap4_panda_hdmi_device = {
	.name = "hdmi",
	.driver_name = "hdmi_panel",
	.type = OMAP_DISPLAY_TYPE_HDMI,
	.platform_enable = omap4_panda_panel_enable_hdmi,
	.platform_disable = omap4_panda_panel_disable_hdmi,
	.channel = OMAP_DSS_CHANNEL_DIGIT,
	.data = &omap4_panda_hdmi_data,
};

/* All display devices on this board; DVI is the default. */
static struct omap_dss_device *omap4_panda_dss_devices[] = {
	&omap4_panda_dvi_device,
	&omap4_panda_hdmi_device,
};

static struct omap_dss_board_info omap4_panda_dss_data = {
	.num_devices	= ARRAY_SIZE(omap4_panda_dss_devices),
	.devices	= omap4_panda_dss_devices,
	.default_device	= &omap4_panda_dvi_device,
};
/*
 * Bring up the display subsystem: DVI GPIO first (failure is logged but
 * not fatal), then the DSS core, then HDMI pad/pull configuration which
 * depends on the silicon revision, and finally the HDMI control GPIO
 * muxing.
 */
void __init omap4_panda_display_init(void)
{
	int r;

	r = omap4_panda_dvi_init();
	if (r)
		pr_err("error initializing panda DVI\n");

	omap_display_init(&omap4_panda_dss_data);

	/*
	 * OMAP4460SDP/Blaze and OMAP4430 ES2.3 SDP/Blaze boards and
	 * later have external pull up on the HDMI I2C lines
	 */
	if (cpu_is_omap446x() || omap_rev() > OMAP4430_REV_ES2_2)
		omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP);
	else
		omap_hdmi_init(0);

	/* Route the HDMI control lines (see panda_hdmi_gpios). */
	omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
	omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
	omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
}
/*
 * Pick the ASoC card name (and headset-mic presence) based on whether
 * this is an original PandaBoard (OMAP4430) or a PandaBoard ES.
 */
static void omap4_panda_init_rev(void)
{
	if (cpu_is_omap443x()) {
		/* PandaBoard 4430: headset microphone is wired up. */
		panda_abe_audio_data.card_name = "PandaBoard";
		panda_abe_audio_data.has_hsmic = 1;
		return;
	}

	/* PandaBoard ES */
	panda_abe_audio_data.card_name = "PandaBoardES";
}
/*
 * Machine init callback. Order matters: pin muxing first, then WLAN
 * platform data (needs the muxed IRQ GPIO), then I2C/PMIC, platform
 * devices, serial, SDRC, MMC, USB and finally the display stack.
 */
static void __init omap4_panda_init(void)
{
	int package = OMAP_PACKAGE_CBS;
	int ret;

	/* ES1.0 silicon ships in the CBL package. */
	if (omap_rev() == OMAP4430_REV_ES1_0)
		package = OMAP_PACKAGE_CBL;
	omap4_mux_init(board_mux, NULL, package);

	omap_panda_wlan_data.irq = gpio_to_irq(GPIO_WIFI_IRQ);
	ret = wl12xx_set_platform_data(&omap_panda_wlan_data);
	if (ret)
		pr_err("error setting wl12xx data: %d\n", ret);

	omap4_panda_init_rev();
	omap4_panda_i2c_init();
	platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
	platform_device_register(&omap_vwlan_device);
	omap_serial_init();
	omap_sdrc_init(NULL, NULL);
	omap4_twl6030_hsmmc_init(mmc);
	omap4_ehci_init();
	usb_musb_init(&musb_board_data);
	omap4_panda_display_init();
}
/* Machine descriptor: ties the board's init/IRQ/timer hooks together. */
MACHINE_START(OMAP4_PANDA, "OMAP4 Panda board")
	/* Maintainer: David Anders - Texas Instruments Inc */
	.atag_offset	= 0x100,
	.reserve	= omap_reserve,
	.map_io		= omap4_map_io,
	.init_early	= omap4430_init_early,
	.init_irq	= gic_init_irq,
	.handle_irq	= gic_handle_irq,
	.init_machine	= omap4_panda_init,
	.timer		= &omap4_timer,
	.restart	= omap_prcm_restart,
MACHINE_END
| gpl-2.0 |
davidmueller13/AK-Flo | drivers/i2c/busses/i2c-ixp2000.c | 4889 | 4165 | /*
* drivers/i2c/busses/i2c-ixp2000.c
*
* I2C adapter for IXP2000 systems using GPIOs for I2C bus
*
* Author: Deepak Saxena <dsaxena@plexity.net>
* Based on IXDP2400 code by: Naeem M. Afzal <naeem.m.afzal@intel.com>
* Made generic by: Jeff Daly <jeffrey.daly@intel.com>
*
* Copyright (c) 2003-2004 MontaVista Software Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*
* From Jeff Daly:
*
* I2C adapter driver for Intel IXDP2xxx platforms. This should work for any
* IXP2000 platform if it uses the HW GPIO in the same manner. Basically,
* SDA and SCL GPIOs have external pullups. Setting the respective GPIO to
* an input will make the signal a '1' via the pullup. Setting them to
* outputs will pull them down.
*
* The GPIOs are open drain signals and are used as configuration strap inputs
* during power-up so there's generally a buffer on the board that needs to be
* 'enabled' to drive the GPIOs.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/slab.h>
#include <mach/hardware.h> /* Pick up IXP2000-specific bits */
#include <mach/gpio-ixp2000.h>
/* Extract the SCL GPIO number from the board's i2c pin platform data. */
static inline int ixp2000_scl_pin(void *data)
{
	struct ixp2000_i2c_pins *pins = data;

	return pins->scl_pin;
}
/* Extract the SDA GPIO number from the board's i2c pin platform data. */
static inline int ixp2000_sda_pin(void *data)
{
	struct ixp2000_i2c_pins *pins = data;

	return pins->sda_pin;
}
/*
 * Drive SCL. Open-drain emulation: configuring the GPIO as input lets
 * the external pullup raise the line; configuring it as output pulls
 * it low. When releasing the line, wait (bounded) for any clock
 * stretching by a slave to finish.
 */
static void ixp2000_bit_setscl(void *data, int val)
{
	int timeout = 5000;

	if (!val) {
		gpio_line_config(ixp2000_scl_pin(data), GPIO_OUT);
		return;
	}

	gpio_line_config(ixp2000_scl_pin(data), GPIO_IN);
	while (!gpio_line_get(ixp2000_scl_pin(data)) && timeout--)
		;
}
/*
 * Drive SDA: input floats the line high via the external pullup,
 * output pulls it low (open-drain emulation).
 */
static void ixp2000_bit_setsda(void *data, int val)
{
	gpio_line_config(ixp2000_sda_pin(data), val ? GPIO_IN : GPIO_OUT);
}
/* Sample the current level of the SCL line. */
static int ixp2000_bit_getscl(void *data)
{
	int pin = ixp2000_scl_pin(data);

	return gpio_line_get(pin);
}
/* Sample the current level of the SDA line. */
static int ixp2000_bit_getsda(void *data)
{
	int pin = ixp2000_sda_pin(data);

	return gpio_line_get(pin);
}
/*
 * Per-device driver state, kmalloc'd in probe and freed in remove.
 * The adapter and bit-algorithm data are embedded so one allocation
 * covers everything.
 */
struct ixp2000_i2c_data {
	struct ixp2000_i2c_pins *gpio_pins;	/* board platform data */
	struct i2c_adapter adapter;
	struct i2c_algo_bit_data algo_data;
};
/* Tear down: unregister the adapter, then free the driver state. */
static int ixp2000_i2c_remove(struct platform_device *plat_dev)
{
	struct ixp2000_i2c_data *drv_data = platform_get_drvdata(plat_dev);

	platform_set_drvdata(plat_dev, NULL);
	i2c_del_adapter(&drv_data->adapter);
	kfree(drv_data);

	return 0;
}
static int ixp2000_i2c_probe(struct platform_device *plat_dev)
{
int err;
struct ixp2000_i2c_pins *gpio = plat_dev->dev.platform_data;
struct ixp2000_i2c_data *drv_data =
kzalloc(sizeof(struct ixp2000_i2c_data), GFP_KERNEL);
if (!drv_data)
return -ENOMEM;
drv_data->gpio_pins = gpio;
drv_data->algo_data.data = gpio;
drv_data->algo_data.setsda = ixp2000_bit_setsda;
drv_data->algo_data.setscl = ixp2000_bit_setscl;
drv_data->algo_data.getsda = ixp2000_bit_getsda;
drv_data->algo_data.getscl = ixp2000_bit_getscl;
drv_data->algo_data.udelay = 6;
drv_data->algo_data.timeout = HZ;
strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
sizeof(drv_data->adapter.name));
drv_data->adapter.algo_data = &drv_data->algo_data,
drv_data->adapter.dev.parent = &plat_dev->dev;
gpio_line_config(gpio->sda_pin, GPIO_IN);
gpio_line_config(gpio->scl_pin, GPIO_IN);
gpio_line_set(gpio->scl_pin, 0);
gpio_line_set(gpio->sda_pin, 0);
if ((err = i2c_bit_add_bus(&drv_data->adapter)) != 0) {
dev_err(&plat_dev->dev, "Could not install, error %d\n", err);
kfree(drv_data);
return err;
}
platform_set_drvdata(plat_dev, drv_data);
return 0;
}
/* Platform driver glue; device name must match the board's device. */
static struct platform_driver ixp2000_i2c_driver = {
	.probe		= ixp2000_i2c_probe,
	.remove		= ixp2000_i2c_remove,
	.driver		= {
		.name	= "IXP2000-I2C",
		.owner	= THIS_MODULE,
	},
};
module_platform_driver(ixp2000_i2c_driver);
MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:IXP2000-I2C");
| gpl-2.0 |
kriz2000/2.6.32_CAF | drivers/isdn/hardware/eicon/dqueue.c | 5145 | 2180 | /* $Id: dqueue.c,v 1.5 2003/04/12 21:40:49 schindler Exp $
*
* Driver for Eicon DIVA Server ISDN cards.
* User Mode IDI Interface
*
* Copyright 2000-2003 by Armin Schindler (mac@melware.de)
* Copyright 2000-2003 Cytronics & Melware (info@melware.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*/
#include "platform.h"
#include "dqueue.h"
/*
 * Initialize a data queue with 'max_segments' segments of
 * 'max_length' bytes each. Returns 0 on success, -1 if any segment
 * allocation fails (all partial state is unwound).
 */
int
diva_data_q_init(diva_um_idi_data_queue_t * q,
		 int max_length, int max_segments)
{
	int seg;

	q->max_length = max_length;
	q->segments = max_segments;

	/* Clean slate first, so diva_data_q_finit() can safely unwind
	   a partial allocation below. */
	for (seg = 0; seg < q->segments; seg++) {
		q->data[seg] = NULL;
		q->length[seg] = 0;
	}
	q->read = q->write = q->count = q->segment_pending = 0;

	for (seg = 0; seg < q->segments; seg++) {
		q->data[seg] = diva_os_malloc(0, q->max_length);
		if (!q->data[seg]) {
			diva_data_q_finit(q);
			return (-1);
		}
	}

	return (0);
}
/* Release every allocated segment and reset all queue state. */
int diva_data_q_finit(diva_um_idi_data_queue_t * q)
{
	int seg;

	for (seg = 0; seg < q->segments; seg++) {
		if (q->data[seg])
			diva_os_free(0, q->data[seg]);
		q->data[seg] = NULL;
		q->length[seg] = 0;
	}
	q->read = q->write = q->count = q->segment_pending = 0;

	return (0);
}
/* Fixed byte capacity of every segment in this queue. */
int diva_data_q_get_max_length(const diva_um_idi_data_queue_t * q)
{
	return q->max_length;
}
/*
 * Hand out the current write segment, or NULL when a write is already
 * pending or the ring is full. Must be followed by
 * diva_data_q_ack_segment4write() to commit.
 */
void *diva_data_q_get_segment4write(diva_um_idi_data_queue_t * q)
{
	if (q->segment_pending || q->count >= q->segments)
		return NULL;

	q->segment_pending = 1;
	return q->data[q->write];
}
/*
 * Commit the segment handed out by diva_data_q_get_segment4write():
 * record its byte count and advance the write index around the ring.
 * A no-op when no write is pending.
 */
void
diva_data_q_ack_segment4write(diva_um_idi_data_queue_t * q, int length)
{
	if (!q->segment_pending)
		return;

	q->length[q->write] = length;
	q->count++;
	if (++q->write >= q->segments)
		q->write = 0;
	q->segment_pending = 0;
}
/* Oldest committed segment, or NULL when the ring is empty. */
const void *diva_data_q_get_segment4read(const diva_um_idi_data_queue_t *
					 q)
{
	return q->count ? q->data[q->read] : NULL;
}
/* Byte count recorded for the segment at the current read index. */
int diva_data_q_get_segment_length(const diva_um_idi_data_queue_t * q)
{
	return q->length[q->read];
}
/*
 * Consume the oldest committed segment and advance the read index
 * around the ring. A no-op when the ring is empty.
 */
void diva_data_q_ack_segment4read(diva_um_idi_data_queue_t * q)
{
	if (!q->count)
		return;

	q->length[q->read] = 0;
	q->count--;
	if (++q->read >= q->segments)
		q->read = 0;
}
| gpl-2.0 |
AICP/kernel_samsung_trlte | arch/powerpc/platforms/pseries/io_event_irq.c | 6681 | 5201 | /*
* Copyright 2010 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/irq.h>
#include <asm/io_event_irq.h>
#include "pseries.h"
/*
* IO event interrupt is a mechanism provided by RTAS to return
* information about hardware error and non-error events. Device
* drivers can register their event handlers to receive events.
* Device drivers are expected to use atomic_notifier_chain_register()
* and atomic_notifier_chain_unregister() to register and unregister
* their event handlers. Since multiple IO event types and scopes
* share an IO event interrupt, the event handlers are called one
* by one until the IO event is claimed by one of the handlers.
* The event handlers are expected to return NOTIFY_OK if the
* event is handled by the event handler or NOTIFY_DONE if the
* event does not belong to the handler.
*
* Usage:
*
* Notifier function:
* #include <asm/io_event_irq.h>
* int event_handler(struct notifier_block *nb, unsigned long val, void *data) {
* p = (struct pseries_io_event_sect_data *) data;
* if (! is_my_event(p->scope, p->event_type)) return NOTIFY_DONE;
* :
* :
* return NOTIFY_OK;
* }
* struct notifier_block event_nb = {
* .notifier_call = event_handler,
* }
*
* Registration:
* atomic_notifier_chain_register(&pseries_ioei_notifier_list, &event_nb);
*
* Unregistration:
* atomic_notifier_chain_unregister(&pseries_ioei_notifier_list, &event_nb);
*/
ATOMIC_NOTIFIER_HEAD(pseries_ioei_notifier_list);
EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list);
static int ioei_check_exception_token;
static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
/**
* Find the data portion of an IO Event section from event log.
* @elog: RTAS error/event log.
*
* Return:
* pointer to a valid IO event section data. NULL if not found.
*/
static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
{
struct pseries_errorlog *sect;
/* We should only ever get called for io-event interrupts, but if
* we do get called for another type then something went wrong so
* make some noise about it.
* RTAS_TYPE_IO only exists in extended event log version 6 or later.
* No need to check event log version.
*/
if (unlikely(elog->type != RTAS_TYPE_IO)) {
printk_once(KERN_WARNING "io_event_irq: Unexpected event type %d",
elog->type);
return NULL;
}
sect = get_pseries_errorlog(elog, PSERIES_ELOG_SECT_ID_IO_EVENT);
if (unlikely(!sect)) {
printk_once(KERN_WARNING "io_event_irq: RTAS extended event "
"log does not contain an IO Event section. "
"Could be a bug in system firmware!\n");
return NULL;
}
return (struct pseries_io_event *) §->data;
}
/*
* PAPR:
* - check-exception returns the first found error or event and clear that
* error or event so it is reported once.
* - Each interrupt returns one event. If a plateform chooses to report
* multiple events through a single interrupt, it must ensure that the
* interrupt remains asserted until check-exception has been used to
* process all out-standing events for that interrupt.
*
* Implementation notes:
* - Events must be processed in the order they are returned. Hence,
* sequential in nature.
* - The owner of an event is determined by combinations of scope,
* event type, and sub-type. There is no easy way to pre-sort clients
* by scope or event type alone. For example, Torrent ISR route change
* event is reported with scope 0x00 (Not Applicatable) rather than
* 0x3B (Torrent-hub). It is better to let the clients to identify
* who owns the the event.
*/
/*
 * IO event interrupt handler. Drains every outstanding event for this
 * interrupt via RTAS check-exception (PAPR requires processing until
 * the call stops returning 0) and offers each event to the registered
 * notifier chain. Always returns IRQ_HANDLED.
 */
static irqreturn_t ioei_interrupt(int irq, void *dev_id)
{
	struct pseries_io_event *event;
	int rtas_rc;

	for (;;) {
		rtas_rc = rtas_call(ioei_check_exception_token, 6, 1, NULL,
				    RTAS_VECTOR_EXTERNAL_INTERRUPT,
				    virq_to_hw(irq),
				    RTAS_IO_EVENTS, 1 /* Time Critical */,
				    __pa(ioei_rtas_buf),
				    RTAS_DATA_BUF_SIZE);
		/* Non-zero means no more events (or an RTAS error). */
		if (rtas_rc != 0)
			break;

		event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf);
		if (!event)
			continue;

		/* Handlers claim the event by returning NOTIFY_OK. */
		atomic_notifier_call_chain(&pseries_ioei_notifier_list,
					   0, event);
	}
	return IRQ_HANDLED;
}
/*
 * Subsystem init: resolve the RTAS check-exception token and, when the
 * device tree advertises ibm,io-events sources, hook the interrupt
 * handler up to them. Returns -ENODEV when either prerequisite is
 * missing.
 */
static int __init ioei_init(void)
{
	struct device_node *np;

	ioei_check_exception_token = rtas_token("check-exception");
	if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE)
		return -ENODEV;

	np = of_find_node_by_path("/event-sources/ibm,io-events");
	if (!np)
		return -ENODEV;

	request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT");
	pr_info("IBM I/O event interrupts enabled\n");
	of_node_put(np);

	return 0;
}
machine_subsys_initcall(pseries, ioei_init);
| gpl-2.0 |
tchaari/android_kernel_samsung_crespo | drivers/net/wimax/i2400m/usb-rx.c | 7193 | 15077 | /*
* Intel Wireless WiMAX Connection 2400m
* USB RX handling
*
*
* Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Intel Corporation <linux-wimax@intel.com>
* Yanir Lubetkin <yanirx.lubetkin@intel.com>
* - Initial implementation
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* - Use skb_clone(), break up processing in chunks
* - Split transport/device specific
* - Make buffer size dynamic to exert less memory pressure
*
*
* This handles the RX path on USB.
*
* When a notification is received that says 'there is RX data ready',
* we call i2400mu_rx_kick(); that wakes up the RX kthread, which
* reads a buffer from USB and passes it to i2400m_rx() in the generic
* handling code. The RX buffer has an specific format that is
* described in rx.c.
*
* We use a kernel thread in a loop because:
*
* - we want to be able to call the USB power management get/put
* functions (blocking) before each transaction.
*
* - We might get a lot of notifications and we don't want to submit
* a zillion reads; by serializing, we are throttling.
*
* - RX data processing can get heavy enough so that it is not
* appropriate for doing it in the USB callback; thus we run it in a
* process context.
*
* We provide a read buffer of an arbitrary size (short of a page); if
* the callback reports -EOVERFLOW, it means it was too small, so we
* just double the size and retry (being careful to append, as
* sometimes the device provided some data). Every now and then we
* check if the average packet size is smaller than the current packet
* size and if so, we halve it. At the end, the size of the
* preallocated buffer should be following the average received
* transaction size, adapting dynamically to it.
*
* ROADMAP
*
* i2400mu_rx_kick() Called from notif.c when we get a
* 'data ready' notification
* i2400mu_rxd() Kernel RX daemon
* i2400mu_rx() Receive USB data
* i2400m_rx() Send data to generic i2400m RX handling
*
* i2400mu_rx_setup() called from i2400mu_bus_dev_start()
*
* i2400mu_rx_release() called from i2400mu_bus_dev_stop()
*/
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "i2400m-usb.h"
#define D_SUBMODULE rx
#include "usb-debug-levels.h"
/*
* Dynamic RX size
*
* We can't let the rx_size be a multiple of 512 bytes (the RX
* endpoint's max packet size). On some USB host controllers (we
* haven't been able to fully characterize which), if the device is
* about to send (for example) X bytes and we only post a buffer to
* receive n*512, it will fail to mark that as babble (so that
* i2400mu_rx() [case -EOVERFLOW] can resize the buffer and get the
* rest).
*
* So on growing or shrinking, if it is a multiple of the
* maxpacketsize, we remove some (instead of incresing some, so in a
* buddy allocator we try to waste less space).
*
* Note we also need a hook for this on i2400mu_rx() -- when we do the
* first read, we are sure we won't hit this spot because
* i240mm->rx_size has been set properly. However, if we have to
* double because of -EOVERFLOW, when we launch the read to get the
* rest of the data, we *have* to make sure that also is not a
* multiple of the max_pkt_size.
*/
/*
 * Double the expected RX buffer size, avoiding exact multiples of the
 * 512-byte bulk-in max packet size (some host controllers fail to flag
 * babble/-EOVERFLOW on such sizes — see the comment block above); shave
 * 8 bytes off in that case. Returns the new size; the caller decides
 * whether to store it.
 */
static
size_t i2400mu_rx_size_grow(struct i2400mu *i2400mu)
{
	const size_t max_pkt_size = 512;
	struct device *dev = &i2400mu->usb_iface->dev;
	size_t grown = 2 * i2400mu->rx_size;

	if (grown % max_pkt_size != 0) {
		d_printf(1, dev,
			 "RX: expected size grew to %zu from %zu\n",
			 grown, i2400mu->rx_size);
		return grown;
	}

	grown -= 8;
	d_printf(1, dev,
		 "RX: expected size grew to %zu [adjusted -8] "
		 "from %zu\n",
		 grown, i2400mu->rx_size);
	return grown;
}
/*
 * Possibly shrink the expected RX buffer size.
 *
 * Every 100 received messages (when auto-shrink is enabled), if the
 * average message size over that window is smaller than half the
 * current buffer size, halve the buffer — again avoiding exact
 * multiples of the 512-byte max packet size (see i2400mu_rx_size_grow).
 * Resets the running statistics after shrinking.
 */
static
void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
{
	const size_t max_pkt_size = 512;
	struct device *dev = &i2400mu->usb_iface->dev;

	if (unlikely(i2400mu->rx_size_cnt >= 100
		     && i2400mu->rx_size_auto_shrink)) {
		size_t avg_rx_size =
			i2400mu->rx_size_acc / i2400mu->rx_size_cnt;
		size_t new_rx_size = i2400mu->rx_size / 2;
		if (avg_rx_size < new_rx_size) {
			if (new_rx_size % max_pkt_size == 0) {
				new_rx_size -= 8;
				d_printf(1, dev,
					 "RX: expected size shrank to %zu "
					 "[adjusted -8] from %zu\n",
					 new_rx_size, i2400mu->rx_size);
			} else
				d_printf(1, dev,
					 "RX: expected size shrank to %zu "
					 "from %zu\n",
					 new_rx_size, i2400mu->rx_size);
			i2400mu->rx_size = new_rx_size;
			i2400mu->rx_size_cnt = 0;
			i2400mu->rx_size_acc = i2400mu->rx_size;
		}
	}
}
/*
* Receive a message with payloads from the USB bus into an skb
*
* @i2400mu: USB device descriptor
* @rx_skb: skb where to place the received message
*
* Deals with all the USB-specifics of receiving, dynamically
* increasing the buffer size if so needed. Returns the payload in the
* skb, ready to process. On a zero-length packet, we retry.
*
* On soft USB errors, we retry (until they become too frequent and
* then are promoted to hard); on hard USB errors, we reset the
* device. On other errors (skb realloacation, we just drop it and
* hope for the next invocation to solve it).
*
* Returns: pointer to the skb if ok, ERR_PTR on error.
* NOTE: this function might realloc the skb (if it is too small),
* so always update with the one returned.
* ERR_PTR() is < 0 on error.
* Will return NULL if it cannot reallocate -- this can be
* considered a transient retryable error.
*/
static
struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
{
	int result = 0;
	struct device *dev = &i2400mu->usb_iface->dev;
	int usb_pipe, read_size, rx_size, do_autopm;
	struct usb_endpoint_descriptor *epd;
	const size_t max_pkt_size = 512;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	/* Hold an autopm reference across the transfer (if enabled). */
	do_autopm = atomic_read(&i2400mu->do_autopm);
	result = do_autopm ?
		usb_autopm_get_interface(i2400mu->usb_iface) : 0;
	if (result < 0) {
		dev_err(dev, "RX: can't get autopm: %d\n", result);
		do_autopm = 0;
	}
	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
	usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
	/* Read into the skb's remaining tailroom (we may be appending
	 * after a partial -EOVERFLOW read). Avoid posting an exact
	 * multiple of max_pkt_size — see the file-top comment. */
	rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
	if (unlikely(rx_size % max_pkt_size == 0)) {
		rx_size -= 8;
		d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
	}
	result = usb_bulk_msg(
		i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
		rx_size, &read_size, 200);	/* 200 ms timeout */
	usb_mark_last_busy(i2400mu->usb_dev);
	switch (result) {
	case 0:
		if (read_size == 0)
			goto retry;	/* ZLP, just resubmit */
		skb_put(rx_skb, read_size);
		break;
	case -EPIPE:
		/*
		 * Stall -- maybe the device is choking with our
		 * requests. Clear it and give it some time. If they
		 * happen too often, it might be another symptom, so we
		 * reset.
		 *
		 * No error handling for usb_clear_halt(); if it
		 * works, the retry works; if it fails, this switch
		 * does the error handling for us.
		 */
		if (edc_inc(&i2400mu->urb_edc,
			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "BM-CMD: too many stalls in "
				"URB; resetting device\n");
			goto do_reset;
		}
		usb_clear_halt(i2400mu->usb_dev, usb_pipe);
		msleep(10);	/* give the device some time */
		goto retry;
	case -EINVAL:			/* while removing driver */
	case -ENODEV:			/* dev disconnect ... */
	case -ENOENT:			/* just ignore it */
	case -ESHUTDOWN:
	case -ECONNRESET:
		break;
	case -EOVERFLOW: {		/* too small, reallocate */
		struct sk_buff *new_skb;
		rx_size = i2400mu_rx_size_grow(i2400mu);
		if (rx_size <= (1 << 16))	/* cap it at 64 KiB */
			i2400mu->rx_size = rx_size;
		else if (printk_ratelimit()) {
			/* NOTE(review): when ratelimited, this error
			 * path is skipped and the oversize is silently
			 * retried — looks unintended; confirm. */
			dev_err(dev, "BUG? rx_size up to %d\n", rx_size);
			result = -EINVAL;
			goto out;
		}
		/* Keep whatever the device already sent, then copy into
		 * a larger skb and retry for the remainder. */
		skb_put(rx_skb, read_size);
		new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
					  GFP_KERNEL);
		if (new_skb == NULL) {
			if (printk_ratelimit())
				dev_err(dev, "RX: Can't reallocate skb to %d; "
					"RX dropped\n", rx_size);
			kfree_skb(rx_skb);
			rx_skb = NULL;
			goto out;	/* drop it...*/
		}
		kfree_skb(rx_skb);
		rx_skb = new_skb;
		i2400mu->rx_size_cnt = 0;
		i2400mu->rx_size_acc = i2400mu->rx_size;
		d_printf(1, dev, "RX: size changed to %d, received %d, "
			 "copied %d, capacity %ld\n",
			 rx_size, read_size, rx_skb->len,
			 (long) (skb_end_pointer(new_skb) - new_skb->head));
		goto retry;
	}
	/* In most cases, it happens due to the hardware scheduling a
	 * read when there was no data - unfortunately, we have no way
	 * to tell this timeout from a USB timeout. So we just ignore
	 * it. */
	case -ETIMEDOUT:
		dev_err(dev, "RX: timeout: %d\n", result);
		result = 0;
		break;
	default:			/* Any error */
		/* Soft errors are retried until they become too frequent. */
		if (edc_inc(&i2400mu->urb_edc,
			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
			goto error_reset;
		dev_err(dev, "RX: error receiving URB: %d, retrying\n", result);
		goto retry;
	}
out:
	if (do_autopm)
		usb_autopm_put_interface(i2400mu->usb_iface);
	d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
	return rx_skb;

error_reset:
	dev_err(dev, "RX: maximum errors in URB exceeded; "
		"resetting device\n");
do_reset:
	usb_queue_reset_device(i2400mu->usb_iface);
	rx_skb = ERR_PTR(result);
	goto out;
}
/*
* Kernel thread for USB reception of data
*
* This thread waits for a kick; once kicked, it will allocate an skb
* and receive a single message to it from USB (using
* i2400mu_rx()). Once received, it is passed to the generic i2400m RX
* code for processing.
*
* When done processing, it runs some dirty statistics to verify if
* the last 100 messages received were smaller than half of the
* current RX buffer size. In that case, the RX buffer size is
* halved. This will helps lowering the pressure on the memory
* allocator.
*
* Hard errors force the thread to exit.
*/
static
int i2400mu_rxd(void *_i2400mu)
{
	int result = 0;
	struct i2400mu *i2400mu = _i2400mu;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	size_t pending;
	int rx_size;
	struct sk_buff *rx_skb;
	unsigned long flags;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	/* Publish ourselves so i2400mu_rx_release() can kthread_stop()
	 * us; protected by rx_lock. */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	BUG_ON(i2400mu->rx_kthread != NULL);
	i2400mu->rx_kthread = current;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	while (1) {
		d_printf(2, dev, "RX: waiting for messages\n");
		pending = 0;
		/* Sleep until kicked (rx_pending_count > 0) or told to stop;
		 * the stop check must come first in the condition. */
		wait_event_interruptible(
			i2400mu->rx_wq,
			(kthread_should_stop()	/* check this first! */
			 || (pending = atomic_read(&i2400mu->rx_pending_count)))
			);
		if (kthread_should_stop())
			break;
		if (pending == 0)
			continue;	/* spurious wakeup */
		rx_size = i2400mu->rx_size;
		d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
		rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
		if (rx_skb == NULL) {
			dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
				rx_size);
			msleep(50);	/* give it some time? */
			continue;
		}

		/* Receive the message with the payloads; i2400mu_rx() may
		 * return a different (reallocated) skb, NULL, or ERR_PTR. */
		rx_skb = i2400mu_rx(i2400mu, rx_skb);
		result = PTR_ERR(rx_skb);
		if (IS_ERR(rx_skb))
			goto out;	/* hard error: thread exits */
		atomic_dec(&i2400mu->rx_pending_count);
		if (rx_skb == NULL || rx_skb->len == 0) {
			/* some "ignorable" condition */
			kfree_skb(rx_skb);
			continue;
		}

		/* Deliver the message to the generic i2400m code */
		i2400mu->rx_size_cnt++;
		i2400mu->rx_size_acc += rx_skb->len;
		result = i2400m_rx(i2400m, rx_skb);
		if (result == -EIO
		    && edc_inc(&i2400mu->urb_edc,
			       EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			goto error_reset;
		}

		/* Maybe adjust RX buffer size */
		i2400mu_rx_size_maybe_shrink(i2400mu);
	}
	result = 0;
out:
	/* Unpublish ourselves before exiting. */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
	return result;

error_reset:
	dev_err(dev, "RX: maximum errors in received buffer exceeded; "
		"resetting device\n");
	usb_queue_reset_device(i2400mu->usb_iface);
	goto out;
}
/*
* Start reading from the device
*
* @i2400m: device instance
*
* Notify the RX thread that there is data pending.
*/
/*
 * Notify the RX kthread that (at least) one more message is pending on
 * the device. Called from the notification path.
 */
void i2400mu_rx_kick(struct i2400mu *i2400mu)
{
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;

	d_fnstart(3, dev, "(i2400mu %p)\n", i2400m);
	atomic_inc(&i2400mu->rx_pending_count);
	wake_up_all(&i2400mu->rx_wq);
	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
/*
 * Create and start the RX kernel thread for this device.
 *
 * Returns 0 on success or the negative error from kthread_run() on
 * failure (logged via dev_err).
 */
int i2400mu_rx_setup(struct i2400mu *i2400mu)
{
	int result = 0;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	struct task_struct *kthread;

	kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
			      wimax_dev->name);
	/* the kthread function records itself in i2400mu->rx_kthread */
	if (IS_ERR(kthread)) {
		result = PTR_ERR(kthread);
		dev_err(dev, "RX: cannot start thread: %d\n", result);
	}
	return result;
}
/*
 * Stop the RX kthread, if it is still running.
 *
 * The kthread pointer is fetched and cleared under rx_lock so we race
 * neither with the thread exiting on its own (it clears rx_kthread on
 * exit) nor with a concurrent release.
 */
void i2400mu_rx_release(struct i2400mu *i2400mu)
{
	unsigned long flags;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = i2400m_dev(i2400m);
	struct task_struct *kthread;

	spin_lock_irqsave(&i2400m->rx_lock, flags);
	kthread = i2400mu->rx_kthread;
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	if (kthread)
		kthread_stop(kthread);
	else
		d_printf(1, dev, "RX: kthread had already exited\n");
}
| gpl-2.0 |
RenderBroken/Victara-Stock-kernel | net/dccp/ccids/lib/packet_history.c | 8217 | 13465 | /*
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
* Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
*
* An implementation of the DCCP protocol
*
* This code has been developed by the University of Waikato WAND
* research group. For further information please see http://www.wand.net.nz/
* or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
*
* This code also uses code from Lulea University, rereleased as GPL by its
* authors:
* Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
*
* Changes to meet Linux coding standards, to make it meet latest ccid3 draft
* and to make it work as a loadable module in the DCCP stack written by
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
*
* Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/string.h>
#include <linux/slab.h>
#include "packet_history.h"
#include "../../dccp.h"
/*
* Transmitter History Routines
*/
static struct kmem_cache *tfrc_tx_hist_slab;
/* Create the slab cache backing TX history entries. */
int __init tfrc_tx_packet_history_init(void)
{
	tfrc_tx_hist_slab = kmem_cache_create("tfrc_tx_hist",
					      sizeof(struct tfrc_tx_hist_entry),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (tfrc_tx_hist_slab == NULL)
		return -ENOBUFS;
	return 0;
}
/* Tear down the TX history slab cache (idempotent). */
void tfrc_tx_packet_history_exit(void)
{
	struct kmem_cache *slab = tfrc_tx_hist_slab;

	if (slab == NULL)
		return;
	tfrc_tx_hist_slab = NULL;
	kmem_cache_destroy(slab);
}
/* Prepend a new entry for @seqno to the TX history list at *headp. */
int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno)
{
	struct tfrc_tx_hist_entry *new_entry;

	new_entry = kmem_cache_alloc(tfrc_tx_hist_slab, gfp_any());
	if (new_entry == NULL)
		return -ENOBUFS;

	new_entry->seqno = seqno;
	new_entry->stamp = ktime_get_real();
	new_entry->next  = *headp;
	*headp = new_entry;
	return 0;
}
/* Free every entry on the TX history list and reset the head. */
void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp)
{
	struct tfrc_tx_hist_entry *cur, *next;

	for (cur = *headp; cur != NULL; cur = next) {
		next = cur->next;
		kmem_cache_free(tfrc_tx_hist_slab, cur);
	}
	*headp = NULL;
}
/*
* Receiver History Routines
*/
static struct kmem_cache *tfrc_rx_hist_slab;
/* Create the slab cache backing RX history entries. */
int __init tfrc_rx_packet_history_init(void)
{
	tfrc_rx_hist_slab = kmem_cache_create("tfrc_rxh_cache",
					      sizeof(struct tfrc_rx_hist_entry),
					      0, SLAB_HWCACHE_ALIGN, NULL);
	if (tfrc_rx_hist_slab == NULL)
		return -ENOBUFS;
	return 0;
}
/* Tear down the RX history slab cache (idempotent). */
void tfrc_rx_packet_history_exit(void)
{
	struct kmem_cache *slab = tfrc_rx_hist_slab;

	if (slab == NULL)
		return;
	tfrc_rx_hist_slab = NULL;
	kmem_cache_destroy(slab);
}
/*
 * Fill a history entry from a received packet: sequence number, CCVal,
 * packet type, NDP count and receive timestamp.
 */
static inline void tfrc_rx_hist_entry_from_skb(struct tfrc_rx_hist_entry *entry,
					       const struct sk_buff *skb,
					       const u64 ndp)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);

	entry->tfrchrx_seqno = DCCP_SKB_CB(skb)->dccpd_seq;
	entry->tfrchrx_ccval = dh->dccph_ccval;
	entry->tfrchrx_type  = dh->dccph_type;
	entry->tfrchrx_ndp   = ndp;
	entry->tfrchrx_tstamp = ktime_get_real();
}
/* Record @skb (with NDP count @ndp) in the last-received history slot. */
void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h,
			     const struct sk_buff *skb,
			     const u64 ndp)
{
	tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_last_rcv(h), skb, ndp);
}
/* has the packet contained in skb been seen before? */
int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb)
{
	const u64 seq = DCCP_SKB_CB(skb)->dccpd_seq;
	int i;

	/* anything at or before loss_prev has been processed already */
	if (dccp_delta_seqno(tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, seq) <= 0)
		return 1;

	/* otherwise scan the valid entries of the loss window */
	for (i = 1; i <= h->loss_count; i++)
		if (tfrc_rx_hist_entry(h, i)->tfrchrx_seqno == seq)
			return 1;

	return 0;
}
/* Exchange the ring entries at logical positions @a and @b. */
static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b)
{
	const u8 i = tfrc_rx_hist_index(h, a);
	const u8 j = tfrc_rx_hist_index(h, b);
	struct tfrc_rx_hist_entry *saved = h->ring[i];

	h->ring[i] = h->ring[j];
	h->ring[j] = saved;
}
/*
* Private helper functions for loss detection.
*
* In the descriptions, `Si' refers to the sequence number of entry number i,
* whose NDP count is `Ni' (lower case is used for variables).
* Note: All __xxx_loss functions expect that a test against duplicates has been
* performed already: the seqno of the skb must not be less than the seqno
* of loss_prev; and it must not equal that of any valid history entry.
*/
/*
 * No loss pending (loss_count == 0): check whether the new packet S1
 * leaves a gap after the last in-order packet S0. If so, start tracking
 * a potential loss by storing S1 in entry 1 and setting loss_count = 1.
 */
static void __do_track_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u64 n1)
{
	u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
	    s1 = DCCP_SKB_CB(skb)->dccpd_seq;

	if (!dccp_loss_free(s0, s1, n1)) {	/* gap between S0 and S1 */
		h->loss_count = 1;
		tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n1);
	}
}
/*
 * One packet (S1) received after the hole following S0; classify the new
 * packet S2 relative to S0/S1, handling reordering:
 *  - S2 > S1: extend the loss window (loss_count = 2);
 *  - S0 < S2 < S1 and S2 fills the hole completely: loss resolved;
 *  - otherwise insert/record S2 according to which gap remains.
 */
static void __one_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n2)
{
	u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
	    s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
	    s2 = DCCP_SKB_CB(skb)->dccpd_seq;

	if (likely(dccp_delta_seqno(s1, s2) > 0)) {	/* S1  <  S2 */
		h->loss_count = 2;
		tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n2);
		return;
	}

	/* S0  <  S2  <  S1 */
	if (dccp_loss_free(s0, s2, n2)) {
		u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp;

		if (dccp_loss_free(s2, s1, n1)) {
			/* hole is filled: S0, S2, and S1 are consecutive */
			h->loss_count = 0;
			h->loss_start = tfrc_rx_hist_index(h, 1);
		} else
			/* gap between S2 and S1: just update loss_prev */
			tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n2);

	} else {	/* gap between S0 and S2 */
		/*
		 * Reorder history to insert S2 between S0 and S1
		 */
		tfrc_rx_hist_swap(h, 0, 3);
		h->loss_start = tfrc_rx_hist_index(h, 3);
		tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n2);
		h->loss_count = 2;
	}
}
/*
 * Two packets (S1, S2) received after the hole following S0; place the
 * new packet S3. Returns 1 when the hole after S0 is confirmed as a new
 * loss event, 0 when S3 (partially) fills it or only updates loss_prev.
 */
static int __two_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n3)
{
	u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
	    s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
	    s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno,
	    s3 = DCCP_SKB_CB(skb)->dccpd_seq;

	if (likely(dccp_delta_seqno(s2, s3) > 0)) {	/* S2  <  S3 */
		h->loss_count = 3;
		tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 3), skb, n3);
		return 1;
	}

	/* S3  <  S2 */

	if (dccp_delta_seqno(s1, s3) > 0) {		/* S1  <  S3  <  S2 */
		/*
		 * Reorder history to insert S3 between S1 and S2
		 */
		tfrc_rx_hist_swap(h, 2, 3);
		tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n3);
		h->loss_count = 3;
		return 1;
	}

	/* S0  <  S3  <  S1 */

	if (dccp_loss_free(s0, s3, n3)) {
		u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp;

		if (dccp_loss_free(s3, s1, n1)) {
			/* hole between S0 and S1 filled by S3 */
			u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp;

			if (dccp_loss_free(s1, s2, n2)) {
				/* entire hole filled by S0, S3, S1, S2 */
				h->loss_start = tfrc_rx_hist_index(h, 2);
				h->loss_count = 0;
			} else {
				/* gap remains between S1 and S2 */
				h->loss_start = tfrc_rx_hist_index(h, 1);
				h->loss_count = 1;
			}

		} else	/* gap exists between S3 and S1, loss_count stays at 2 */
			tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n3);

		return 0;
	}

	/*
	 * The remaining case:  S0 < S3 < S1 < S2;  gap between S0 and S3
	 * Reorder history to insert S3 between S0 and S1.
	 */
	tfrc_rx_hist_swap(h, 0, 3);
	h->loss_start = tfrc_rx_hist_index(h, 3);
	tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n3);
	h->loss_count = 3;

	return 1;
}
/* recycle RX history records to continue loss detection if necessary */
static void __three_after_loss(struct tfrc_rx_hist *h)
{
	/*
	 * At this stage we know already that there is a gap between S0 and S1
	 * (since S0 was the highest sequence number received before detecting
	 * the loss). To recycle the loss record, it is	thus only necessary to
	 * check for other possible gaps between S1/S2 and between S2/S3.
	 */
	u64 s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
	    s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno,
	    s3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_seqno;
	u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp,
	    n3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_ndp;

	if (dccp_loss_free(s1, s2, n2)) {
		if (dccp_loss_free(s2, s3, n3)) {
			/* no gap between S2 and S3: entire hole is filled */
			h->loss_start = tfrc_rx_hist_index(h, 3);
			h->loss_count = 0;
		} else {
			/* gap between S2 and S3 */
			h->loss_start = tfrc_rx_hist_index(h, 2);
			h->loss_count = 1;
		}
	} else {	/* gap between S1 and S2 */
		h->loss_start = tfrc_rx_hist_index(h, 1);
		h->loss_count = 2;
	}
}
/**
 * tfrc_rx_handle_loss  -  Loss detection and further processing
 * @h:		    The non-empty RX history object
 * @lh:		    Loss Intervals database to update
 * @skb:	    Currently received packet
 * @ndp:	    The NDP count belonging to @skb
 * @calc_first_li:  Caller-dependent computation of first loss interval in @lh
 * @sk:		    Used by @calc_first_li (see tfrc_lh_interval_add)
 *
 * Chooses action according to pending loss, updates LI database when a new
 * loss was detected, and does required post-processing. Returns 1 when caller
 * should send feedback, 0 otherwise.
 * Since it also takes care of reordering during loss detection and updates the
 * records accordingly, the caller should not perform any more RX history
 * operations when loss_count is greater than 0 after calling this function.
 */
int tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
			struct tfrc_loss_hist *lh,
			struct sk_buff *skb, const u64 ndp,
			u32 (*calc_first_li)(struct sock *), struct sock *sk)
{
	int is_new_loss = 0;

	/* dispatch on the number of packets seen since the hole opened */
	if (h->loss_count == 0) {
		__do_track_loss(h, skb, ndp);
	} else if (h->loss_count == 1) {
		__one_after_loss(h, skb, ndp);
	} else if (h->loss_count != 2) {
		DCCP_BUG("invalid loss_count %d", h->loss_count);
	} else if (__two_after_loss(h, skb, ndp)) {
		/*
		 * Update Loss Interval database and recycle RX records
		 */
		is_new_loss = tfrc_lh_interval_add(lh, h, calc_first_li, sk);
		__three_after_loss(h);
	}
	return is_new_loss;
}
/*
 * Allocate all TFRC_NDUPACK + 1 ring entries for @h, rolling back on
 * partial failure. Returns 0 on success, -ENOBUFS otherwise.
 */
int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h)
{
	int i;

	for (i = 0; i <= TFRC_NDUPACK; i++) {
		h->ring[i] = kmem_cache_alloc(tfrc_rx_hist_slab, GFP_ATOMIC);
		if (h->ring[i] == NULL) {
			/* undo the allocations made so far */
			while (--i >= 0) {
				kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]);
				h->ring[i] = NULL;
			}
			return -ENOBUFS;
		}
	}
	h->loss_count = h->loss_start = 0;
	return 0;
}
/* Free all allocated ring entries of @h and NULL the slots. */
void tfrc_rx_hist_purge(struct tfrc_rx_hist *h)
{
	int i;

	for (i = 0; i <= TFRC_NDUPACK; ++i) {
		if (h->ring[i] == NULL)
			continue;
		kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]);
		h->ring[i] = NULL;
	}
}
/**
 * tfrc_rx_hist_rtt_last_s - reference entry to compute RTT samples against
 *
 * Always ring slot 0; used as the fixed reference point for CCVal deltas.
 */
static inline struct tfrc_rx_hist_entry *
			tfrc_rx_hist_rtt_last_s(const struct tfrc_rx_hist *h)
{
	return h->ring[0];
}
/**
 * tfrc_rx_hist_rtt_prev_s - previously suitable (wrt rtt_last_s) RTT-sampling entry
 *
 * Indexed by rtt_sample_prev, which records where the candidate was kept.
 */
static inline struct tfrc_rx_hist_entry *
			tfrc_rx_hist_rtt_prev_s(const struct tfrc_rx_hist *h)
{
	return h->ring[h->rtt_sample_prev];
}
/**
 * tfrc_rx_hist_sample_rtt  -  Sample RTT from timestamp / CCVal
 * Based on ideas presented in RFC 4342, 8.1. Returns 0 if it was not able
 * to compute a sample with given data - calling function should check this.
 *
 * A CCVal delta of exactly 4 (one full window counter cycle) against the
 * reference entry gives a direct sample; deltas of 2-3 are kept as a
 * candidate (rtt_sample_prev = 2) and scaled when a later packet allows it.
 */
u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h, const struct sk_buff *skb)
{
	u32 sample = 0,
	    delta_v = SUB16(dccp_hdr(skb)->dccph_ccval,
			    tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval);

	if (delta_v < 1 || delta_v > 4) {	/* unsuitable CCVal delta */
		if (h->rtt_sample_prev == 2) {	/* previous candidate stored */
			sample = SUB16(tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_ccval,
				       tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval);
			if (sample)
				/* scale the elapsed time by 4/delta counters */
				sample = 4 / sample *
					ktime_us_delta(tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_tstamp,
						       tfrc_rx_hist_rtt_last_s(h)->tfrchrx_tstamp);
			else    /*
				 * FIXME: This condition is in principle not
				 * possible but occurs when CCID is used for
				 * two-way data traffic. I have tried to trace
				 * it, but the cause does not seem to be here.
				 */
				DCCP_BUG("please report to dccp@vger.kernel.org"
					 " => prev = %u, last = %u",
					 tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_ccval,
					 tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval);
		} else if (delta_v < 1) {
			/* too early to sample: keep this entry as reference */
			h->rtt_sample_prev = 1;
			goto keep_ref_for_next_time;
		}

	} else if (delta_v == 4) /* optimal match */
		sample = ktime_to_us(net_timedelta(tfrc_rx_hist_rtt_last_s(h)->tfrchrx_tstamp));
	else {			 /* suboptimal match */
		h->rtt_sample_prev = 2;
		goto keep_ref_for_next_time;
	}

	if (unlikely(sample > DCCP_SANE_RTT_MAX)) {
		DCCP_WARN("RTT sample %u too large, using max\n", sample);
		sample = DCCP_SANE_RTT_MAX;
	}

	h->rtt_sample_prev = 0;	       /* use current entry as next reference */
keep_ref_for_next_time:

	return sample;
}
| gpl-2.0 |
xiaowei942/kernel-11 | drivers/serial/vr41xx_siu.c | 26 | 20505 | /*
* Driver for NEC VR4100 series Serial Interface Unit.
*
* Copyright (C) 2004-2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* Based on drivers/serial/8250.c, by Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#if defined(CONFIG_SERIAL_VR41XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <asm/io.h>
#include <asm/vr41xx/siu.h>
#include <asm/vr41xx/vr41xx.h>
#define SIU_BAUD_BASE 1152000
#define SIU_MAJOR 204
#define SIU_MINOR_BASE 82
#define RX_MAX_COUNT 256
#define TX_MAX_COUNT 15
#define SIUIRSEL 0x08
#define TMICMODE 0x20
#define TMICTX 0x10
#define IRMSEL 0x0c
#define IRMSEL_HP 0x08
#define IRMSEL_TEMIC 0x04
#define IRMSEL_SHARP 0x00
#define IRUSESEL 0x02
#define SIRSEL 0x01
static struct uart_port siu_uart_ports[SIU_PORTS_MAX] = {
[0 ... SIU_PORTS_MAX-1] = {
.lock = __SPIN_LOCK_UNLOCKED(siu_uart_ports->lock),
.irq = -1,
},
};
#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
static uint8_t lsr_break_flag[SIU_PORTS_MAX];
#endif
#define siu_read(port, offset) readb((port)->membase + (offset))
#define siu_write(port, offset, value) writeb((value), (port)->membase + (offset))
/* Route port 0 to either the RS-232C pins or the IrDA transceiver. */
void vr41xx_select_siu_interface(siu_interface_t interface)
{
	struct uart_port *port = &siu_uart_ports[0];
	unsigned long flags;
	uint8_t irsel;

	spin_lock_irqsave(&port->lock, flags);

	irsel = siu_read(port, SIUIRSEL) & ~SIRSEL;
	if (interface == SIU_INTERFACE_IRDA)
		irsel |= SIRSEL;
	siu_write(port, SIUIRSEL, irsel);

	spin_unlock_irqrestore(&port->lock, flags);
}
EXPORT_SYMBOL_GPL(vr41xx_select_siu_interface);
/* Select whether the FIR unit (vs. the SIU) drives the IrDA module. */
void vr41xx_use_irda(irda_use_t use)
{
	struct uart_port *port = &siu_uart_ports[0];
	unsigned long flags;
	uint8_t irsel;

	spin_lock_irqsave(&port->lock, flags);

	irsel = siu_read(port, SIUIRSEL) & ~IRUSESEL;
	if (use == FIR_USE_IRDA)
		irsel |= IRUSESEL;
	siu_write(port, SIUIRSEL, irsel);

	spin_unlock_irqrestore(&port->lock, flags);
}
EXPORT_SYMBOL_GPL(vr41xx_use_irda);
/*
 * Select the attached IrDA transceiver module and, for the TEMIC module,
 * the TX speed. Clears all module-select bits first, then sets the ones
 * matching @module.
 */
void vr41xx_select_irda_module(irda_module_t module, irda_speed_t speed)
{
	struct uart_port *port;
	unsigned long flags;
	uint8_t irsel;

	port = &siu_uart_ports[0];

	spin_lock_irqsave(&port->lock, flags);

	irsel = siu_read(port, SIUIRSEL);
	irsel &= ~(IRMSEL | TMICTX | TMICMODE);
	switch (module) {
	case SHARP_IRDA:
		irsel |= IRMSEL_SHARP;
		break;
	case TEMIC_IRDA:
		irsel |= IRMSEL_TEMIC | TMICMODE;
		if (speed == IRDA_TX_4MBPS)
			irsel |= TMICTX;
		break;
	case HP_IRDA:
		irsel |= IRMSEL_HP;
		break;
	default:
		break;
	}
	siu_write(port, SIUIRSEL, irsel);

	spin_unlock_irqrestore(&port->lock, flags);
}
EXPORT_SYMBOL_GPL(vr41xx_select_irda_module);
/*
 * Reset both FIFOs. The 16550-style sequence matters: enable the FIFO,
 * then issue the clear bits while it is enabled, then disable it again.
 */
static inline void siu_clear_fifo(struct uart_port *port)
{
	siu_write(port, UART_FCR, UART_FCR_ENABLE_FIFO);
	siu_write(port, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR |
				  UART_FCR_CLEAR_XMIT);
	siu_write(port, UART_FCR, 0);
}
/* Size in bytes of the register window for this port type (0 if unknown). */
static inline unsigned long siu_port_size(struct uart_port *port)
{
	if (port->type == PORT_VR41XX_SIU)
		return 11UL;
	if (port->type == PORT_VR41XX_DSIU)
		return 8UL;
	return 0;
}
/* Derive the port type from its line number (line 1 needs a valid IRQ). */
static inline unsigned int siu_check_type(struct uart_port *port)
{
	switch (port->line) {
	case 0:
		return PORT_VR41XX_SIU;
	case 1:
		if (port->irq != -1)
			return PORT_VR41XX_DSIU;
		break;
	}
	return PORT_UNKNOWN;
}
/* Human-readable name of the port type, or NULL when unrecognized. */
static inline const char *siu_type_name(struct uart_port *port)
{
	if (port->type == PORT_VR41XX_SIU)
		return "SIU";
	if (port->type == PORT_VR41XX_DSIU)
		return "DSIU";
	return NULL;
}
/* Report TIOCSER_TEMT when the transmitter (holding reg + shifter) is idle. */
static unsigned int siu_tx_empty(struct uart_port *port)
{
	return (siu_read(port, UART_LSR) & UART_LSR_TEMT) ? TIOCSER_TEMT : 0;
}
/* Translate TIOCM_* modem-control bits into the MCR register layout. */
static void siu_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	uint8_t mcr = 0;

	mcr |= (mctrl & TIOCM_DTR)  ? UART_MCR_DTR  : 0;
	mcr |= (mctrl & TIOCM_RTS)  ? UART_MCR_RTS  : 0;
	mcr |= (mctrl & TIOCM_OUT1) ? UART_MCR_OUT1 : 0;
	mcr |= (mctrl & TIOCM_OUT2) ? UART_MCR_OUT2 : 0;
	mcr |= (mctrl & TIOCM_LOOP) ? UART_MCR_LOOP : 0;

	siu_write(port, UART_MCR, mcr);
}
/* Translate the MSR register contents into TIOCM_* modem-status bits. */
static unsigned int siu_get_mctrl(struct uart_port *port)
{
	unsigned int mctrl = 0;
	uint8_t msr = siu_read(port, UART_MSR);

	mctrl |= (msr & UART_MSR_DCD) ? TIOCM_CAR : 0;
	mctrl |= (msr & UART_MSR_RI)  ? TIOCM_RNG : 0;
	mctrl |= (msr & UART_MSR_DSR) ? TIOCM_DSR : 0;
	mctrl |= (msr & UART_MSR_CTS) ? TIOCM_CTS : 0;

	return mctrl;
}
/* Mask the transmit-holding-register-empty interrupt. */
static void siu_stop_tx(struct uart_port *port)
{
	unsigned long flags;
	uint8_t val;

	spin_lock_irqsave(&port->lock, flags);
	val = siu_read(port, UART_IER) & ~UART_IER_THRI;
	siu_write(port, UART_IER, val);
	spin_unlock_irqrestore(&port->lock, flags);
}
/* Unmask the transmit-holding-register-empty interrupt. */
static void siu_start_tx(struct uart_port *port)
{
	unsigned long flags;
	uint8_t val;

	spin_lock_irqsave(&port->lock, flags);
	val = siu_read(port, UART_IER) | UART_IER_THRI;
	siu_write(port, UART_IER, val);
	spin_unlock_irqrestore(&port->lock, flags);
}
/*
 * Stop receiving: mask the line-status interrupt and drop UART_LSR_DR
 * from read_status_mask so stray received data is ignored.
 */
static void siu_stop_rx(struct uart_port *port)
{
	unsigned long flags;
	uint8_t ier;

	spin_lock_irqsave(&port->lock, flags);
	ier = siu_read(port, UART_IER);
	ier &= ~UART_IER_RLSI;
	siu_write(port, UART_IER, ier);

	port->read_status_mask &= ~UART_LSR_DR;
	spin_unlock_irqrestore(&port->lock, flags);
}
/* Enable the modem-status interrupt (for DCD/CTS change reporting). */
static void siu_enable_ms(struct uart_port *port)
{
	unsigned long flags;
	uint8_t val;

	spin_lock_irqsave(&port->lock, flags);
	val = siu_read(port, UART_IER) | UART_IER_MSI;
	siu_write(port, UART_IER, val);
	spin_unlock_irqrestore(&port->lock, flags);
}
/* Assert (ctl == -1) or deassert the break condition via LCR_SBC. */
static void siu_break_ctl(struct uart_port *port, int ctl)
{
	unsigned long flags;
	uint8_t lcr;

	spin_lock_irqsave(&port->lock, flags);
	lcr = siu_read(port, UART_LCR);
	lcr = (ctl == -1) ? (lcr | UART_LCR_SBC) : (lcr & ~UART_LCR_SBC);
	siu_write(port, UART_LCR, lcr);
	spin_unlock_irqrestore(&port->lock, flags);
}
/*
 * Drain the RX FIFO into the tty layer (called from the IRQ handler).
 * Reads up to RX_MAX_COUNT characters, classifying each against the LSR
 * error bits and the port's read/ignore status masks. On return *status
 * holds the last LSR value read.
 */
static inline void receive_chars(struct uart_port *port, uint8_t *status)
{
	struct tty_struct *tty;
	uint8_t lsr, ch;
	char flag;
	int max_count = RX_MAX_COUNT;

	tty = port->info->tty;
	lsr = *status;

	do {
		ch = siu_read(port, UART_RX);
		port->icount.rx++;
		flag = TTY_NORMAL;

#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
		/* merge any break seen by the console polling path */
		lsr |= lsr_break_flag[port->line];
		lsr_break_flag[port->line] = 0;
#endif
		if (unlikely(lsr & (UART_LSR_BI | UART_LSR_FE |
				    UART_LSR_PE | UART_LSR_OE))) {
			if (lsr & UART_LSR_BI) {
				/* BI implies bogus FE/PE; count only the break */
				lsr &= ~(UART_LSR_FE | UART_LSR_PE);
				port->icount.brk++;

				if (uart_handle_break(port))
					goto ignore_char;
			}

			if (lsr & UART_LSR_FE)
				port->icount.frame++;
			if (lsr & UART_LSR_PE)
				port->icount.parity++;
			if (lsr & UART_LSR_OE)
				port->icount.overrun++;

			lsr &= port->read_status_mask;
			if (lsr & UART_LSR_BI)
				flag = TTY_BREAK;
			if (lsr & UART_LSR_FE)
				flag = TTY_FRAME;
			if (lsr & UART_LSR_PE)
				flag = TTY_PARITY;
		}

		if (uart_handle_sysrq_char(port, ch))
			goto ignore_char;

		uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);

	ignore_char:
		lsr = siu_read(port, UART_LSR);
	} while ((lsr & UART_LSR_DR) && (max_count-- > 0));

	tty_flip_buffer_push(tty);

	*status = lsr;
}
/*
 * Process modem-status deltas from the IRQ handler: update counters,
 * forward DCD/CTS changes to serial_core, and wake any TIOCMIWAIT
 * sleeper.
 */
static inline void check_modem_status(struct uart_port *port)
{
	uint8_t msr;

	msr = siu_read(port, UART_MSR);
	if ((msr & UART_MSR_ANY_DELTA) == 0)
		return;
	if (msr & UART_MSR_DDCD)
		uart_handle_dcd_change(port, msr & UART_MSR_DCD);
	if (msr & UART_MSR_TERI)
		port->icount.rng++;
	if (msr & UART_MSR_DDSR)
		port->icount.dsr++;
	if (msr & UART_MSR_DCTS)
		uart_handle_cts_change(port, msr & UART_MSR_CTS);

	wake_up_interruptible(&port->info->delta_msr_wait);
}
/*
 * Refill the TX FIFO from the circular buffer (called from the IRQ
 * handler). A pending x_char takes priority; otherwise up to
 * TX_MAX_COUNT characters are written. Wakes writers when the buffer
 * drains below WAKEUP_CHARS and stops TX when it empties.
 */
static inline void transmit_chars(struct uart_port *port)
{
	struct circ_buf *xmit;
	int max_count = TX_MAX_COUNT;

	xmit = &port->info->xmit;

	if (port->x_char) {
		siu_write(port, UART_TX, port->x_char);
		port->icount.tx++;
		port->x_char = 0;
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		siu_stop_tx(port);
		return;
	}

	do {
		siu_write(port, UART_TX, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	} while (max_count-- > 0);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit))
		siu_stop_tx(port);
}
/*
 * Top-level interrupt handler: dispatch RX, modem-status and TX work
 * based on IIR/LSR. Returns IRQ_NONE when no interrupt is pending.
 */
static irqreturn_t siu_interrupt(int irq, void *dev_id)
{
	struct uart_port *port;
	uint8_t iir, lsr;

	port = (struct uart_port *)dev_id;

	iir = siu_read(port, UART_IIR);
	if (iir & UART_IIR_NO_INT)
		return IRQ_NONE;

	lsr = siu_read(port, UART_LSR);
	if (lsr & UART_LSR_DR)
		receive_chars(port, &lsr);

	check_modem_status(port);

	if (lsr & UART_LSR_THRE)
		transmit_chars(port);

	return IRQ_HANDLED;
}
/*
 * Open-time initialization: clear the FIFOs, flush stale status by
 * dummy-reading LSR/RX/IIR/MSR, probe for a present device (LSR reading
 * 0xff means nothing answers), hook the IRQ and enable RX interrupts.
 */
static int siu_startup(struct uart_port *port)
{
	int retval;

	if (port->membase == NULL)
		return -ENODEV;

	siu_clear_fifo(port);

	/* discard any pending status/data left from before open */
	(void)siu_read(port, UART_LSR);
	(void)siu_read(port, UART_RX);
	(void)siu_read(port, UART_IIR);
	(void)siu_read(port, UART_MSR);

	if (siu_read(port, UART_LSR) == 0xff)
		return -ENODEV;

	retval = request_irq(port->irq, siu_interrupt, 0, siu_type_name(port), port);
	if (retval)
		return retval;

	if (port->type == PORT_VR41XX_DSIU)
		vr41xx_enable_dsiuint(DSIUINT_ALL);

	siu_write(port, UART_LCR, UART_LCR_WLEN8);

	spin_lock_irq(&port->lock);
	siu_set_mctrl(port, port->mctrl);
	spin_unlock_irq(&port->lock);

	siu_write(port, UART_IER, UART_IER_RLSI | UART_IER_RDI);

	/* clear anything raised while interrupts were being set up */
	(void)siu_read(port, UART_LSR);
	(void)siu_read(port, UART_RX);
	(void)siu_read(port, UART_IIR);
	(void)siu_read(port, UART_MSR);

	return 0;
}
/*
 * Close-time teardown: mask all interrupts, drop OUT2, clear any break
 * condition, reset the FIFOs and release the IRQ.
 */
static void siu_shutdown(struct uart_port *port)
{
	unsigned long flags;
	uint8_t lcr;

	siu_write(port, UART_IER, 0);

	spin_lock_irqsave(&port->lock, flags);

	port->mctrl &= ~TIOCM_OUT2;
	siu_set_mctrl(port, port->mctrl);

	spin_unlock_irqrestore(&port->lock, flags);

	lcr = siu_read(port, UART_LCR);
	lcr &= ~UART_LCR_SBC;
	siu_write(port, UART_LCR, lcr);

	siu_clear_fifo(port);

	(void)siu_read(port, UART_RX);

	if (port->type == PORT_VR41XX_DSIU)
		vr41xx_disable_dsiuint(DSIUINT_ALL);

	free_irq(port->irq, port);
}
/*
 * Apply new termios settings: word length, stop/parity bits, baud
 * divisor, FIFO trigger and the read/ignore status masks. Register
 * updates are done under the port lock; the divisor latch (DLAB) is
 * toggled around the DLL/DLM writes as the 16550 protocol requires.
 */
static void siu_set_termios(struct uart_port *port, struct ktermios *new,
			    struct ktermios *old)
{
	tcflag_t c_cflag, c_iflag;
	uint8_t lcr, fcr, ier;
	unsigned int baud, quot;
	unsigned long flags;

	c_cflag = new->c_cflag;
	switch (c_cflag & CSIZE) {
	case CS5:
		lcr = UART_LCR_WLEN5;
		break;
	case CS6:
		lcr = UART_LCR_WLEN6;
		break;
	case CS7:
		lcr = UART_LCR_WLEN7;
		break;
	default:
		lcr = UART_LCR_WLEN8;
		break;
	}

	if (c_cflag & CSTOPB)
		lcr |= UART_LCR_STOP;
	if (c_cflag & PARENB)
		lcr |= UART_LCR_PARITY;
	if ((c_cflag & PARODD) != PARODD)
		lcr |= UART_LCR_EPAR;
	if (c_cflag & CMSPAR)
		lcr |= UART_LCR_SPAR;

	baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16);
	quot = uart_get_divisor(port, baud);

	fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10;

	spin_lock_irqsave(&port->lock, flags);

	uart_update_timeout(port, c_cflag, baud);

	c_iflag = new->c_iflag;

	/* which LSR bits are reported to the tty layer ... */
	port->read_status_mask = UART_LSR_THRE | UART_LSR_OE | UART_LSR_DR;
	if (c_iflag & INPCK)
		port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
	if (c_iflag & (BRKINT | PARMRK))
		port->read_status_mask |= UART_LSR_BI;

	/* ... and which are silently discarded */
	port->ignore_status_mask = 0;
	if (c_iflag & IGNPAR)
		port->ignore_status_mask |= UART_LSR_FE | UART_LSR_PE;
	if (c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART_LSR_BI;
		if (c_iflag & IGNPAR)
			port->ignore_status_mask |= UART_LSR_OE;
	}

	if ((c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_LSR_DR;

	ier = siu_read(port, UART_IER);
	ier &= ~UART_IER_MSI;
	if (UART_ENABLE_MS(port, c_cflag))
		ier |= UART_IER_MSI;
	siu_write(port, UART_IER, ier);

	/* program the baud divisor with DLAB set */
	siu_write(port, UART_LCR, lcr | UART_LCR_DLAB);

	siu_write(port, UART_DLL, (uint8_t)quot);
	siu_write(port, UART_DLM, (uint8_t)(quot >> 8));

	siu_write(port, UART_LCR, lcr);

	siu_write(port, UART_FCR, fcr);

	siu_set_mctrl(port, port->mctrl);

	spin_unlock_irqrestore(&port->lock, flags);
}
/*
 * Power management: state 0 powers the port up (supply its clock),
 * state 3 powers it down (mask the clock). Other states are ignored.
 */
static void siu_pm(struct uart_port *port, unsigned int state, unsigned int oldstate)
{
	switch (state) {
	case 0:
		switch (port->type) {
		case PORT_VR41XX_SIU:
			vr41xx_supply_clock(SIU_CLOCK);
			break;
		case PORT_VR41XX_DSIU:
			vr41xx_supply_clock(DSIU_CLOCK);
			break;
		}
		break;
	case 3:
		switch (port->type) {
		case PORT_VR41XX_SIU:
			vr41xx_mask_clock(SIU_CLOCK);
			break;
		case PORT_VR41XX_DSIU:
			vr41xx_mask_clock(DSIU_CLOCK);
			break;
		}
		break;
	}
}
static const char *siu_type(struct uart_port *port)
{
return siu_type_name(port);
}
/*
 * Release the port's resources: unmap the registers if we mapped them
 * and give back the memory region claimed in siu_request_port().
 */
static void siu_release_port(struct uart_port *port)
{
	unsigned long size;

	if (port->flags	& UPF_IOREMAP) {
		iounmap(port->membase);
		port->membase = NULL;
	}

	size = siu_port_size(port);
	release_mem_region(port->mapbase, size);
}
/*
 * Claim the port's register region and, when UPF_IOREMAP is set, map it
 * into kernel virtual space. Returns 0, -EBUSY or -ENOMEM.
 */
static int siu_request_port(struct uart_port *port)
{
	unsigned long size;
	struct resource *res;

	size = siu_port_size(port);
	res = request_mem_region(port->mapbase, size, siu_type_name(port));
	if (res == NULL)
		return -EBUSY;

	if (port->flags & UPF_IOREMAP) {
		port->membase = ioremap(port->mapbase, size);
		if (port->membase == NULL) {
			release_resource(res);
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * Autoconfiguration hook: determine the port type from line/IRQ and
 * claim its resources. The request result is deliberately ignored here
 * (serial_core detects an unusable port via membase later).
 */
static void siu_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE) {
		port->type = siu_check_type(port);
		(void)siu_request_port(port);
	}
}
/* Validate user-supplied serial settings against the actual port. */
static int siu_verify_port(struct uart_port *port, struct serial_struct *serial)
{
	int matches = (port->type == PORT_VR41XX_SIU ||
		       port->type == PORT_VR41XX_DSIU) &&
		      port->irq == serial->irq &&
		      port->iotype == serial->io_type &&
		      port->mapbase == (unsigned long)serial->iomem_base;

	return matches ? 0 : -EINVAL;
}
/* serial_core operations table for the VR41xx SIU/DSIU ports. */
static struct uart_ops siu_uart_ops = {
	.tx_empty	= siu_tx_empty,
	.set_mctrl	= siu_set_mctrl,
	.get_mctrl	= siu_get_mctrl,
	.stop_tx	= siu_stop_tx,
	.start_tx	= siu_start_tx,
	.stop_rx	= siu_stop_rx,
	.enable_ms	= siu_enable_ms,
	.break_ctl	= siu_break_ctl,
	.startup	= siu_startup,
	.shutdown	= siu_shutdown,
	.set_termios	= siu_set_termios,
	.pm		= siu_pm,
	.type		= siu_type,
	.release_port	= siu_release_port,
	.request_port	= siu_request_port,
	.config_port	= siu_config_port,
	.verify_port	= siu_verify_port,
};
static int siu_init_ports(struct platform_device *pdev)
{
struct uart_port *port;
struct resource *res;
int *type = pdev->dev.platform_data;
int i;
if (!type)
return 0;
port = siu_uart_ports;
for (i = 0; i < SIU_PORTS_MAX; i++) {
port->type = type[i];
if (port->type == PORT_UNKNOWN)
continue;
port->irq = platform_get_irq(pdev, i);
port->uartclk = SIU_BAUD_BASE * 16;
port->fifosize = 16;
port->regshift = 0;
port->iotype = UPIO_MEM;
port->flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF;
port->line = i;
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
port->mapbase = res->start;
port++;
}
return i;
}
#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
/*
 * Busy-wait (bounded) until the transmitter is completely empty; when
 * hardware flow control is in effect for the console, additionally wait
 * for CTS. Any break seen while polling is latched in lsr_break_flag[]
 * for receive_chars() to pick up.
 */
static void wait_for_xmitr(struct uart_port *port)
{
	int timeout = 10000;
	uint8_t lsr, msr;

	do {
		lsr = siu_read(port, UART_LSR);
		if (lsr & UART_LSR_BI)
			lsr_break_flag[port->line] = UART_LSR_BI;

		if ((lsr & BOTH_EMPTY) == BOTH_EMPTY)
			break;
	} while (timeout-- > 0);

	if (port->flags & UPF_CONS_FLOW) {
		timeout = 1000000;

		do {
			msr = siu_read(port, UART_MSR);
			if ((msr & UART_MSR_CTS) != 0)
				break;
		} while (timeout-- > 0);
	}
}
/* Console character output: wait until the transmitter is idle, then send. */
static void siu_console_putchar(struct uart_port *port, int ch)
{
	wait_for_xmitr(port);
	siu_write(port, UART_TX, ch);
}
/*
 * Console write: interrupts are masked for the duration so the polled
 * output does not interleave with the IRQ-driven TX path; the previous
 * IER value is restored once the FIFO has drained.
 */
static void siu_console_write(struct console *con, const char *s, unsigned count)
{
	struct uart_port *port;
	uint8_t ier;

	port = &siu_uart_ports[con->index];

	ier = siu_read(port, UART_IER);
	siu_write(port, UART_IER, 0);

	uart_console_write(port, s, count, siu_console_putchar);

	wait_for_xmitr(port);
	siu_write(port, UART_IER, ier);
}
/*
 * Console setup: map the port registers if not yet mapped, select the
 * RS-232C interface for the SIU, then apply the (optionally parsed)
 * console options.
 *
 * Fix: the ioremap() return value was previously not checked; a failed
 * mapping left membase NULL and later console writes would dereference
 * it. Now -ENODEV is returned instead.
 */
static int __init siu_console_setup(struct console *con, char *options)
{
	struct uart_port *port;
	int baud = 9600;
	int parity = 'n';
	int bits = 8;
	int flow = 'n';

	if (con->index >= SIU_PORTS_MAX)
		con->index = 0;

	port = &siu_uart_ports[con->index];
	if (port->membase == NULL) {
		if (port->mapbase == 0)
			return -ENODEV;
		port->membase = ioremap(port->mapbase, siu_port_size(port));
		if (port->membase == NULL)
			return -ENODEV;
	}

	if (port->type == PORT_VR41XX_SIU)
		vr41xx_select_siu_interface(SIU_INTERFACE_RS232C);

	if (options != NULL)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(port, con, baud, parity, bits, flow);
}
static struct uart_driver siu_uart_driver;

/* Console device "ttyVR"; .data links back to the uart driver. */
static struct console siu_console = {
	.name	= "ttyVR",
	.write	= siu_console_write,
	.device	= uart_console_device,
	.setup	= siu_console_setup,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
	.data	= &siu_uart_driver,
};
/*
 * Early console registration (console_initcall): wire the ops pointer
 * into every port so the console can drive the hardware before the
 * platform driver probes.
 */
static int __devinit siu_console_init(void)
{
	struct uart_port *port;
	int i;

	for (i = 0; i < SIU_PORTS_MAX; i++) {
		port = &siu_uart_ports[i];
		port->ops = &siu_uart_ops;
	}

	register_console(&siu_console);

	return 0;
}

console_initcall(siu_console_init);
#define SERIAL_VR41XX_CONSOLE &siu_console
#else
#define SERIAL_VR41XX_CONSOLE NULL
#endif
/* uart driver definition; .nr is filled in by siu_probe(). */
static struct uart_driver siu_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "SIU",
	.dev_name	= "ttyVR",
	.major		= SIU_MAJOR,
	.minor		= SIU_MINOR_BASE,
	.cons		= SERIAL_VR41XX_CONSOLE,
};
/*
 * Platform probe: discover the ports from platform data, register the
 * uart driver sized accordingly, and add each port. A failure adding a
 * later port stops the loop but keeps the ports already added; only a
 * failure on the very first port unregisters the driver and fails the
 * probe.
 */
static int __devinit siu_probe(struct platform_device *dev)
{
	struct uart_port *port;
	int num, i, retval;

	num = siu_init_ports(dev);
	if (num <= 0)
		return -ENODEV;

	siu_uart_driver.nr = num;
	retval = uart_register_driver(&siu_uart_driver);
	if (retval)
		return retval;

	for (i = 0; i < num; i++) {
		port = &siu_uart_ports[i];
		port->ops = &siu_uart_ops;
		port->dev = &dev->dev;

		retval = uart_add_one_port(&siu_uart_driver, port);
		if (retval < 0) {
			port->dev = NULL;
			break;
		}
	}

	if (i == 0 && retval < 0) {
		uart_unregister_driver(&siu_uart_driver);
		return retval;
	}

	return 0;
}
/*
 * Platform remove: detach every port belonging to this device, then
 * unregister the uart driver.
 */
static int __devexit siu_remove(struct platform_device *dev)
{
	struct uart_port *port;
	int i;

	for (i = 0; i < siu_uart_driver.nr; i++) {
		port = &siu_uart_ports[i];
		if (port->dev == &dev->dev) {
			uart_remove_one_port(&siu_uart_driver, port);
			port->dev = NULL;
		}
	}

	uart_unregister_driver(&siu_uart_driver);

	return 0;
}
/* Suspend each SIU/DSIU port that belongs to this platform device. */
static int siu_suspend(struct platform_device *dev, pm_message_t state)
{
    int n;

    for (n = 0; n < siu_uart_driver.nr; n++) {
        struct uart_port *p = &siu_uart_ports[n];

        if (p->dev != &dev->dev)
            continue;
        if (p->type != PORT_VR41XX_SIU && p->type != PORT_VR41XX_DSIU)
            continue;

        uart_suspend_port(&siu_uart_driver, p);
    }

    return 0;
}
/* Resume each SIU/DSIU port that belongs to this platform device. */
static int siu_resume(struct platform_device *dev)
{
    int n;

    for (n = 0; n < siu_uart_driver.nr; n++) {
        struct uart_port *p = &siu_uart_ports[n];

        if (p->dev != &dev->dev)
            continue;
        if (p->type != PORT_VR41XX_SIU && p->type != PORT_VR41XX_DSIU)
            continue;

        uart_resume_port(&siu_uart_driver, p);
    }

    return 0;
}
/* Platform glue: binds to the platform device named "SIU". */
static struct platform_driver siu_device_driver = {
    .probe   = siu_probe,
    .remove  = __devexit_p(siu_remove),
    .suspend = siu_suspend,
    .resume  = siu_resume,
    .driver  = {
        .name  = "SIU",
        .owner = THIS_MODULE,
    },
};
/* Module entry/exit: only (un)register the platform driver; the ports
 * themselves are set up in siu_probe() when the device appears. */
static int __init vr41xx_siu_init(void)
{
    return platform_driver_register(&siu_device_driver);
}

static void __exit vr41xx_siu_exit(void)
{
    platform_driver_unregister(&siu_device_driver);
}

module_init(vr41xx_siu_init);
module_exit(vr41xx_siu_exit);
| gpl-2.0 |
tobigun/samsung-kernel-smg800f | drivers/battery/bq24260_charger.c | 26 | 12280 | /*
* bq24260_charger.c
* Samsung bq24260 Charger Driver
*
* Copyright (C) 2012 Samsung Electronics
*
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define DEBUG
#include <linux/battery/sec_charger.h>
/* Write one byte to charger register @reg; returns the SMBus result
 * (negative errno on failure, which is also logged). */
static int bq24260_i2c_write(struct i2c_client *client,
    int reg, u8 *buf)
{
    int rc = i2c_smbus_write_i2c_block_data(client, reg, 1, buf);

    if (rc < 0)
        dev_err(&client->dev, "%s: Error(%d)\n", __func__, rc);

    return rc;
}
/* Read one byte from charger register @reg into *buf; returns the SMBus
 * result (negative errno on failure, which is also logged). */
static int bq24260_i2c_read(struct i2c_client *client,
    int reg, u8 *buf)
{
    int rc = i2c_smbus_read_i2c_block_data(client, reg, 1, buf);

    if (rc < 0)
        dev_err(&client->dev, "%s: Error(%d)\n", __func__, rc);

    return rc;
}
/*
 * Write a table of register settings to the charger.
 *
 * @buf holds 3-byte records: byte 0 is the register address, byte 1 the
 * value.  Byte 2 of each record is not referenced here -- presumably
 * padding or a delay field; TODO confirm against the callers' tables.
 */
static void bq24260_i2c_write_array(struct i2c_client *client,
    u8 *buf, int size)
{
    int i;
    for (i = 0; i < size; i += 3)
        bq24260_i2c_write(client, (u8) (*(buf + i)), (buf + i) + 1);
}
/* Read-compare-write: update register @reg to @datum only if it differs,
 * then read it back and log the resulting value. */
static void bq24260_set_command(struct i2c_client *client,
    int reg, int datum)
{
    u8 cur = 0;
    int rc = bq24260_i2c_read(client, reg, &cur);

    if (rc < 0)
        return;

    dev_dbg(&client->dev, "%s : reg(0x%02x): 0x%02x(0x%02x)",
        __func__, reg, cur, datum);

    if (cur == datum)
        return;

    cur = datum;
    if (bq24260_i2c_write(client, reg, &cur) < 0)
        dev_err(&client->dev,
            "%s : error!\n", __func__);

    if (bq24260_i2c_read(client, reg, &cur) >= 0)
        dev_dbg(&client->dev, " => 0x%02x\n", cur);
}
/* Dump the whole register file (0x00..0x06) to the debug log. */
static void bq24260_test_read(struct i2c_client *client)
{
    u32 reg;

    for (reg = 0; reg <= 0x06; reg++) {
        u8 val = 0;

        bq24260_i2c_read(client, reg, &val);
        dev_dbg(&client->dev,
            "bq24260 addr : 0x%02x data : 0x%02x\n", reg, val);
    }
}
/*
 * Append the hex values of registers 0x00..0x06 to @str.
 *
 * NOTE(review): output is built with unbounded sprintf() plus repeated
 * strlen(); the caller must supply a buffer large enough for seven
 * "0x%x, " items (sec_hal_chg_show_attrs passes a 1024-byte buffer).
 */
static void bq24260_read_regs(struct i2c_client *client, char *str)
{
    u8 data = 0;
    u32 addr = 0;
    for (addr = 0; addr <= 0x06; addr++) {
        bq24260_i2c_read(client, addr, &data);
        sprintf(str+strlen(str), "0x%x, ", data);
    }
}
/* Map the STATUS register's state bits [5:4] onto a POWER_SUPPLY_STATUS_*
 * value (3 is treated as a fault, see bq24260_get_charging_health()). */
static int bq24260_get_charging_status(struct i2c_client *client)
{
    u8 reg = 0;
    int status;

    bq24260_i2c_read(client, BQ24260_STATUS, &reg);
    dev_info(&client->dev,
        "%s : charger status(0x%02x)\n", __func__, reg);

    switch ((reg >> 4) & 0x3) {
    case 0:
        status = POWER_SUPPLY_STATUS_DISCHARGING;
        break;
    case 1:
        status = POWER_SUPPLY_STATUS_CHARGING;
        break;
    case 2:
        status = POWER_SUPPLY_STATUS_FULL;
        break;
    default:
        status = POWER_SUPPLY_STATUS_NOT_CHARGING;
        break;
    }

    return status;
}
/* When STATUS bits [5:4] report a fault (0x30), decode fault code bits
 * [2:0]: 1 = input over-voltage, 2 = input under-voltage.  Everything
 * else is reported as GOOD. */
static int bq24260_get_charging_health(struct i2c_client *client)
{
    u8 reg = 0;
    int health = POWER_SUPPLY_HEALTH_GOOD;

    bq24260_i2c_read(client, BQ24260_STATUS, &reg);
    dev_info(&client->dev,
        "%s : charger status(0x%02x)\n", __func__, reg);

    if ((reg & 0x30) == 0x30) { /* check for fault */
        switch (reg & 0x07) {
        case 0x01:
            health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
            break;
        case 0x02:
            health = POWER_SUPPLY_HEALTH_UNDERVOLTAGE;
            break;
        }
    }

    return health;
}
static u8 bq24260_get_float_voltage_data(
int float_voltage)
{
u8 data;
if (float_voltage < 3500)
float_voltage = 3500;
data = (float_voltage - 3500) / 20;
return data << 2;
}
/* Encode an input current limit (mA) for the CONTROL register, field at
 * bits [6:4].  Thresholds map to the chip's discrete steps; 2000mA uses
 * code 0x06 (programmed as 1950mA) and anything above means "no limit". */
static u8 bq24260_get_input_current_limit_data(
    int input_current)
{
    static const struct {
        int ma;
        u8 code;
    } steps[] = {
        { 100, 0x00 }, { 150, 0x01 }, { 500, 0x02 },
        { 900, 0x03 }, { 1000, 0x04 }, { 2000, 0x06 },
    };
    u8 code = 0x07; /* no limit */
    int i;

    for (i = 0; i < ARRAY_SIZE(steps); i++) {
        if (input_current <= steps[i].ma) {
            code = steps[i].code;
            break;
        }
    }

    return code << 4;
}
/*
 * Encode a termination current (mA) for the CURRENT register.
 *
 * The field uses a 50mA offset with 50mA per step, max 300mA (code 5).
 * Clamp the input to the documented 50..300mA range: without this,
 * values below 50mA underflow and values above 300mA overflow into the
 * neighbouring fast-charge bits -- the caller passes
 * full_check_current_1st straight through (it only clamps the value in
 * its debug print, not in the actual call).
 */
static u8 bq24260_get_termination_current_limit_data(
    int termination_current)
{
    if (termination_current < 50)
        termination_current = 50;
    else if (termination_current > 300)
        termination_current = 300;

    return (termination_current - 50) / 50;
}
static u8 bq24260_get_fast_charging_current_data(
int fast_charging_current)
{
u8 data;
/* default offset 500mA */
if (fast_charging_current < 500)
fast_charging_current = 500;
data = (fast_charging_current - 500) / 100;
return data << 3;
}
/*
 * Program the charger according to the current cable type.
 *
 * With no cable (POWER_SUPPLY_TYPE_BATTERY) charging is disabled via
 * the CONTROL register.  Otherwise charging is enabled and the input
 * current limit, float voltage, fast-charge current and termination
 * current are written from platform data.  Skipped entirely while OTG
 * is active (charging_current < 0).
 */
static void bq24260_charger_function_conrol(
    struct i2c_client *client)
{
    struct sec_charger_info *charger = i2c_get_clientdata(client);
    union power_supply_propval val;
    int full_check_type;
    u8 data;

    if (charger->charging_current < 0) {
        dev_dbg(&client->dev,
            "%s : OTG is activated. Ignore command!\n", __func__);
        return;
    }

    if (charger->cable_type ==
        POWER_SUPPLY_TYPE_BATTERY) {
        data = 0x00;
        bq24260_i2c_read(client, BQ24260_CONTROL, &data);
        /* Set the charge-disable bit (cleared in the else branch). */
        data |= 0x2;
        data &= 0x7f; /* Prevent register reset */
        bq24260_set_command(client,
            BQ24260_CONTROL, data);
    } else {
        data = 0x00;
        bq24260_i2c_read(client, BQ24260_CONTROL, &data);
        /* Enable charging */
        data &= 0x7d; /*default enabled*/
        /* 1st or 2nd charging phase decides which full-check applies. */
        psy_do_property("battery", get,
            POWER_SUPPLY_PROP_CHARGE_NOW, val);
        if (val.intval == SEC_BATTERY_CHARGING_1ST)
            full_check_type = charger->pdata->full_check_type;
        else
            full_check_type = charger->pdata->full_check_type_2nd;
        /* Termination setting */
        switch (full_check_type) {
        case SEC_BATTERY_FULLCHARGED_CHGGPIO:
        case SEC_BATTERY_FULLCHARGED_CHGINT:
        case SEC_BATTERY_FULLCHARGED_CHGPSY:
            /* Enable Current Termination */
            data |= 0x04;
            break;
        default:
            data &= 0x7b;
            break;
        }
        /* Input current limit */
        dev_dbg(&client->dev, "%s : input current (%dmA)\n",
            __func__, charger->pdata->charging_current
            [charger->cable_type].input_current_limit);
        data &= 0x0F;
        data |= bq24260_get_input_current_limit_data(
            charger->pdata->charging_current
            [charger->cable_type].input_current_limit);
        bq24260_set_command(client,
            BQ24260_CONTROL, data);
        data = 0x00;
        /* Float voltage */
        dev_dbg(&client->dev, "%s : float voltage (%dmV)\n",
            __func__, charger->pdata->chg_float_voltage);
        data |= bq24260_get_float_voltage_data(
            charger->pdata->chg_float_voltage);
        bq24260_set_command(client,
            BQ24260_VOLTAGE, data);
        data = 0x00;
        /* Fast charge and Termination current */
        dev_dbg(&client->dev, "%s : fast charging current (%dmA)\n",
            __func__, charger->charging_current);
        data |= bq24260_get_fast_charging_current_data(
            charger->charging_current);
        /* NOTE(review): the 300mA clamp below only affects the debug
         * print; the raw full_check_current_1st is what gets encoded. */
        dev_dbg(&client->dev, "%s : termination current (%dmA)\n",
            __func__, charger->pdata->charging_current[
            charger->cable_type].full_check_current_1st >= 300 ?
            300 : charger->pdata->charging_current[
            charger->cable_type].full_check_current_1st);
        data |= bq24260_get_termination_current_limit_data(
            charger->pdata->charging_current[
            charger->cable_type].full_check_current_1st);
        bq24260_set_command(client,
            BQ24260_CURRENT, data);
        /* Special Charger Voltage
         * Normal charge current
         */
        bq24260_i2c_read(client, BQ24260_SPECIAL, &data);
        data &= 0xdf;
        bq24260_set_command(client,
            BQ24260_SPECIAL, data);
    }
}
/* Toggle the OTG (boost) bit, STATUS register bit 6: off when running on
 * battery (no cable), on otherwise. */
static void bq24260_charger_otg_conrol(
    struct i2c_client *client)
{
    struct sec_charger_info *charger = i2c_get_clientdata(client);
    u8 reg;

    if (charger->cable_type ==
        POWER_SUPPLY_TYPE_BATTERY) {
        dev_info(&client->dev, "%s : turn off OTG\n", __func__);
        bq24260_i2c_read(client, BQ24260_STATUS, &reg);
        reg &= 0xbf;
    } else {
        dev_info(&client->dev, "%s : turn on OTG\n", __func__);
        bq24260_i2c_read(client, BQ24260_STATUS, &reg);
        reg |= 0x40;
    }

    bq24260_set_command(client, BQ24260_STATUS, reg);
}
/* Report FAST while the STATUS state bits [5:4] say "charge in progress"
 * (value 1), NONE for every other state. */
static int bq24260_get_charge_type(struct i2c_client *client)
{
    u8 reg;

    bq24260_i2c_read(client, BQ24260_STATUS, &reg);

    if (((reg & 0x30) >> 4) == 0x01)
        return POWER_SUPPLY_CHARGE_TYPE_FAST;

    return POWER_SUPPLY_CHARGE_TYPE_NONE;
}
/* HAL hook: dump the register file once at init time (debug aid). */
bool sec_hal_chg_init(struct i2c_client *client)
{
    bq24260_test_read(client);
    return true;
}

/* No work is needed across suspend/resume; the hooks just succeed. */
bool sec_hal_chg_suspend(struct i2c_client *client)
{
    return true;
}

bool sec_hal_chg_resume(struct i2c_client *client)
{
    return true;
}
/* HAL property getter; returns false for properties this chip does not
 * report.  CURRENT_NOW decodes the programmed fast-charge current from
 * the CURRENT register (500mA offset, 100mA per step in bits [7:3]). */
bool sec_hal_chg_get_property(struct i2c_client *client,
    enum power_supply_property psp,
    union power_supply_propval *val)
{
    struct sec_charger_info *charger = i2c_get_clientdata(client);
    u8 reg;

    switch (psp) {
    case POWER_SUPPLY_PROP_STATUS:
        val->intval = bq24260_get_charging_status(client);
        break;
    case POWER_SUPPLY_PROP_CHARGE_TYPE:
        val->intval = bq24260_get_charge_type(client);
        break;
    case POWER_SUPPLY_PROP_HEALTH:
        val->intval = bq24260_get_charging_health(client);
        break;
    case POWER_SUPPLY_PROP_CURRENT_NOW:
        if (!charger->charging_current) {
            val->intval = 0;
        } else {
            /* Rsns 0.068 Ohm */
            bq24260_i2c_read(client, BQ24260_CURRENT, &reg);
            val->intval = (reg >> 3) * 100 + 500;
        }
        dev_dbg(&client->dev,
            "%s : set-current(%dmA), current now(%dmA)\n",
            __func__, charger->charging_current, val->intval);
        break;
    default:
        return false;
    }

    return true;
}
/*
 * HAL property setter.
 *
 * ONLINE toggles the external charge-enable GPIO according to the cable
 * type, then (without a break) continues into the CURRENT_NOW handling,
 * which dispatches on the sign of charging_current: negative = OTG
 * mode, positive = charging, zero = program both paths off.
 */
bool sec_hal_chg_set_property(struct i2c_client *client,
    enum power_supply_property psp,
    const union power_supply_propval *val)
{
    struct sec_charger_info *charger = i2c_get_clientdata(client);
    switch (psp) {
    /* val->intval : type */
    case POWER_SUPPLY_PROP_ONLINE:
        if (charger->pdata->chg_gpio_en) {
            if (gpio_request(charger->pdata->chg_gpio_en,
                "CHG_EN") < 0) {
                dev_err(&client->dev,
                    "failed to request vbus_in gpio\n");
                break;
            }
            if (charger->cable_type ==
                POWER_SUPPLY_TYPE_BATTERY)
                gpio_set_value_cansleep(
                    charger->pdata->chg_gpio_en,
                    charger->pdata->chg_polarity_en ?
                    0 : 1);
            else
                gpio_set_value_cansleep(
                    charger->pdata->chg_gpio_en,
                    charger->pdata->chg_polarity_en ?
                    1 : 0);
            gpio_free(charger->pdata->chg_gpio_en);
        }
        /* NOTE(review): no break here -- ONLINE falls through into
         * CURRENT_NOW, apparently so a cable change also reprograms
         * the charger registers.  Confirm this is intentional. */
        /* val->intval : charging current */
    case POWER_SUPPLY_PROP_CURRENT_NOW:
        if (charger->charging_current < 0)
            bq24260_charger_otg_conrol(client);
        else if (charger->charging_current > 0)
            bq24260_charger_function_conrol(client);
        else {
            bq24260_charger_function_conrol(client);
            bq24260_charger_otg_conrol(client);
        }
        bq24260_test_read(client);
        break;
    default:
        return false;
    }
    return true;
}
/* sysfs "show": format the cached register address, cached data byte, or
 * a dump of all registers (CHG_REGS allocates a scratch buffer). */
ssize_t sec_hal_chg_show_attrs(struct device *dev,
    const ptrdiff_t offset, char *buf)
{
    struct power_supply *psy = dev_get_drvdata(dev);
    struct sec_charger_info *chg =
        container_of(psy, struct sec_charger_info, psy_chg);
    char *regs;
    int len = 0;

    switch (offset) {
    case CHG_REG:
        len += scnprintf(buf + len, PAGE_SIZE - len, "%x\n",
            chg->reg_addr);
        break;
    case CHG_DATA:
        len += scnprintf(buf + len, PAGE_SIZE - len, "%x\n",
            chg->reg_data);
        break;
    case CHG_REGS:
        regs = kzalloc(1024, GFP_KERNEL);
        if (!regs)
            return -ENOMEM;
        bq24260_read_regs(chg->client, regs);
        len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n",
            regs);
        kfree(regs);
        break;
    default:
        len = -EINVAL;
        break;
    }

    return len;
}
/* sysfs "store": CHG_REG latches a register address and caches its
 * current value; CHG_DATA writes a byte to the latched address.  Returns
 * @count on success, 0 when the hex parse fails, -EINVAL otherwise. */
ssize_t sec_hal_chg_store_attrs(struct device *dev,
    const ptrdiff_t offset,
    const char *buf, size_t count)
{
    struct power_supply *psy = dev_get_drvdata(dev);
    struct sec_charger_info *chg =
        container_of(psy, struct sec_charger_info, psy_chg);
    int ret = 0;
    int x = 0;
    u8 data = 0;

    switch (offset) {
    case CHG_REG:
        if (sscanf(buf, "%x\n", &x) != 1)
            break;
        chg->reg_addr = x;
        bq24260_i2c_read(chg->client,
            chg->reg_addr, &data);
        chg->reg_data = data;
        dev_dbg(dev, "%s: (read) addr = 0x%x, data = 0x%x\n",
            __func__, chg->reg_addr, chg->reg_data);
        ret = count;
        break;
    case CHG_DATA:
        if (sscanf(buf, "%x\n", &x) != 1)
            break;
        data = (u8)x;
        dev_dbg(dev, "%s: (write) addr = 0x%x, data = 0x%x\n",
            __func__, chg->reg_addr, data);
        bq24260_i2c_write(chg->client,
            chg->reg_addr, &data);
        ret = count;
        break;
    default:
        ret = -EINVAL;
        break;
    }

    return ret;
}
| gpl-2.0 |
coolstar/coreboot | src/vendorcode/amd/agesa/f15tn/Proc/CPU/Family/0x15/TN/F15TnLogicalIdTables.c | 26 | 4311 | /* $NoKeywords:$ */
/**
* @file
*
* AMD Family_15 Trinity Logical ID Table
*
* @xrefitem bom "File Content Label" "Release Content"
* @e project: AGESA
* @e sub-project: CPU/Family/0x15/TN
* @e \$Revision: 63425 $ @e \$Date: 2011-12-22 11:24:10 -0600 (Thu, 22 Dec 2011) $
*
*/
/*
******************************************************************************
*
* Copyright (c) 2008 - 2012, Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Advanced Micro Devices, Inc. nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
******************************************************************************
*/
/*----------------------------------------------------------------------------------------
* M O D U L E S U S E D
*----------------------------------------------------------------------------------------
*/
#include "AGESA.h"
#include "cpuRegisters.h"
#include "Filecode.h"
CODE_GROUP (G3_DXE)
RDATA_GROUP (G3_DXE)
#define FILECODE PROC_CPU_FAMILY_0X15_TN_F15TNLOGICALIDTABLES_FILECODE
/*----------------------------------------------------------------------------------------
* D E F I N I T I O N S A N D M A C R O S
*----------------------------------------------------------------------------------------
*/
/*----------------------------------------------------------------------------------------
* T Y P E D E F S A N D S T R U C T U R E S
*----------------------------------------------------------------------------------------
*/
/*----------------------------------------------------------------------------------------
* P R O T O T Y P E S O F L O C A L F U N C T I O N S
*----------------------------------------------------------------------------------------
*/
/* Forward declaration; table and definition follow below. */
VOID
GetF15TnLogicalIdAndRev (
  OUT   CONST CPU_LOGICAL_ID_XLAT **TnIdPtr,
  OUT   UINT8 *NumberOfElements,
  OUT   UINT64 *LogicalFamily,
  IN OUT   AMD_CONFIG_PARAMS *StdHeader
  );
/*----------------------------------------------------------------------------------------
* E X P O R T E D F U N C T I O N S
*----------------------------------------------------------------------------------------
*/
/*
 * Raw-ID to logical-ID translation entries for Family 15h Trinity.
 * The first field is presumably a packed CPUID model/stepping value --
 * TODO confirm against the CPU_LOGICAL_ID_XLAT definition.
 */
STATIC CONST CPU_LOGICAL_ID_XLAT ROMDATA CpuF15TnLogicalIdAndRevArray[] =
{
  {
    0x6131,
    AMD_F15_TN_A1 // RL_A1 (Richland)
  },
  {
    0x6101,
    AMD_F15_TN_A1
  },
  {
    0x6100,
    AMD_F15_TN_A0
  }
};
/**
 * Return the Trinity logical-ID translation table, its length, and the
 * logical family identifier.  StdHeader is accepted for interface
 * uniformity and not used here.
 */
VOID
GetF15TnLogicalIdAndRev (
  OUT   CONST CPU_LOGICAL_ID_XLAT **TnIdPtr,
  OUT   UINT8 *NumberOfElements,
  OUT   UINT64 *LogicalFamily,
  IN OUT   AMD_CONFIG_PARAMS *StdHeader
  )
{
  *TnIdPtr = CpuF15TnLogicalIdAndRevArray;
  *NumberOfElements = (UINT8) (sizeof (CpuF15TnLogicalIdAndRevArray) /
                               sizeof (CpuF15TnLogicalIdAndRevArray[0]));
  *LogicalFamily = AMD_FAMILY_15_TN;
}
| gpl-2.0 |
prpplague/RCA-DSB772WE | drivers/media/video/zc0301/zc0301_core.c | 26 | 50048 | /***************************************************************************
* Video4Linux2 driver for ZC0301[P] Image Processor and Control Chip *
* *
* Copyright (C) 2006-2007 by Luca Risolia <luca.risolia@studio.unibo.it> *
* *
* Informations about the chip internals needed to enable the I2C protocol *
* have been taken from the documentation of the ZC030x Video4Linux1 *
* driver written by Andrew Birkett <andy@nobugs.org> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the Free Software *
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. *
***************************************************************************/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/compiler.h>
#include <linux/ioctl.h>
#include <linux/poll.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/page-flags.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/uaccess.h>
#include "zc0301.h"
/*****************************************************************************/
#define ZC0301_MODULE_NAME "V4L2 driver for ZC0301[P] " \
"Image Processor and Control Chip"
#define ZC0301_MODULE_AUTHOR "(C) 2006-2007 Luca Risolia"
#define ZC0301_AUTHOR_EMAIL "<luca.risolia@studio.unibo.it>"
#define ZC0301_MODULE_LICENSE "GPL"
#define ZC0301_MODULE_VERSION "1:1.10"
#define ZC0301_MODULE_VERSION_CODE KERNEL_VERSION(1, 1, 10)
/*****************************************************************************/
MODULE_DEVICE_TABLE(usb, zc0301_id_table);
MODULE_AUTHOR(ZC0301_MODULE_AUTHOR " " ZC0301_AUTHOR_EMAIL);
MODULE_DESCRIPTION(ZC0301_MODULE_NAME);
MODULE_VERSION(ZC0301_MODULE_VERSION);
MODULE_LICENSE(ZC0301_MODULE_LICENSE);
static short video_nr[] = {[0 ... ZC0301_MAX_DEVICES-1] = -1};
module_param_array(video_nr, short, NULL, 0444);
MODULE_PARM_DESC(video_nr,
"\n<-1|n[,...]> Specify V4L2 minor mode number."
"\n -1 = use next available (default)"
"\n n = use minor number n (integer >= 0)"
"\nYou can specify up to "
__MODULE_STRING(ZC0301_MAX_DEVICES) " cameras this way."
"\nFor example:"
"\nvideo_nr=-1,2,-1 would assign minor number 2 to"
"\nthe second registered camera and use auto for the first"
"\none and for every other camera."
"\n");
static short force_munmap[] = {[0 ... ZC0301_MAX_DEVICES-1] =
ZC0301_FORCE_MUNMAP};
module_param_array(force_munmap, bool, NULL, 0444);
MODULE_PARM_DESC(force_munmap,
"\n<0|1[,...]> Force the application to unmap previously"
"\nmapped buffer memory before calling any VIDIOC_S_CROP or"
"\nVIDIOC_S_FMT ioctl's. Not all the applications support"
"\nthis feature. This parameter is specific for each"
"\ndetected camera."
"\n 0 = do not force memory unmapping"
"\n 1 = force memory unmapping (save memory)"
"\nDefault value is "__MODULE_STRING(ZC0301_FORCE_MUNMAP)"."
"\n");
static unsigned int frame_timeout[] = {[0 ... ZC0301_MAX_DEVICES-1] =
ZC0301_FRAME_TIMEOUT};
module_param_array(frame_timeout, uint, NULL, 0644);
MODULE_PARM_DESC(frame_timeout,
"\n<n[,...]> Timeout for a video frame in seconds."
"\nThis parameter is specific for each detected camera."
"\nDefault value is "__MODULE_STRING(ZC0301_FRAME_TIMEOUT)"."
"\n");
#ifdef ZC0301_DEBUG
static unsigned short debug = ZC0301_DEBUG_LEVEL;
module_param(debug, ushort, 0644);
MODULE_PARM_DESC(debug,
"\n<n> Debugging information level, from 0 to 3:"
"\n0 = none (use carefully)"
"\n1 = critical errors"
"\n2 = significant informations"
"\n3 = more verbose messages"
"\nLevel 3 is useful for testing only, when only "
"one device is used."
"\nDefault value is "__MODULE_STRING(ZC0301_DEBUG_LEVEL)"."
"\n");
#endif
/*****************************************************************************/
/*
 * Allocate up to @count frame buffers in a single vmalloc'ed region and
 * initialise the matching v4l2_buffer descriptors.
 *
 * The per-frame size depends on the I/O method: for read() (or when
 * force_munmap is set) the current pix_format is used, otherwise the
 * full crop bounds.  On allocation failure the count is reduced until
 * the region fits; returns the number of buffers obtained (may be 0).
 */
static u32
zc0301_request_buffers(struct zc0301_device* cam, u32 count,
    enum zc0301_io_method io)
{
    struct v4l2_pix_format* p = &(cam->sensor.pix_format);
    struct v4l2_rect* r = &(cam->sensor.cropcap.bounds);
    const size_t imagesize = cam->module_param.force_munmap ||
        io == IO_READ ?
        (p->width * p->height * p->priv) / 8 :
        (r->width * r->height * p->priv) / 8;
    void* buff = NULL;
    u32 i;

    if (count > ZC0301_MAX_FRAMES)
        count = ZC0301_MAX_FRAMES;

    /* Shrink the request until one region for all buffers fits. */
    cam->nbuffers = count;
    while (cam->nbuffers > 0) {
        if ((buff = vmalloc_32_user(cam->nbuffers *
                PAGE_ALIGN(imagesize))))
            break;
        cam->nbuffers--;
    }

    for (i = 0; i < cam->nbuffers; i++) {
        /* Each buffer is a page-aligned slice of the one region. */
        cam->frame[i].bufmem = buff + i*PAGE_ALIGN(imagesize);
        cam->frame[i].buf.index = i;
        cam->frame[i].buf.m.offset = i*PAGE_ALIGN(imagesize);
        cam->frame[i].buf.length = imagesize;
        cam->frame[i].buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        cam->frame[i].buf.sequence = 0;
        cam->frame[i].buf.field = V4L2_FIELD_NONE;
        cam->frame[i].buf.memory = V4L2_MEMORY_MMAP;
        cam->frame[i].buf.flags = 0;
    }

    return cam->nbuffers;
}
/* Free the single vmalloc'ed region backing all frame buffers (anchored
 * at frame[0].bufmem) and reset the current-frame pointer. */
static void zc0301_release_buffers(struct zc0301_device* cam)
{
    if (cam->nbuffers != 0) {
        vfree(cam->frame[0].bufmem);
        cam->nbuffers = 0;
    }

    cam->frame_current = NULL;
}
static void zc0301_empty_framequeues(struct zc0301_device* cam)
{
u32 i;
INIT_LIST_HEAD(&cam->inqueue);
INIT_LIST_HEAD(&cam->outqueue);
for (i = 0; i < ZC0301_MAX_FRAMES; i++) {
cam->frame[i].state = F_UNUSED;
cam->frame[i].buf.bytesused = 0;
}
}
/*
 * Move every frame from the outqueue back onto the inqueue in F_QUEUED
 * state, then reset the outqueue head.
 *
 * NOTE(review): entries are re-linked with list_add() while being
 * iterated with list_for_each_entry() (not the _safe variant); this
 * only works because of the trailing INIT_LIST_HEAD() and looks
 * fragile -- confirm against the caller's locking/usage rules.
 */
static void zc0301_requeue_outqueue(struct zc0301_device* cam)
{
    struct zc0301_frame_t *i;
    list_for_each_entry(i, &cam->outqueue, frame) {
        i->state = F_QUEUED;
        list_add(&i->frame, &cam->inqueue);
    }
    INIT_LIST_HEAD(&cam->outqueue);
}
static void zc0301_queue_unusedframes(struct zc0301_device* cam)
{
unsigned long lock_flags;
u32 i;
for (i = 0; i < cam->nbuffers; i++)
if (cam->frame[i].state == F_UNUSED) {
cam->frame[i].state = F_QUEUED;
spin_lock_irqsave(&cam->queue_lock, lock_flags);
list_add_tail(&cam->frame[i].frame, &cam->inqueue);
spin_unlock_irqrestore(&cam->queue_lock, lock_flags);
}
}
/*****************************************************************************/
/* Write @value to chip register @index via a vendor control request.
 * Returns 0 on success, -1 on failure (logged). */
int zc0301_write_reg(struct zc0301_device* cam, u16 index, u16 value)
{
    struct usb_device* udev = cam->usbdev;
    int rc;

    rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0xa0, 0x40,
             value, index, NULL, 0, ZC0301_CTRL_TIMEOUT);
    if (rc >= 0)
        return 0;

    DBG(3, "Failed to write a register (index 0x%04X, "
           "value 0x%02X, error %d)",index, value, rc);
    return -1;
}
/* Read one byte from chip register @index via a vendor control request.
 * Returns the byte on success, -1 on failure (logged). */
int zc0301_read_reg(struct zc0301_device* cam, u16 index)
{
    struct usb_device* udev = cam->usbdev;
    u8* buff = cam->control_buffer;
    int rc;

    rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0xa1, 0xc0,
             0x0001, index, buff, 1, ZC0301_CTRL_TIMEOUT);
    if (rc < 0)
        DBG(3, "Failed to read a register (index 0x%04X, error %d)",
            index, rc);

    PDBGG("Read: index 0x%04X, value: 0x%04X", index, (int)(*buff));

    return (rc >= 0) ? (int)(*buff) : -1;
}
/* Read an 8- or 16-bit value (per @length) from the image sensor through
 * the chip's I2C bridge registers.  Returns the value or -1 on error. */
int zc0301_i2c_read(struct zc0301_device* cam, u16 address, u8 length)
{
    int err = 0, status, lo, hi, value;

    /* Load the slave register address and trigger a read cycle. */
    err += zc0301_write_reg(cam, 0x0092, address);
    err += zc0301_write_reg(cam, 0x0090, 0x02);

    msleep(1);

    status = zc0301_read_reg(cam, 0x0091);
    if (status < 0)
        err += status;

    lo = zc0301_read_reg(cam, 0x0095);
    if (lo < 0)
        err += lo;

    hi = zc0301_read_reg(cam, 0x0096);
    if (hi < 0)
        err += hi;

    value = (length <= 1) ? lo : lo | (hi << 8);

    if (err)
        DBG(3, "I2C read failed at address 0x%04X, value: 0x%04X",
            address, value);

    PDBGG("I2C read: address 0x%04X, value: 0x%04X", address, value);

    return err ? -1 : value;
}
/* Write a 16-bit value to the image sensor through the chip's I2C bridge
 * registers.  Returns 0 on success, -1 on error. */
int zc0301_i2c_write(struct zc0301_device* cam, u16 address, u16 value)
{
    int err = 0, status;

    /* Slave address, data low/high, then trigger a write cycle. */
    err += zc0301_write_reg(cam, 0x0092, address);
    err += zc0301_write_reg(cam, 0x0093, value & 0xff);
    err += zc0301_write_reg(cam, 0x0094, value >> 8);
    err += zc0301_write_reg(cam, 0x0090, 0x01);

    msleep(1);

    status = zc0301_read_reg(cam, 0x0091);
    if (status < 0)
        err += status;

    if (err)
        DBG(3, "I2C write failed at address 0x%04X, value: 0x%04X",
            address, value);

    PDBGG("I2C write: address 0x%04X, value: 0x%04X", address, value);

    return err ? -1 : 0;
}
/*****************************************************************************/
/*
 * Isochronous URB completion handler.
 *
 * Scans each iso packet for the JPEG SOI marker (bytes FF D8, compared
 * here as the 16-bit value 0xd8ff -- note this read is endian-dependent)
 * to delimit video frames, copies payload into the current frame buffer,
 * and moves completed frames from the inqueue to the outqueue.  The URB
 * is resubmitted unless streaming is off or the device is gone or
 * misconfigured.
 */
static void zc0301_urb_complete(struct urb *urb)
{
    struct zc0301_device* cam = urb->context;
    struct zc0301_frame_t** f;
    size_t imagesize;
    u8 i;
    int err = 0;

    if (urb->status == -ENOENT)
        return;

    f = &cam->frame_current;

    if (cam->stream == STREAM_INTERRUPT) {
        cam->stream = STREAM_OFF;
        if ((*f))
            (*f)->state = F_QUEUED;
        DBG(3, "Stream interrupted");
        wake_up(&cam->wait_stream);
    }

    if (cam->state & DEV_DISCONNECTED)
        return;

    if (cam->state & DEV_MISCONFIGURED) {
        wake_up_interruptible(&cam->wait_frame);
        return;
    }

    if (cam->stream == STREAM_OFF || list_empty(&cam->inqueue))
        goto resubmit_urb;

    if (!(*f))
        (*f) = list_entry(cam->inqueue.next, struct zc0301_frame_t,
                frame);

    imagesize = (cam->sensor.pix_format.width *
            cam->sensor.pix_format.height *
            cam->sensor.pix_format.priv) / 8;

    for (i = 0; i < urb->number_of_packets; i++) {
        unsigned int len, status;
        void *pos;
        u16* soi;
        u8 sof;

        len = urb->iso_frame_desc[i].actual_length;
        status = urb->iso_frame_desc[i].status;
        pos = urb->iso_frame_desc[i].offset + urb->transfer_buffer;

        if (status) {
            DBG(3, "Error in isochronous frame");
            (*f)->state = F_ERROR;
            continue;
        }

        /* Start-of-frame: packet begins with the SOI marker. */
        sof = (*(soi = pos) == 0xd8ff);

        PDBGG("Isochrnous frame: length %u, #%u i,", len, i);

        /* The label below is entered by goto from the end-of-frame
         * path when a new SOI immediately follows a full frame. */
        if ((*f)->state == F_QUEUED || (*f)->state == F_ERROR)
start_of_frame:
            if (sof) {
                (*f)->state = F_GRABBING;
                (*f)->buf.bytesused = 0;
                do_gettimeofday(&(*f)->buf.timestamp);
                DBG(3, "SOF detected: new video frame");
            }

        if ((*f)->state == F_GRABBING) {
            if (sof && (*f)->buf.bytesused)
                goto end_of_frame;

            if ((*f)->buf.bytesused + len > imagesize) {
                DBG(3, "Video frame size exceeded");
                (*f)->state = F_ERROR;
                continue;
            }

            memcpy((*f)->bufmem+(*f)->buf.bytesused, pos, len);
            (*f)->buf.bytesused += len;

            if ((*f)->buf.bytesused == imagesize) {
                u32 b;
end_of_frame:
                b = (*f)->buf.bytesused;
                (*f)->state = F_DONE;
                (*f)->buf.sequence= ++cam->frame_count;
                spin_lock(&cam->queue_lock);
                list_move_tail(&(*f)->frame, &cam->outqueue);
                if (!list_empty(&cam->inqueue))
                    (*f) = list_entry(cam->inqueue.next,
                            struct zc0301_frame_t,
                            frame);
                else
                    (*f) = NULL;
                spin_unlock(&cam->queue_lock);
                DBG(3, "Video frame captured: : %lu bytes",
                    (unsigned long)(b));

                if (!(*f))
                    goto resubmit_urb;

                if (sof)
                    goto start_of_frame;
            }
        }
    }

resubmit_urb:
    urb->dev = cam->usbdev;
    err = usb_submit_urb(urb, GFP_ATOMIC);
    if (err < 0 && err != -EPERM) {
        cam->state |= DEV_MISCONFIGURED;
        DBG(1, "usb_submit_urb() failed");
    }

    wake_up_interruptible(&cam->wait_frame);
}
/*
 * Allocate transfer buffers and isochronous URBs, switch to the
 * streaming alternate setting and submit every URB.  On failure the
 * URBs and buffers allocated so far are released and the error code
 * is returned.
 */
static int zc0301_start_transfer(struct zc0301_device* cam)
{
    struct usb_device *udev = cam->usbdev;
    struct usb_host_interface* altsetting = usb_altnum_to_altsetting(
                        usb_ifnum_to_if(udev, 0),
                        ZC0301_ALTERNATE_SETTING);
    const unsigned int psz = le16_to_cpu(altsetting->
                    endpoint[0].desc.wMaxPacketSize);
    struct urb* urb;
    s8 i, j;
    int err = 0;

    for (i = 0; i < ZC0301_URBS; i++) {
        cam->transfer_buffer[i] = kzalloc(ZC0301_ISO_PACKETS * psz,
                        GFP_KERNEL);
        if (!cam->transfer_buffer[i]) {
            err = -ENOMEM;
            DBG(1, "Not enough memory");
            goto free_buffers;
        }
    }

    for (i = 0; i < ZC0301_URBS; i++) {
        urb = usb_alloc_urb(ZC0301_ISO_PACKETS, GFP_KERNEL);
        cam->urb[i] = urb;
        if (!urb) {
            err = -ENOMEM;
            DBG(1, "usb_alloc_urb() failed");
            goto free_urbs;
        }
        urb->dev = udev;
        urb->context = cam;
        urb->pipe = usb_rcvisocpipe(udev, 1);
        urb->transfer_flags = URB_ISO_ASAP;
        urb->number_of_packets = ZC0301_ISO_PACKETS;
        urb->complete = zc0301_urb_complete;
        urb->transfer_buffer = cam->transfer_buffer[i];
        urb->transfer_buffer_length = psz * ZC0301_ISO_PACKETS;
        urb->interval = 1;
        for (j = 0; j < ZC0301_ISO_PACKETS; j++) {
            urb->iso_frame_desc[j].offset = psz * j;
            urb->iso_frame_desc[j].length = psz;
        }
    }

    /* Select the streaming alternate setting before submitting. */
    err = usb_set_interface(udev, 0, ZC0301_ALTERNATE_SETTING);
    if (err) {
        DBG(1, "usb_set_interface() failed");
        goto free_urbs;
    }

    cam->frame_current = NULL;

    for (i = 0; i < ZC0301_URBS; i++) {
        err = usb_submit_urb(cam->urb[i], GFP_KERNEL);
        if (err) {
            /* Kill the URBs already in flight before bailing. */
            for (j = i-1; j >= 0; j--)
                usb_kill_urb(cam->urb[j]);
            DBG(1, "usb_submit_urb() failed, error %d", err);
            goto free_urbs;
        }
    }

    return 0;

free_urbs:
    for (i = 0; (i < ZC0301_URBS) && cam->urb[i]; i++)
        usb_free_urb(cam->urb[i]);

free_buffers:
    for (i = 0; (i < ZC0301_URBS) && cam->transfer_buffer[i]; i++)
        kfree(cam->transfer_buffer[i]);

    return err;
}
/* Kill and free all URBs and their buffers, then drop back to the
 * zero-bandwidth alternate setting.  A no-op once disconnected. */
static int zc0301_stop_transfer(struct zc0301_device* cam)
{
    struct usb_device *udev = cam->usbdev;
    int rc;
    s8 n;

    if (cam->state & DEV_DISCONNECTED)
        return 0;

    for (n = ZC0301_URBS - 1; n >= 0; n--) {
        usb_kill_urb(cam->urb[n]);
        usb_free_urb(cam->urb[n]);
        kfree(cam->transfer_buffer[n]);
    }

    rc = usb_set_interface(udev, 0, 0); /* 0 Mb/s */
    if (rc)
        DBG(3, "usb_set_interface() failed");

    return rc;
}
/* Ask the URB handler to stop streaming and wait for it to acknowledge.
 * Returns -ENODEV on disconnect, -EIO (and marks the device
 * misconfigured) when the handler never responds within the timeout. */
static int zc0301_stream_interrupt(struct zc0301_device* cam)
{
    long timeout;

    cam->stream = STREAM_INTERRUPT;
    timeout = wait_event_timeout(cam->wait_stream,
                    (cam->stream == STREAM_OFF) ||
                    (cam->state & DEV_DISCONNECTED),
                    ZC0301_URB_TIMEOUT);

    if (cam->state & DEV_DISCONNECTED)
        return -ENODEV;

    if (cam->stream == STREAM_OFF)
        return 0;

    cam->state |= DEV_MISCONFIGURED;
    DBG(1, "URB timeout reached. The camera is misconfigured. To "
           "use it, close and open /dev/video%d again.",
        cam->v4ldev->num);
    return -EIO;
}
/*****************************************************************************/
/*
 * Fold the requested JPEG quality into register 0x0008 (read-modify-
 * write, OR'ing in 0x11 and the quality bits).
 *
 * NOTE(review): when the read fails, r is negative yet still OR'ed into
 * the value written back; the failure is reported as -EIO, but the
 * write still happens with a bogus value.
 */
static int
zc0301_set_compression(struct zc0301_device* cam,
    struct v4l2_jpegcompression* compression)
{
    int r, err = 0;
    if ((r = zc0301_read_reg(cam, 0x0008)) < 0)
        err += r;
    err += zc0301_write_reg(cam, 0x0008, r | 0x11 | compression->quality);
    return err ? -EIO : 0;
}
/*
 * (Re)initialise the camera: sensor init, compression quality, crop
 * rectangle and all supported sensor controls.
 *
 * On the very first call (DEV_INITIALIZED clear) the defaults from the
 * sensor description are programmed and the synchronisation primitives
 * are initialised; on reinitialisation (e.g. recovering from
 * DEV_MISCONFIGURED) the previously saved control values and crop
 * rectangle are restored instead.
 */
static int zc0301_init(struct zc0301_device* cam)
{
    struct zc0301_sensor* s = &cam->sensor;
    struct v4l2_control ctrl;
    struct v4l2_queryctrl *qctrl;
    struct v4l2_rect* rect;
    u8 i = 0;
    int err = 0;

    if (!(cam->state & DEV_INITIALIZED)) {
        mutex_init(&cam->open_mutex);
        init_waitqueue_head(&cam->wait_open);
        qctrl = s->qctrl;
        rect = &(s->cropcap.defrect);
        cam->compression.quality = ZC0301_COMPRESSION_QUALITY;
    } else { /* use current values */
        qctrl = s->_qctrl;
        rect = &(s->_rect);
    }

    if (s->init) {
        err = s->init(cam);
        if (err) {
            DBG(3, "Sensor initialization failed");
            return err;
        }
    }

    if ((err = zc0301_set_compression(cam, &cam->compression))) {
        DBG(3, "set_compression() failed");
        return err;
    }

    if (s->set_crop)
        if ((err = s->set_crop(cam, rect))) {
            DBG(3, "set_crop() failed");
            return err;
        }

    if (s->set_ctrl) {
        /* Program every enabled control with its (saved) value. */
        for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
            if (s->qctrl[i].id != 0 &&
                !(s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)) {
                ctrl.id = s->qctrl[i].id;
                ctrl.value = qctrl[i].default_value;
                err = s->set_ctrl(cam, &ctrl);
                if (err) {
                    DBG(3, "Set %s control failed",
                        s->qctrl[i].name);
                    return err;
                }
                DBG(3, "Image sensor supports '%s' control",
                    s->qctrl[i].name);
            }
    }

    if (!(cam->state & DEV_INITIALIZED)) {
        mutex_init(&cam->fileop_mutex);
        spin_lock_init(&cam->queue_lock);
        init_waitqueue_head(&cam->wait_frame);
        init_waitqueue_head(&cam->wait_stream);
        cam->nreadbuffers = 2;
        /* Snapshot defaults so reinit restores current values. */
        memcpy(s->_qctrl, s->qctrl, sizeof(s->qctrl));
        memcpy(&(s->_rect), &(s->cropcap.defrect),
            sizeof(struct v4l2_rect));
        cam->state |= DEV_INITIALIZED;
    }

    DBG(2, "Initialization succeeded");
    return 0;
}
/*****************************************************************************/
/*
 * kref release callback: tear everything down once the last reference to
 * the device is dropped (final close after disconnect, disconnect with
 * no users, or an open() error path).
 */
static void zc0301_release_resources(struct kref *kref)
{
	struct zc0301_device *cam = container_of(kref, struct zc0301_device,
						 kref);

	DBG(2, "V4L2 device /dev/video%d deregistered", cam->v4ldev->num);
	video_set_drvdata(cam->v4ldev, NULL);
	video_unregister_device(cam->v4ldev);
	usb_put_dev(cam->usbdev);
	kfree(cam->control_buffer);
	kfree(cam);
}
/*
 * open() handler for the video device node.
 *
 * Serialization: zc0301_dev_lock (read side) guards against disconnect;
 * cam->open_mutex serializes concurrent opens.  Only one user at a time
 * is supported: a second, blocking open() sleeps on cam->wait_open until
 * the current user releases the device or it is disconnected.  A kref
 * reference is held for the lifetime of the file handle.
 */
static int zc0301_open(struct inode* inode, struct file* filp)
{
	struct zc0301_device* cam;
	int err = 0;

	if (!down_read_trylock(&zc0301_dev_lock))
		return -EAGAIN;

	cam = video_drvdata(filp);

	/* Wait until probe() has finished setting the device up */
	if (wait_for_completion_interruptible(&cam->probe)) {
		up_read(&zc0301_dev_lock);
		return -ERESTARTSYS;
	}

	kref_get(&cam->kref);

	if (mutex_lock_interruptible(&cam->open_mutex)) {
		kref_put(&cam->kref, zc0301_release_resources);
		up_read(&zc0301_dev_lock);
		return -ERESTARTSYS;
	}

	if (cam->state & DEV_DISCONNECTED) {
		DBG(1, "Device not present");
		err = -ENODEV;
		goto out;
	}

	if (cam->users) {
		DBG(2, "Device /dev/video%d is busy...", cam->v4ldev->num);
		DBG(3, "Simultaneous opens are not supported");
		if ((filp->f_flags & O_NONBLOCK) ||
		    (filp->f_flags & O_NDELAY)) {
			err = -EWOULDBLOCK;
			goto out;
		}
		DBG(2, "A blocking open() has been requested. Wait for the "
		       "device to be released...");
		/* Drop the disconnect lock while sleeping so that
		   disconnect() can make progress and wake us */
		up_read(&zc0301_dev_lock);
		err = wait_event_interruptible_exclusive(cam->wait_open,
						(cam->state & DEV_DISCONNECTED)
							 || !cam->users);
		down_read(&zc0301_dev_lock);
		if (err)
			goto out;
		if (cam->state & DEV_DISCONNECTED) {
			err = -ENODEV;
			goto out;
		}
	}

	if (cam->state & DEV_MISCONFIGURED) {
		/* A previous initialization failed: retry it now */
		err = zc0301_init(cam);
		if (err) {
			DBG(1, "Initialization failed again. "
			       "I will retry on next open().");
			goto out;
		}
		cam->state &= ~DEV_MISCONFIGURED;
	}

	if ((err = zc0301_start_transfer(cam)))
		goto out;

	/* Fresh per-open state: no I/O method chosen yet, stream off */
	filp->private_data = cam;
	cam->users++;
	cam->io = IO_NONE;
	cam->stream = STREAM_OFF;
	cam->nbuffers = 0;
	cam->frame_count = 0;
	zc0301_empty_framequeues(cam);

	DBG(3, "Video device /dev/video%d is open", cam->v4ldev->num);

out:
	mutex_unlock(&cam->open_mutex);
	if (err)
		kref_put(&cam->kref, zc0301_release_resources);
	up_read(&zc0301_dev_lock);
	return err;
}
/*
 * close() handler: stop the transfer, free the frame buffers and drop
 * the file handle's kref reference.  Runs with the device lock held for
 * writing so it cannot race with disconnect; one sleeping blocking
 * open(), if any, is woken up.
 */
static int zc0301_release(struct inode* inode, struct file* filp)
{
	struct zc0301_device* cam;

	down_write(&zc0301_dev_lock);

	cam = video_drvdata(filp);

	zc0301_stop_transfer(cam);
	zc0301_release_buffers(cam);
	cam->users--;
	wake_up_interruptible_nr(&cam->wait_open, 1);

	DBG(3, "Video device /dev/video%d closed", cam->v4ldev->num);

	kref_put(&cam->kref, zc0301_release_resources);

	up_write(&zc0301_dev_lock);

	return 0;
}
/*
 * read() handler: blocking/non-blocking capture of one frame.
 *
 * The first read() latches the I/O method to IO_READ (mmap is then
 * refused until reopen) and allocates the read buffers.  Waits up to
 * module_param.frame_timeout seconds for a completed frame, copies the
 * newest one to userspace, then recycles all completed frames back onto
 * the free list.
 */
static ssize_t
zc0301_read(struct file* filp, char __user * buf, size_t count, loff_t* f_pos)
{
	struct zc0301_device *cam = video_drvdata(filp);
	struct zc0301_frame_t* f, * i;
	unsigned long lock_flags;
	long timeout;
	int err = 0;

	if (mutex_lock_interruptible(&cam->fileop_mutex))
		return -ERESTARTSYS;

	if (cam->state & DEV_DISCONNECTED) {
		DBG(1, "Device not present");
		mutex_unlock(&cam->fileop_mutex);
		return -ENODEV;
	}

	if (cam->state & DEV_MISCONFIGURED) {
		DBG(1, "The camera is misconfigured. Close and open it "
		       "again.");
		mutex_unlock(&cam->fileop_mutex);
		return -EIO;
	}

	if (cam->io == IO_MMAP) {
		DBG(3, "Close and open the device again to choose the read "
		       "method");
		mutex_unlock(&cam->fileop_mutex);
		return -EBUSY;
	}

	if (cam->io == IO_NONE) {
		/* First read(): allocate buffers and start streaming */
		if (!zc0301_request_buffers(cam, cam->nreadbuffers, IO_READ)) {
			DBG(1, "read() failed, not enough memory");
			mutex_unlock(&cam->fileop_mutex);
			return -ENOMEM;
		}
		cam->io = IO_READ;
		cam->stream = STREAM_ON;
	}

	if (list_empty(&cam->inqueue)) {
		if (!list_empty(&cam->outqueue))
			zc0301_empty_framequeues(cam);
		zc0301_queue_unusedframes(cam);
	}

	if (!count) {
		mutex_unlock(&cam->fileop_mutex);
		return 0;
	}

	if (list_empty(&cam->outqueue)) {
		if (filp->f_flags & O_NONBLOCK) {
			mutex_unlock(&cam->fileop_mutex);
			return -EAGAIN;
		}
		/* Sleep until a frame completes, the device goes away or
		   the frame timeout (seconds, converted to jiffies) hits */
		timeout = wait_event_interruptible_timeout
			  ( cam->wait_frame,
			    (!list_empty(&cam->outqueue)) ||
			    (cam->state & DEV_DISCONNECTED) ||
			    (cam->state & DEV_MISCONFIGURED),
			    cam->module_param.frame_timeout *
			    1000 * msecs_to_jiffies(1) );
		if (timeout < 0) {
			mutex_unlock(&cam->fileop_mutex);
			return timeout;
		}
		if (cam->state & DEV_DISCONNECTED) {
			mutex_unlock(&cam->fileop_mutex);
			return -ENODEV;
		}
		if (!timeout || (cam->state & DEV_MISCONFIGURED)) {
			mutex_unlock(&cam->fileop_mutex);
			return -EIO;
		}
	}

	/* Deliver the most recent completed frame */
	f = list_entry(cam->outqueue.prev, struct zc0301_frame_t, frame);

	if (count > f->buf.bytesused)
		count = f->buf.bytesused;

	if (copy_to_user(buf, f->bufmem, count)) {
		err = -EFAULT;
		goto exit;
	}
	*f_pos += count;

exit:
	/* Mark every completed frame unused and requeue them */
	spin_lock_irqsave(&cam->queue_lock, lock_flags);
	list_for_each_entry(i, &cam->outqueue, frame)
		i->state = F_UNUSED;
	INIT_LIST_HEAD(&cam->outqueue);
	spin_unlock_irqrestore(&cam->queue_lock, lock_flags);

	zc0301_queue_unusedframes(cam);

	PDBGG("Frame #%lu, bytes read: %zu",
	      (unsigned long)f->buf.index, count);

	mutex_unlock(&cam->fileop_mutex);

	return err ? err : count;
}
/*
 * poll() handler: reports POLLIN|POLLRDNORM when a completed frame is
 * available.  Like read(), a first poll() on an idle device switches it
 * to IO_READ mode and starts the stream; in IO_READ mode previously
 * completed frames are recycled before sleeping so only fresh data wakes
 * the caller.  Any device error is reported as POLLERR.
 */
static unsigned int zc0301_poll(struct file *filp, poll_table *wait)
{
	struct zc0301_device *cam = video_drvdata(filp);
	struct zc0301_frame_t* f;
	unsigned long lock_flags;
	unsigned int mask = 0;

	if (mutex_lock_interruptible(&cam->fileop_mutex))
		return POLLERR;

	if (cam->state & DEV_DISCONNECTED) {
		DBG(1, "Device not present");
		goto error;
	}

	if (cam->state & DEV_MISCONFIGURED) {
		DBG(1, "The camera is misconfigured. Close and open it "
		       "again.");
		goto error;
	}

	if (cam->io == IO_NONE) {
		if (!zc0301_request_buffers(cam, cam->nreadbuffers, IO_READ)) {
			DBG(1, "poll() failed, not enough memory");
			goto error;
		}
		cam->io = IO_READ;
		cam->stream = STREAM_ON;
	}

	if (cam->io == IO_READ) {
		/* Discard stale completed frames and requeue them */
		spin_lock_irqsave(&cam->queue_lock, lock_flags);
		list_for_each_entry(f, &cam->outqueue, frame)
			f->state = F_UNUSED;
		INIT_LIST_HEAD(&cam->outqueue);
		spin_unlock_irqrestore(&cam->queue_lock, lock_flags);
		zc0301_queue_unusedframes(cam);
	}

	poll_wait(filp, &cam->wait_frame, wait);

	if (!list_empty(&cam->outqueue))
		mask |= POLLIN | POLLRDNORM;

	mutex_unlock(&cam->fileop_mutex);

	return mask;

error:
	mutex_unlock(&cam->fileop_mutex);
	return POLLERR;
}
/* VMA open: count one more userspace mapping of this frame buffer */
static void zc0301_vm_open(struct vm_area_struct* vma)
{
	struct zc0301_frame_t* f = vma->vm_private_data;
	f->vma_use_count++;
}
/* VMA close: drop one mapping reference for this frame buffer */
static void zc0301_vm_close(struct vm_area_struct* vma)
{
	/* NOTE: buffers are not freed here */
	struct zc0301_frame_t* f = vma->vm_private_data;
	f->vma_use_count--;
}
/* Track per-frame mapping counts so buffers are not torn down while mapped */
static struct vm_operations_struct zc0301_vm_ops = {
	.open = zc0301_vm_open,
	.close = zc0301_vm_close,
};
/*
 * mmap() handler: maps one previously requested frame buffer (selected
 * by vma->vm_pgoff matching the buffer's mem offset) into userspace,
 * page by page via vm_insert_page().  Requires IO_MMAP mode (after
 * VIDIOC_REQBUFS) and a mapping size equal to the page-aligned buffer
 * length.
 */
static int zc0301_mmap(struct file* filp, struct vm_area_struct *vma)
{
	struct zc0301_device *cam = video_drvdata(filp);
	unsigned long size = vma->vm_end - vma->vm_start,
		      start = vma->vm_start;
	void *pos;
	u32 i;

	if (mutex_lock_interruptible(&cam->fileop_mutex))
		return -ERESTARTSYS;

	if (cam->state & DEV_DISCONNECTED) {
		DBG(1, "Device not present");
		mutex_unlock(&cam->fileop_mutex);
		return -ENODEV;
	}

	if (cam->state & DEV_MISCONFIGURED) {
		DBG(1, "The camera is misconfigured. Close and open it "
		       "again.");
		mutex_unlock(&cam->fileop_mutex);
		return -EIO;
	}

	if (!(vma->vm_flags & (VM_WRITE | VM_READ))) {
		mutex_unlock(&cam->fileop_mutex);
		return -EACCES;
	}

	if (cam->io != IO_MMAP ||
	    size != PAGE_ALIGN(cam->frame[0].buf.length)) {
		mutex_unlock(&cam->fileop_mutex);
		return -EINVAL;
	}

	/* Find the frame buffer whose offset matches the mmap offset */
	for (i = 0; i < cam->nbuffers; i++) {
		if ((cam->frame[i].buf.m.offset>>PAGE_SHIFT) == vma->vm_pgoff)
			break;
	}
	if (i == cam->nbuffers) {
		mutex_unlock(&cam->fileop_mutex);
		return -EINVAL;
	}

	vma->vm_flags |= VM_IO;
	vma->vm_flags |= VM_RESERVED;

	/* Buffers are vmalloc'ed: insert them one page at a time */
	pos = cam->frame[i].bufmem;
	while (size > 0) { /* size is page-aligned */
		if (vm_insert_page(vma, start, vmalloc_to_page(pos))) {
			mutex_unlock(&cam->fileop_mutex);
			return -EAGAIN;
		}
		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	vma->vm_ops = &zc0301_vm_ops;
	vma->vm_private_data = &cam->frame[i];
	zc0301_vm_open(vma);

	mutex_unlock(&cam->fileop_mutex);

	return 0;
}
/*****************************************************************************/
/*
 * VIDIOC_QUERYCAP: report driver name, version, bus info and the
 * capture/read-write/streaming capabilities.
 */
static int
zc0301_vidioc_querycap(struct zc0301_device* cam, void __user * arg)
{
	struct v4l2_capability cap = {
		.driver = "zc0301",
		.version = ZC0301_MODULE_VERSION_CODE,
		.capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
				V4L2_CAP_STREAMING,
	};

	strlcpy(cap.card, cam->v4ldev->name, sizeof(cap.card));
	/* Fall back to the device's bus id if the USB path doesn't fit */
	if (usb_make_path(cam->usbdev, cap.bus_info, sizeof(cap.bus_info)) < 0)
		strlcpy(cap.bus_info, cam->usbdev->dev.bus_id,
			sizeof(cap.bus_info));

	if (copy_to_user(arg, &cap, sizeof(cap)))
		return -EFAULT;

	return 0;
}
/*
 * VIDIOC_ENUMINPUT: the device has exactly one input, the camera itself;
 * any index other than 0 is invalid.
 */
static int
zc0301_vidioc_enuminput(struct zc0301_device* cam, void __user * arg)
{
	struct v4l2_input input;

	if (copy_from_user(&input, arg, sizeof(input)))
		return -EFAULT;

	if (input.index)
		return -EINVAL;

	memset(&input, 0, sizeof(input));
	strcpy(input.name, "Camera");
	input.type = V4L2_INPUT_TYPE_CAMERA;

	return copy_to_user(arg, &input, sizeof(input)) ? -EFAULT : 0;
}
/* VIDIOC_G_INPUT: the single, fixed input is always index 0 */
static int
zc0301_vidioc_g_input(struct zc0301_device* cam, void __user * arg)
{
	const int index = 0;

	return copy_to_user(arg, &index, sizeof(index)) ? -EFAULT : 0;
}
/* VIDIOC_S_INPUT: selecting input 0 is a no-op; anything else is invalid */
static int
zc0301_vidioc_s_input(struct zc0301_device* cam, void __user * arg)
{
	int index;

	if (copy_from_user(&index, arg, sizeof(index)))
		return -EFAULT;

	return index ? -EINVAL : 0;
}
/*
 * VIDIOC_QUERYCTRL: look the requested control id up in the sensor's
 * static control table and copy its description to userspace.
 * Returns -EINVAL if the id is zero or unknown.
 */
static int
zc0301_vidioc_query_ctrl(struct zc0301_device* cam, void __user * arg)
{
	struct zc0301_sensor* s = &cam->sensor;
	struct v4l2_queryctrl qc;
	u8 i;

	if (copy_from_user(&qc, arg, sizeof(qc)))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
		if (qc.id && qc.id == s->qctrl[i].id) {
			memcpy(&qc, &(s->qctrl[i]), sizeof(qc));
			if (copy_to_user(arg, &qc, sizeof(qc)))
				return -EFAULT;
			return 0;
		}

	return -EINVAL;
}
/*
 * VIDIOC_G_CTRL: read a control's current value.  If the sensor has no
 * get_ctrl op, the value cached in s->_qctrl (kept up to date by
 * VIDIOC_S_CTRL) is reported instead.
 */
static int
zc0301_vidioc_g_ctrl(struct zc0301_device* cam, void __user * arg)
{
	struct zc0301_sensor* s = &cam->sensor;
	struct v4l2_control ctrl;
	int err = 0;
	u8 i;

	if (!s->get_ctrl && !s->set_ctrl)
		return -EINVAL;

	if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
		return -EFAULT;

	if (!s->get_ctrl) {
		/* No hardware readback: report the cached value */
		for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
			if (ctrl.id == s->qctrl[i].id) {
				ctrl.value = s->_qctrl[i].default_value;
				goto exit;
			}
		return -EINVAL;
	} else
		err = s->get_ctrl(cam, &ctrl);

exit:
	if (copy_to_user(arg, &ctrl, sizeof(ctrl)))
		return -EFAULT;

	return err;
}
/*
 * VIDIOC_S_CTRL: validate the requested control against the sensor's
 * table (disabled flag, min/max range, step rounding), program it via
 * the sensor's set_ctrl op and cache the value so it survives a later
 * re-initialization.
 *
 * Fix: the old code fell through when ctrl.id matched no table entry,
 * passing an unvalidated control to the sensor and then writing one
 * element past the end of s->_qctrl (i == ARRAY_SIZE).  Unknown ids now
 * get -EINVAL, as the V4L2 spec requires.
 */
static int
zc0301_vidioc_s_ctrl(struct zc0301_device* cam, void __user * arg)
{
	struct zc0301_sensor* s = &cam->sensor;
	struct v4l2_control ctrl;
	u8 i;
	int err = 0;

	if (!s->set_ctrl)
		return -EINVAL;

	if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(s->qctrl); i++)
		if (ctrl.id == s->qctrl[i].id) {
			if (s->qctrl[i].flags & V4L2_CTRL_FLAG_DISABLED)
				return -EINVAL;
			if (ctrl.value < s->qctrl[i].minimum ||
			    ctrl.value > s->qctrl[i].maximum)
				return -ERANGE;
			ctrl.value -= ctrl.value % s->qctrl[i].step;
			break;
		}

	/* Unknown control id: reject instead of indexing out of bounds */
	if (i == ARRAY_SIZE(s->qctrl))
		return -EINVAL;

	if ((err = s->set_ctrl(cam, &ctrl)))
		return err;

	s->_qctrl[i].default_value = ctrl.value;

	return 0;
}
/*
 * VIDIOC_CROPCAP: report the sensor's cropping capabilities with a
 * fixed 1:1 pixel aspect ratio.  Note this writes type/pixelaspect into
 * the cached cropcap structure before copying it out.
 */
static int
zc0301_vidioc_cropcap(struct zc0301_device* cam, void __user * arg)
{
	struct v4l2_cropcap* cc = &(cam->sensor.cropcap);

	cc->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	cc->pixelaspect.numerator = 1;
	cc->pixelaspect.denominator = 1;

	if (copy_to_user(arg, cc, sizeof(*cc)))
		return -EFAULT;

	return 0;
}
/* VIDIOC_G_CROP: report the currently active crop rectangle */
static int
zc0301_vidioc_g_crop(struct zc0301_device* cam, void __user * arg)
{
	struct v4l2_crop crop;

	memset(&crop, 0, sizeof(crop));
	crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	crop.c = cam->sensor._rect;

	return copy_to_user(arg, &crop, sizeof(crop)) ? -EFAULT : 0;
}
/*
 * VIDIOC_S_CROP: change the capture rectangle.
 *
 * The requested rectangle is rounded to 8-pixel multiples and clamped to
 * the sensor bounds, the stream is paused, the buffers are (optionally)
 * released and re-requested for the new geometry, and the sensor's
 * set_crop op is invoked.  On hardware or memory failure the device is
 * flagged DEV_MISCONFIGURED (no rollback) and must be reopened.
 */
static int
zc0301_vidioc_s_crop(struct zc0301_device* cam, void __user * arg)
{
	struct zc0301_sensor* s = &cam->sensor;
	struct v4l2_crop crop;
	struct v4l2_rect* rect;
	struct v4l2_rect* bounds = &(s->cropcap.bounds);
	const enum zc0301_stream_state stream = cam->stream;
	const u32 nbuffers = cam->nbuffers;
	u32 i;
	int err = 0;

	if (copy_from_user(&crop, arg, sizeof(crop)))
		return -EFAULT;

	rect = &(crop.c);

	if (crop.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	/* Refuse to pull buffers out from under live userspace mappings */
	if (cam->module_param.force_munmap)
		for (i = 0; i < cam->nbuffers; i++)
			if (cam->frame[i].vma_use_count) {
				DBG(3, "VIDIOC_S_CROP failed. "
				       "Unmap the buffers first.");
				return -EBUSY;
			}

	/* Sensor cannot crop: just report the current rectangle back */
	if (!s->set_crop) {
		memcpy(rect, &(s->_rect), sizeof(*rect));
		if (copy_to_user(arg, &crop, sizeof(crop)))
			return -EFAULT;
		return 0;
	}

	/* Align to 8 pixels and clamp inside the sensor bounds */
	rect->left &= ~7L;
	rect->top &= ~7L;
	if (rect->width < 8)
		rect->width = 8;
	if (rect->height < 8)
		rect->height = 8;
	if (rect->width > bounds->width)
		rect->width = bounds->width;
	if (rect->height > bounds->height)
		rect->height = bounds->height;
	if (rect->left < bounds->left)
		rect->left = bounds->left;
	if (rect->top < bounds->top)
		rect->top = bounds->top;
	if (rect->left + rect->width > bounds->left + bounds->width)
		rect->left = bounds->left+bounds->width - rect->width;
	if (rect->top + rect->height > bounds->top + bounds->height)
		rect->top = bounds->top+bounds->height - rect->height;
	rect->width &= ~7L;
	rect->height &= ~7L;

	if (cam->stream == STREAM_ON)
		if ((err = zc0301_stream_interrupt(cam)))
			return err;

	if (copy_to_user(arg, &crop, sizeof(crop))) {
		cam->stream = stream;
		return -EFAULT;
	}

	if (cam->module_param.force_munmap || cam->io == IO_READ)
		zc0301_release_buffers(cam);

	if (s->set_crop)
		err += s->set_crop(cam, rect);

	if (err) { /* atomic, no rollback in ioctl() */
		cam->state |= DEV_MISCONFIGURED;
		DBG(1, "VIDIOC_S_CROP failed because of hardware problems. To "
		       "use the camera, close and open /dev/video%d again.",
		    cam->v4ldev->num);
		return -EIO;
	}

	s->pix_format.width = rect->width;
	s->pix_format.height = rect->height;
	memcpy(&(s->_rect), rect, sizeof(*rect));

	/* Re-request the same number of buffers for the new geometry */
	if ((cam->module_param.force_munmap || cam->io == IO_READ) &&
	    nbuffers != zc0301_request_buffers(cam, nbuffers, cam->io)) {
		cam->state |= DEV_MISCONFIGURED;
		DBG(1, "VIDIOC_S_CROP failed because of not enough memory. To "
		       "use the camera, close and open /dev/video%d again.",
		    cam->v4ldev->num);
		return -ENOMEM;
	}

	if (cam->io == IO_READ)
		zc0301_empty_framequeues(cam);
	else if (cam->module_param.force_munmap)
		zc0301_requeue_outqueue(cam);

	cam->stream = stream;

	return 0;
}
/*
 * VIDIOC_ENUM_FRAMESIZES: two discrete JPEG frame sizes are enumerable;
 * index 1 reports the sensor's default crop rectangle dimensions.
 */
static int
zc0301_vidioc_enum_framesizes(struct zc0301_device* cam, void __user * arg)
{
	struct v4l2_frmsizeenum frmsize;

	if (copy_from_user(&frmsize, arg, sizeof(frmsize)))
		return -EFAULT;

	if (frmsize.index != 0 && frmsize.index != 1)
		return -EINVAL;

	if (frmsize.pixel_format != V4L2_PIX_FMT_JPEG)
		return -EINVAL;

	frmsize.type = V4L2_FRMSIZE_TYPE_DISCRETE;

	/* NOTE(review): only index 1 fills in discrete width/height; for
	   index 0 the userspace-supplied values are echoed back — confirm
	   this matches the intended enumeration */
	if (frmsize.index == 1) {
		frmsize.discrete.width = cam->sensor.cropcap.defrect.width;
		frmsize.discrete.height = cam->sensor.cropcap.defrect.height;
	}
	memset(&frmsize.reserved, 0, sizeof(frmsize.reserved));

	if (copy_to_user(arg, &frmsize, sizeof(frmsize)))
		return -EFAULT;

	return 0;
}
/*
 * VIDIOC_ENUM_FMT: a single pixel format is supported — compressed JPEG.
 */
static int
zc0301_vidioc_enum_fmt(struct zc0301_device* cam, void __user * arg)
{
	struct v4l2_fmtdesc fmtd;

	if (copy_from_user(&fmtd, arg, sizeof(fmtd)))
		return -EFAULT;

	if (fmtd.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	if (fmtd.index == 0) {
		strcpy(fmtd.description, "JPEG");
		fmtd.pixelformat = V4L2_PIX_FMT_JPEG;
		fmtd.flags = V4L2_FMT_FLAG_COMPRESSED;
	} else
		return -EINVAL;

	fmtd.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	memset(&fmtd.reserved, 0, sizeof(fmtd.reserved));

	if (copy_to_user(arg, &fmtd, sizeof(fmtd)))
		return -EFAULT;

	return 0;
}
/*
 * VIDIOC_G_FMT: report the current pixel format.  sizeimage is derived
 * as height * width * priv / 8 (priv presumably holds bits per pixel —
 * see the division by 8); bytesperline is 0 for variable-length JPEG.
 */
static int
zc0301_vidioc_g_fmt(struct zc0301_device* cam, void __user * arg)
{
	struct v4l2_format format;
	struct v4l2_pix_format* pfmt = &(cam->sensor.pix_format);

	if (copy_from_user(&format, arg, sizeof(format)))
		return -EFAULT;

	if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	pfmt->bytesperline = 0;
	pfmt->sizeimage = pfmt->height * ((pfmt->width*pfmt->priv)/8);
	pfmt->field = V4L2_FIELD_NONE;
	memcpy(&(format.fmt.pix), pfmt, sizeof(*pfmt));

	if (copy_to_user(arg, &format, sizeof(format)))
		return -EFAULT;

	return 0;
}
/*
 * VIDIOC_TRY_FMT / VIDIOC_S_FMT: negotiate or apply the capture format.
 *
 * The requested size is mapped onto a crop rectangle, aligned to 8 and
 * clamped to the sensor bounds.  TRY_FMT only copies the adjusted format
 * back.  S_FMT additionally pauses the stream, reallocates buffers if
 * needed and reprograms the sensor crop; failures leave the device
 * DEV_MISCONFIGURED (no rollback), requiring a close/reopen.
 */
static int
zc0301_vidioc_try_s_fmt(struct zc0301_device* cam, unsigned int cmd,
			void __user * arg)
{
	struct zc0301_sensor* s = &cam->sensor;
	struct v4l2_format format;
	struct v4l2_pix_format* pix;
	struct v4l2_pix_format* pfmt = &(s->pix_format);
	struct v4l2_rect* bounds = &(s->cropcap.bounds);
	struct v4l2_rect rect;
	const enum zc0301_stream_state stream = cam->stream;
	const u32 nbuffers = cam->nbuffers;
	u32 i;
	int err = 0;

	if (copy_from_user(&format, arg, sizeof(format)))
		return -EFAULT;

	pix = &(format.fmt.pix);

	if (format.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	memcpy(&rect, &(s->_rect), sizeof(rect));

	if (!s->set_crop) {
		/* Size is fixed when the sensor cannot crop */
		pix->width = rect.width;
		pix->height = rect.height;
	} else {
		rect.width = pix->width;
		rect.height = pix->height;
	}

	/* Align to 8 pixels and clamp inside the sensor bounds */
	if (rect.width < 8)
		rect.width = 8;
	if (rect.height < 8)
		rect.height = 8;
	if (rect.width > bounds->left + bounds->width - rect.left)
		rect.width = bounds->left + bounds->width - rect.left;
	if (rect.height > bounds->top + bounds->height - rect.top)
		rect.height = bounds->top + bounds->height - rect.top;
	rect.width &= ~7L;
	rect.height &= ~7L;

	/* Fill in the rest of the format from the current settings */
	pix->width = rect.width;
	pix->height = rect.height;
	pix->pixelformat = pfmt->pixelformat;
	pix->priv = pfmt->priv;
	pix->colorspace = pfmt->colorspace;
	pix->bytesperline = 0;
	pix->sizeimage = pix->height * ((pix->width * pix->priv) / 8);
	pix->field = V4L2_FIELD_NONE;

	if (cmd == VIDIOC_TRY_FMT) {
		if (copy_to_user(arg, &format, sizeof(format)))
			return -EFAULT;
		return 0;
	}

	/* S_FMT from here on: refuse while buffers are still mapped */
	if (cam->module_param.force_munmap)
		for (i = 0; i < cam->nbuffers; i++)
			if (cam->frame[i].vma_use_count) {
				DBG(3, "VIDIOC_S_FMT failed. "
				       "Unmap the buffers first.");
				return -EBUSY;
			}

	if (cam->stream == STREAM_ON)
		if ((err = zc0301_stream_interrupt(cam)))
			return err;

	if (copy_to_user(arg, &format, sizeof(format))) {
		cam->stream = stream;
		return -EFAULT;
	}

	if (cam->module_param.force_munmap || cam->io == IO_READ)
		zc0301_release_buffers(cam);

	if (s->set_crop)
		err += s->set_crop(cam, &rect);

	if (err) { /* atomic, no rollback in ioctl() */
		cam->state |= DEV_MISCONFIGURED;
		DBG(1, "VIDIOC_S_FMT failed because of hardware problems. To "
		       "use the camera, close and open /dev/video%d again.",
		    cam->v4ldev->num);
		return -EIO;
	}

	memcpy(pfmt, pix, sizeof(*pix));
	memcpy(&(s->_rect), &rect, sizeof(rect));

	/* Re-request the same number of buffers for the new geometry */
	if ((cam->module_param.force_munmap || cam->io == IO_READ) &&
	    nbuffers != zc0301_request_buffers(cam, nbuffers, cam->io)) {
		cam->state |= DEV_MISCONFIGURED;
		DBG(1, "VIDIOC_S_FMT failed because of not enough memory. To "
		       "use the camera, close and open /dev/video%d again.",
		    cam->v4ldev->num);
		return -ENOMEM;
	}

	if (cam->io == IO_READ)
		zc0301_empty_framequeues(cam);
	else if (cam->module_param.force_munmap)
		zc0301_requeue_outqueue(cam);

	cam->stream = stream;

	return 0;
}
/* VIDIOC_G_JPEGCOMP: hand the current compression settings to userspace */
static int
zc0301_vidioc_g_jpegcomp(struct zc0301_device* cam, void __user * arg)
{
	return copy_to_user(arg, &cam->compression,
			    sizeof(cam->compression)) ? -EFAULT : 0;
}
/*
 * VIDIOC_S_JPEGCOMP: set JPEG compression parameters.  Only quality 0
 * is accepted.  The stream is paused while the compression register is
 * reprogrammed; a hardware failure marks the device DEV_MISCONFIGURED.
 */
static int
zc0301_vidioc_s_jpegcomp(struct zc0301_device* cam, void __user * arg)
{
	struct v4l2_jpegcompression jc;
	const enum zc0301_stream_state stream = cam->stream;
	int err = 0;

	if (copy_from_user(&jc, arg, sizeof(jc)))
		return -EFAULT;

	if (jc.quality != 0)
		return -EINVAL;

	if (cam->stream == STREAM_ON)
		if ((err = zc0301_stream_interrupt(cam)))
			return err;

	err += zc0301_set_compression(cam, &jc);
	if (err) { /* atomic, no rollback in ioctl() */
		cam->state |= DEV_MISCONFIGURED;
		DBG(1, "VIDIOC_S_JPEGCOMP failed because of hardware "
		       "problems. To use the camera, close and open "
		       "/dev/video%d again.", cam->v4ldev->num);
		return -EIO;
	}

	cam->compression.quality = jc.quality;
	cam->stream = stream;

	return 0;
}
/*
 * VIDIOC_REQBUFS: allocate (or free, when count is 0) mmap frame
 * buffers.  Refused while in read() mode or while previous buffers are
 * still mapped.  On return cam->io reflects whether mmap I/O is active.
 */
static int
zc0301_vidioc_reqbufs(struct zc0301_device* cam, void __user * arg)
{
	struct v4l2_requestbuffers rb;
	u32 i;
	int err;

	if (copy_from_user(&rb, arg, sizeof(rb)))
		return -EFAULT;

	if (rb.type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    rb.memory != V4L2_MEMORY_MMAP)
		return -EINVAL;

	if (cam->io == IO_READ) {
		DBG(3, "Close and open the device again to choose the mmap "
		       "I/O method");
		return -EBUSY;
	}

	for (i = 0; i < cam->nbuffers; i++)
		if (cam->frame[i].vma_use_count) {
			DBG(3, "VIDIOC_REQBUFS failed. "
			       "Previous buffers are still mapped.");
			return -EBUSY;
		}

	if (cam->stream == STREAM_ON)
		if ((err = zc0301_stream_interrupt(cam)))
			return err;

	zc0301_empty_framequeues(cam);
	zc0301_release_buffers(cam);

	/* May grant fewer buffers than requested */
	if (rb.count)
		rb.count = zc0301_request_buffers(cam, rb.count, IO_MMAP);

	if (copy_to_user(arg, &rb, sizeof(rb))) {
		zc0301_release_buffers(cam);
		cam->io = IO_NONE;
		return -EFAULT;
	}

	cam->io = rb.count ? IO_MMAP : IO_NONE;

	return 0;
}
/*
 * VIDIOC_QUERYBUF: report the status (mapped/done/queued flags) of one
 * previously requested mmap buffer.
 */
static int
zc0301_vidioc_querybuf(struct zc0301_device* cam, void __user * arg)
{
	struct v4l2_buffer b;

	if (copy_from_user(&b, arg, sizeof(b)))
		return -EFAULT;

	if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    b.index >= cam->nbuffers || cam->io != IO_MMAP)
		return -EINVAL;

	memcpy(&b, &cam->frame[b.index].buf, sizeof(b));

	if (cam->frame[b.index].vma_use_count)
		b.flags |= V4L2_BUF_FLAG_MAPPED;

	if (cam->frame[b.index].state == F_DONE)
		b.flags |= V4L2_BUF_FLAG_DONE;
	else if (cam->frame[b.index].state != F_UNUSED)
		b.flags |= V4L2_BUF_FLAG_QUEUED;

	if (copy_to_user(arg, &b, sizeof(b)))
		return -EFAULT;

	return 0;
}
/*
 * VIDIOC_QBUF: hand an unused mmap buffer back to the driver by putting
 * it on the input queue, under the queue spinlock.
 */
static int
zc0301_vidioc_qbuf(struct zc0301_device* cam, void __user * arg)
{
	struct v4l2_buffer b;
	unsigned long lock_flags;

	if (copy_from_user(&b, arg, sizeof(b)))
		return -EFAULT;

	if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
	    b.index >= cam->nbuffers || cam->io != IO_MMAP)
		return -EINVAL;

	if (cam->frame[b.index].state != F_UNUSED)
		return -EINVAL;

	cam->frame[b.index].state = F_QUEUED;

	spin_lock_irqsave(&cam->queue_lock, lock_flags);
	list_add_tail(&cam->frame[b.index].frame, &cam->inqueue);
	spin_unlock_irqrestore(&cam->queue_lock, lock_flags);

	PDBGG("Frame #%lu queued", (unsigned long)b.index);

	return 0;
}
/*
 * VIDIOC_DQBUF: take the oldest completed frame off the output queue,
 * waiting (unless O_NONBLOCK) up to frame_timeout seconds for one to
 * complete, and return its v4l2_buffer descriptor to userspace.
 */
static int
zc0301_vidioc_dqbuf(struct zc0301_device* cam, struct file* filp,
		    void __user * arg)
{
	struct v4l2_buffer b;
	struct zc0301_frame_t *f;
	unsigned long lock_flags;
	long timeout;

	if (copy_from_user(&b, arg, sizeof(b)))
		return -EFAULT;

	if (b.type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io!= IO_MMAP)
		return -EINVAL;

	if (list_empty(&cam->outqueue)) {
		if (cam->stream == STREAM_OFF)
			return -EINVAL;
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;
		/* Sleep until a frame completes, the device goes away or
		   the frame timeout expires */
		timeout = wait_event_interruptible_timeout
			  ( cam->wait_frame,
			    (!list_empty(&cam->outqueue)) ||
			    (cam->state & DEV_DISCONNECTED) ||
			    (cam->state & DEV_MISCONFIGURED),
			    cam->module_param.frame_timeout *
			    1000 * msecs_to_jiffies(1) );
		if (timeout < 0)
			return timeout;
		if (cam->state & DEV_DISCONNECTED)
			return -ENODEV;
		if (!timeout || (cam->state & DEV_MISCONFIGURED))
			return -EIO;
	}

	spin_lock_irqsave(&cam->queue_lock, lock_flags);
	f = list_entry(cam->outqueue.next, struct zc0301_frame_t, frame);
	list_del(cam->outqueue.next);
	spin_unlock_irqrestore(&cam->queue_lock, lock_flags);

	f->state = F_UNUSED;

	memcpy(&b, &f->buf, sizeof(b));
	if (f->vma_use_count)
		b.flags |= V4L2_BUF_FLAG_MAPPED;

	if (copy_to_user(arg, &b, sizeof(b)))
		return -EFAULT;

	PDBGG("Frame #%lu dequeued", (unsigned long)f->buf.index);

	return 0;
}
/* VIDIOC_STREAMON: start capture; only valid in mmap I/O mode */
static int
zc0301_vidioc_streamon(struct zc0301_device* cam, void __user * arg)
{
	int type;

	if (copy_from_user(&type, arg, sizeof(type)))
		return -EFAULT;

	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE && cam->io == IO_MMAP) {
		cam->stream = STREAM_ON;
		DBG(3, "Stream on");
		return 0;
	}

	return -EINVAL;
}
/*
 * VIDIOC_STREAMOFF: pause capture and discard all pending frames.
 * Only valid in mmap I/O mode.
 */
static int
zc0301_vidioc_streamoff(struct zc0301_device* cam, void __user * arg)
{
	int type, err;

	if (copy_from_user(&type, arg, sizeof(type)))
		return -EFAULT;

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || cam->io != IO_MMAP)
		return -EINVAL;

	if (cam->stream == STREAM_ON)
		if ((err = zc0301_stream_interrupt(cam)))
			return err;

	zc0301_empty_framequeues(cam);

	DBG(3, "Stream off");

	return 0;
}
/* VIDIOC_G_PARM: report the number of buffers used for read() I/O */
static int
zc0301_vidioc_g_parm(struct zc0301_device* cam, void __user * arg)
{
	struct v4l2_streamparm parm;

	if (copy_from_user(&parm, arg, sizeof(parm)))
		return -EFAULT;

	if (parm.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	parm.parm.capture.extendedmode = 0;
	parm.parm.capture.readbuffers = cam->nreadbuffers;

	return copy_to_user(arg, &parm, sizeof(parm)) ? -EFAULT : 0;
}
/*
 * VIDIOC_S_PARM: set the number of buffers used for read() I/O.  Zero
 * keeps the current value; anything above ZC0301_MAX_FRAMES is clamped.
 * The granted value is written back to userspace before being stored.
 */
static int
zc0301_vidioc_s_parm(struct zc0301_device* cam, void __user * arg)
{
	struct v4l2_streamparm sp;

	if (copy_from_user(&sp, arg, sizeof(sp)))
		return -EFAULT;

	if (sp.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	sp.parm.capture.extendedmode = 0;

	if (sp.parm.capture.readbuffers == 0)
		sp.parm.capture.readbuffers = cam->nreadbuffers;

	if (sp.parm.capture.readbuffers > ZC0301_MAX_FRAMES)
		sp.parm.capture.readbuffers = ZC0301_MAX_FRAMES;

	if (copy_to_user(arg, &sp, sizeof(sp)))
		return -EFAULT;

	cam->nreadbuffers = sp.parm.capture.readbuffers;

	return 0;
}
/*
 * Dispatch a V4L2 ioctl to its handler.  Standard/menu/frame-interval
 * queries are explicitly unsupported and return -EINVAL, as does any
 * unrecognized command.
 */
static int zc0301_ioctl_v4l2(struct inode* inode, struct file* filp,
			     unsigned int cmd, void __user * arg)
{
	struct zc0301_device *cam = video_drvdata(filp);

	switch (cmd) {
	case VIDIOC_QUERYCAP:
		return zc0301_vidioc_querycap(cam, arg);
	case VIDIOC_ENUMINPUT:
		return zc0301_vidioc_enuminput(cam, arg);
	case VIDIOC_G_INPUT:
		return zc0301_vidioc_g_input(cam, arg);
	case VIDIOC_S_INPUT:
		return zc0301_vidioc_s_input(cam, arg);
	case VIDIOC_QUERYCTRL:
		return zc0301_vidioc_query_ctrl(cam, arg);
	case VIDIOC_G_CTRL:
		return zc0301_vidioc_g_ctrl(cam, arg);
	case VIDIOC_S_CTRL:
		return zc0301_vidioc_s_ctrl(cam, arg);
	case VIDIOC_CROPCAP:
		return zc0301_vidioc_cropcap(cam, arg);
	case VIDIOC_G_CROP:
		return zc0301_vidioc_g_crop(cam, arg);
	case VIDIOC_S_CROP:
		return zc0301_vidioc_s_crop(cam, arg);
	case VIDIOC_ENUM_FMT:
		return zc0301_vidioc_enum_fmt(cam, arg);
	case VIDIOC_G_FMT:
		return zc0301_vidioc_g_fmt(cam, arg);
	case VIDIOC_TRY_FMT:
	case VIDIOC_S_FMT:
		return zc0301_vidioc_try_s_fmt(cam, cmd, arg);
	case VIDIOC_ENUM_FRAMESIZES:
		return zc0301_vidioc_enum_framesizes(cam, arg);
	case VIDIOC_G_JPEGCOMP:
		return zc0301_vidioc_g_jpegcomp(cam, arg);
	case VIDIOC_S_JPEGCOMP:
		return zc0301_vidioc_s_jpegcomp(cam, arg);
	case VIDIOC_REQBUFS:
		return zc0301_vidioc_reqbufs(cam, arg);
	case VIDIOC_QUERYBUF:
		return zc0301_vidioc_querybuf(cam, arg);
	case VIDIOC_QBUF:
		return zc0301_vidioc_qbuf(cam, arg);
	case VIDIOC_DQBUF:
		return zc0301_vidioc_dqbuf(cam, filp, arg);
	case VIDIOC_STREAMON:
		return zc0301_vidioc_streamon(cam, arg);
	case VIDIOC_STREAMOFF:
		return zc0301_vidioc_streamoff(cam, arg);
	case VIDIOC_G_PARM:
		return zc0301_vidioc_g_parm(cam, arg);
	case VIDIOC_S_PARM:
		return zc0301_vidioc_s_parm(cam, arg);
	case VIDIOC_G_STD:
	case VIDIOC_S_STD:
	case VIDIOC_QUERYSTD:
	case VIDIOC_ENUMSTD:
	case VIDIOC_QUERYMENU:
	case VIDIOC_ENUM_FRAMEINTERVALS:
		return -EINVAL;
	default:
		return -EINVAL;
	}
}
/*
 * ioctl() entry point: verifies the device is present and configured,
 * then forwards to the V4L2 dispatcher under the file-operations mutex.
 */
static int zc0301_ioctl(struct inode* inode, struct file* filp,
			unsigned int cmd, unsigned long arg)
{
	struct zc0301_device *cam = video_drvdata(filp);
	int err = 0;

	if (mutex_lock_interruptible(&cam->fileop_mutex))
		return -ERESTARTSYS;

	if (cam->state & DEV_DISCONNECTED) {
		DBG(1, "Device not present");
		mutex_unlock(&cam->fileop_mutex);
		return -ENODEV;
	}

	if (cam->state & DEV_MISCONFIGURED) {
		DBG(1, "The camera is misconfigured. Close and open it "
		       "again.");
		mutex_unlock(&cam->fileop_mutex);
		return -EIO;
	}

	V4LDBG(3, "zc0301", cmd);

	err = zc0301_ioctl_v4l2(inode, filp, cmd, (void __user *)arg);

	mutex_unlock(&cam->fileop_mutex);

	return err;
}
/* File operations for the registered video device node */
static const struct file_operations zc0301_fops = {
	.owner = THIS_MODULE,
	.open = zc0301_open,
	.release = zc0301_release,
	.ioctl = zc0301_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = v4l_compat_ioctl32,
#endif
	.read = zc0301_read,
	.poll = zc0301_poll,
	.mmap = zc0301_mmap,
	.llseek = no_llseek,
};
/*****************************************************************************/
/*
 * USB probe: allocate the device structure, detect the image sensor by
 * trying each entry of zc0301_sensor_table[] in turn, initialize the
 * hardware (deferring to open() on failure via DEV_MISCONFIGURED) and
 * register the V4L2 device node.  dev_nr rotates through the per-device
 * module parameter slots (video_nr, force_munmap, frame_timeout).
 */
static int
zc0301_usb_probe(struct usb_interface* intf, const struct usb_device_id* id)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct zc0301_device* cam;
	static unsigned int dev_nr;
	unsigned int i;
	int err = 0;

	if (!(cam = kzalloc(sizeof(struct zc0301_device), GFP_KERNEL)))
		return -ENOMEM;

	cam->usbdev = udev;

	if (!(cam->control_buffer = kzalloc(4, GFP_KERNEL))) {
		DBG(1, "kmalloc() failed");
		err = -ENOMEM;
		goto fail;
	}

	if (!(cam->v4ldev = video_device_alloc())) {
		DBG(1, "video_device_alloc() failed");
		err = -ENOMEM;
		goto fail;
	}

	DBG(2, "ZC0301[P] Image Processor and Control Chip detected "
	       "(vid/pid 0x%04X:0x%04X)",id->idVendor, id->idProduct);

	/* Probe for a supported sensor: first successful attach wins */
	for (i = 0; zc0301_sensor_table[i]; i++) {
		err = zc0301_sensor_table[i](cam);
		if (!err)
			break;
	}

	if (!err)
		DBG(2, "%s image sensor detected", cam->sensor.name);
	else {
		DBG(1, "No supported image sensor detected");
		err = -ENODEV;
		goto fail;
	}

	/* Init failure is not fatal here; open() will retry */
	if (zc0301_init(cam)) {
		DBG(1, "Initialization failed. I will retry on open().");
		cam->state |= DEV_MISCONFIGURED;
	}

	strcpy(cam->v4ldev->name, "ZC0301[P] PC Camera");
	cam->v4ldev->fops = &zc0301_fops;
	cam->v4ldev->minor = video_nr[dev_nr];
	cam->v4ldev->release = video_device_release;
	cam->v4ldev->parent = &udev->dev;
	video_set_drvdata(cam->v4ldev, cam);

	init_completion(&cam->probe);

	err = video_register_device(cam->v4ldev, VFL_TYPE_GRABBER,
				    video_nr[dev_nr]);
	if (err) {
		DBG(1, "V4L2 device registration failed");
		if (err == -ENFILE && video_nr[dev_nr] == -1)
			DBG(1, "Free /dev/videoX node not found");
		video_nr[dev_nr] = -1;
		dev_nr = (dev_nr < ZC0301_MAX_DEVICES-1) ? dev_nr+1 : 0;
		complete_all(&cam->probe);
		goto fail;
	}

	DBG(2, "V4L2 device registered as /dev/video%d", cam->v4ldev->num);

	cam->module_param.force_munmap = force_munmap[dev_nr];
	cam->module_param.frame_timeout = frame_timeout[dev_nr];

	dev_nr = (dev_nr < ZC0301_MAX_DEVICES-1) ? dev_nr+1 : 0;

	usb_set_intfdata(intf, cam);
	kref_init(&cam->kref);
	usb_get_dev(cam->usbdev);

	/* Unblock any open() that raced with this probe */
	complete_all(&cam->probe);

	return 0;

fail:
	if (cam) {
		kfree(cam->control_buffer);
		if (cam->v4ldev)
			video_device_release(cam->v4ldev);
		kfree(cam);
	}
	return err;
}
/*
 * USB disconnect: mark the device gone and wake all sleepers.  If a
 * user still has the node open, actual teardown is deferred to the
 * final kref_put in release(); otherwise the reference dropped here
 * frees everything.
 */
static void zc0301_usb_disconnect(struct usb_interface* intf)
{
	struct zc0301_device* cam;

	down_write(&zc0301_dev_lock);

	cam = usb_get_intfdata(intf);

	DBG(2, "Disconnecting %s...", cam->v4ldev->name);

	if (cam->users) {
		DBG(2, "Device /dev/video%d is open! Deregistration and "
		       "memory deallocation are deferred.",
		    cam->v4ldev->num);
		cam->state |= DEV_MISCONFIGURED;
		zc0301_stop_transfer(cam);
		cam->state |= DEV_DISCONNECTED;
		wake_up_interruptible(&cam->wait_frame);
		wake_up(&cam->wait_stream);
	} else
		cam->state |= DEV_DISCONNECTED;

	wake_up_interruptible_all(&cam->wait_open);

	kref_put(&cam->kref, zc0301_release_resources);

	up_write(&zc0301_dev_lock);
}
/* USB driver glue: device match table plus probe/disconnect callbacks */
static struct usb_driver zc0301_usb_driver = {
	.name = "zc0301",
	.id_table = zc0301_id_table,
	.probe = zc0301_usb_probe,
	.disconnect = zc0301_usb_disconnect,
};
/*****************************************************************************/
/* Module entry point: announce the driver and register with USB core */
static int __init zc0301_module_init(void)
{
	int err;

	KDBG(2, ZC0301_MODULE_NAME " v" ZC0301_MODULE_VERSION);
	KDBG(3, ZC0301_MODULE_AUTHOR);

	err = usb_register(&zc0301_usb_driver);
	if (err)
		KDBG(1, "usb_register() failed");

	return err;
}
/* Module exit: unregister the USB driver; live opens finish via kref */
static void __exit zc0301_module_exit(void)
{
	usb_deregister(&zc0301_usb_driver);
}

module_init(zc0301_module_init);
module_exit(zc0301_module_exit);
| gpl-2.0 |
hash07/Apollo_X | tools/perf/ui/browsers/hists.c | 2074 | 48994 | #include <stdio.h>
#include "../libslang.h"
#include <stdlib.h>
#include <string.h>
#include <linux/rbtree.h>
#include "../../util/evsel.h"
#include "../../util/evlist.h"
#include "../../util/hist.h"
#include "../../util/pstack.h"
#include "../../util/sort.h"
#include "../../util/util.h"
#include "../../arch/common.h"
#include "../browser.h"
#include "../helpline.h"
#include "../util.h"
#include "../ui.h"
#include "map.h"
/*
 * State of the interactive hists TUI browser: embeds the generic
 * ui_browser core and tracks the hists being shown plus the current
 * selection.
 */
struct hist_browser {
	struct ui_browser b;		/* generic slang browser core */
	struct hists *hists;		/* histogram data being displayed */
	struct hist_entry *he_selection; /* presumably the selected entry — set elsewhere */
	struct map_symbol *selection;	/* presumably the selected map/symbol — set elsewhere */
	int print_seq;
	bool show_dso;
	bool has_symbols;
};
extern void hist_browser__init_hpp(void);
static int hists__browser_title(struct hists *hists, char *bf, size_t size,
const char *ev_name);
/* Recompute the browser width from the current sort-column widths */
static void hist_browser__refresh_dimensions(struct hist_browser *browser)
{
	/* 3 == +/- toggle symbol before actual hist_entry rendering */
	browser->b.width = 3 + (hists__sort_list_width(browser->hists) +
			     sizeof("[k]"));
}
/* Resync the entry count with the hists and jump back to the first row */
static void hist_browser__reset(struct hist_browser *browser)
{
	browser->b.nr_entries = browser->hists->nr_entries;
	hist_browser__refresh_dimensions(browser);
	ui_browser__reset_index(&browser->b);
}
/* Sign shown before an expandable node: '-' when open, '+' when closed */
static char tree__folded_sign(bool unfolded)
{
	if (unfolded)
		return '-';

	return '+';
}
/* Folded sign for a map_symbol; blank when it has no children */
static char map_symbol__folded(const struct map_symbol *ms)
{
	return ms->has_children ? tree__folded_sign(ms->unfolded) : ' ';
}
/* Fold sign for a top-level hist_entry (delegates to its map_symbol). */
static char hist_entry__folded(const struct hist_entry *he)
{
	return map_symbol__folded(&he->ms);
}
/* Fold sign for one callchain line (delegates to its map_symbol). */
static char callchain_list__folded(const struct callchain_list *cl)
{
	return map_symbol__folded(&cl->ms);
}
/*
 * Force the fold state: unfolding is only honoured when the node
 * actually has children; folding always clears the flag.
 */
static void map_symbol__set_folding(struct map_symbol *ms, bool unfold)
{
	if (unfold)
		ms->unfolded = ms->has_children;
	else
		ms->unfolded = false;
}
/*
 * Count how many screen rows the children of @node contribute,
 * recursing into any child whose last examined line is unfolded ('-').
 * A '+' line short-circuits its sibling list: collapsed lines below it
 * are not shown and thus not counted.
 */
static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
{
	int n = 0;
	struct rb_node *nd;

	for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
		struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
		struct callchain_list *chain;
		char folded_sign = ' '; /* No children */

		list_for_each_entry(chain, &child->val, list) {
			++n;
			/* We need this because we may not have children */
			folded_sign = callchain_list__folded(chain);
			if (folded_sign == '+')
				break;
		}

		if (folded_sign == '-') /* Have children and they're unfolded */
			n += callchain_node__count_rows_rb_tree(child);
	}

	return n;
}
/*
 * Count the rows of @node itself: one per line in its value list, plus
 * the rows of its rb_root children when the node is unfolded. Note the
 * unfolded flag is taken from the *last* line of the list — presumably
 * all lines of a node share the same fold state; TODO confirm.
 */
static int callchain_node__count_rows(struct callchain_node *node)
{
	struct callchain_list *chain;
	bool unfolded = false;
	int n = 0;

	list_for_each_entry(chain, &node->val, list) {
		++n;
		unfolded = chain->ms.unfolded;
	}

	if (unfolded)
		n += callchain_node__count_rows_rb_tree(node);

	return n;
}
static int callchain__count_rows(struct rb_root *chain)
{
struct rb_node *nd;
int n = 0;
for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
n += callchain_node__count_rows(node);
}
return n;
}
static bool map_symbol__toggle_fold(struct map_symbol *ms)
{
if (!ms)
return false;
if (!ms->has_children)
return false;
ms->unfolded = !ms->unfolded;
return true;
}
/*
 * Recursively initialize the has_children flag of every callchain line
 * under @node. The first line of a child node has children if more
 * lines follow it or the child has its own rb children; subsequent
 * lines only when they are the last line *and* rb children exist.
 *
 * Fix: the original initialized 'nd' at declaration and then
 * immediately re-initialized it in the for statement — a dead store.
 */
static void callchain_node__init_have_children_rb_tree(struct callchain_node *node)
{
	struct rb_node *nd;

	for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
		struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
		struct callchain_list *chain;
		bool first = true;

		list_for_each_entry(chain, &child->val, list) {
			if (first) {
				first = false;
				chain->ms.has_children = chain->list.next != &child->val ||
							 !RB_EMPTY_ROOT(&child->rb_root);
			} else
				chain->ms.has_children = chain->list.next == &child->val &&
							 !RB_EMPTY_ROOT(&child->rb_root);
		}

		callchain_node__init_have_children_rb_tree(child);
	}
}
/*
 * Initialize has_children for the lines of @node itself (they have
 * children iff the node has rb children), then recurse into the tree.
 */
static void callchain_node__init_have_children(struct callchain_node *node)
{
	struct callchain_list *chain;

	list_for_each_entry(chain, &node->val, list)
		chain->ms.has_children = !RB_EMPTY_ROOT(&node->rb_root);

	callchain_node__init_have_children_rb_tree(node);
}
static void callchain__init_have_children(struct rb_root *root)
{
struct rb_node *nd;
for (nd = rb_first(root); nd; nd = rb_next(nd)) {
struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
callchain_node__init_have_children(node);
}
}
/*
 * Lazily initialize the fold metadata of a hist_entry: run exactly
 * once per entry, guarded by init_have_children.
 */
static void hist_entry__init_have_children(struct hist_entry *he)
{
	if (he->init_have_children)
		return;

	he->ms.has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
	callchain__init_have_children(&he->sorted_chain);
	he->init_have_children = true;
}
/*
 * Toggle folding of the currently selected entry/callchain line and
 * update the browser's row accounting: the entry's callchain rows are
 * re-counted when unfolding, dropped when folding.
 * Returns false when the selection has nothing to fold.
 */
static bool hist_browser__toggle_fold(struct hist_browser *browser)
{
	if (map_symbol__toggle_fold(browser->selection)) {
		struct hist_entry *he = browser->he_selection;

		hist_entry__init_have_children(he);
		/* Remove the old row count before recomputing it. */
		browser->hists->nr_entries -= he->nr_rows;

		if (he->ms.unfolded)
			he->nr_rows = callchain__count_rows(&he->sorted_chain);
		else
			he->nr_rows = 0;
		browser->hists->nr_entries += he->nr_rows;
		browser->b.nr_entries = browser->hists->nr_entries;

		return true;
	}

	/* If it doesn't have children, no toggling performed */
	return false;
}
/*
 * Recursively force the fold state of every line under @node's rb
 * children. Returns the number of lines visited (i.e. the row count
 * when everything is unfolded).
 */
static int callchain_node__set_folding_rb_tree(struct callchain_node *node, bool unfold)
{
	int n = 0;
	struct rb_node *nd;

	for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
		struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
		struct callchain_list *chain;
		bool has_children = false;

		list_for_each_entry(chain, &child->val, list) {
			++n;
			map_symbol__set_folding(&chain->ms, unfold);
			has_children = chain->ms.has_children;
		}

		/* Recurse when the last line indicates rb children below. */
		if (has_children)
			n += callchain_node__set_folding_rb_tree(child, unfold);
	}

	return n;
}
/*
 * Force the fold state of @node's own lines, then of its subtree when
 * the last line has children. Returns the number of lines visited.
 */
static int callchain_node__set_folding(struct callchain_node *node, bool unfold)
{
	struct callchain_list *chain;
	bool has_children = false;
	int n = 0;

	list_for_each_entry(chain, &node->val, list) {
		++n;
		map_symbol__set_folding(&chain->ms, unfold);
		has_children = chain->ms.has_children;
	}

	if (has_children)
		n += callchain_node__set_folding_rb_tree(node, unfold);

	return n;
}
/*
 * Force the fold state of an entire callchain tree and return the
 * total number of lines it contains.
 */
static int callchain__set_folding(struct rb_root *chain, bool unfold)
{
	int total = 0;
	struct rb_node *rb;

	for (rb = rb_first(chain); rb != NULL; rb = rb_next(rb)) {
		struct callchain_node *cnode;

		cnode = rb_entry(rb, struct callchain_node, rb_node);
		total += callchain_node__set_folding(cnode, unfold);
	}

	return total;
}
/*
 * Force the fold state of one hist_entry and recompute its nr_rows:
 * the full callchain line count when unfolding, zero otherwise.
 */
static void hist_entry__set_folding(struct hist_entry *he, bool unfold)
{
	hist_entry__init_have_children(he);
	map_symbol__set_folding(&he->ms, unfold);

	if (he->ms.has_children) {
		int n = callchain__set_folding(&he->sorted_chain, unfold);
		he->nr_rows = unfold ? n : 0;
	} else
		he->nr_rows = 0;
}
/*
 * Force the fold state of every entry in @hists and rebuild the
 * total row count (1 row per entry plus its callchain rows).
 */
static void hists__set_folding(struct hists *hists, bool unfold)
{
	struct rb_node *nd;

	hists->nr_entries = 0;

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
		hist_entry__set_folding(he, unfold);
		hists->nr_entries += 1 + he->nr_rows;
	}
}
/*
 * Collapse ('C') or expand ('E') everything, then jump back to the
 * top since the cursor may now point past the shrunken entry list.
 */
static void hist_browser__set_folding(struct hist_browser *browser, bool unfold)
{
	hists__set_folding(browser->hists, unfold);
	browser->b.nr_entries = browser->hists->nr_entries;
	/* Go to the start, we may be way after valid entries after a collapse */
	ui_browser__reset_index(&browser->b);
}
/* Pop up a warning when the kernel reported lost sample events. */
static void ui_browser__warn_lost_events(struct ui_browser *browser)
{
	ui_browser__warning(browser, 4,
		"Events are being lost, check IO/CPU overload!\n\n"
		"You may want to run 'perf' using a RT scheduler policy:\n\n"
		" perf top -r 80\n\n"
		"Or reduce the sampling frequency.");
}
/*
 * Main interactive loop of the hists browser. Handles the keys it owns
 * (timer refresh, debug dump, collapse/expand, fold toggle) and returns
 * any other key to the caller for menu handling.
 * Returns -1 if the browser could not be shown, otherwise the key that
 * ended the loop.
 */
static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
			     struct hist_browser_timer *hbt)
{
	int key;
	char title[160];
	int delay_secs = hbt ? hbt->refresh : 0;

	browser->b.entries = &browser->hists->entries;
	browser->b.nr_entries = browser->hists->nr_entries;

	hist_browser__refresh_dimensions(browser);
	hists__browser_title(browser->hists, title, sizeof(title), ev_name);

	if (ui_browser__show(&browser->b, title,
			     "Press '?' for help on key bindings") < 0)
		return -1;

	while (1) {
		key = ui_browser__run(&browser->b, delay_secs);

		switch (key) {
		case K_TIMER:
			/* NOTE(review): assumes hbt != NULL here, i.e. K_TIMER
			 * is only delivered when a timer was set up — confirm. */
			hbt->timer(hbt->arg);
			ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);

			if (browser->hists->stats.nr_lost_warned !=
			    browser->hists->stats.nr_events[PERF_RECORD_LOST]) {
				browser->hists->stats.nr_lost_warned =
					browser->hists->stats.nr_events[PERF_RECORD_LOST];
				ui_browser__warn_lost_events(&browser->b);
			}

			hists__browser_title(browser->hists, title, sizeof(title), ev_name);
			ui_browser__show_title(&browser->b, title);
			continue;
		case 'D': { /* Debug */
			static int seq;
			struct hist_entry *h = rb_entry(browser->b.top,
							struct hist_entry, rb_node);
			ui_helpline__pop();
			ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
					   seq++, browser->b.nr_entries,
					   browser->hists->nr_entries,
					   browser->b.height,
					   browser->b.index,
					   browser->b.top_idx,
					   h->row_offset, h->nr_rows);
		}
			break;
		case 'C':
			/* Collapse the whole world. */
			hist_browser__set_folding(browser, false);
			break;
		case 'E':
			/* Expand the whole world. */
			hist_browser__set_folding(browser, true);
			break;
		case K_ENTER:
			if (hist_browser__toggle_fold(browser))
				break;
			/* fall thru */
		default:
			goto out;
		}
	}
out:
	ui_browser__hide(&browser->b);
	return key;
}
/*
 * Format a callchain line into @bf: the symbol name when resolved,
 * otherwise the raw instruction pointer; optionally followed by the
 * DSO short name when @show_dso is set. Returns @bf.
 */
static char *callchain_list__sym_name(struct callchain_list *cl,
				      char *bf, size_t bfsize, bool show_dso)
{
	int printed;

	if (cl->ms.sym)
		printed = scnprintf(bf, bfsize, "%s", cl->ms.sym->name);
	else
		printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);

	if (show_dso)
		scnprintf(bf + printed, bfsize - printed, " %s",
			  cl->ms.map ? cl->ms.map->dso->short_name : "unknown");

	return bf;
}
#define LEVEL_OFFSET_STEP 3
/*
 * Render the rb children of @chain_node on screen, starting at @row and
 * indented by @level. @row_offset counts lines to skip before drawing
 * (for scrolled views); *is_current_entry is set when the highlighted
 * row falls inside this subtree. Returns the number of rows drawn.
 */
static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browser,
						     struct callchain_node *chain_node,
						     u64 total, int level,
						     unsigned short row,
						     off_t *row_offset,
						     bool *is_current_entry)
{
	struct rb_node *node;
	int first_row = row, width, offset = level * LEVEL_OFFSET_STEP;
	u64 new_total, remaining;

	/* In relative graph mode percentages are against this subtree's hits. */
	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = chain_node->children_hit;
	else
		new_total = total;

	remaining = new_total;
	node = rb_first(&chain_node->rb_root);
	while (node) {
		struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
		struct rb_node *next = rb_next(node);
		u64 cumul = callchain_cumul_hits(child);
		struct callchain_list *chain;
		char folded_sign = ' ';
		int first = true;
		int extra_offset = 0;

		remaining -= cumul;

		list_for_each_entry(chain, &child->val, list) {
			char bf[1024], *alloc_str;
			const char *str;
			int color;
			bool was_first = first;

			/* Lines after the first get one extra indent step. */
			if (first)
				first = false;
			else
				extra_offset = LEVEL_OFFSET_STEP;

			folded_sign = callchain_list__folded(chain);
			if (*row_offset != 0) {
				/* Still above the visible area: skip drawing. */
				--*row_offset;
				goto do_next;
			}

			alloc_str = NULL;
			str = callchain_list__sym_name(chain, bf, sizeof(bf),
						       browser->show_dso);
			if (was_first) {
				/* First line carries the percentage prefix. */
				double percent = cumul * 100.0 / new_total;

				if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0)
					str = "Not enough memory!";
				else
					str = alloc_str;
			}

			color = HE_COLORSET_NORMAL;
			width = browser->b.width - (offset + extra_offset + 2);
			if (ui_browser__is_current_entry(&browser->b, row)) {
				browser->selection = &chain->ms;
				color = HE_COLORSET_SELECTED;
				*is_current_entry = true;
			}

			ui_browser__set_color(&browser->b, color);
			ui_browser__gotorc(&browser->b, row, 0);
			slsmg_write_nstring(" ", offset + extra_offset);
			slsmg_printf("%c ", folded_sign);
			slsmg_write_nstring(str, width);
			free(alloc_str);

			if (++row == browser->b.height)
				goto out;
do_next:
			if (folded_sign == '+')
				break;
		}

		if (folded_sign == '-') {
			/* Recurse into unfolded children, one (or two) levels deeper. */
			const int new_level = level + (extra_offset ? 2 : 1);
			row += hist_browser__show_callchain_node_rb_tree(browser, child, new_total,
									 new_level, row, row_offset,
									 is_current_entry);
		}
		if (row == browser->b.height)
			goto out;
		node = next;
	}
out:
	return row - first_row;
}
/*
 * Render one callchain node's own lines, then (if unfolded) its rb
 * subtree. Same scrolling/selection contract as the rb_tree variant.
 * Returns the number of rows drawn.
 */
static int hist_browser__show_callchain_node(struct hist_browser *browser,
					     struct callchain_node *node,
					     int level, unsigned short row,
					     off_t *row_offset,
					     bool *is_current_entry)
{
	struct callchain_list *chain;
	int first_row = row,
	    offset = level * LEVEL_OFFSET_STEP,
	    width = browser->b.width - offset;
	char folded_sign = ' ';

	list_for_each_entry(chain, &node->val, list) {
		char bf[1024], *s;
		int color;

		folded_sign = callchain_list__folded(chain);
		if (*row_offset != 0) {
			/* Still above the visible area: skip drawing. */
			--*row_offset;
			continue;
		}

		color = HE_COLORSET_NORMAL;
		if (ui_browser__is_current_entry(&browser->b, row)) {
			browser->selection = &chain->ms;
			color = HE_COLORSET_SELECTED;
			*is_current_entry = true;
		}

		s = callchain_list__sym_name(chain, bf, sizeof(bf),
					     browser->show_dso);
		ui_browser__gotorc(&browser->b, row, 0);
		ui_browser__set_color(&browser->b, color);
		slsmg_write_nstring(" ", offset);
		slsmg_printf("%c ", folded_sign);
		slsmg_write_nstring(s, width - 2);

		if (++row == browser->b.height)
			goto out;
	}

	if (folded_sign == '-')
		row += hist_browser__show_callchain_node_rb_tree(browser, node,
								 browser->hists->stats.total_period,
								 level + 1, row,
								 row_offset,
								 is_current_entry);
out:
	return row - first_row;
}
/*
 * Render a whole callchain tree, node by node, stopping when the
 * bottom of the screen is reached. Returns the number of rows drawn.
 */
static int hist_browser__show_callchain(struct hist_browser *browser,
					struct rb_root *chain,
					int level, unsigned short row,
					off_t *row_offset,
					bool *is_current_entry)
{
	int first_row = row;
	struct rb_node *rb;

	for (rb = rb_first(chain); rb != NULL; rb = rb_next(rb)) {
		struct callchain_node *cnode;

		cnode = rb_entry(rb, struct callchain_node, rb_node);
		row += hist_browser__show_callchain_node(browser, cnode, level,
							 row, row_offset,
							 is_current_entry);
		if (row == browser->b.height)
			break;
	}

	return row - first_row;
}
/* Per-row context passed to the hpp color callbacks via hpp->ptr. */
struct hpp_arg {
	struct ui_browser *b;	/* browser to draw into */
	char folded_sign;	/* '+'/'-'/' ' prefix for the overhead column */
	bool current_entry;	/* row under the cursor (affects coloring) */
};
/*
 * Print the fold sign in front of the overhead column when callchains
 * are enabled. Returns the number of columns consumed (2 or 0).
 */
static int __hpp__color_callchain(struct hpp_arg *arg)
{
	if (symbol_conf.use_callchain) {
		slsmg_printf("%c ", arg->folded_sign);
		return 2;
	}

	return 0;
}
/*
 * Draw one percentage column for @he, colored by magnitude, optionally
 * prefixed via @callchain_cb. In event-group mode also draws one
 * percentage per group member, zero-filling members with no sample.
 * Returns the number of columns written.
 */
static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
			    u64 (*get_field)(struct hist_entry *),
			    int (*callchain_cb)(struct hpp_arg *))
{
	int ret = 0;
	double percent = 0.0;
	struct hists *hists = he->hists;
	struct hpp_arg *arg = hpp->ptr;

	if (hists->stats.total_period)
		percent = 100.0 * get_field(he) / hists->stats.total_period;

	ui_browser__set_percent_color(arg->b, percent, arg->current_entry);

	if (callchain_cb)
		ret += callchain_cb(arg);

	ret += scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
	slsmg_printf("%s", hpp->buf);

	if (symbol_conf.event_group) {
		int prev_idx, idx_delta;
		struct perf_evsel *evsel = hists_to_evsel(hists);
		struct hist_entry *pair;
		int nr_members = evsel->nr_members;

		if (nr_members <= 1)
			goto out;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = pair->hists->stats.total_period;

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				ui_browser__set_percent_color(arg->b, 0.0,
							      arg->current_entry);
				ret += scnprintf(hpp->buf, hpp->size,
						 " %6.2f%%", 0.0);
				slsmg_printf("%s", hpp->buf);
			}

			percent = 100.0 * period / total;
			ui_browser__set_percent_color(arg->b, percent,
						      arg->current_entry);
			ret += scnprintf(hpp->buf, hpp->size,
					 " %6.2f%%", percent);
			slsmg_printf("%s", hpp->buf);

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill group members at last which have no sample
			 */
			ui_browser__set_percent_color(arg->b, 0.0,
						      arg->current_entry);
			ret += scnprintf(hpp->buf, hpp->size,
					 " %6.2f%%", 0.0);
			slsmg_printf("%s", hpp->buf);
		}
	}
out:
	/* Restore normal color unless this is the highlighted row. */
	if (!arg->current_entry || !arg->b->navkeypressed)
		ui_browser__set_color(arg->b, HE_COLORSET_NORMAL);

	return ret;
}
/*
 * Generate a field accessor and a colored-column callback pair for one
 * overhead flavor. _cb is an optional prefix callback (only the plain
 * overhead column prints the callchain fold sign).
 */
#define __HPP_COLOR_PERCENT_FN(_type, _field, _cb)			\
static u64 __hpp_get_##_field(struct hist_entry *he)			\
{									\
	return he->stat._field;						\
}									\
									\
static int hist_browser__hpp_color_##_type(struct perf_hpp *hpp,	\
					   struct hist_entry *he)	\
{									\
	return __hpp__color_fmt(hpp, he, __hpp_get_##_field, _cb);	\
}

__HPP_COLOR_PERCENT_FN(overhead, period, __hpp__color_callchain)
__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys, NULL)
__HPP_COLOR_PERCENT_FN(overhead_us, period_us, NULL)
__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys, NULL)
__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us, NULL)

#undef __HPP_COLOR_PERCENT_FN
/*
 * Hook the TUI color callbacks into the generic hpp column table.
 * Called once at browser setup.
 */
void hist_browser__init_hpp(void)
{
	perf_hpp__column_enable(PERF_HPP__OVERHEAD);

	perf_hpp__init();

	perf_hpp__format[PERF_HPP__OVERHEAD].color =
				hist_browser__hpp_color_overhead;
	perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color =
				hist_browser__hpp_color_overhead_sys;
	perf_hpp__format[PERF_HPP__OVERHEAD_US].color =
				hist_browser__hpp_color_overhead_us;
	perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color =
				hist_browser__hpp_color_overhead_guest_sys;
	perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color =
				hist_browser__hpp_color_overhead_guest_us;
}
/*
 * Render one hist_entry row (all hpp columns plus the sort columns)
 * and, when unfolded, its callchain below it. Updates the browser's
 * selection when the cursor is on this entry. Returns rows printed.
 *
 * Fix: the callchain call's last argument was corrupted to the
 * mojibake "¤t_entry" (HTML-entity damage of "&current_entry"),
 * which is not valid C — restored to &current_entry, matching the
 * bool * parameter of hist_browser__show_callchain().
 */
static int hist_browser__show_entry(struct hist_browser *browser,
				    struct hist_entry *entry,
				    unsigned short row)
{
	char s[256];
	int printed = 0;
	int width = browser->b.width;
	char folded_sign = ' ';
	bool current_entry = ui_browser__is_current_entry(&browser->b, row);
	off_t row_offset = entry->row_offset;
	bool first = true;
	struct perf_hpp_fmt *fmt;

	if (current_entry) {
		browser->he_selection = entry;
		browser->selection = &entry->ms;
	}

	if (symbol_conf.use_callchain) {
		hist_entry__init_have_children(entry);
		folded_sign = hist_entry__folded(entry);
	}

	if (row_offset == 0) {
		struct hpp_arg arg = {
			.b		= &browser->b,
			.folded_sign	= folded_sign,
			.current_entry	= current_entry,
		};
		struct perf_hpp hpp = {
			.buf		= s,
			.size		= sizeof(s),
			.ptr		= &arg,
		};

		ui_browser__gotorc(&browser->b, row, 0);

		perf_hpp__for_each_format(fmt) {
			if (!first) {
				/* NOTE(review): separator is 1 char but width
				 * drops by 2 — upstream uses "  "; confirm. */
				slsmg_printf(" ");
				width -= 2;
			}
			first = false;

			if (fmt->color) {
				width -= fmt->color(&hpp, entry);
			} else {
				width -= fmt->entry(&hpp, entry);
				slsmg_printf("%s", s);
			}
		}

		/* The scroll bar isn't being used */
		if (!browser->b.navkeypressed)
			width += 1;

		hist_entry__sort_snprintf(entry, s, sizeof(s), browser->hists);
		slsmg_write_nstring(s, width);
		++row;
		++printed;
	} else
		--row_offset;

	if (folded_sign == '-' && row != browser->b.height) {
		printed += hist_browser__show_callchain(browser, &entry->sorted_chain,
							1, row, &row_offset,
							&current_entry);
		if (current_entry)
			browser->he_selection = entry;
	}

	return printed;
}
/*
 * Lazily point browser->top at the first hist_entry if nothing has
 * been shown yet.
 */
static void ui_browser__hists_init_top(struct ui_browser *browser)
{
	struct hist_browser *hb;

	if (browser->top != NULL)
		return;

	hb = container_of(browser, struct hist_browser, b);
	browser->top = rb_first(&hb->hists->entries);
}
/*
 * ui_browser refresh callback: draw entries from browser->top until
 * the screen is full, skipping filtered-out entries. Returns the
 * number of rows rendered.
 */
static unsigned int hist_browser__refresh(struct ui_browser *browser)
{
	unsigned row = 0;
	struct rb_node *nd;
	struct hist_browser *hb = container_of(browser, struct hist_browser, b);

	ui_browser__hists_init_top(browser);

	for (nd = browser->top; nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (h->filtered)
			continue;

		row += hist_browser__show_entry(hb, h, row);
		if (row == browser->height)
			break;
	}

	return row;
}
/*
 * Starting from @nd, walk forward to the first hist_entry that is not
 * filtered out. Returns NULL when none remain.
 */
static struct rb_node *hists__filter_entries(struct rb_node *nd)
{
	for (; nd != NULL; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (!h->filtered)
			break;
	}

	return nd;
}
/*
 * Starting from @nd, walk backward to the first hist_entry that is
 * not filtered out. Returns NULL when none remain.
 */
static struct rb_node *hists__filter_prev_entries(struct rb_node *nd)
{
	for (; nd != NULL; nd = rb_prev(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (!h->filtered)
			break;
	}

	return nd;
}
/*
 * ui_browser seek callback: move the first visible entry by @offset
 * rows relative to @whence (SEEK_SET/CUR/END), treating each unfolded
 * entry's callchain rows as scrollable lines via h->row_offset.
 */
static void ui_browser__hists_seek(struct ui_browser *browser,
				   off_t offset, int whence)
{
	struct hist_entry *h;
	struct rb_node *nd;
	bool first = true;

	if (browser->nr_entries == 0)
		return;

	ui_browser__hists_init_top(browser);

	switch (whence) {
	case SEEK_SET:
		nd = hists__filter_entries(rb_first(browser->entries));
		break;
	case SEEK_CUR:
		nd = browser->top;
		goto do_offset;
	case SEEK_END:
		nd = hists__filter_prev_entries(rb_last(browser->entries));
		first = false;
		break;
	default:
		return;
	}

	/*
	 * Moves not relative to the first visible entry invalidates its
	 * row_offset:
	 */
	h = rb_entry(browser->top, struct hist_entry, rb_node);
	h->row_offset = 0;

	/*
	 * Here we have to check if nd is expanded (+), if it is we can't go
	 * the next top level hist_entry, instead we must compute an offset of
	 * what _not_ to show and not change the first visible entry.
	 *
	 * This offset increments when we are going from top to bottom and
	 * decreases when we're going from bottom to top.
	 *
	 * As we don't have backpointers to the top level in the callchains
	 * structure, we need to always print the whole hist_entry callchain,
	 * skipping the first ones that are before the first visible entry
	 * and stop when we printed enough lines to fill the screen.
	 */
do_offset:
	if (offset > 0) {
		/* Scrolling down: consume callchain rows, then advance entries. */
		do {
			h = rb_entry(nd, struct hist_entry, rb_node);
			if (h->ms.unfolded) {
				u16 remaining = h->nr_rows - h->row_offset;
				if (offset > remaining) {
					offset -= remaining;
					h->row_offset = 0;
				} else {
					h->row_offset += offset;
					offset = 0;
					browser->top = nd;
					break;
				}
			}
			nd = hists__filter_entries(rb_next(nd));
			if (nd == NULL)
				break;
			--offset;
			browser->top = nd;
		} while (offset != 0);
	} else if (offset < 0) {
		/* Scrolling up: mirror of the above, walking backwards. */
		while (1) {
			h = rb_entry(nd, struct hist_entry, rb_node);
			if (h->ms.unfolded) {
				if (first) {
					if (-offset > h->row_offset) {
						offset += h->row_offset;
						h->row_offset = 0;
					} else {
						h->row_offset += offset;
						offset = 0;
						browser->top = nd;
						break;
					}
				} else {
					if (-offset > h->nr_rows) {
						offset += h->nr_rows;
						h->row_offset = 0;
					} else {
						h->row_offset = h->nr_rows + offset;
						offset = 0;
						browser->top = nd;
						break;
					}
				}
			}
			nd = hists__filter_prev_entries(rb_prev(nd));
			if (nd == NULL)
				break;
			++offset;
			browser->top = nd;
			if (offset == 0) {
				/*
				 * Last unfiltered hist_entry, check if it is
				 * unfolded, if it is then we should have
				 * row_offset at its last entry.
				 */
				h = rb_entry(nd, struct hist_entry, rb_node);
				if (h->ms.unfolded)
					h->row_offset = h->nr_rows;
				break;
			}
			first = false;
		}
	} else {
		browser->top = nd;
		h = rb_entry(nd, struct hist_entry, rb_node);
		h->row_offset = 0;
	}
}
/*
 * File-dump twin of hist_browser__show_callchain_node_rb_tree():
 * print the rb children of @chain_node to @fp, indented by @level,
 * with the percentage prefix on each child's first line.
 * Returns the number of characters written.
 */
static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *browser,
							struct callchain_node *chain_node,
							u64 total, int level,
							FILE *fp)
{
	struct rb_node *node;
	int offset = level * LEVEL_OFFSET_STEP;
	u64 new_total, remaining;
	int printed = 0;

	/* In relative graph mode percentages are against this subtree's hits. */
	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = chain_node->children_hit;
	else
		new_total = total;

	remaining = new_total;
	node = rb_first(&chain_node->rb_root);
	while (node) {
		struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
		struct rb_node *next = rb_next(node);
		u64 cumul = callchain_cumul_hits(child);
		struct callchain_list *chain;
		char folded_sign = ' ';
		int first = true;
		int extra_offset = 0;

		remaining -= cumul;

		list_for_each_entry(chain, &child->val, list) {
			char bf[1024], *alloc_str;
			const char *str;
			bool was_first = first;

			if (first)
				first = false;
			else
				extra_offset = LEVEL_OFFSET_STEP;

			folded_sign = callchain_list__folded(chain);

			alloc_str = NULL;
			str = callchain_list__sym_name(chain, bf, sizeof(bf),
						       browser->show_dso);
			if (was_first) {
				double percent = cumul * 100.0 / new_total;

				if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0)
					str = "Not enough memory!";
				else
					str = alloc_str;
			}

			printed += fprintf(fp, "%*s%c %s\n", offset + extra_offset, " ", folded_sign, str);
			free(alloc_str);
			if (folded_sign == '+')
				break;
		}

		if (folded_sign == '-') {
			const int new_level = level + (extra_offset ? 2 : 1);
			printed += hist_browser__fprintf_callchain_node_rb_tree(browser, child, new_total,
										new_level, fp);
		}

		node = next;
	}

	return printed;
}
/*
 * Print one callchain node's lines to @fp, then (if unfolded) its rb
 * subtree. Returns the number of characters written.
 */
static int hist_browser__fprintf_callchain_node(struct hist_browser *browser,
						struct callchain_node *node,
						int level, FILE *fp)
{
	struct callchain_list *chain;
	int offset = level * LEVEL_OFFSET_STEP;
	char folded_sign = ' ';
	int printed = 0;

	list_for_each_entry(chain, &node->val, list) {
		char bf[1024], *s;

		folded_sign = callchain_list__folded(chain);
		s = callchain_list__sym_name(chain, bf, sizeof(bf), browser->show_dso);
		printed += fprintf(fp, "%*s%c %s\n", offset, " ", folded_sign, s);
	}

	if (folded_sign == '-')
		printed += hist_browser__fprintf_callchain_node_rb_tree(browser, node,
									browser->hists->stats.total_period,
									level + 1,  fp);
	return printed;
}
/*
 * Print an entire callchain tree to @fp, one top-level node at a time.
 * Returns the number of characters written.
 */
static int hist_browser__fprintf_callchain(struct hist_browser *browser,
					   struct rb_root *chain, int level, FILE *fp)
{
	int written = 0;
	struct rb_node *rb;

	for (rb = rb_first(chain); rb != NULL; rb = rb_next(rb)) {
		struct callchain_node *cnode;

		cnode = rb_entry(rb, struct callchain_node, rb_node);
		written += hist_browser__fprintf_callchain_node(browser, cnode, level, fp);
	}

	return written;
}
/*
 * Print one hist_entry (fold sign, percentage, optional sample count
 * and period, sort columns) to @fp, followed by its callchain when
 * unfolded. Returns the number of characters written.
 */
static int hist_browser__fprintf_entry(struct hist_browser *browser,
				       struct hist_entry *he, FILE *fp)
{
	char s[8192];
	double percent;
	int printed = 0;
	char folded_sign = ' ';

	if (symbol_conf.use_callchain)
		folded_sign = hist_entry__folded(he);

	hist_entry__sort_snprintf(he, s, sizeof(s), browser->hists);
	percent = (he->stat.period * 100.0) / browser->hists->stats.total_period;

	if (symbol_conf.use_callchain)
		printed += fprintf(fp, "%c ", folded_sign);

	printed += fprintf(fp, " %5.2f%%", percent);

	if (symbol_conf.show_nr_samples)
		printed += fprintf(fp, " %11u", he->stat.nr_events);

	if (symbol_conf.show_total_period)
		printed += fprintf(fp, " %12" PRIu64, he->stat.period);

	printed += fprintf(fp, "%s\n", rtrim(s));

	if (folded_sign == '-')
		printed += hist_browser__fprintf_callchain(browser, &he->sorted_chain, 1, fp);

	return printed;
}
/*
 * Dump every unfiltered hist_entry (with callchains) to @fp.
 * Returns the number of characters written.
 */
static int hist_browser__fprintf(struct hist_browser *browser, FILE *fp)
{
	int written = 0;
	struct rb_node *nd;

	for (nd = hists__filter_entries(rb_first(browser->b.entries));
	     nd != NULL;
	     nd = hists__filter_entries(rb_next(nd))) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		written += hist_browser__fprintf_entry(browser, h, fp);
	}

	return written;
}
/*
 * 'P' key handler: write the current histogram to the first free
 * perf.hist.N file in the working directory. Returns 0 on success,
 * -1 when no free name was found or the file couldn't be opened.
 */
static int hist_browser__dump(struct hist_browser *browser)
{
	char filename[64];
	FILE *fp;

	/* Find the first perf.hist.N name that doesn't exist yet. */
	while (1) {
		scnprintf(filename, sizeof(filename), "perf.hist.%d", browser->print_seq);
		if (access(filename, F_OK))
			break;
		/*
		 * XXX: Just an arbitrary lazy upper limit
		 */
		if (++browser->print_seq == 8192) {
			ui_helpline__fpush("Too many perf.hist.N files, nothing written!");
			return -1;
		}
	}

	fp = fopen(filename, "w");
	if (fp == NULL) {
		char bf[64];
		/* NOTE(review): assumes the GNU strerror_r that returns char* —
		 * the XSI variant returns int; confirm build flags. */
		const char *err = strerror_r(errno, bf, sizeof(bf));
		ui_helpline__fpush("Couldn't write to %s: %s", filename, err);
		return -1;
	}

	++browser->print_seq;
	hist_browser__fprintf(browser, fp);
	fclose(fp);
	ui_helpline__fpush("%s written!", filename);

	return 0;
}
/*
 * Allocate and initialize a hist_browser for @hists. has_symbols is
 * derived from whether the active sort order includes a symbol key.
 * Returns NULL on allocation failure.
 */
static struct hist_browser *hist_browser__new(struct hists *hists)
{
	struct hist_browser *browser = zalloc(sizeof(*browser));

	if (browser) {
		browser->hists = hists;
		browser->b.refresh = hist_browser__refresh;
		browser->b.seek = ui_browser__hists_seek;
		browser->b.use_navkeypressed = true;
		if (sort__branch_mode == 1)
			browser->has_symbols = sort_sym_from.list.next != NULL;
		else
			browser->has_symbols = sort_sym.list.next != NULL;
	}

	return browser;
}
/* Free a browser allocated by hist_browser__new() (hists not owned). */
static void hist_browser__delete(struct hist_browser *browser)
{
	free(browser);
}
/* Currently highlighted hist_entry, or NULL if none selected yet. */
static struct hist_entry *hist_browser__selected_entry(struct hist_browser *browser)
{
	return browser->he_selection;
}
/*
 * Thread of the highlighted entry. NOTE(review): dereferences
 * he_selection unconditionally — callers presumably only invoke this
 * with a valid selection; confirm.
 */
static struct thread *hist_browser__selected_thread(struct hist_browser *browser)
{
	return browser->he_selection->thread;
}
/*
 * Build the browser title line: sample/event counts (aggregated over
 * group members in group view) plus any active UID/thread/DSO filter.
 * Returns the number of characters written to @bf.
 *
 * Fix: the UID-filter append used raw snprintf(), whose return value
 * is the would-be length and can exceed the remaining space, pushing
 * "bf + printed" past the buffer on the subsequent appends; switched
 * to scnprintf() (returns what was actually written) for consistency
 * with every other append in this function.
 */
static int hists__browser_title(struct hists *hists, char *bf, size_t size,
				const char *ev_name)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	const struct thread *thread = hists->thread_filter;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	char buf[512];
	size_t buflen = sizeof(buf);

	if (perf_evsel__is_group_event(evsel)) {
		struct perf_evsel *pos;

		perf_evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			nr_samples += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE];
			nr_events += pos->hists.stats.total_period;
		}
	}

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event '%s', Event count (approx.): %lu",
			    nr_samples, unit, ev_name, nr_events);

	if (hists->uid_filter_str)
		printed += scnprintf(bf + printed, size - printed,
				     ", UID: %s", hists->uid_filter_str);
	if (thread)
		printed += scnprintf(bf + printed, size - printed,
				     ", Thread: %s(%d)",
				     (thread->comm_set ? thread->comm : ""),
				     thread->pid);
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso->short_name);
	return printed;
}
static inline void free_popup_options(char **options, int n)
{
int i;
for (i = 0; i < n; ++i) {
free(options[i]);
options[i] = NULL;
}
}
/*
 * Check whether the browser is for 'perf report' (no refresh timer)
 * as opposed to 'perf top' (timer present).
 */
static inline bool is_report_browser(void *timer)
{
	return !timer;
}
/*
* Only runtime switching of perf data file will make "input_name" point
* to a malloced buffer. So add "is_input_name_malloced" flag to decide
* whether we need to call free() for current "input_name" during the switch.
*/
static bool is_input_name_malloced = false;
/*
 * 's' key handler for 'perf report': scan $PWD for perf data files
 * (regular files starting with the perf magic), present them in a
 * popup menu, and point input_name at the chosen one.
 * Returns 0 when a new file was selected, -1 otherwise.
 *
 * Fix: the second memset cleared 'options' again instead of
 * 'abs_path', leaving abs_path[] uninitialized — any slot read before
 * being assigned (e.g. by free_popup_options on an early exit) was
 * indeterminate. Now zeroes abs_path as clearly intended.
 */
static int switch_data_file(void)
{
	char *pwd, *options[32], *abs_path[32], *tmp;
	DIR *pwd_dir;
	int nr_options = 0, choice = -1, ret = -1;
	struct dirent *dent;

	pwd = getenv("PWD");
	if (!pwd)
		return ret;

	pwd_dir = opendir(pwd);
	if (!pwd_dir)
		return ret;

	memset(options, 0, sizeof(options));
	memset(abs_path, 0, sizeof(abs_path));

	while ((dent = readdir(pwd_dir))) {
		char path[PATH_MAX];
		u64 magic;
		char *name = dent->d_name;
		FILE *file;

		if (!(dent->d_type == DT_REG))
			continue;

		snprintf(path, sizeof(path), "%s/%s", pwd, name);

		file = fopen(path, "r");
		if (!file)
			continue;

		if (fread(&magic, 1, 8, file) < 8)
			goto close_file_and_continue;

		if (is_perf_magic(magic)) {
			options[nr_options] = strdup(name);
			if (!options[nr_options])
				goto close_file_and_continue;

			abs_path[nr_options] = strdup(path);
			if (!abs_path[nr_options]) {
				free(options[nr_options]);
				ui__warning("Can't search all data files due to memory shortage.\n");
				fclose(file);
				break;
			}

			nr_options++;
		}

close_file_and_continue:
		fclose(file);
		if (nr_options >= 32) {
			ui__warning("Too many perf data files in PWD!\n"
				    "Only the first 32 files will be listed.\n");
			break;
		}
	}
	closedir(pwd_dir);

	if (nr_options) {
		choice = ui__popup_menu(nr_options, options);
		if (choice < nr_options && choice >= 0) {
			tmp = strdup(abs_path[choice]);
			if (tmp) {
				if (is_input_name_malloced)
					free((void *)input_name);
				input_name = tmp;
				is_input_name_malloced = true;
				ret = 0;
			} else
				ui__warning("Data switch failed due to memory shortage!\n");
		}
	}

	free_popup_options(options, nr_options);
	free_popup_options(abs_path, nr_options);
	return ret;
}
static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
const char *helpline, const char *ev_name,
bool left_exits,
struct hist_browser_timer *hbt,
struct perf_session_env *env)
{
struct hists *hists = &evsel->hists;
struct hist_browser *browser = hist_browser__new(hists);
struct branch_info *bi;
struct pstack *fstack;
char *options[16];
int nr_options = 0;
int key = -1;
char buf[64];
char script_opt[64];
int delay_secs = hbt ? hbt->refresh : 0;
if (browser == NULL)
return -1;
fstack = pstack__new(2);
if (fstack == NULL)
goto out;
ui_helpline__push(helpline);
memset(options, 0, sizeof(options));
while (1) {
const struct thread *thread = NULL;
const struct dso *dso = NULL;
int choice = 0,
annotate = -2, zoom_dso = -2, zoom_thread = -2,
annotate_f = -2, annotate_t = -2, browse_map = -2;
int scripts_comm = -2, scripts_symbol = -2,
scripts_all = -2, switch_data = -2;
nr_options = 0;
key = hist_browser__run(browser, ev_name, hbt);
if (browser->he_selection != NULL) {
thread = hist_browser__selected_thread(browser);
dso = browser->selection->map ? browser->selection->map->dso : NULL;
}
switch (key) {
case K_TAB:
case K_UNTAB:
if (nr_events == 1)
continue;
/*
* Exit the browser, let hists__browser_tree
* go to the next or previous
*/
goto out_free_stack;
case 'a':
if (!browser->has_symbols) {
ui_browser__warning(&browser->b, delay_secs * 2,
"Annotation is only available for symbolic views, "
"include \"sym*\" in --sort to use it.");
continue;
}
if (browser->selection == NULL ||
browser->selection->sym == NULL ||
browser->selection->map->dso->annotate_warned)
continue;
goto do_annotate;
case 'P':
hist_browser__dump(browser);
continue;
case 'd':
goto zoom_dso;
case 'V':
browser->show_dso = !browser->show_dso;
continue;
case 't':
goto zoom_thread;
case '/':
if (ui_browser__input_window("Symbol to show",
"Please enter the name of symbol you want to see",
buf, "ENTER: OK, ESC: Cancel",
delay_secs * 2) == K_ENTER) {
hists->symbol_filter_str = *buf ? buf : NULL;
hists__filter_by_symbol(hists);
hist_browser__reset(browser);
}
continue;
case 'r':
if (is_report_browser(hbt))
goto do_scripts;
continue;
case 's':
if (is_report_browser(hbt))
goto do_data_switch;
continue;
case K_F1:
case 'h':
case '?':
ui_browser__help_window(&browser->b,
"h/?/F1 Show this window\n"
"UP/DOWN/PGUP\n"
"PGDN/SPACE Navigate\n"
"q/ESC/CTRL+C Exit browser\n\n"
"For multiple event sessions:\n\n"
"TAB/UNTAB Switch events\n\n"
"For symbolic views (--sort has sym):\n\n"
"-> Zoom into DSO/Threads & Annotate current symbol\n"
"<- Zoom out\n"
"a Annotate current symbol\n"
"C Collapse all callchains\n"
"E Expand all callchains\n"
"d Zoom into current DSO\n"
"t Zoom into current Thread\n"
"r Run available scripts('perf report' only)\n"
"s Switch to another data file in PWD ('perf report' only)\n"
"P Print histograms to perf.hist.N\n"
"V Verbose (DSO names in callchains, etc)\n"
"/ Filter symbol by name");
continue;
case K_ENTER:
case K_RIGHT:
/* menu */
break;
case K_LEFT: {
const void *top;
if (pstack__empty(fstack)) {
/*
* Go back to the perf_evsel_menu__run or other user
*/
if (left_exits)
goto out_free_stack;
continue;
}
top = pstack__pop(fstack);
if (top == &browser->hists->dso_filter)
goto zoom_out_dso;
if (top == &browser->hists->thread_filter)
goto zoom_out_thread;
continue;
}
case K_ESC:
if (!left_exits &&
!ui_browser__dialog_yesno(&browser->b,
"Do you really want to exit?"))
continue;
/* Fall thru */
case 'q':
case CTRL('c'):
goto out_free_stack;
default:
continue;
}
if (!browser->has_symbols)
goto add_exit_option;
if (sort__branch_mode == 1) {
bi = browser->he_selection->branch_info;
if (browser->selection != NULL &&
bi &&
bi->from.sym != NULL &&
!bi->from.map->dso->annotate_warned &&
asprintf(&options[nr_options], "Annotate %s",
bi->from.sym->name) > 0)
annotate_f = nr_options++;
if (browser->selection != NULL &&
bi &&
bi->to.sym != NULL &&
!bi->to.map->dso->annotate_warned &&
(bi->to.sym != bi->from.sym ||
bi->to.map->dso != bi->from.map->dso) &&
asprintf(&options[nr_options], "Annotate %s",
bi->to.sym->name) > 0)
annotate_t = nr_options++;
} else {
if (browser->selection != NULL &&
browser->selection->sym != NULL &&
!browser->selection->map->dso->annotate_warned &&
asprintf(&options[nr_options], "Annotate %s",
browser->selection->sym->name) > 0)
annotate = nr_options++;
}
if (thread != NULL &&
asprintf(&options[nr_options], "Zoom %s %s(%d) thread",
(browser->hists->thread_filter ? "out of" : "into"),
(thread->comm_set ? thread->comm : ""),
thread->pid) > 0)
zoom_thread = nr_options++;
if (dso != NULL &&
asprintf(&options[nr_options], "Zoom %s %s DSO",
(browser->hists->dso_filter ? "out of" : "into"),
(dso->kernel ? "the Kernel" : dso->short_name)) > 0)
zoom_dso = nr_options++;
if (browser->selection != NULL &&
browser->selection->map != NULL &&
asprintf(&options[nr_options], "Browse map details") > 0)
browse_map = nr_options++;
/* perf script support */
if (browser->he_selection) {
struct symbol *sym;
if (asprintf(&options[nr_options], "Run scripts for samples of thread [%s]",
browser->he_selection->thread->comm) > 0)
scripts_comm = nr_options++;
sym = browser->he_selection->ms.sym;
if (sym && sym->namelen &&
asprintf(&options[nr_options], "Run scripts for samples of symbol [%s]",
sym->name) > 0)
scripts_symbol = nr_options++;
}
if (asprintf(&options[nr_options], "Run scripts for all samples") > 0)
scripts_all = nr_options++;
if (is_report_browser(hbt) && asprintf(&options[nr_options],
"Switch to another data file in PWD") > 0)
switch_data = nr_options++;
add_exit_option:
options[nr_options++] = (char *)"Exit";
retry_popup_menu:
choice = ui__popup_menu(nr_options, options);
if (choice == nr_options - 1)
break;
if (choice == -1) {
free_popup_options(options, nr_options - 1);
continue;
}
if (choice == annotate || choice == annotate_t || choice == annotate_f) {
struct hist_entry *he;
int err;
do_annotate:
if (!objdump_path && perf_session_env__lookup_objdump(env))
continue;
he = hist_browser__selected_entry(browser);
if (he == NULL)
continue;
/*
* we stash the branch_info symbol + map into the
* the ms so we don't have to rewrite all the annotation
* code to use branch_info.
* in branch mode, the ms struct is not used
*/
if (choice == annotate_f) {
he->ms.sym = he->branch_info->from.sym;
he->ms.map = he->branch_info->from.map;
} else if (choice == annotate_t) {
he->ms.sym = he->branch_info->to.sym;
he->ms.map = he->branch_info->to.map;
}
/*
* Don't let this be freed, say, by hists__decay_entry.
*/
he->used = true;
err = hist_entry__tui_annotate(he, evsel, hbt);
he->used = false;
/*
* offer option to annotate the other branch source or target
* (if they exists) when returning from annotate
*/
if ((err == 'q' || err == CTRL('c'))
&& annotate_t != -2 && annotate_f != -2)
goto retry_popup_menu;
ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
if (err)
ui_browser__handle_resize(&browser->b);
} else if (choice == browse_map)
map__browse(browser->selection->map);
else if (choice == zoom_dso) {
zoom_dso:
if (browser->hists->dso_filter) {
pstack__remove(fstack, &browser->hists->dso_filter);
zoom_out_dso:
ui_helpline__pop();
browser->hists->dso_filter = NULL;
sort_dso.elide = false;
} else {
if (dso == NULL)
continue;
ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"",
dso->kernel ? "the Kernel" : dso->short_name);
browser->hists->dso_filter = dso;
sort_dso.elide = true;
pstack__push(fstack, &browser->hists->dso_filter);
}
hists__filter_by_dso(hists);
hist_browser__reset(browser);
} else if (choice == zoom_thread) {
zoom_thread:
if (browser->hists->thread_filter) {
pstack__remove(fstack, &browser->hists->thread_filter);
zoom_out_thread:
ui_helpline__pop();
browser->hists->thread_filter = NULL;
sort_thread.elide = false;
} else {
ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"",
thread->comm_set ? thread->comm : "",
thread->pid);
browser->hists->thread_filter = thread;
sort_thread.elide = true;
pstack__push(fstack, &browser->hists->thread_filter);
}
hists__filter_by_thread(hists);
hist_browser__reset(browser);
}
/* perf scripts support */
else if (choice == scripts_all || choice == scripts_comm ||
choice == scripts_symbol) {
do_scripts:
memset(script_opt, 0, 64);
if (choice == scripts_comm)
sprintf(script_opt, " -c %s ", browser->he_selection->thread->comm);
if (choice == scripts_symbol)
sprintf(script_opt, " -S %s ", browser->he_selection->ms.sym->name);
script_browse(script_opt);
}
/* Switch to another data file */
else if (choice == switch_data) {
do_data_switch:
if (!switch_data_file()) {
key = K_SWITCH_INPUT_DATA;
break;
} else
ui__warning("Won't switch the data files due to\n"
"no valid data file get selected!\n");
}
}
out_free_stack:
pstack__delete(fstack);
out:
hist_browser__delete(browser);
free_popup_options(options, nr_options - 1);
return key;
}
/* State for the event-selection menu shown when a session has multiple events. */
struct perf_evsel_menu {
	struct ui_browser b;		/* embedded TUI list browser over the evlist */
	struct perf_evsel *selection;	/* evsel under the cursor, updated on redraw */
	bool lost_events, lost_events_warned;	/* LOST records seen / warned once */
	struct perf_session_env *env;	/* session environment passed to hists browse */
};
/*
 * ui_browser write callback: render one evsel line of the menu, showing
 * the sample count (summed over members when this is a group event) and,
 * when LOST records were seen, a highlighted warning suffix.
 */
static void perf_evsel_menu__write(struct ui_browser *browser,
				   void *entry, int row)
{
	struct perf_evsel_menu *menu = container_of(browser,
						    struct perf_evsel_menu, b);
	struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node);
	bool current_entry = ui_browser__is_current_entry(browser, row);
	unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE];
	const char *ev_name = perf_evsel__name(evsel);
	char bf[256], unit;
	const char *warn = " ";
	size_t printed;

	ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
						       HE_COLORSET_NORMAL);

	if (perf_evsel__is_group_event(evsel)) {
		struct perf_evsel *pos;

		/* show the group name and the sum of all members' samples */
		ev_name = perf_evsel__group_name(evsel);
		for_each_group_member(pos, evsel) {
			nr_events += pos->hists.stats.nr_events[PERF_RECORD_SAMPLE];
		}
	}

	/* scale the count to a human-readable unit suffix */
	nr_events = convert_unit(nr_events, &unit);
	printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
			    unit, unit == ' ' ? "" : " ", ev_name);
	slsmg_printf("%s", bf);

	nr_events = evsel->hists.stats.nr_events[PERF_RECORD_LOST];
	if (nr_events != 0) {
		/* remember lost events so the run loop can warn once */
		menu->lost_events = true;
		if (!current_entry)
			ui_browser__set_color(browser, HE_COLORSET_TOP);
		nr_events = convert_unit(nr_events, &unit);
		printed += scnprintf(bf, sizeof(bf), ": %ld%c%schunks LOST!",
				     nr_events, unit, unit == ' ' ? "" : " ");
		warn = bf;
	}

	/* pad the rest of the line (or print the LOST warning) */
	slsmg_write_nstring(warn, browser->width - printed);

	if (current_entry)
		menu->selection = evsel;
}
/*
 * Event loop for the multi-event selection menu: the user picks an event
 * and drops into the per-event hists browser; TAB/UNTAB cycle between
 * events without coming back to the menu.  Returns the last key pressed
 * (e.g. K_SWITCH_INPUT_DATA to make the caller reload another data file).
 */
static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
				int nr_events, const char *help,
				struct hist_browser_timer *hbt)
{
	struct perf_evlist *evlist = menu->b.priv;
	struct perf_evsel *pos;
	const char *ev_name, *title = "Available samples";
	int delay_secs = hbt ? hbt->refresh : 0;
	int key;

	if (ui_browser__show(&menu->b, title,
			     "ESC: exit, ENTER|->: Browse histograms") < 0)
		return -1;

	while (1) {
		key = ui_browser__run(&menu->b, delay_secs);

		switch (key) {
		case K_TIMER:
			/* periodic refresh; warn about lost events only once */
			hbt->timer(hbt->arg);

			if (!menu->lost_events_warned && menu->lost_events) {
				ui_browser__warn_lost_events(&menu->b);
				menu->lost_events_warned = true;
			}
			continue;
		case K_RIGHT:
		case K_ENTER:
			if (!menu->selection)
				continue;
			pos = menu->selection;
browse_hists:
			perf_evlist__set_selected(evlist, pos);
			/*
			 * Give the calling tool a chance to populate the non
			 * default evsel resorted hists tree.
			 */
			if (hbt)
				hbt->timer(hbt->arg);
			ev_name = perf_evsel__name(pos);
			key = perf_evsel__hists_browse(pos, nr_events, help,
						       ev_name, true, hbt,
						       menu->env);
			ui_browser__show_title(&menu->b, title);
			/* act on the key the hists browser exited with */
			switch (key) {
			case K_TAB:
				/* next event, wrapping around the list head */
				if (pos->node.next == &evlist->entries)
					pos = list_entry(evlist->entries.next, struct perf_evsel, node);
				else
					pos = list_entry(pos->node.next, struct perf_evsel, node);
				goto browse_hists;
			case K_UNTAB:
				/* previous event, wrapping around the list head */
				if (pos->node.prev == &evlist->entries)
					pos = list_entry(evlist->entries.prev, struct perf_evsel, node);
				else
					pos = list_entry(pos->node.prev, struct perf_evsel, node);
				goto browse_hists;
			case K_ESC:
				if (!ui_browser__dialog_yesno(&menu->b,
						"Do you really want to exit?"))
					continue;
				/* Fall thru */
			case K_SWITCH_INPUT_DATA:
			case 'q':
			case CTRL('c'):
				goto out;
			default:
				continue;
			}
		case K_LEFT:
			continue;
		case K_ESC:
			if (!ui_browser__dialog_yesno(&menu->b,
					"Do you really want to exit?"))
				continue;
			/* Fall thru */
		case 'q':
		case CTRL('c'):
			goto out;
		default:
			continue;
		}
	}

out:
	ui_browser__hide(&menu->b);
	return key;
}
/*
 * ui_browser filter callback: when event grouping is enabled, hide every
 * evsel that is not a group leader (returning true filters the entry out).
 */
static bool filter_group_entries(struct ui_browser *self __maybe_unused,
				 void *entry)
{
	struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node);

	return symbol_conf.event_group && !perf_evsel__is_group_leader(evsel);
}
/*
 * Build the evsel menu browser over @evlist and run it.
 * @nr_entries: number of menu entries (only group leaders when event
 * grouping is enabled — see filter_group_entries()).
 */
static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
					   int nr_entries, const char *help,
					   struct hist_browser_timer *hbt,
					   struct perf_session_env *env)
{
	struct perf_evsel *pos;
	struct perf_evsel_menu menu = {
		.b = {
			.entries = &evlist->entries,
			.refresh = ui_browser__list_head_refresh,
			.seek = ui_browser__list_head_seek,
			.write = perf_evsel_menu__write,
			.filter = filter_group_entries,
			.nr_entries = nr_entries,
			.priv = evlist,
		},
		.env = env,
	};

	ui_helpline__push("Press ESC to exit");

	/* widen the browser to fit the longest event name */
	list_for_each_entry(pos, &evlist->entries, node) {
		const char *ev_name = perf_evsel__name(pos);
		size_t line_len = strlen(ev_name) + 7;

		if (menu.b.width < line_len)
			menu.b.width = line_len;
	}

	return perf_evsel_menu__run(&menu, nr_entries, help, hbt);
}
/*
 * TUI entry point: with a single event (or, when grouping, a single
 * group leader) go straight to the hists browser, otherwise show the
 * event selection menu first.
 */
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
				  struct hist_browser_timer *hbt,
				  struct perf_session_env *env)
{
	int nr_entries = evlist->nr_entries;

single_entry:
	if (nr_entries == 1) {
		/* no menu needed for a single entry */
		struct perf_evsel *first = list_entry(evlist->entries.next,
						      struct perf_evsel, node);
		const char *ev_name = perf_evsel__name(first);

		return perf_evsel__hists_browse(first, nr_entries, help,
						ev_name, false, hbt, env);
	}

	if (symbol_conf.event_group) {
		/* count group leaders only; they are the menu entries */
		struct perf_evsel *pos;

		nr_entries = 0;
		list_for_each_entry(pos, &evlist->entries, node)
			if (perf_evsel__is_group_leader(pos))
				nr_entries++;

		if (nr_entries == 1)
			goto single_entry;
	}

	return __perf_evlist__tui_browse_hists(evlist, nr_entries, help,
					       hbt, env);
}
| gpl-2.0 |
BoyGau/linux | sound/pci/lola/lola_clock.c | 2074 | 8216 | /*
* Support for Digigram Lola PCI-e boards
*
* Copyright (c) 2011 Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "lola.h"
/*
 * Decode a Lola hardware sample-rate code into a frequency in Hz.
 * Returns 0 for any reserved/invalid encoding.
 */
unsigned int lola_sample_rate_convert(unsigned int coded)
{
	unsigned int rate;

	/* bits 0-1: base frequency */
	switch (coded & 0x3) {
	case 0:
		rate = 48000;
		break;
	case 1:
		rate = 44100;
		break;
	case 2:
		rate = 32000;
		break;
	default:
		return 0;	/* reserved code */
	}

	/* bits 2-4: multiplier / divisor */
	switch (coded & 0x1c) {
	case (0 << 2):
	case (4 << 2):
		break;		/* x1 */
	case (1 << 2):
		rate *= 2;
		break;
	case (2 << 2):
		rate *= 4;
		break;
	case (5 << 2):
		rate /= 2;
		break;
	case (6 << 2):
		rate /= 4;
		break;
	default:
		return 0;	/* reserved code */
	}

	/* bits 5-6: +/- 0.1% pull adjustment */
	switch (coded & 0x60) {
	case (0 << 5):
		break;		/* nominal */
	case (1 << 5):
		rate = (rate * 999) / 1000;
		break;
	case (2 << 5):
		rate = (rate * 1001) / 1000;
		break;
	default:
		return 0;	/* reserved code */
	}

	return rate;
}
/*
 * Granularity
 */
#define LOLA_MAXFREQ_AT_GRANULARITY_MIN 48000
#define LOLA_MAXFREQ_AT_GRANULARITY_BELOW_MAX 96000
/*
 * Check whether granularity @val is usable at sample rate @freq.
 * With granularity support disabled on the chip, anything goes.
 */
static bool check_gran_clock_compatibility(struct lola *chip,
					   unsigned int val,
					   unsigned int freq)
{
	if (!chip->granularity)
		return true;

	/* must be a multiple of the step size inside the legal range */
	if (val < LOLA_GRANULARITY_MIN || val > LOLA_GRANULARITY_MAX ||
	    val % LOLA_GRANULARITY_STEP)
		return false;

	/* smaller granularities cap the maximum usable sample rate */
	if (val == LOLA_GRANULARITY_MIN)
		return freq <= LOLA_MAXFREQ_AT_GRANULARITY_MIN;
	if (val < LOLA_GRANULARITY_MAX)
		return freq <= LOLA_MAXFREQ_AT_GRANULARITY_BELOW_MAX;

	return true;
}
/*
 * Change the board's processing granularity to @val.
 * @force: skip the no-change and clock-compatibility checks.
 * Returns 0 on success or a negative error code.
 */
int lola_set_granularity(struct lola *chip, unsigned int val, bool force)
{
	int err;

	if (!force) {
		if (val == chip->granularity)
			return 0;
#if 0
		/* change Gran only if there are no streams allocated ! */
		if (chip->audio_in_alloc_mask || chip->audio_out_alloc_mask)
			return -EBUSY;
#endif
		if (!check_gran_clock_compatibility(chip, val,
						    chip->clock.cur_freq))
			return -EINVAL;
	}

	chip->granularity = val;
	/* the codec takes the value in units of LOLA_GRANULARITY_STEP */
	val /= LOLA_GRANULARITY_STEP;

	/* audio function group */
	err = lola_codec_write(chip, 1, LOLA_VERB_SET_GRANULARITY_STEPS,
			       val, 0);
	if (err < 0)
		return err;

	/* this can be a very slow function !!! */
	usleep_range(400 * val, 20000);
	return lola_codec_flush(chip);
}
/*
* Clock widget handling
*/
/*
 * Probe the clock widget @nid and fill chip->clock with the list of
 * selectable sample clocks, keeping the 48kHz internal clock as the
 * initial current clock.  Returns 0 on success (also when @nid turns
 * out not to be a clock widget) or a negative error code.
 */
int lola_init_clock_widget(struct lola *chip, int nid)
{
	unsigned int val;
	int i, j, nitems, nb_verbs, idx, idx_list;
	int err;

	err = lola_read_param(chip, nid, LOLA_PAR_AUDIO_WIDGET_CAP, &val);
	if (err < 0) {
		dev_err(chip->card->dev, "Can't read wcaps for 0x%x\n", nid);
		return err;
	}

	if ((val & 0xfff00000) != 0x01f00000) { /* test SubType and Type */
		dev_dbg(chip->card->dev, "No valid clock widget\n");
		return 0;
	}

	chip->clock.nid = nid;
	chip->clock.items = val & 0xff;	/* number of clock-list entries */
	dev_dbg(chip->card->dev, "clock_list nid=%x, entries=%d\n", nid,
		chip->clock.items);
	if (chip->clock.items > MAX_SAMPLE_CLOCK_COUNT) {
		dev_err(chip->card->dev, "CLOCK_LIST too big: %d\n",
			chip->clock.items);
		return -EINVAL;
	}

	nitems = chip->clock.items;
	nb_verbs = (nitems + 3) / 4;	/* each verb returns up to 4 entries */
	idx = 0;	/* index as seen by the board */
	idx_list = 0;	/* index into our (filtered) sample_clock[] list */
	for (i = 0; i < nb_verbs; i++) {
		unsigned int res_ex;
		unsigned short items[4];

		/* read the next four 12-bit clock descriptors */
		err = lola_codec_read(chip, nid, LOLA_VERB_GET_CLOCK_LIST,
				      idx, 0, &val, &res_ex);
		if (err < 0) {
			dev_err(chip->card->dev, "Can't read CLOCK_LIST\n");
			return -EINVAL;
		}

		items[0] = val & 0xfff;
		items[1] = (val >> 16) & 0xfff;
		items[2] = res_ex & 0xfff;
		items[3] = (res_ex >> 16) & 0xfff;

		for (j = 0; j < 4; j++) {
			/* descriptor: type in bits 8-11, freq code in bits 0-7 */
			unsigned char type = items[j] >> 8;
			unsigned int freq = items[j] & 0xff;
			int format = LOLA_CLOCK_FORMAT_NONE;
			bool add_clock = true;

			if (type == LOLA_CLOCK_TYPE_INTERNAL) {
				freq = lola_sample_rate_convert(freq);
				if (freq < chip->sample_rate_min)
					add_clock = false;
				else if (freq == 48000) {
					/* 48kHz internal becomes the default clock */
					chip->clock.cur_index = idx_list;
					chip->clock.cur_freq = 48000;
					chip->clock.cur_valid = true;
				}
			} else if (type == LOLA_CLOCK_TYPE_VIDEO) {
				freq = lola_sample_rate_convert(freq);
				if (freq < chip->sample_rate_min)
					add_clock = false;
				/* video clock has a format (0:NTSC, 1:PAL) */
				/* NOTE(review): code maps bit7 set -> NTSC, which
				 * looks inverted w.r.t. the comment above — confirm
				 * against hardware docs before relying on it.
				 */
				if (items[j] & 0x80)
					format = LOLA_CLOCK_FORMAT_NTSC;
				else
					format = LOLA_CLOCK_FORMAT_PAL;
			}

			if (add_clock) {
				struct lola_sample_clock *sc;
				sc = &chip->clock.sample_clock[idx_list];
				sc->type = type;
				sc->format = format;
				sc->freq = freq;
				/* keep the index used with the board */
				chip->clock.idx_lookup[idx_list] = idx;
				idx_list++;
			} else {
				/* skipped entries shrink the exported list */
				chip->clock.items--;
			}
			if (++idx >= nitems)
				break;
		}
	}
	return 0;
}
/* enable unsolicited events of the clock widget */
/*
 * Ask the clock widget to report clock changes via unsolicited events.
 * Returns 0 on success, a negative error code otherwise.
 */
int lola_enable_clock_events(struct lola *chip)
{
	unsigned int res;
	int err;

	err = lola_codec_read(chip, chip->clock.nid,
			      LOLA_VERB_SET_UNSOLICITED_ENABLE,
			      LOLA_UNSOLICITED_ENABLE | LOLA_UNSOLICITED_TAG,
			      0, &res, NULL);
	if (err < 0)
		return err;
	if (!res)
		return 0;

	/* non-zero response means the codec rejected the request */
	dev_warn(chip->card->dev, "error in enable_clock_events %d\n",
		 res);
	return -EINVAL;
}
/*
 * Select clock @idx (an index into our filtered clock list) on the
 * board, translating it back through idx_lookup[].
 */
int lola_set_clock_index(struct lola *chip, unsigned int idx)
{
	unsigned int res;
	int err;

	err = lola_codec_read(chip, chip->clock.nid,
			      LOLA_VERB_SET_CLOCK_SELECT,
			      chip->clock.idx_lookup[idx],
			      0, &res, NULL);
	if (err < 0)
		return err;
	if (!res)
		return 0;

	/* non-zero response means the codec rejected the selection */
	dev_warn(chip->card->dev, "error in set_clock %d\n", res);
	return -EINVAL;
}
bool lola_update_ext_clock_freq(struct lola *chip, unsigned int val)
{
unsigned int tag;
/* the current EXTERNAL clock information gets updated by interrupt
* with an unsolicited response
*/
if (!val)
return false;
tag = (val >> LOLA_UNSOL_RESP_TAG_OFFSET) & LOLA_UNSOLICITED_TAG_MASK;
if (tag != LOLA_UNSOLICITED_TAG)
return false;
/* only for current = external clocks */
if (chip->clock.sample_clock[chip->clock.cur_index].type !=
LOLA_CLOCK_TYPE_INTERNAL) {
chip->clock.cur_freq = lola_sample_rate_convert(val & 0x7f);
chip->clock.cur_valid = (val & 0x100) != 0;
}
return true;
}
/*
 * Switch the current sample clock to list entry @idx.  Only the current
 * clock or an internal clock may be selected, and the resulting rate
 * must be compatible with the configured granularity.
 */
int lola_set_clock(struct lola *chip, int idx)
{
	int rate = 0;
	bool usable = false;

	if (idx == chip->clock.cur_index) {
		/* re-selecting the current clock is always allowed */
		rate = chip->clock.cur_freq;
		usable = chip->clock.cur_valid;
	} else if (chip->clock.sample_clock[idx].type ==
		   LOLA_CLOCK_TYPE_INTERNAL) {
		/* otherwise only internal clocks may be chosen */
		rate = chip->clock.sample_clock[idx].freq;
		usable = true;
	}

	if (!rate || !usable)
		return -EINVAL;
	if (!check_gran_clock_compatibility(chip, chip->granularity, rate))
		return -EINVAL;

	if (idx != chip->clock.cur_index) {
		int err = lola_set_clock_index(chip, idx);

		if (err < 0)
			return err;
		/* update new settings */
		chip->clock.cur_index = idx;
		chip->clock.cur_freq = rate;
		chip->clock.cur_valid = true;
	}
	return 0;
}
int lola_set_sample_rate(struct lola *chip, int rate)
{
int i;
if (chip->clock.cur_freq == rate && chip->clock.cur_valid)
return 0;
/* search for new dwClockIndex */
for (i = 0; i < chip->clock.items; i++) {
if (chip->clock.sample_clock[i].type == LOLA_CLOCK_TYPE_INTERNAL &&
chip->clock.sample_clock[i].freq == rate)
break;
}
if (i >= chip->clock.items)
return -EINVAL;
return lola_set_clock(chip, i);
}
| gpl-2.0 |
mahound/bricked-S7275-kernel | fs/hfsplus/wrapper.c | 3354 | 7404 | /*
* linux/fs/hfsplus/wrapper.c
*
* Copyright (C) 2001
* Brad Boyer (flar@allandria.com)
* (C) 2003 Ardis Technologies <roman@ardistech.com>
*
* Handling of HFS wrappers around HFS+ volumes
*/
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/cdrom.h>
#include <linux/genhd.h>
#include <asm/unaligned.h>
#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
/*
 * Geometry extracted from an HFS wrapper's Master Directory Block,
 * locating the embedded HFS+ volume (see hfsplus_read_mdb()).
 */
struct hfsplus_wd {
	u32 ablk_size;		/* allocation block size, in bytes */
	u16 ablk_start;		/* start of allocation blocks, in HFSPLUS_SECTOR_SIZE sectors */
	u16 embed_start;	/* first allocation block of the embedded volume */
	u16 embed_count;	/* length of the embedded volume, in allocation blocks */
};
/* bio completion callback: record failure and wake the waiting submitter */
static void hfsplus_end_io_sync(struct bio *bio, int err)
{
	if (err)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	complete(bio->bi_private);
}
/*
* hfsplus_submit_bio - Perfrom block I/O
* @sb: super block of volume for I/O
* @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
* @buf: buffer for I/O
* @data: output pointer for location of requested data
* @rw: direction of I/O
*
* The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
* HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
* @data will return a pointer to the start of the requested sector,
* which may not be the same location as @buf.
*
* If @sector is not aligned to the bdev logical block size it will
* be rounded down. For writes this means that @buf should contain data
* that starts at the rounded-down address. As long as the data was
* read using hfsplus_submit_bio() and the same buffer is used things
* will work correctly.
*/
int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
		void *buf, void **data, int rw)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct bio *bio;
	int ret = 0;
	unsigned int io_size;
	loff_t start;
	int offset;

	/*
	 * Align sector to hardware sector size and find offset. We
	 * assume that io_size is a power of two, which _should_
	 * be true.
	 */
	io_size = hfsplus_min_io_size(sb);
	start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
	offset = start & (io_size - 1);
	sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);

	bio = bio_alloc(GFP_NOIO, 1);
	bio->bi_sector = sector;
	bio->bi_bdev = sb->s_bdev;
	bio->bi_end_io = hfsplus_end_io_sync;
	bio->bi_private = &wait;

	/* on reads, hand back a pointer to the requested sector inside @buf */
	if (!(rw & WRITE) && data)
		*data = (u8 *)buf + offset;

	/* attach @buf page by page; it may cross several page boundaries */
	while (io_size > 0) {
		unsigned int page_offset = offset_in_page(buf);
		unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
					 io_size);

		ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
		if (ret != len) {
			/* couldn't fit the whole buffer into one bio */
			ret = -EIO;
			goto out;
		}
		io_size -= len;
		buf = (u8 *)buf + len;
	}

	/* synchronous I/O: submit and wait for the completion callback */
	submit_bio(rw, bio);
	wait_for_completion(&wait);

	if (!bio_flagged(bio, BIO_UPTODATE))
		ret = -EIO;

out:
	bio_put(bio);
	return ret < 0 ? ret : 0;
}
/*
 * Parse an HFS Master Directory Block looking for an embedded HFS+
 * volume.  Fills @wd and returns 1 when a valid embedded volume is
 * found, 0 otherwise.
 */
static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
{
	__be16 sig = *(__be16 *)(bufptr + HFSP_WRAPOFF_EMBEDSIG);
	u16 attrib;
	u32 extent;

	/* the embedded signature must announce an HFS+/HFSX volume */
	if (sig != cpu_to_be16(HFSPLUS_VOLHEAD_SIG) &&
	    sig != cpu_to_be16(HFSPLUS_VOLHEAD_SIGX))
		return 0;

	/* both the software-lock and spared attribute bits must be set */
	attrib = be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ATTRIB));
	if (!(attrib & HFSP_WRAP_ATTRIB_SLOCK) ||
	    !(attrib & HFSP_WRAP_ATTRIB_SPARED))
		return 0;

	/* allocation block size: a non-zero multiple of the sector size */
	wd->ablk_size =
		be32_to_cpu(*(__be32 *)(bufptr + HFSP_WRAPOFF_ABLKSIZE));
	if (wd->ablk_size < HFSPLUS_SECTOR_SIZE ||
	    wd->ablk_size % HFSPLUS_SECTOR_SIZE)
		return 0;

	wd->ablk_start =
		be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ABLKSTART));

	/* embedded extent: start block in the high word, count in the low */
	extent = get_unaligned_be32(bufptr + HFSP_WRAPOFF_EMBEDEXT);
	wd->embed_start = (extent >> 16) & 0xFFFF;
	wd->embed_count = extent & 0xFFFF;

	return 1;
}
/*
 * Determine the sector range to scan for the volume header: the
 * user-requested CD session, the last session of a multisession disc,
 * or (default) the whole block device.
 */
static int hfsplus_get_last_session(struct super_block *sb,
				    sector_t *start, sector_t *size)
{
	struct cdrom_multisession ms_info;
	struct cdrom_tocentry te;
	int res;

	/* default values */
	*start = 0;
	*size = sb->s_bdev->bd_inode->i_size >> 9;

	if (HFSPLUS_SB(sb)->session >= 0) {
		/* an explicit session= mount option was given */
		te.cdte_track = HFSPLUS_SB(sb)->session;
		te.cdte_format = CDROM_LBA;
		res = ioctl_by_bdev(sb->s_bdev,
			CDROMREADTOCENTRY, (unsigned long)&te);
		if (!res && (te.cdte_ctrl & CDROM_DATA_TRACK) == 4) {
			/* CD LBAs are 2K frames: four 512-byte sectors each */
			*start = (sector_t)te.cdte_addr.lba << 2;
			return 0;
		}
		printk(KERN_ERR "hfs: invalid session number or type of track\n");
		return -EINVAL;
	}

	ms_info.addr_format = CDROM_LBA;
	res = ioctl_by_bdev(sb->s_bdev, CDROMMULTISESSION,
		(unsigned long)&ms_info);
	/* start at the last session of an XA multisession disc */
	if (!res && ms_info.xa_flag)
		*start = (sector_t)ms_info.addr.lba << 2;
	return 0;
}
/* Find the volume header and fill in some minimum bits in superblock */
/* Takes in super block, returns true if good data read */
/*
 * Locate and read the HFS+ volume header (and its backup), peeling off
 * an HFS wrapper or a partition map if present, and initialize the
 * block-size related fields of the in-memory superblock.
 * Returns 0 on success or a negative error code.
 */
int hfsplus_read_wrapper(struct super_block *sb)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_wd wd;
	sector_t part_start, part_size;
	u32 blocksize;
	int error = 0;

	error = -EINVAL;
	blocksize = sb_min_blocksize(sb, HFSPLUS_SECTOR_SIZE);
	if (!blocksize)
		goto out;

	/* narrow the scan to the relevant CD session, if any */
	if (hfsplus_get_last_session(sb, &part_start, &part_size))
		goto out;

	error = -ENOMEM;
	sbi->s_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
	if (!sbi->s_vhdr_buf)
		goto out;
	sbi->s_backup_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
	if (!sbi->s_backup_vhdr_buf)
		goto out_free_vhdr;

reread:
	error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR,
				   sbi->s_vhdr_buf, (void **)&sbi->s_vhdr,
				   READ);
	if (error)
		goto out_free_backup_vhdr;

	error = -EINVAL;
	switch (sbi->s_vhdr->signature) {
	case cpu_to_be16(HFSPLUS_VOLHEAD_SIGX):
		set_bit(HFSPLUS_SB_HFSX, &sbi->flags);
		/*FALLTHRU*/
	case cpu_to_be16(HFSPLUS_VOLHEAD_SIG):
		break;
	case cpu_to_be16(HFSP_WRAP_MAGIC):
		/* HFS wrapper: jump into the embedded HFS+ volume and retry */
		if (!hfsplus_read_mdb(sbi->s_vhdr, &wd))
			goto out_free_backup_vhdr;
		wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT;
		part_start += (sector_t)wd.ablk_start +
			       (sector_t)wd.embed_start * wd.ablk_size;
		part_size = (sector_t)wd.embed_count * wd.ablk_size;
		goto reread;
	default:
		/*
		 * Check for a partition block.
		 *
		 * (should do this only for cdrom/loop though)
		 */
		if (hfs_part_find(sb, &part_start, &part_size))
			goto out_free_backup_vhdr;
		goto reread;
	}

	/* the backup volume header lives in the second-to-last sector */
	error = hfsplus_submit_bio(sb, part_start + part_size - 2,
				   sbi->s_backup_vhdr_buf,
				   (void **)&sbi->s_backup_vhdr, READ);
	if (error)
		goto out_free_backup_vhdr;

	error = -EINVAL;
	if (sbi->s_backup_vhdr->signature != sbi->s_vhdr->signature) {
		printk(KERN_WARNING
			"hfs: invalid secondary volume header\n");
		goto out_free_backup_vhdr;
	}

	blocksize = be32_to_cpu(sbi->s_vhdr->blocksize);

	/*
	 * Block size must be at least as large as a sector and a power of 2.
	 */
	if (blocksize < HFSPLUS_SECTOR_SIZE || ((blocksize - 1) & blocksize))
		goto out_free_backup_vhdr;
	sbi->alloc_blksz = blocksize;
	/* alloc_blksz_shift = log2(alloc_blksz) */
	sbi->alloc_blksz_shift = 0;
	while ((blocksize >>= 1) != 0)
		sbi->alloc_blksz_shift++;
	blocksize = min(sbi->alloc_blksz, (u32)PAGE_SIZE);

	/*
	 * Align block size to block offset.
	 */
	while (part_start & ((blocksize >> HFSPLUS_SECTOR_SHIFT) - 1))
		blocksize >>= 1;

	if (sb_set_blocksize(sb, blocksize) != blocksize) {
		printk(KERN_ERR "hfs: unable to set blocksize to %u!\n",
			blocksize);
		goto out_free_backup_vhdr;
	}

	sbi->blockoffset =
		part_start >> (sb->s_blocksize_bits - HFSPLUS_SECTOR_SHIFT);
	sbi->part_start = part_start;
	sbi->sect_count = part_size;
	sbi->fs_shift = sbi->alloc_blksz_shift - sb->s_blocksize_bits;
	return 0;

out_free_backup_vhdr:
	kfree(sbi->s_backup_vhdr_buf);
out_free_vhdr:
	kfree(sbi->s_vhdr_buf);
out:
	return error;
}
| gpl-2.0 |
TeaMSeven/android_kernel_htc_m7-sense | drivers/target/iscsi/iscsi_target_auth.c | 3610 | 11786 | /*******************************************************************************
* This file houses the main functions for the iSCSI CHAP support
*
* \u00a9 Copyright 2007-2011 RisingTide Systems LLC.
*
* Licensed to the Linux Foundation under the General Public License (GPL) version 2.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
******************************************************************************/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include "iscsi_target_core.h"
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
/*
 * Convert an ASCII hex string of @len characters into binary in @dst.
 * Returns the number of bytes written; @dst is NUL-terminated.
 */
static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
{
	int binary_len = DIV_ROUND_UP(len, 2);

	if (hex2bin(dst, src, binary_len) < 0)
		pr_debug("CHAP string contains non hex digit symbols\n");

	dst[binary_len] = '\0';
	return binary_len;
}
/*
 * Render @src_len raw bytes from @src as lowercase ASCII hex into @dst
 * (two characters per byte; sprintf NUL-terminates after each pair).
 */
static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
{
	int i;

	for (i = 0; i < src_len; i++)
		sprintf(dst + i * 2, "%02x", (int) src[i] & 0xff);
}
/*
 * Fill @data with @length random bytes.  Each output byte is assembled
 * from three separate get_random_bytes() draws (3 + 3 + 2 bits), each
 * draw first folded with shifted copies of itself.
 */
static void chap_set_random(char *data, int length)
{
	while (length-- > 0) {
		long r;
		unsigned n;

		/* top 3 bits */
		get_random_bytes(&r, sizeof(long));
		r = r ^ (r >> 8);
		r = r ^ (r >> 4);
		n = r & 0x7;

		/* middle 3 bits */
		get_random_bytes(&r, sizeof(long));
		r = r ^ (r >> 8);
		r = r ^ (r >> 5);
		n = (n << 3) | (r & 0x7);

		/* bottom 2 bits */
		get_random_bytes(&r, sizeof(long));
		r = r ^ (r >> 8);
		r = r ^ (r >> 5);
		n = (n << 2) | (r & 0x3);

		*data++ = n;
	}
}
/*
 * Generate a random CHAP challenge and append "CHAP_C=0x<hex>" (plus
 * its terminating NUL) to the key/value buffer at @c_str / @c_len.
 * @caller: non-zero when acting as server (used only for logging).
 */
static void chap_gen_challenge(
	struct iscsi_conn *conn,
	int caller,
	char *c_str,
	unsigned int *c_len)
{
	unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
	struct iscsi_chap *chap = conn->auth_protocol;

	memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);

	chap_set_random(chap->challenge, CHAP_CHALLENGE_LENGTH);
	chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
				CHAP_CHALLENGE_LENGTH);
	/*
	 * Set CHAP_C, and copy the generated challenge into c_str.
	 */
	*c_len += sprintf(c_str + *c_len, "CHAP_C=0x%s", challenge_asciihex);
	*c_len += 1;	/* count the NUL separating key=value pairs */

	pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
			challenge_asciihex);
}
/*
 * Begin a server-side CHAP exchange for @conn: allocate the per
 * connection CHAP context, validate the initiator's CHAP_A offer (only
 * MD5 is supported) and emit the CHAP_A, CHAP_I and CHAP_C key=value
 * pairs into @aic_str / @aic_len.
 *
 * Returns the new context on success.  On failure returns NULL without
 * leaving an allocated context behind in conn->auth_protocol.
 */
static struct iscsi_chap *chap_server_open(
	struct iscsi_conn *conn,
	struct iscsi_node_auth *auth,
	const char *a_str,
	char *aic_str,
	unsigned int *aic_len)
{
	struct iscsi_chap *chap;

	/* both a username and a password must be configured for the ACL */
	if (!(auth->naf_flags & NAF_USERID_SET) ||
	    !(auth->naf_flags & NAF_PASSWORD_SET)) {
		pr_err("CHAP user or password not set for"
				" Initiator ACL\n");
		return NULL;
	}

	conn->auth_protocol = kzalloc(sizeof(struct iscsi_chap), GFP_KERNEL);
	if (!conn->auth_protocol)
		return NULL;

	chap = conn->auth_protocol;
	/*
	 * We only support MD5 MDA presently.
	 */
	if (strncmp(a_str, "CHAP_A=5", 8)) {
		pr_err("CHAP_A is not MD5.\n");
		/* fix: don't leak the just-allocated CHAP context */
		kfree(conn->auth_protocol);
		conn->auth_protocol = NULL;
		return NULL;
	}
	pr_debug("[server] Got CHAP_A=5\n");
	/*
	 * Send back CHAP_A set to MD5.
	 */
	*aic_len = sprintf(aic_str, "CHAP_A=5");
	*aic_len += 1;	/* count the NUL separating key=value pairs */
	chap->digest_type = CHAP_DIGEST_MD5;
	pr_debug("[server] Sending CHAP_A=%d\n", chap->digest_type);
	/*
	 * Set Identifier.
	 */
	chap->id = ISCSI_TPG_C(conn)->tpg_chap_id++;
	*aic_len += sprintf(aic_str + *aic_len, "CHAP_I=%d", chap->id);
	*aic_len += 1;
	pr_debug("[server] Sending CHAP_I=%d\n", chap->id);
	/*
	 * Generate Challenge.
	 */
	chap_gen_challenge(conn, 1, aic_str, aic_len);

	return chap;
}
/* Release the per-connection CHAP context allocated by chap_server_open(). */
static void chap_close(struct iscsi_conn *conn)
{
	kfree(conn->auth_protocol);
	conn->auth_protocol = NULL;
}
/*
 * chap_server_compute_md5() - Verify the initiator's CHAP_R and, if mutual
 * authentication is enabled, produce our own CHAP_N/CHAP_R response.
 * @conn:       connection holding the CHAP state (id + generated challenge).
 * @auth:       ACL credentials: userid/password (one-way) and
 *              userid_mutual/password_mutual (mutual).
 * @nr_in_ptr:  incoming "CHAP_N=...;CHAP_R=..." (plus CHAP_I/CHAP_C when
 *              the initiator requests mutual auth).
 * @nr_out_ptr: output buffer for our "CHAP_N=...\0CHAP_R=0x...\0" reply.
 * @nr_out_len: out: bytes written to @nr_out_ptr including NUL separators.
 *
 * Per RFC 1994 the expected digest is MD5(id || secret || challenge).
 * Returns 0 on successful authentication, -1 on any failure.
 */
static int chap_server_compute_md5(
	struct iscsi_conn *conn,
	struct iscsi_node_auth *auth,
	char *nr_in_ptr,
	char *nr_out_ptr,
	unsigned int *nr_out_len)
{
	char *endptr;
	unsigned long id;
	unsigned char digest[MD5_SIGNATURE_SIZE];
	unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
	unsigned char identifier[10], *challenge = NULL;
	unsigned char *challenge_binhex = NULL;
	unsigned char client_digest[MD5_SIGNATURE_SIZE];
	unsigned char server_digest[MD5_SIGNATURE_SIZE];
	unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
	struct iscsi_chap *chap = conn->auth_protocol;
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int auth_ret = -1, ret, challenge_len;

	memset(identifier, 0, 10);
	memset(chap_n, 0, MAX_CHAP_N_SIZE);
	memset(chap_r, 0, MAX_RESPONSE_LENGTH);
	memset(digest, 0, MD5_SIGNATURE_SIZE);
	memset(response, 0, MD5_SIGNATURE_SIZE * 2 + 2);
	memset(client_digest, 0, MD5_SIGNATURE_SIZE);
	memset(server_digest, 0, MD5_SIGNATURE_SIZE);

	challenge = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
	if (!challenge) {
		pr_err("Unable to allocate challenge buffer\n");
		goto out;
	}

	challenge_binhex = kzalloc(CHAP_CHALLENGE_STR_LEN, GFP_KERNEL);
	if (!challenge_binhex) {
		pr_err("Unable to allocate challenge_binhex buffer\n");
		goto out;
	}
	/*
	 * Extract CHAP_N.
	 */
	if (extract_param(nr_in_ptr, "CHAP_N", MAX_CHAP_N_SIZE, chap_n,
				&type) < 0) {
		pr_err("Could not find CHAP_N.\n");
		goto out;
	}
	if (type == HEX) {
		pr_err("Could not find CHAP_N.\n");
		goto out;
	}
	/*
	 * Compare the full strings rather than only strlen(auth->userid)
	 * bytes: the previous memcmp() accepted any CHAP_N with trailing
	 * garbage after a valid userid (e.g. "useridXYZ" matched "userid").
	 */
	if (strcmp(chap_n, auth->userid) != 0) {
		pr_err("CHAP_N values do not match!\n");
		goto out;
	}
	pr_debug("[server] Got CHAP_N=%s\n", chap_n);
	/*
	 * Extract CHAP_R.
	 */
	if (extract_param(nr_in_ptr, "CHAP_R", MAX_RESPONSE_LENGTH, chap_r,
				&type) < 0) {
		pr_err("Could not find CHAP_R.\n");
		goto out;
	}
	if (type != HEX) {
		pr_err("Could not find CHAP_R.\n");
		goto out;
	}
	pr_debug("[server] Got CHAP_R=%s\n", chap_r);
	chap_string_to_hex(client_digest, chap_r, strlen(chap_r));

	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		pr_err("Unable to allocate struct crypto_hash\n");
		goto out;
	}
	desc.tfm = tfm;
	desc.flags = 0;

	ret = crypto_hash_init(&desc);
	if (ret < 0) {
		pr_err("crypto_hash_init() failed\n");
		crypto_free_hash(tfm);
		goto out;
	}
	/*
	 * Expected digest = MD5(id || password || our challenge), fed to
	 * the hash in exactly that order per RFC 1994.
	 */
	sg_init_one(&sg, &chap->id, 1);
	ret = crypto_hash_update(&desc, &sg, 1);
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for id\n");
		crypto_free_hash(tfm);
		goto out;
	}

	sg_init_one(&sg, &auth->password, strlen(auth->password));
	ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for password\n");
		crypto_free_hash(tfm);
		goto out;
	}

	sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
	ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for challenge\n");
		crypto_free_hash(tfm);
		goto out;
	}

	ret = crypto_hash_final(&desc, server_digest);
	if (ret < 0) {
		pr_err("crypto_hash_final() failed for server digest\n");
		crypto_free_hash(tfm);
		goto out;
	}
	crypto_free_hash(tfm);

	chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
	pr_debug("[server] MD5 Server Digest: %s\n", response);

	if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
		pr_debug("[server] MD5 Digests do not match!\n\n");
		goto out;
	} else
		pr_debug("[server] MD5 Digests match, CHAP authentication"
				" successful.\n\n");
	/*
	 * One way authentication has succeeded, return now if mutual
	 * authentication is not enabled.
	 */
	if (!auth->authenticate_target) {
		kfree(challenge);
		kfree(challenge_binhex);
		return 0;
	}
	/*
	 * Get CHAP_I.
	 */
	if (extract_param(nr_in_ptr, "CHAP_I", 10, identifier, &type) < 0) {
		pr_err("Could not find CHAP_I.\n");
		goto out;
	}

	if (type == HEX)
		id = simple_strtoul(&identifier[2], &endptr, 0);
	else
		id = simple_strtoul(identifier, &endptr, 0);
	if (id > 255) {
		pr_err("chap identifier: %lu greater than 255\n", id);
		goto out;
	}
	/*
	 * RFC 1994 says Identifier is no more than octet (8 bits).
	 */
	pr_debug("[server] Got CHAP_I=%lu\n", id);
	/*
	 * Get CHAP_C.
	 */
	if (extract_param(nr_in_ptr, "CHAP_C", CHAP_CHALLENGE_STR_LEN,
			challenge, &type) < 0) {
		pr_err("Could not find CHAP_C.\n");
		goto out;
	}

	if (type != HEX) {
		pr_err("Could not find CHAP_C.\n");
		goto out;
	}
	pr_debug("[server] Got CHAP_C=%s\n", challenge);
	challenge_len = chap_string_to_hex(challenge_binhex, challenge,
				strlen(challenge));
	if (!challenge_len) {
		pr_err("Unable to convert incoming challenge\n");
		goto out;
	}
	/*
	 * Generate CHAP_N and CHAP_R for mutual authentication.
	 */
	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		pr_err("Unable to allocate struct crypto_hash\n");
		goto out;
	}
	desc.tfm = tfm;
	desc.flags = 0;

	ret = crypto_hash_init(&desc);
	if (ret < 0) {
		pr_err("crypto_hash_init() failed\n");
		crypto_free_hash(tfm);
		goto out;
	}
	/* Mutual digest = MD5(initiator id || mutual secret || its challenge). */
	sg_init_one(&sg, &id, 1);
	ret = crypto_hash_update(&desc, &sg, 1);
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for id\n");
		crypto_free_hash(tfm);
		goto out;
	}

	sg_init_one(&sg, auth->password_mutual,
				strlen(auth->password_mutual));
	ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for"
				" password_mutual\n");
		crypto_free_hash(tfm);
		goto out;
	}
	/*
	 * Convert received challenge to binary hex.
	 */
	sg_init_one(&sg, challenge_binhex, challenge_len);
	ret = crypto_hash_update(&desc, &sg, challenge_len);
	if (ret < 0) {
		pr_err("crypto_hash_update() failed for ma challenge\n");
		crypto_free_hash(tfm);
		goto out;
	}

	ret = crypto_hash_final(&desc, digest);
	if (ret < 0) {
		pr_err("crypto_hash_final() failed for ma digest\n");
		crypto_free_hash(tfm);
		goto out;
	}
	crypto_free_hash(tfm);
	/*
	 * Generate CHAP_N and CHAP_R.
	 */
	*nr_out_len = sprintf(nr_out_ptr, "CHAP_N=%s", auth->userid_mutual);
	*nr_out_len += 1;
	pr_debug("[server] Sending CHAP_N=%s\n", auth->userid_mutual);
	/*
	 * Convert response from binary hex to ascii hext.
	 */
	chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
	*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
			response);
	*nr_out_len += 1;
	pr_debug("[server] Sending CHAP_R=0x%s\n", response);
	auth_ret = 0;
out:
	kfree(challenge);
	kfree(challenge_binhex);
	return auth_ret;
}
/*
 * chap_got_response() - Dispatch an initiator's CHAP response to the
 * verifier for the digest negotiated at chap_server_open() time.
 *
 * Returns 0 when authentication succeeds, -1 on verification failure or
 * an unknown digest type.
 */
static int chap_got_response(
	struct iscsi_conn *conn,
	struct iscsi_node_auth *auth,
	char *nr_in_ptr,
	char *nr_out_ptr,
	unsigned int *nr_out_len)
{
	struct iscsi_chap *chap = conn->auth_protocol;

	/* MD5 is the only digest ever negotiated by chap_server_open(). */
	if (chap->digest_type != CHAP_DIGEST_MD5) {
		pr_err("Unknown CHAP digest type %d!\n",
			chap->digest_type);
		return -1;
	}

	if (chap_server_compute_md5(conn, auth, nr_in_ptr,
			nr_out_ptr, nr_out_len) < 0)
		return -1;

	return 0;
}
/*
 * chap_main_loop() - Drive the two-step server-side CHAP state machine.
 *
 * First call (no CHAP state yet): open the exchange and emit
 * CHAP_A/CHAP_I/CHAP_C into @out_text. Second call (state SERVER_AIC):
 * verify the initiator's response and, when mutual authentication is
 * configured, emit our CHAP_N/CHAP_R.
 *
 * Returns 0 to continue the exchange, 1 on successful completion,
 * 2 on failure or an unexpected state.
 */
u32 chap_main_loop(
	struct iscsi_conn *conn,
	struct iscsi_node_auth *auth,
	char *in_text,
	char *out_text,
	int *in_len,
	int *out_len)
{
	struct iscsi_chap *chap = conn->auth_protocol;

	if (!chap) {
		/* Step one: allocate state and send our challenge. */
		chap = chap_server_open(conn, auth, in_text, out_text, out_len);
		if (!chap)
			return 2;
		chap->chap_state = CHAP_STAGE_SERVER_AIC;
		return 0;
	}

	if (chap->chap_state != CHAP_STAGE_SERVER_AIC)
		return 2;

	/* Step two: verify the initiator's CHAP_N/CHAP_R reply. */
	convert_null_to_semi(in_text, *in_len);
	if (chap_got_response(conn, auth, in_text, out_text, out_len) < 0) {
		chap_close(conn);
		return 2;
	}

	if (auth->authenticate_target)
		chap->chap_state = CHAP_STAGE_SERVER_NR;
	else
		*out_len = 0;

	chap_close(conn);
	return 1;
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.