repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
jasonzhong/linux | drivers/thermal/x86_pkg_temp_thermal.c | 221 | 16812 | /*
* x86_pkg_temp_thermal driver
* Copyright (c) 2013, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/param.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <linux/thermal.h>
#include <linux/debugfs.h>
#include <asm/cpu_device_id.h>
#include <asm/mce.h>
/*
* Rate control delay: the idea is to introduce a debounce effect.
* The delay should be long enough to avoid repeated events when a
* threshold is set to a temperature that is constantly violated, but
* short enough that some action can still be taken. The action can be
* The action can be remove threshold or change it to next
* interesting setting. Based on experiments, in around
* every 5 seconds under load will give us a significant
* temperature change.
*/
#define PKG_TEMP_THERMAL_NOTIFY_DELAY 5000
static int notify_delay_ms = PKG_TEMP_THERMAL_NOTIFY_DELAY;
module_param(notify_delay_ms, int, 0644);
MODULE_PARM_DESC(notify_delay_ms,
"User space notification delay in milli seconds.");
/* Number of trip points in thermal zone. Currently it can't
* be more than 2. MSR can allow setting and getting notifications
for only 2 thresholds. This define enforces that limit in case
cpuid returns a wrong value for the number of thresholds.
*/
#define MAX_NUMBER_OF_TRIPS 2
/* Limit number of package temp zones */
#define MAX_PKG_TEMP_ZONE_IDS 256
struct phy_dev_entry {
struct list_head list;
u16 phys_proc_id;
u16 first_cpu;
u32 tj_max;
int ref_cnt;
u32 start_pkg_therm_low;
u32 start_pkg_therm_high;
struct thermal_zone_device *tzone;
};
static struct thermal_zone_params pkg_temp_tz_params = {
.no_hwmon = true,
};
/* List maintaining number of package instances */
static LIST_HEAD(phy_dev_list);
static DEFINE_MUTEX(phy_dev_list_mutex);
/* Interrupt to work function schedule queue */
static DEFINE_PER_CPU(struct delayed_work, pkg_temp_thermal_threshold_work);
/* To track if the work is already scheduled on a package */
static u8 *pkg_work_scheduled;
/* Spin lock to prevent races with pkg_work_scheduled */
static spinlock_t pkg_work_lock;
static u16 max_phy_id;
/* Debug counters to show using debugfs */
static struct dentry *debugfs;
static unsigned int pkg_interrupt_cnt;
static unsigned int pkg_work_cnt;
/*
 * Expose the interrupt and work counters under
 * /sys/kernel/debug/pkg_temp_thermal/. Failure is non-fatal to the
 * driver; the caller ignores the return value.
 */
static int pkg_temp_debugfs_init(void)
{
	struct dentry *entry;

	debugfs = debugfs_create_dir("pkg_temp_thermal", NULL);
	if (!debugfs)
		return -ENOENT;

	entry = debugfs_create_u32("pkg_thres_interrupt", S_IRUGO, debugfs,
				   (u32 *)&pkg_interrupt_cnt);
	if (!entry)
		goto remove_dir;

	entry = debugfs_create_u32("pkg_thres_work", S_IRUGO, debugfs,
				   (u32 *)&pkg_work_cnt);
	if (!entry)
		goto remove_dir;

	return 0;

remove_dir:
	debugfs_remove_recursive(debugfs);
	return -ENOENT;
}
static struct phy_dev_entry
*pkg_temp_thermal_get_phy_entry(unsigned int cpu)
{
u16 phys_proc_id = topology_physical_package_id(cpu);
struct phy_dev_entry *phy_ptr;
mutex_lock(&phy_dev_list_mutex);
list_for_each_entry(phy_ptr, &phy_dev_list, list)
if (phy_ptr->phys_proc_id == phys_proc_id) {
mutex_unlock(&phy_dev_list_mutex);
return phy_ptr;
}
mutex_unlock(&phy_dev_list_mutex);
return NULL;
}
/*
* tj-max is interesting because thresholds are set relative to this
* temperature.
*/
/*
 * Read TJ_MAX for @cpu from MSR_IA32_TEMPERATURE_TARGET (bits 23:16)
 * and return it in millidegrees Celsius via @tj_max.
 *
 * Returns 0 on success. On any failure (MSR read error or a zero
 * temperature-target field) *tj_max is set to 0 and a negative errno
 * is returned.
 *
 * Cleanup vs. original: the misleading "else" after "goto" is gone;
 * the success and failure paths are now straight-line.
 */
static int get_tj_max(int cpu, u32 *tj_max)
{
	u32 eax, edx;
	u32 val;
	int err;

	err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
	if (err)
		goto err_ret;

	val = (eax >> 16) & 0xff;
	if (!val) {
		/* A zero temperature target is not a usable reference. */
		err = -EINVAL;
		goto err_ret;
	}

	*tj_max = val * 1000;
	return 0;

err_ret:
	*tj_max = 0;
	return err;
}
/*
 * Thermal zone callback: report the current package temperature.
 * The digital readout in MSR_IA32_PACKAGE_THERM_STATUS is a delta
 * below tj_max, so the absolute value is tj_max - readout * 1000
 * (millidegrees Celsius). Returns -EINVAL when the reading-valid
 * bit (bit 31) is not set.
 */
static int sys_get_curr_temp(struct thermal_zone_device *tzd, unsigned long *temp)
{
	u32 eax, edx;
	struct phy_dev_entry *phy_dev_entry;

	phy_dev_entry = tzd->devdata;
	/* Read the status MSR on the first (representative) CPU of the pkg */
	rdmsr_on_cpu(phy_dev_entry->first_cpu, MSR_IA32_PACKAGE_THERM_STATUS,
		     &eax, &edx);
	if (eax & 0x80000000) {
		/* Bits 22:16 hold the readout; mask is 0x7f */
		*temp = phy_dev_entry->tj_max -
				((eax >> 16) & 0x7f) * 1000;
		pr_debug("sys_get_curr_temp %ld\n", *temp);
		return 0;
	}

	return -EINVAL;
}
/*
 * Thermal zone callback: read back the temperature programmed into
 * threshold 0 or 1 of MSR_IA32_PACKAGE_THERM_INTERRUPT. A zero
 * threshold field reports *temp = 0 (trip unset); otherwise the
 * value is tj_max minus the threshold offset, in millidegrees C.
 */
static int sys_get_trip_temp(struct thermal_zone_device *tzd,
		int trip, unsigned long *temp)
{
	u32 eax, edx;
	struct phy_dev_entry *phy_dev_entry;
	u32 mask, shift;
	unsigned long thres_reg_value;
	int ret;

	if (trip >= MAX_NUMBER_OF_TRIPS)
		return -EINVAL;

	phy_dev_entry = tzd->devdata;

	/* Select the MSR field corresponding to the requested trip */
	if (trip) {
		mask = THERM_MASK_THRESHOLD1;
		shift = THERM_SHIFT_THRESHOLD1;
	} else {
		mask = THERM_MASK_THRESHOLD0;
		shift = THERM_SHIFT_THRESHOLD0;
	}

	ret = rdmsr_on_cpu(phy_dev_entry->first_cpu,
			   MSR_IA32_PACKAGE_THERM_INTERRUPT, &eax, &edx);
	if (ret < 0)
		return -EINVAL;

	thres_reg_value = (eax & mask) >> shift;
	if (thres_reg_value)
		*temp = phy_dev_entry->tj_max - thres_reg_value * 1000;
	else
		*temp = 0;
	pr_debug("sys_get_trip_temp %ld\n", *temp);

	return 0;
}
/*
 * Thermal zone callback: program a trip temperature into threshold 0
 * or 1 of MSR_IA32_PACKAGE_THERM_INTERRUPT. @temp is in millidegrees
 * Celsius and must be below tj_max. A temp of 0 clears the threshold
 * and disables its interrupt; any other value also enables the
 * corresponding threshold interrupt.
 */
static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
			     unsigned long temp)
{
	u32 l, h;
	struct phy_dev_entry *phy_dev_entry;
	u32 mask, shift, intr;
	int ret;

	phy_dev_entry = tzd->devdata;

	if (trip >= MAX_NUMBER_OF_TRIPS || temp >= phy_dev_entry->tj_max)
		return -EINVAL;

	ret = rdmsr_on_cpu(phy_dev_entry->first_cpu,
			   MSR_IA32_PACKAGE_THERM_INTERRUPT,
			   &l, &h);
	if (ret < 0)
		return -EINVAL;

	if (trip) {
		mask = THERM_MASK_THRESHOLD1;
		shift = THERM_SHIFT_THRESHOLD1;
		intr = THERM_INT_THRESHOLD1_ENABLE;
	} else {
		mask = THERM_MASK_THRESHOLD0;
		shift = THERM_SHIFT_THRESHOLD0;
		intr = THERM_INT_THRESHOLD0_ENABLE;
	}
	/* Clear the old threshold field before inserting the new value */
	l &= ~mask;
	/*
	 * When user space sets a trip temperature == 0, it is an indication
	 * that it is no longer interested in receiving notifications.
	 */
	if (!temp)
		l &= ~intr;
	else {
		/* Threshold is stored as (tj_max - temp) in whole degrees */
		l |= (phy_dev_entry->tj_max - temp)/1000 << shift;
		l |= intr;
	}

	return wrmsr_on_cpu(phy_dev_entry->first_cpu,
			    MSR_IA32_PACKAGE_THERM_INTERRUPT,
			    l, h);
}
/*
 * Thermal zone callback: both package thresholds are always reported
 * as passive trip points, regardless of the trip index.
 */
static int sys_get_trip_type(struct thermal_zone_device *thermal,
		int trip, enum thermal_trip_type *type)
{
	*type = THERMAL_TRIP_PASSIVE;

	return 0;
}
/* Thermal zone callback registry: wired into each registered zone */
static struct thermal_zone_device_ops tzone_ops = {
	.get_temp = sys_get_curr_temp,
	.get_trip_temp = sys_get_trip_temp,
	.get_trip_type = sys_get_trip_type,
	.set_trip_temp = sys_set_trip_temp,
};
/*
 * Tell the core thermal interrupt code that this driver performs its
 * own rate control (the delayed work debounced by notify_delay_ms),
 * so no additional rate limiting is needed.
 */
static bool pkg_temp_thermal_platform_thermal_rate_control(void)
{
	return true;
}
/*
 * Enable threshold interrupt on the local package/cpu.
 * Must run on a CPU of the target package (uses plain rdmsr/wrmsr).
 */
static inline void enable_pkg_thres_interrupt(void)
{
	u32 l, h;
	u8 thres_0, thres_1;

	rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
	/* only re-enable an interrupt whose threshold field is non-zero */
	thres_0 = (l & THERM_MASK_THRESHOLD0) >> THERM_SHIFT_THRESHOLD0;
	thres_1 = (l & THERM_MASK_THRESHOLD1) >> THERM_SHIFT_THRESHOLD1;
	if (thres_0)
		l |= THERM_INT_THRESHOLD0_ENABLE;
	if (thres_1)
		l |= THERM_INT_THRESHOLD1_ENABLE;
	wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}
/*
 * Disable both threshold interrupts on the local package/cpu.
 * Must run on a CPU of the target package (uses plain rdmsr/wrmsr).
 */
static inline void disable_pkg_thres_interrupt(void)
{
	u32 l, h;

	rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
	l &= ~(THERM_INT_THRESHOLD0_ENABLE | THERM_INT_THRESHOLD1_ENABLE);
	wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}
/*
 * Delayed work handler for a package threshold interrupt. Clears the
 * "work scheduled" flag for the package, re-enables the (previously
 * masked) threshold interrupts, acknowledges the log bits in the
 * status MSR and, if either threshold had fired, notifies the thermal
 * core so user space sees the update.
 */
static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
{
	__u64 msr_val;
	int cpu = smp_processor_id();
	int phy_id = topology_physical_package_id(cpu);
	struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu);
	bool notify = false;
	unsigned long flags;

	if (!phdev)
		return;

	spin_lock_irqsave(&pkg_work_lock, flags);
	++pkg_work_cnt;
	/* Package may exceed the tracked range if hotplug raced us */
	if (unlikely(phy_id > max_phy_id)) {
		spin_unlock_irqrestore(&pkg_work_lock, flags);
		return;
	}
	pkg_work_scheduled[phy_id] = 0;
	spin_unlock_irqrestore(&pkg_work_lock, flags);

	enable_pkg_thres_interrupt();
	/* Acknowledge whichever threshold log bits are set */
	rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
	if (msr_val & THERM_LOG_THRESHOLD0) {
		wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS,
		       msr_val & ~THERM_LOG_THRESHOLD0);
		notify = true;
	}
	if (msr_val & THERM_LOG_THRESHOLD1) {
		wrmsrl(MSR_IA32_PACKAGE_THERM_STATUS,
		       msr_val & ~THERM_LOG_THRESHOLD1);
		notify = true;
	}
	if (notify) {
		pr_debug("thermal_zone_device_update\n");
		thermal_zone_device_update(phdev->tzone);
	}
}
/*
 * Platform callback invoked from the package thermal interrupt path.
 * Masks further threshold interrupts and defers the real processing
 * to per-cpu delayed work (debounced by notify_delay_ms). Only one
 * CPU per package schedules the work; the rest bail out.
 */
static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
{
	unsigned long flags;
	int cpu = smp_processor_id();
	int phy_id = topology_physical_package_id(cpu);

	/*
	 * When a package is in interrupted state, all CPU's in that package
	 * are in the same interrupt state. So scheduling on any one CPU in
	 * the package is enough and simply return for others.
	 */
	spin_lock_irqsave(&pkg_work_lock, flags);
	++pkg_interrupt_cnt;
	if (unlikely(phy_id > max_phy_id) || unlikely(!pkg_work_scheduled) ||
	    pkg_work_scheduled[phy_id]) {
		/* Work already pending (or package untracked): just mask */
		disable_pkg_thres_interrupt();
		spin_unlock_irqrestore(&pkg_work_lock, flags);
		return -EINVAL;
	}
	pkg_work_scheduled[phy_id] = 1;
	spin_unlock_irqrestore(&pkg_work_lock, flags);

	disable_pkg_thres_interrupt();
	schedule_delayed_work_on(cpu,
				 &per_cpu(pkg_temp_thermal_threshold_work, cpu),
				 msecs_to_jiffies(notify_delay_ms));
	return 0;
}
/*
 * Find another online CPU in the same physical package as @cpu.
 * Returns 0 when no sibling is found.
 * NOTE(review): 0 is also a valid CPU number, so the "not found"
 * sentinel is ambiguous; the caller tolerates this because the
 * package entry is torn down when the last CPU goes away — confirm
 * before reusing this helper elsewhere.
 */
static int find_siblings_cpu(int cpu)
{
	int i;
	int id = topology_physical_package_id(cpu);

	for_each_online_cpu(i)
		if (i != cpu && topology_physical_package_id(i) == id)
			return i;

	return 0;
}
/*
 * Register a thermal zone for the package that @cpu belongs to.
 * Queries CPUID leaf 6 for the number of programmable thresholds,
 * reads tj_max, grows the pkg_work_scheduled array to cover the
 * package id, registers the zone and saves the original interrupt
 * MSR value so it can be restored on module exit.
 *
 * Returns 0 on success or a negative errno.
 */
static int pkg_temp_thermal_device_add(unsigned int cpu)
{
	int err;
	u32 tj_max;
	struct phy_dev_entry *phy_dev_entry;
	int thres_count;
	u32 eax, ebx, ecx, edx;
	u8 *temp;
	unsigned long flags;

	/* CPUID.06H:EBX[2:0] = number of interrupt thresholds */
	cpuid(6, &eax, &ebx, &ecx, &edx);
	thres_count = ebx & 0x07;
	if (!thres_count)
		return -ENODEV;

	if (topology_physical_package_id(cpu) > MAX_PKG_TEMP_ZONE_IDS)
		return -ENODEV;

	/* The MSR only supports two thresholds; clamp whatever CPUID says */
	thres_count = clamp_val(thres_count, 0, MAX_NUMBER_OF_TRIPS);

	err = get_tj_max(cpu, &tj_max);
	if (err)
		goto err_ret;

	mutex_lock(&phy_dev_list_mutex);

	phy_dev_entry = kzalloc(sizeof(*phy_dev_entry), GFP_KERNEL);
	if (!phy_dev_entry) {
		err = -ENOMEM;
		goto err_ret_unlock;
	}

	/* Grow the scheduled-flag array to cover this package id */
	spin_lock_irqsave(&pkg_work_lock, flags);
	if (topology_physical_package_id(cpu) > max_phy_id)
		max_phy_id = topology_physical_package_id(cpu);
	temp = krealloc(pkg_work_scheduled,
			(max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
	if (!temp) {
		spin_unlock_irqrestore(&pkg_work_lock, flags);
		err = -ENOMEM;
		goto err_ret_free;
	}
	pkg_work_scheduled = temp;
	pkg_work_scheduled[topology_physical_package_id(cpu)] = 0;
	spin_unlock_irqrestore(&pkg_work_lock, flags);

	phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu);
	phy_dev_entry->first_cpu = cpu;
	phy_dev_entry->tj_max = tj_max;
	phy_dev_entry->ref_cnt = 1;
	/* Trip mask: 0x03 = both trips writable, 0x01 = only trip 0 */
	phy_dev_entry->tzone = thermal_zone_device_register("x86_pkg_temp",
			thres_count,
			(thres_count == MAX_NUMBER_OF_TRIPS) ?
				0x03 : 0x01,
			phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0);
	if (IS_ERR(phy_dev_entry->tzone)) {
		err = PTR_ERR(phy_dev_entry->tzone);
		goto err_ret_free;
	}
	/* Store MSR value for package thermal interrupt, to restore at exit */
	rdmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
		     &phy_dev_entry->start_pkg_therm_low,
		     &phy_dev_entry->start_pkg_therm_high);

	list_add_tail(&phy_dev_entry->list, &phy_dev_list);
	pr_debug("pkg_temp_thermal_device_add :phy_id %d cpu %d\n",
		 phy_dev_entry->phys_proc_id, cpu);

	mutex_unlock(&phy_dev_list_mutex);

	return 0;

err_ret_free:
	kfree(phy_dev_entry);
err_ret_unlock:
	mutex_unlock(&phy_dev_list_mutex);
err_ret:
	return err;
}
/*
 * Drop @cpu's reference on its package entry. If @cpu was the entry's
 * representative "first cpu", switch to a sibling first. When the
 * reference count reaches zero (last CPU of the package), unregister
 * the thermal zone and free the entry.
 *
 * Returns 0 on success, -ENODEV when no entry exists for the package.
 */
static int pkg_temp_thermal_device_remove(unsigned int cpu)
{
	struct phy_dev_entry *n;
	u16 phys_proc_id = topology_physical_package_id(cpu);
	struct phy_dev_entry *phdev =
			pkg_temp_thermal_get_phy_entry(cpu);

	if (!phdev)
		return -ENODEV;

	mutex_lock(&phy_dev_list_mutex);
	/* If we are losing the first cpu for this package, we need change */
	if (phdev->first_cpu == cpu) {
		phdev->first_cpu = find_siblings_cpu(cpu);
		pr_debug("thermal_device_remove: first cpu switched %d\n",
			 phdev->first_cpu);
	}
	/*
	 * It is possible that no siblings left as this was the last cpu
	 * going offline. We don't need to worry about this assignment
	 * as the phydev entry will be removed in this case and
	 * thermal zone is removed.
	 */
	--phdev->ref_cnt;
	pr_debug("thermal_device_remove: pkg: %d cpu %d ref_cnt %d\n",
		 phys_proc_id, cpu, phdev->ref_cnt);
	if (!phdev->ref_cnt)
		list_for_each_entry_safe(phdev, n, &phy_dev_list, list) {
			if (phdev->phys_proc_id == phys_proc_id) {
				thermal_zone_device_unregister(phdev->tzone);
				list_del(&phdev->list);
				kfree(phdev);
				break;
			}
		}
	mutex_unlock(&phy_dev_list_mutex);

	return 0;
}
/*
 * CPU-online handler. For the first CPU of a package (no entry yet),
 * verify the DTHERM and PTS CPU features and create the package
 * thermal device; for subsequent CPUs just bump the reference count.
 * Also (re)initializes this CPU's delayed work item.
 */
static int get_core_online(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu);

	/* Check if there is already an instance for this package */
	if (!phdev) {
		if (!cpu_has(c, X86_FEATURE_DTHERM) ||
		    !cpu_has(c, X86_FEATURE_PTS))
			return -ENODEV;
		if (pkg_temp_thermal_device_add(cpu))
			return -ENODEV;
	} else {
		mutex_lock(&phy_dev_list_mutex);
		++phdev->ref_cnt;
		pr_debug("get_core_online: cpu %d ref_cnt %d\n",
			 cpu, phdev->ref_cnt);
		mutex_unlock(&phy_dev_list_mutex);
	}
	INIT_DELAYED_WORK(&per_cpu(pkg_temp_thermal_threshold_work, cpu),
			  pkg_temp_thermal_threshold_work_fn);

	pr_debug("get_core_online: cpu %d successful\n", cpu);

	return 0;
}
/*
 * CPU-offline handler: drop the package reference and, when the
 * package entry was actually found, cancel this CPU's pending
 * threshold work.
 */
static void put_core_offline(unsigned int cpu)
{
	int removed = pkg_temp_thermal_device_remove(cpu);

	if (!removed)
		cancel_delayed_work_sync(
			&per_cpu(pkg_temp_thermal_threshold_work, cpu));

	pr_debug("put_core_offline: cpu %d\n", cpu);
}
/*
 * CPU hotplug notifier: attach on online (or a failed down), detach
 * before a CPU goes down. Errors from the helpers are intentionally
 * ignored; the notifier always reports NOTIFY_OK.
 */
static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		get_core_online(cpu);
		break;
	case CPU_DOWN_PREPARE:
		put_core_offline(cpu);
		break;
	}
	return NOTIFY_OK;
}
static struct notifier_block pkg_temp_thermal_notifier __refdata = {
	.notifier_call = pkg_temp_thermal_cpu_callback,
};

/* Match any Intel CPU that advertises Package Thermal Status (PTS) */
static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = {
	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_PTS },
	{}
};
MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids);
/*
 * Module init: hook the platform thermal-interrupt callbacks, bring
 * up every online CPU/package, and register the hotplug notifier.
 * On any per-CPU failure everything set up so far is unwound.
 */
static int __init pkg_temp_thermal_init(void)
{
	int i;

	if (!x86_match_cpu(pkg_temp_thermal_ids))
		return -ENODEV;

	spin_lock_init(&pkg_work_lock);
	/* Route package thermal interrupts into this driver */
	platform_thermal_package_notify =
			pkg_temp_thermal_platform_thermal_notify;
	platform_thermal_package_rate_control =
			pkg_temp_thermal_platform_thermal_rate_control;

	cpu_notifier_register_begin();
	for_each_online_cpu(i)
		if (get_core_online(i))
			goto err_ret;
	__register_hotcpu_notifier(&pkg_temp_thermal_notifier);
	cpu_notifier_register_done();

	pkg_temp_debugfs_init(); /* Don't care if fails */

	return 0;

err_ret:
	for_each_online_cpu(i)
		put_core_offline(i);
	cpu_notifier_register_done();
	kfree(pkg_work_scheduled);
	platform_thermal_package_notify = NULL;
	platform_thermal_package_rate_control = NULL;

	return -ENODEV;
}
/*
 * Module exit: unregister the hotplug notifier, restore each
 * package's original interrupt MSR, tear down the thermal zones,
 * detach the platform callbacks, flush pending work and remove the
 * debugfs entries.
 */
static void __exit pkg_temp_thermal_exit(void)
{
	struct phy_dev_entry *phdev, *n;
	int i;

	cpu_notifier_register_begin();
	__unregister_hotcpu_notifier(&pkg_temp_thermal_notifier);
	mutex_lock(&phy_dev_list_mutex);
	list_for_each_entry_safe(phdev, n, &phy_dev_list, list) {
		/* Restore old MSR value for package thermal interrupt */
		wrmsr_on_cpu(phdev->first_cpu,
			     MSR_IA32_PACKAGE_THERM_INTERRUPT,
			     phdev->start_pkg_therm_low,
			     phdev->start_pkg_therm_high);
		thermal_zone_device_unregister(phdev->tzone);
		list_del(&phdev->list);
		kfree(phdev);
	}
	mutex_unlock(&phy_dev_list_mutex);
	platform_thermal_package_notify = NULL;
	platform_thermal_package_rate_control = NULL;
	for_each_online_cpu(i)
		cancel_delayed_work_sync(
			&per_cpu(pkg_temp_thermal_threshold_work, i));
	cpu_notifier_register_done();

	kfree(pkg_work_scheduled);

	debugfs_remove_recursive(debugfs);
}
module_init(pkg_temp_thermal_init)
module_exit(pkg_temp_thermal_exit)
MODULE_DESCRIPTION("X86 PKG TEMP Thermal Driver");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
jakeclawson/linux | arch/xtensa/kernel/pci-dma.c | 221 | 5805 | /*
* DMA coherent memory allocation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Copyright (C) 2002 - 2005 Tensilica Inc.
* Copyright (C) 2015 Cadence Design Systems Inc.
*
* Based on version for i386.
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
/*
 * Make the data cache coherent with device DMA for [vaddr, vaddr+size):
 * flush for TO_DEVICE, invalidate for FROM_DEVICE, both for
 * BIDIRECTIONAL. DMA_NONE is a caller bug.
 */
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction dir)
{
	unsigned long addr = (unsigned long)vaddr;

	switch (dir) {
	case DMA_NONE:
		BUG();
		break;

	case DMA_TO_DEVICE:
		__flush_dcache_range(addr, size);
		break;

	case DMA_FROM_DEVICE:
		__invalidate_dcache_range(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		__flush_invalidate_dcache_range(addr, size);
		break;
	}
}
EXPORT_SYMBOL(dma_cache_sync);
/*
 * Make a DMA region visible to the CPU after a device write:
 * invalidate the cached alias so stale lines are not read back.
 * Only FROM_DEVICE/BIDIRECTIONAL need work; TO_DEVICE is a no-op.
 */
static void xtensa_sync_single_for_cpu(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir)
{
	void *vaddr;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		vaddr = bus_to_virt(dma_handle);
		__invalidate_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}
/*
 * Hand a DMA region to the device: write back (flush) the cached
 * alias so the device sees current data. Only TO_DEVICE/BIDIRECTIONAL
 * need work; FROM_DEVICE is a no-op here.
 */
static void xtensa_sync_single_for_device(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction dir)
{
	void *vaddr;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		vaddr = bus_to_virt(dma_handle);
		__flush_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}
static void xtensa_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
xtensa_sync_single_for_cpu(dev, sg_dma_address(s),
sg_dma_len(s), dir);
}
}
static void xtensa_sync_sg_for_device(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
xtensa_sync_single_for_device(dev, sg_dma_address(s),
sg_dma_len(s), dir);
}
}
/*
* Note: We assume that the full memory space is always mapped to 'kseg'
* Otherwise we have to use page attributes (not implemented).
*/
/*
 * Allocate DMA-coherent memory. Pages come from the cached kseg
 * mapping; the returned virtual address is the uncached (bypass)
 * alias of the same physical pages, with *handle set to the bus
 * address. The cached alias is invalidated so no dirty lines can be
 * written back over DMA data later.
 */
static void *xtensa_dma_alloc(struct device *dev, size_t size,
			      dma_addr_t *handle, gfp_t flag,
			      struct dma_attrs *attrs)
{
	unsigned long ret;
	unsigned long uncached = 0;

	/* ignore region specifiers */

	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Fall back to the DMA zone for devices with a narrow mask */
	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		flag |= GFP_DMA;
	ret = (unsigned long)__get_free_pages(flag, get_order(size));

	if (ret == 0)
		return NULL;

	/* We currently don't support coherent memory outside KSEG */

	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

	/* Translate the cached kseg address to its uncached alias */
	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
	*handle = virt_to_bus((void *)ret);
	__invalidate_dcache_range(ret, size);

	return (void *)uncached;
}
/*
 * Free memory obtained from xtensa_dma_alloc(). @vaddr is the
 * uncached (bypass) alias; translate it back to the cached kseg
 * address that __get_free_pages() originally returned before freeing.
 */
static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
			    dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long cached = (unsigned long)vaddr -
		XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_CACHED_VADDR;

	BUG_ON(cached < XCHAL_KSEG_CACHED_VADDR ||
	       cached > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

	free_pages(cached, get_order(size));
}
/*
 * Map a page for streaming DMA: the bus address is simply the
 * physical address (no IOMMU); flush the CPU cache for the region so
 * the device observes current data. Highmem pages are unsupported.
 */
static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	BUG_ON(PageHighMem(page));
	xtensa_sync_single_for_device(dev, handle, size, dir);

	return handle;
}
/*
 * Unmap a streaming DMA page: just make the region coherent for the
 * CPU again (invalidate for FROM_DEVICE/BIDIRECTIONAL).
 */
static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
			      size_t size, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
}
static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset,
s->length, dir, attrs);
}
return nents;
}
static void xtensa_unmap_sg(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
xtensa_unmap_page(dev, sg_dma_address(s),
sg_dma_len(s), dir, attrs);
}
}
/* DMA mapping on this platform cannot fail, so never report an error. */
int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
/* DMA operations table exported for use as the platform's dma_map_ops */
struct dma_map_ops xtensa_dma_map_ops = {
	.alloc = xtensa_dma_alloc,
	.free = xtensa_dma_free,
	.map_page = xtensa_map_page,
	.unmap_page = xtensa_unmap_page,
	.map_sg = xtensa_map_sg,
	.unmap_sg = xtensa_unmap_sg,
	.sync_single_for_cpu = xtensa_sync_single_for_cpu,
	.sync_single_for_device = xtensa_sync_single_for_device,
	.sync_sg_for_cpu = xtensa_sync_sg_for_cpu,
	.sync_sg_for_device = xtensa_sync_sg_for_device,
	.mapping_error = xtensa_dma_mapping_error,
};
EXPORT_SYMBOL(xtensa_dma_map_ops);
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* Initialize the DMA-debug facility with a fixed-size entry pool */
static int __init xtensa_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(xtensa_dma_init);
| gpl-2.0 |
farindk/linux-sunxi | drivers/cpuidle/cpuidle-clps711x.c | 477 | 1657 | /*
* CLPS711X CPU idle driver
*
* Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/cpuidle.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#define CLPS711X_CPUIDLE_NAME "clps711x-cpuidle"
static void __iomem *clps711x_halt;
/*
 * Enter the HALT idle state by writing the HALT register; the CPU
 * resumes on the next interrupt.
 * NOTE(review): 0xaa looks like an arbitrary value — the write itself
 * appears to be what triggers HALT; confirm against the CLPS711X
 * datasheet.
 */
static int clps711x_cpuidle_halt(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
{
	writel(0xaa, clps711x_halt);

	return index;
}
/* Single-state cpuidle driver: only HALT is available on CLPS711X */
static struct cpuidle_driver clps711x_idle_driver = {
	.name = CLPS711X_CPUIDLE_NAME,
	.owner = THIS_MODULE,
	.states[0] = {
		.name = "HALT",
		.desc = "CLPS711X HALT",
		.enter = clps711x_cpuidle_halt,
		.exit_latency = 1,
	},
	.state_count = 1,
};
/*
 * Map the HALT register from the platform device's first MEM resource
 * and register the single-state cpuidle driver.
 */
static int __init clps711x_cpuidle_probe(struct platform_device *pdev)
{
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	clps711x_halt = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(clps711x_halt))
		return PTR_ERR(clps711x_halt);

	return cpuidle_register(&clps711x_idle_driver, NULL);
}
/* Platform driver shell; the probe is supplied via *_driver_probe() */
static struct platform_driver clps711x_cpuidle_driver = {
	.driver = {
		.name = CLPS711X_CPUIDLE_NAME,
		.owner = THIS_MODULE,
	},
};
module_platform_driver_probe(clps711x_cpuidle_driver, clps711x_cpuidle_probe);
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("CLPS711X CPU idle driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
winpih/Riderism-2.6.35 | drivers/media/dvb/mantis/hopper_cards.c | 989 | 6952 | /*
Hopper PCI bridge driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/irq.h>
#include <linux/interrupt.h>
#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"
#include "mantis_common.h"
#include "hopper_vp3028.h"
#include "mantis_dma.h"
#include "mantis_dvb.h"
#include "mantis_uart.h"
#include "mantis_ioc.h"
#include "mantis_pci.h"
#include "mantis_i2c.h"
#include "mantis_reg.h"
static unsigned int verbose;
module_param(verbose, int, 0644);
MODULE_PARM_DESC(verbose, "verbose startup messages, default is 1 (yes)");
#define DRIVER_NAME "Hopper"
static char *label[10] = {
"DMA",
"IRQ-0",
"IRQ-1",
"OCERR",
"PABRT",
"RIPRR",
"PPERR",
"FTRGT",
"RISCI",
"RACK"
};
static int devs;
/*
 * Shared interrupt handler for the Hopper bridge. Reads and decodes
 * MANTIS_INT_STAT, acknowledges GPIF status bits, dispatches CA/UART
 * work items and the DMA tasklet, then writes the status back to ack
 * the interrupt. Returns IRQ_NONE when the interrupt was not ours.
 */
static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
{
	u32 stat = 0, mask = 0, lstat = 0, mstat = 0;
	u32 rst_stat = 0, rst_mask = 0;

	struct mantis_pci *mantis;
	struct mantis_ca *ca;

	mantis = (struct mantis_pci *) dev_id;
	if (unlikely(mantis == NULL)) {
		dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
		return IRQ_NONE;
	}
	ca = mantis->mantis_ca;

	stat = mmread(MANTIS_INT_STAT);
	mask = mmread(MANTIS_INT_MASK);
	mstat = lstat = stat & ~MANTIS_INT_RISCSTAT;
	if (!(stat & mask))
		return IRQ_NONE;

	/* Acknowledge only the GPIF conditions we care about */
	rst_mask  = MANTIS_GPIF_WRACK  |
		    MANTIS_GPIF_OTHERR |
		    MANTIS_SBUF_WSTO   |
		    MANTIS_GPIF_EXTIRQ;

	rst_stat  = mmread(MANTIS_GPIF_STATUS);
	rst_stat &= rst_mask;
	mmwrite(rst_stat, MANTIS_GPIF_STATUS);

	mantis->mantis_int_stat = stat;
	mantis->mantis_int_mask = mask;
	dprintk(MANTIS_DEBUG, 0, "\n-- Stat=<%02x> Mask=<%02x> --", stat, mask);
	if (stat & MANTIS_INT_RISCEN) {
		dprintk(MANTIS_DEBUG, 0, "<%s>", label[0]);
	}
	if (stat & MANTIS_INT_IRQ0) {
		/* CA (common interface) event */
		dprintk(MANTIS_DEBUG, 0, "<%s>", label[1]);
		mantis->gpif_status = rst_stat;
		wake_up(&ca->hif_write_wq);
		schedule_work(&ca->hif_evm_work);
	}
	if (stat & MANTIS_INT_IRQ1) {
		/* UART event */
		dprintk(MANTIS_DEBUG, 0, "<%s>", label[2]);
		schedule_work(&mantis->uart_work);
	}
	if (stat & MANTIS_INT_OCERR) {
		dprintk(MANTIS_DEBUG, 0, "<%s>", label[3]);
	}
	if (stat & MANTIS_INT_PABORT) {
		dprintk(MANTIS_DEBUG, 0, "<%s>", label[4]);
	}
	if (stat & MANTIS_INT_RIPERR) {
		dprintk(MANTIS_DEBUG, 0, "<%s>", label[5]);
	}
	if (stat & MANTIS_INT_PPERR) {
		dprintk(MANTIS_DEBUG, 0, "<%s>", label[6]);
	}
	if (stat & MANTIS_INT_FTRGT) {
		dprintk(MANTIS_DEBUG, 0, "<%s>", label[7]);
	}
	if (stat & MANTIS_INT_RISCI) {
		/* DMA block complete: bits 31:28 carry the finished block */
		dprintk(MANTIS_DEBUG, 0, "<%s>", label[8]);
		mantis->finished_block = (stat & MANTIS_INT_RISCSTAT) >> 28;
		tasklet_schedule(&mantis->tasklet);
	}
	if (stat & MANTIS_INT_I2CDONE) {
		dprintk(MANTIS_DEBUG, 0, "<%s>", label[9]);
		wake_up(&mantis->i2c_wq);
	}
	/* Write back to acknowledge the handled interrupts */
	mmwrite(stat, MANTIS_INT_STAT);
	stat &= ~(MANTIS_INT_RISCEN   | MANTIS_INT_I2CDONE |
		  MANTIS_INT_I2CRACK  | MANTIS_INT_PCMCIA7 |
		  MANTIS_INT_PCMCIA6  | MANTIS_INT_PCMCIA5 |
		  MANTIS_INT_PCMCIA4  | MANTIS_INT_PCMCIA3 |
		  MANTIS_INT_PCMCIA2  | MANTIS_INT_PCMCIA1 |
		  MANTIS_INT_PCMCIA0  | MANTIS_INT_IRQ1    |
		  MANTIS_INT_IRQ0     | MANTIS_INT_OCERR   |
		  MANTIS_INT_PABORT   | MANTIS_INT_RIPERR  |
		  MANTIS_INT_PPERR    | MANTIS_INT_FTRGT   |
		  MANTIS_INT_RISCI);

	if (stat)
		dprintk(MANTIS_DEBUG, 0, "<Unknown> Stat=<%02x> Mask=<%02x>", stat, mask);

	dprintk(MANTIS_DEBUG, 0, "\n");

	return IRQ_HANDLED;
}
/*
 * PCI probe: allocate the device context, then bring up the bridge
 * in stages (PCI core, stream control, I2C, MAC read, DMA, DVB).
 * Each stage is unwound in reverse order on failure.
 */
static int __devinit hopper_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
{
	struct mantis_pci *mantis;
	struct mantis_hwconfig *config;
	int err = 0;

	mantis = kzalloc(sizeof(struct mantis_pci), GFP_KERNEL);
	if (mantis == NULL) {
		printk(KERN_ERR "%s ERROR: Out of memory\n", __func__);
		err = -ENOMEM;
		goto fail0;
	}

	mantis->num = devs;
	mantis->verbose = verbose;
	mantis->pdev = pdev;
	/* Per-board config comes from the PCI id table's driver_data */
	config = (struct mantis_hwconfig *) pci_id->driver_data;
	config->irq_handler = &hopper_irq_handler;
	mantis->hwconfig = config;

	err = mantis_pci_init(mantis);
	if (err) {
		dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI initialization failed <%d>", err);
		goto fail1;
	}

	err = mantis_stream_control(mantis, STREAM_TO_HIF);
	if (err < 0) {
		dprintk(MANTIS_ERROR, 1, "ERROR: Mantis stream control failed <%d>", err);
		goto fail1;
	}

	err = mantis_i2c_init(mantis);
	if (err < 0) {
		dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C initialization failed <%d>", err);
		goto fail2;
	}

	err = mantis_get_mac(mantis);
	if (err < 0) {
		dprintk(MANTIS_ERROR, 1, "ERROR: Mantis MAC address read failed <%d>", err);
		goto fail2;
	}

	err = mantis_dma_init(mantis);
	if (err < 0) {
		dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA initialization failed <%d>", err);
		goto fail3;
	}

	err = mantis_dvb_init(mantis);
	if (err < 0) {
		dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DVB initialization failed <%d>", err);
		goto fail4;
	}
	devs++;

	return err;

	/* Unwind in reverse order of setup */
fail4:
	dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA exit! <%d>", err);
	mantis_dma_exit(mantis);

fail3:
	dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C exit! <%d>", err);
	mantis_i2c_exit(mantis);

fail2:
	dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI exit! <%d>", err);
	mantis_pci_exit(mantis);

fail1:
	dprintk(MANTIS_ERROR, 1, "ERROR: Mantis free! <%d>", err);
	kfree(mantis);

fail0:
	return err;
}
/*
 * PCI remove: tear down the bridge subsystems in reverse probe order
 * and free the context. Tolerates a NULL drvdata.
 *
 * Cleanup vs. original: dropped the redundant bare "return;" at the
 * end of this void function.
 */
static void __devexit hopper_pci_remove(struct pci_dev *pdev)
{
	struct mantis_pci *mantis = pci_get_drvdata(pdev);

	if (mantis) {
		mantis_dvb_exit(mantis);
		mantis_dma_exit(mantis);
		mantis_i2c_exit(mantis);
		mantis_pci_exit(mantis);
		kfree(mantis);
	}
}
/* Supported boards: Twinhan VP-3028 DVB-T with the vp3028 config */
static struct pci_device_id hopper_pci_table[] = {
	MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_3028_DVB_T, &vp3028_config),
	{ }
};

static struct pci_driver hopper_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = hopper_pci_table,
	.probe = hopper_pci_probe,
	.remove = hopper_pci_remove,
};
/*
 * Module init: register the PCI driver.
 * Fix vs. original: a module_init function belongs in the __init
 * section, not __devinit (which is for device probe paths).
 */
static int __init hopper_init(void)
{
	return pci_register_driver(&hopper_pci_driver);
}
/*
 * Module exit: unregister the PCI driver.
 * Fixes vs. original: "return pci_unregister_driver(...);" returned a
 * void expression from a void function, which is an ISO C constraint
 * violation (C11 6.8.6.4); and a module_exit function belongs in the
 * __exit section, not __devexit.
 */
static void __exit hopper_exit(void)
{
	pci_unregister_driver(&hopper_pci_driver);
}
module_init(hopper_init);
module_exit(hopper_exit);
MODULE_DESCRIPTION("HOPPER driver");
MODULE_AUTHOR("Manu Abraham");
MODULE_LICENSE("GPL");
| gpl-2.0 |
andrewoko-odion/linux | drivers/gpu/drm/gma500/psb_intel_sdvo.c | 989 | 82247 | /*
* Copyright 2006 Dave Airlie <airlied@linux.ie>
* Copyright © 2006-2007 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "psb_intel_drv.h"
#include <drm/gma_drm.h>
#include "psb_drv.h"
#include "psb_intel_sdvo_regs.h"
#include "psb_intel_reg.h"
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
SDVO_TV_MASK)
#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
/*
 * Human-readable names for the supported SDVO TV formats. An entry's
 * position is the format index stored in psb_intel_sdvo->tv_format_index
 * and turned into a format bit by psb_intel_sdvo_set_tv_format().
 */
static const char *tv_format_names[] = {
	"NTSC_M"   , "NTSC_J"  , "NTSC_443",
	"PAL_B"    , "PAL_D"   , "PAL_G"   ,
	"PAL_H"    , "PAL_I"   , "PAL_M"   ,
	"PAL_N"    , "PAL_NC"  , "PAL_60"  ,
	"SECAM_B"  , "SECAM_D" , "SECAM_G" ,
	"SECAM_K"  , "SECAM_K1", "SECAM_L" ,
	"SECAM_60"
};

/* Use ARRAY_SIZE, consistent with its use elsewhere in this file. */
#define TV_FORMAT_NUM	ARRAY_SIZE(tv_format_names)
/* Per-encoder state for one SDVO port (SDVOB or SDVOC). */
struct psb_intel_sdvo {
	struct gma_encoder base;
	/* Bus used to talk to the SDVO device itself */
	struct i2c_adapter *i2c;
	u8 slave_addr;
	/* DDC adapter exposed for EDID reads through this encoder */
	struct i2c_adapter ddc;
	/* Register for the SDVO device: SDVOB or SDVOC */
	int sdvo_reg;
	/* Active outputs controlled by this SDVO output */
	uint16_t controlled_output;
	/*
	 * Capabilities of the SDVO device returned by
	 * i830_sdvo_get_capabilities()
	 */
	struct psb_intel_sdvo_caps caps;
	/* Pixel clock limitations reported by the SDVO device, in kHz */
	int pixel_clock_min, pixel_clock_max;
	/*
	 * For multiple function SDVO device,
	 * this is for current attached outputs.
	 */
	uint16_t attached_output;
	/**
	 * This is used to select the color range of RBG outputs in HDMI mode.
	 * It is only valid when using TMDS encoding and 8 bit per color mode.
	 */
	uint32_t color_range;
	/**
	 * This is set if we're going to treat the device as TV-out.
	 *
	 * While we have these nice friendly flags for output types that ought
	 * to decide this for us, the S-Video output on our HDMI+S-Video card
	 * shows up as RGB1 (VGA).
	 */
	bool is_tv;
	/* Index into tv_format_names for the current TV format */
	int tv_format_index;
	/**
	 * This is set if we treat the device as HDMI, instead of DVI.
	 */
	bool is_hdmi;
	bool has_hdmi_monitor;
	bool has_hdmi_audio;
	/**
	 * This is set if we detect output of sdvo device as LVDS and
	 * have a valid fixed mode to use with the panel.
	 */
	bool is_lvds;
	/**
	 * Fixed panel mode used when the output is LVDS
	 */
	struct drm_display_mode *sdvo_lvds_fixed_mode;
	/* DDC bus used by this SDVO encoder */
	uint8_t ddc_bus;
	/* Input timings for adjusted_mode, filled in during mode fixup */
	struct psb_intel_sdvo_dtd input_dtd;
	/* Saved SDVO output states */
	uint32_t saveSDVO; /* Can be SDVOB or SDVOC depending on sdvo_reg */
};
/* Per-connector state for a connector driven by an SDVO encoder. */
struct psb_intel_sdvo_connector {
	struct gma_connector base;
	/* Mark the type of connector (SDVO_OUTPUT_* bit) */
	uint16_t output_flag;
	int force_audio;
	/* This contains all current supported TV format */
	u8 tv_format_supported[TV_FORMAT_NUM];
	int format_supported_num;
	struct drm_property *tv_format;
	/* add the property for the SDVO-TV */
	struct drm_property *left;
	struct drm_property *right;
	struct drm_property *top;
	struct drm_property *bottom;
	struct drm_property *hpos;
	struct drm_property *vpos;
	struct drm_property *contrast;
	struct drm_property *saturation;
	struct drm_property *hue;
	struct drm_property *sharpness;
	struct drm_property *flicker_filter;
	struct drm_property *flicker_filter_adaptive;
	struct drm_property *flicker_filter_2d;
	struct drm_property *tv_chroma_filter;
	struct drm_property *tv_luma_filter;
	struct drm_property *dot_crawl;
	/* add the property for the SDVO-TV/LVDS */
	struct drm_property *brightness;
	/* Add variable to record current setting for the above property */
	u32 left_margin, right_margin, top_margin, bottom_margin;
	/* this is to get the range of margin.*/
	u32 max_hscan, max_vscan;
	/* cur_* / max_* pairs mirror the enhancement properties above */
	u32 max_hpos, cur_hpos;
	u32 max_vpos, cur_vpos;
	u32 cur_brightness, max_brightness;
	u32 cur_contrast, max_contrast;
	u32 cur_saturation, max_saturation;
	u32 cur_hue, max_hue;
	u32 cur_sharpness, max_sharpness;
	u32 cur_flicker_filter, max_flicker_filter;
	u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
	u32 cur_flicker_filter_2d, max_flicker_filter_2d;
	u32 cur_tv_chroma_filter, max_tv_chroma_filter;
	u32 cur_tv_luma_filter, max_tv_luma_filter;
	u32 cur_dot_crawl, max_dot_crawl;
};
/* Map a drm_encoder back to its containing psb_intel_sdvo. */
static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
{
	struct gma_encoder *gma_encoder =
		container_of(encoder, struct gma_encoder, base);

	return container_of(gma_encoder, struct psb_intel_sdvo, base);
}
/* Fetch the psb_intel_sdvo driving the given connector. */
static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
{
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);

	return container_of(gma_encoder, struct psb_intel_sdvo, base);
}
/* Map a drm_connector back to its containing psb_intel_sdvo_connector. */
static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
{
	struct gma_connector *gma_connector = to_gma_connector(connector);

	return container_of(gma_connector,
			    struct psb_intel_sdvo_connector, base);
}
static bool
psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags);
static bool
psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
int type);
static bool
psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
/**
 * Writes the SDVOB or SDVOC with the given value, but always writes both
 * SDVOB and SDVOC to work around apparent hardware issues (according to
 * comments in the BIOS).
 */
static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u32 val)
{
	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
	u32 bval = val, cval = val;
	int i, j;
	/* On IS_MRST devices the write is repeated through the aux bank (j=1) */
	int need_aux = IS_MRST(dev) ? 1 : 0;

	for (j = 0; j <= need_aux; j++) {
		/* Keep the current value of whichever register we are NOT
		 * programming, so the "write both" workaround below does not
		 * clobber the other port. */
		if (psb_intel_sdvo->sdvo_reg == SDVOB)
			cval = REG_READ_WITH_AUX(SDVOC, j);
		else
			bval = REG_READ_WITH_AUX(SDVOB, j);

		/*
		 * Write the registers twice for luck. Sometimes,
		 * writing them only once doesn't appear to 'stick'.
		 * The BIOS does this too. Yay, magic
		 */
		for (i = 0; i < 2; i++) {
			/* reads after each write act as posting flushes */
			REG_WRITE_WITH_AUX(SDVOB, bval, j);
			REG_READ_WITH_AUX(SDVOB, j);
			REG_WRITE_WITH_AUX(SDVOC, cval, j);
			REG_READ_WITH_AUX(SDVOC, j);
		}
	}
}
/*
 * Read one register byte from the SDVO device: a write of the register
 * address followed by a one-byte read, in a single i2c transaction.
 */
static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 addr, u8 *ch)
{
	struct i2c_msg msgs[] = {
		{
			.addr = psb_intel_sdvo->slave_addr,
			.flags = 0,
			.len = 1,
			.buf = &addr,
		},
		{
			.addr = psb_intel_sdvo->slave_addr,
			.flags = I2C_M_RD,
			.len = 1,
			.buf = ch,
		}
	};
	int ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, 2);

	if (ret == 2)
		return true;

	DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
	return false;
}
#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}

/** Mapping of command numbers to names, for debug output */
static const struct _sdvo_cmd_name {
	u8 cmd;
	const char *name;
} sdvo_cmd_names[] = {
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),

	/* Add the op code for SDVO enhancements */
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),

	/* HDMI op code */
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
#define IS_SDVOB(reg) (reg == SDVOB)
#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
/*
 * Log an outgoing SDVO command: opcode, raw argument bytes, and the
 * symbolic command name looked up in sdvo_cmd_names[].
 */
static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
				   const void *args, int args_len)
{
	int i;

	DRM_DEBUG_KMS("%s: W: %02X ",
		      SDVO_NAME(psb_intel_sdvo), cmd);
	/* Dump each argument byte... */
	for (i = 0; i < args_len; i++)
		DRM_DEBUG_KMS("%02X ", ((u8 *)args)[i]);
	/* ...then pad out the remaining columns (i carries over from above)
	 * so the decoded name lines up regardless of args_len. */
	for (; i < 8; i++)
		DRM_DEBUG_KMS(" ");
	/* Translate the opcode into its symbolic name, if known. */
	for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
		if (cmd == sdvo_cmd_names[i].cmd) {
			DRM_DEBUG_KMS("(%s)", sdvo_cmd_names[i].name);
			break;
		}
	}
	/* Unknown opcode: fall back to printing it in hex. */
	if (i == ARRAY_SIZE(sdvo_cmd_names))
		DRM_DEBUG_KMS("(%02X)", cmd);
	DRM_DEBUG_KMS("\n");
}
/*
 * Debug names for the SDVO command status byte, indexed by the
 * SDVO_CMD_STATUS_* value; valid up to SDVO_CMD_STATUS_SCALING_NOT_SUPP
 * (see psb_intel_sdvo_read_response()).
 */
static const char *cmd_status_names[] = {
	"Power on",
	"Success",
	"Not supported",
	"Invalid arg",
	"Pending",
	"Target not specified",
	"Scaling not supported"
};
/*
 * Issue one SDVO command: each argument byte is written to its own
 * register (descending from SDVO_I2C_ARG_0), then the opcode is written,
 * then a status read is started — all in a single i2c transfer.
 *
 * NOTE(review): buf[] and msgs[] are variable-length arrays on the kernel
 * stack, sized by args_len; callers only pass small fixed-size argument
 * structs, but an explicit upper-bound check would be more robust.
 */
static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
				 const void *args, int args_len)
{
	u8 buf[args_len*2 + 2], status;
	struct i2c_msg msgs[args_len + 3];
	int i, ret;

	psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);

	/* One two-byte message per argument: register address + value. */
	for (i = 0; i < args_len; i++) {
		msgs[i].addr = psb_intel_sdvo->slave_addr;
		msgs[i].flags = 0;
		msgs[i].len = 2;
		msgs[i].buf = buf + 2 *i;
		buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
		buf[2*i + 1] = ((u8*)args)[i];
	}
	/* Then write the opcode itself. */
	msgs[i].addr = psb_intel_sdvo->slave_addr;
	msgs[i].flags = 0;
	msgs[i].len = 2;
	msgs[i].buf = buf + 2*i;
	buf[2*i + 0] = SDVO_I2C_OPCODE;
	buf[2*i + 1] = cmd;

	/* the following two are to read the response */
	status = SDVO_I2C_CMD_STATUS;
	msgs[i+1].addr = psb_intel_sdvo->slave_addr;
	msgs[i+1].flags = 0;
	msgs[i+1].len = 1;
	msgs[i+1].buf = &status;

	msgs[i+2].addr = psb_intel_sdvo->slave_addr;
	msgs[i+2].flags = I2C_M_RD;
	msgs[i+2].len = 1;
	msgs[i+2].buf = &status;

	ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, i+3);
	if (ret < 0) {
		DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
		return false;
	}
	if (ret != i+3) {
		/* failure in I2C transfer */
		DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
		return false;
	}

	return true;
}
/*
 * Poll the status byte of the last command and, once it reports SUCCESS,
 * copy response_len bytes of reply data into @response. Returns false on
 * i2c failure or any non-success status.
 */
static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
				     void *response, int response_len)
{
	u8 retry = 5;
	u8 status;
	int i;

	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(psb_intel_sdvo));

	/*
	 * The documentation states that all commands will be
	 * processed within 15µs, and that we need only poll
	 * the status byte a maximum of 3 times in order for the
	 * command to be complete.
	 *
	 * Check 5 times in case the hardware failed to read the docs.
	 */
	if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
				  SDVO_I2C_CMD_STATUS,
				  &status))
		goto log_fail;

	/* Re-poll while the device reports PENDING or TARGET_NOT_SPECIFIED. */
	while ((status == SDVO_CMD_STATUS_PENDING ||
		status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
		udelay(15);
		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
					  SDVO_I2C_CMD_STATUS,
					  &status))
			goto log_fail;
	}

	/* Log the decoded status name where one exists. */
	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
		DRM_DEBUG_KMS("(%s)", cmd_status_names[status]);
	else
		DRM_DEBUG_KMS("(??? %d)", status);

	if (status != SDVO_CMD_STATUS_SUCCESS)
		goto log_fail;

	/* Read the command response */
	for (i = 0; i < response_len; i++) {
		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
					  SDVO_I2C_RETURN_0 + i,
					  &((u8 *)response)[i]))
			goto log_fail;
		DRM_DEBUG_KMS(" %02X", ((u8 *)response)[i]);
	}
	DRM_DEBUG_KMS("\n");
	return true;

log_fail:
	DRM_DEBUG_KMS("... failed\n");
	return false;
}
/*
 * Pick the SDVO pixel multiplier for a mode's dot clock (kHz):
 * 4x below 50 MHz, 2x below 100 MHz, 1x otherwise.
 */
static int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
{
	if (mode->clock < 50000)
		return 4;
	if (mode->clock < 100000)
		return 2;
	return 1;
}
/* Route the device's DDC pins to the given internal bus. */
static bool psb_intel_sdvo_set_control_bus_switch(struct psb_intel_sdvo *psb_intel_sdvo,
					  u8 ddc_bus)
{
	/* This must be the immediately preceding write before the i2c xfer */
	u8 arg = ddc_bus;

	return psb_intel_sdvo_write_cmd(psb_intel_sdvo,
					SDVO_CMD_SET_CONTROL_BUS_SWITCH,
					&arg, 1);
}
/* Issue a command carrying @len argument bytes and wait for its status. */
static bool psb_intel_sdvo_set_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, const void *data, int len)
{
	return psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, data, len) &&
	       psb_intel_sdvo_read_response(psb_intel_sdvo, NULL, 0);
}
/* Issue an argument-less command and read @len bytes of reply into @value. */
static bool
psb_intel_sdvo_get_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, void *value, int len)
{
	return psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, NULL, 0) &&
	       psb_intel_sdvo_read_response(psb_intel_sdvo, value, len);
}
/*
 * Select input 0 as the target of subsequent input-timing commands
 * (the zeroed args struct selects the first input; this driver only
 * ever uses input 0 — see the "Assume always input 0" callers).
 */
static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdvo)
{
	struct psb_intel_sdvo_set_target_input_args targets = {0};
	return psb_intel_sdvo_set_value(psb_intel_sdvo,
					SDVO_CMD_SET_TARGET_INPUT,
					&targets, sizeof(targets));
}
/**
 * Return whether each input is trained.
 *
 * This function is making an assumption about the layout of the response,
 * which should be checked against the docs.
 */
static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_sdvo *psb_intel_sdvo, bool *input_1, bool *input_2)
{
	struct psb_intel_sdvo_get_trained_inputs_response response;
	bool ok;

	BUILD_BUG_ON(sizeof(response) != 1);
	ok = psb_intel_sdvo_get_value(psb_intel_sdvo,
				      SDVO_CMD_GET_TRAINED_INPUTS,
				      &response, sizeof(response));
	if (ok) {
		*input_1 = response.input0_trained;
		*input_2 = response.input1_trained;
	}
	return ok;
}
/* Program which outputs (bitmask of SDVO_OUTPUT_* bits) are driven; 0 disables all. */
static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_sdvo *psb_intel_sdvo,
					  u16 outputs)
{
	return psb_intel_sdvo_set_value(psb_intel_sdvo,
					SDVO_CMD_SET_ACTIVE_OUTPUTS,
					&outputs, sizeof(outputs));
}
/* Translate a DRM DPMS level into the matching SDVO encoder power state. */
static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_sdvo *psb_intel_sdvo,
					       int mode)
{
	u8 state;

	switch (mode) {
	case DRM_MODE_DPMS_STANDBY:
		state = SDVO_ENCODER_STATE_STANDBY;
		break;
	case DRM_MODE_DPMS_SUSPEND:
		state = SDVO_ENCODER_STATE_SUSPEND;
		break;
	case DRM_MODE_DPMS_OFF:
		state = SDVO_ENCODER_STATE_OFF;
		break;
	case DRM_MODE_DPMS_ON:
	default:
		/* Unknown levels fall back to ON, as before. */
		state = SDVO_ENCODER_STATE_ON;
		break;
	}

	return psb_intel_sdvo_set_value(psb_intel_sdvo,
					SDVO_CMD_SET_ENCODER_POWER_STATE,
					&state, sizeof(state));
}
/* Query the device's supported input pixel-clock range, returned in kHz. */
static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_sdvo *psb_intel_sdvo,
						   int *clock_min,
						   int *clock_max)
{
	struct psb_intel_sdvo_pixel_clock_range clocks;

	BUILD_BUG_ON(sizeof(clocks) != 4);
	if (psb_intel_sdvo_get_value(psb_intel_sdvo,
				 SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
				 &clocks, sizeof(clocks))) {
		/* Convert the values from units of 10 kHz to kHz. */
		*clock_min = clocks.min * 10;
		*clock_max = clocks.max * 10;
		return true;
	}

	return false;
}
/* Select which output(s) subsequent get/set timing commands apply to. */
static bool psb_intel_sdvo_set_target_output(struct psb_intel_sdvo *psb_intel_sdvo,
					 u16 outputs)
{
	return psb_intel_sdvo_set_value(psb_intel_sdvo,
					SDVO_CMD_SET_TARGET_OUTPUT,
					&outputs, sizeof(outputs));
}
/*
 * Send a two-part DTD: part1 through @cmd, part2 through the opcode that
 * immediately follows it (the PART1/PART2 opcodes are consecutive).
 */
static bool psb_intel_sdvo_set_timing(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
				  struct psb_intel_sdvo_dtd *dtd)
{
	if (!psb_intel_sdvo_set_value(psb_intel_sdvo, cmd,
				  &dtd->part1, sizeof(dtd->part1)))
		return false;

	return psb_intel_sdvo_set_value(psb_intel_sdvo, cmd + 1,
					&dtd->part2, sizeof(dtd->part2));
}
/* Program both parts of the input DTD for the currently targeted input. */
static bool psb_intel_sdvo_set_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
					 struct psb_intel_sdvo_dtd *dtd)
{
	return psb_intel_sdvo_set_timing(psb_intel_sdvo,
					 SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
}
/* Program both parts of the output DTD for the currently targeted output. */
static bool psb_intel_sdvo_set_output_timing(struct psb_intel_sdvo *psb_intel_sdvo,
					 struct psb_intel_sdvo_dtd *dtd)
{
	return psb_intel_sdvo_set_timing(psb_intel_sdvo,
					 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
}
/*
 * Ask the device to compute its preferred input timing for the given
 * clock (10 kHz units) and active size; the result is read back with
 * psb_intel_sdvo_get_preferred_input_timing().
 */
static bool
psb_intel_sdvo_create_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
					 uint16_t clock,
					 uint16_t width,
					 uint16_t height)
{
	struct psb_intel_sdvo_preferred_input_timing_args args;

	/* memset (not an initializer) so padding bytes on the wire are 0. */
	memset(&args, 0, sizeof(args));
	args.clock = clock;
	args.width = width;
	args.height = height;
	args.interlace = 0;

	/* Request scaling when an LVDS panel's fixed mode differs in size. */
	if (psb_intel_sdvo->is_lvds) {
		const struct drm_display_mode *fixed =
			psb_intel_sdvo->sdvo_lvds_fixed_mode;

		if (fixed->hdisplay != width || fixed->vdisplay != height)
			args.scaled = 1;
	}

	return psb_intel_sdvo_set_value(psb_intel_sdvo,
					SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
					&args, sizeof(args));
}
static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
struct psb_intel_sdvo_dtd *dtd)
{
BUILD_BUG_ON(sizeof(dtd->part1) != 8);
BUILD_BUG_ON(sizeof(dtd->part2) != 8);
return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
&dtd->part1, sizeof(dtd->part1)) &&
psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
&dtd->part2, sizeof(dtd->part2));
}
/* Program the SDVO clock rate multiplier (a SDVO_CLOCK_RATE_MULT_* value). */
static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_sdvo *psb_intel_sdvo, u8 val)
{
	return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
}
/*
 * Pack a drm_display_mode into the SDVO DTD wire format. Widths, blanks
 * and sync values are split into a low byte plus packed high nibbles/bits,
 * mirroring psb_intel_sdvo_get_mode_from_dtd() below.
 */
static void psb_intel_sdvo_get_dtd_from_mode(struct psb_intel_sdvo_dtd *dtd,
					 const struct drm_display_mode *mode)
{
	uint16_t width, height;
	uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
	uint16_t h_sync_offset, v_sync_offset;

	width = mode->crtc_hdisplay;
	height = mode->crtc_vdisplay;

	/* do some mode translations */
	h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
	h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;

	v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
	v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;

	h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
	v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;

	/* Clock is stored in 10 kHz units. */
	dtd->part1.clock = mode->clock / 10;
	dtd->part1.h_active = width & 0xff;
	dtd->part1.h_blank = h_blank_len & 0xff;
	/* High nibbles of active width / blank length share one byte. */
	dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
		((h_blank_len >> 8) & 0xf);
	dtd->part1.v_active = height & 0xff;
	dtd->part1.v_blank = v_blank_len & 0xff;
	dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
		((v_blank_len >> 8) & 0xf);

	dtd->part2.h_sync_off = h_sync_offset & 0xff;
	dtd->part2.h_sync_width = h_sync_len & 0xff;
	/* Low nibbles of the vertical sync offset and width share one byte. */
	dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
		(v_sync_len & 0xf);
	/* Remaining high bits of all four sync values packed together. */
	dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
		((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
		((v_sync_len & 0x30) >> 4);

	/* 0x18 base flags, plus positive-polarity bits from the mode. */
	dtd->part2.dtd_flags = 0x18;
	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
		dtd->part2.dtd_flags |= 0x2;
	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
		dtd->part2.dtd_flags |= 0x4;

	dtd->part2.sdvo_flags = 0;
	dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
	dtd->part2.reserved = 0;
}
/*
 * Unpack an SDVO DTD back into a drm_display_mode — the exact inverse of
 * psb_intel_sdvo_get_dtd_from_mode(): each field is rebuilt from its low
 * byte plus the packed high nibbles/bits.
 */
static void psb_intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
					 const struct psb_intel_sdvo_dtd *dtd)
{
	mode->hdisplay = dtd->part1.h_active;
	mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
	mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
	mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
	mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
	mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
	mode->htotal = mode->hdisplay + dtd->part1.h_blank;
	mode->htotal += (dtd->part1.h_high & 0xf) << 8;

	mode->vdisplay = dtd->part1.v_active;
	mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
	mode->vsync_start = mode->vdisplay;
	mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
	mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
	mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
	mode->vsync_end = mode->vsync_start +
		(dtd->part2.v_sync_off_width & 0xf);
	mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
	mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
	mode->vtotal += (dtd->part1.v_high & 0xf) << 8;

	/* Clock was stored in 10 kHz units. */
	mode->clock = dtd->part1.clock * 10;

	/* Recover sync polarities from the dtd_flags bits set above. */
	mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
	if (dtd->part2.dtd_flags & 0x2)
		mode->flags |= DRM_MODE_FLAG_PHSYNC;
	if (dtd->part2.dtd_flags & 0x4)
		mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
/*
 * Ask whether the device supports encode-mode selection (DVI/HDMI).
 * Only the command's success is used; the capability bits are discarded.
 */
static bool psb_intel_sdvo_check_supp_encode(struct psb_intel_sdvo *psb_intel_sdvo)
{
	struct psb_intel_sdvo_encode encode;

	BUILD_BUG_ON(sizeof(encode) != 2);
	return psb_intel_sdvo_get_value(psb_intel_sdvo,
				    SDVO_CMD_GET_SUPP_ENCODE,
				    &encode, sizeof(encode));
}
/* Select the encode mode (SDVO_ENCODE_DVI or SDVO_ENCODE_HDMI). */
static bool psb_intel_sdvo_set_encode(struct psb_intel_sdvo *psb_intel_sdvo,
				  uint8_t mode)
{
	return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
}
/* Select the colorimetry mode (e.g. SDVO_COLORIMETRY_RGB256 for HDMI). */
static bool psb_intel_sdvo_set_colorimetry(struct psb_intel_sdvo *psb_intel_sdvo,
				       uint8_t mode)
{
	return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
}
#if 0
/*
 * Debug helper: dump the device's HDMI info-frame buffers.
 *
 * NOTE(review): this is compiled out, and as written it would not build if
 * re-enabled — the body uses an 'encoder' variable that is never declared
 * (the parameter is named psb_intel_sdvo). Fix the naming before enabling.
 */
static void psb_intel_sdvo_dump_hdmi_buf(struct psb_intel_sdvo *psb_intel_sdvo)
{
	int i, j;
	uint8_t set_buf_index[2];
	uint8_t av_split;
	uint8_t buf_size;
	uint8_t buf[48];
	uint8_t *pos;

	psb_intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);

	for (i = 0; i <= av_split; i++) {
		set_buf_index[0] = i; set_buf_index[1] = 0;
		psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
				     set_buf_index, 2);
		psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
		psb_intel_sdvo_read_response(encoder, &buf_size, 1);

		pos = buf;
		for (j = 0; j <= buf_size; j += 8) {
			psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
					     NULL, 0);
			psb_intel_sdvo_read_response(encoder, pos, 8);
			pos += 8;
		}
	}
}
#endif
/*
 * Upload an AVI infoframe to the device's HDMI buffer. Currently stubbed
 * out: it logs and returns false, and the real implementation below is
 * compiled out with #if 0 until HDMI support is completed.
 */
static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sdvo)
{
	DRM_INFO("HDMI is not supported yet");

	return false;
#if 0
	struct dip_infoframe avi_if = {
		.type = DIP_TYPE_AVI,
		.ver = DIP_VERSION_AVI,
		.len = DIP_LEN_AVI,
	};
	uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
	uint8_t set_buf_index[2] = { 1, 0 };
	uint64_t *data = (uint64_t *)&avi_if;
	unsigned i;

	intel_dip_infoframe_csum(&avi_if);

	if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
				  SDVO_CMD_SET_HBUF_INDEX,
				  set_buf_index, 2))
		return false;

	/* Push the frame payload 8 bytes at a time. */
	for (i = 0; i < sizeof(avi_if); i += 8) {
		if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
					  SDVO_CMD_SET_HBUF_DATA,
					  data, 8))
			return false;
		data++;
	}

	return psb_intel_sdvo_set_value(psb_intel_sdvo,
					SDVO_CMD_SET_HBUF_TXRATE,
					&tx_rate, 1);
#endif
}
/* Program the current TV format as a single bit in the 6-byte format mask. */
static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
{
	struct psb_intel_sdvo_tv_format format;
	uint32_t format_map = 1 << psb_intel_sdvo->tv_format_index;

	BUILD_BUG_ON(sizeof(format) != 6);

	/* Zero all six bytes, then copy in the 32-bit format bitmask. */
	memset(&format, 0, sizeof(format));
	memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));

	return psb_intel_sdvo_set_value(psb_intel_sdvo,
					SDVO_CMD_SET_TV_FORMAT,
					&format, sizeof(format));
}
/* Convert @mode to a DTD and program it as the attached output's timing. */
static bool
psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
					const struct drm_display_mode *mode)
{
	struct psb_intel_sdvo_dtd output_dtd;

	/* Aim the timing commands at the currently attached output first. */
	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
					  psb_intel_sdvo->attached_output))
		return false;

	psb_intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
	return psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &output_dtd);
}
/*
 * Ask the device for its preferred input timing for @mode and write the
 * result into both psb_intel_sdvo->input_dtd and @adjusted_mode.
 */
static bool
psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
					const struct drm_display_mode *mode,
					struct drm_display_mode *adjusted_mode)
{
	/* Reset the input timing to the screen. Assume always input 0. */
	if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo) ||
	    !psb_intel_sdvo_create_preferred_input_timing(psb_intel_sdvo,
							  mode->clock / 10,
							  mode->hdisplay,
							  mode->vdisplay) ||
	    !psb_intel_sdvo_get_preferred_input_timing(psb_intel_sdvo,
						       &psb_intel_sdvo->input_dtd))
		return false;

	psb_intel_sdvo_get_mode_from_dtd(adjusted_mode, &psb_intel_sdvo->input_dtd);
	drm_mode_set_crtcinfo(adjusted_mode, 0);
	return true;
}
/*
 * Encoder mode_fixup: for TV and LVDS outputs, negotiate the device's
 * preferred input timing (stored in adjusted_mode), then record the pixel
 * multiplier for the CRTC code.
 */
static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
				  const struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
	int multiplier;

	/* We need to construct preferred input timings based on our
	 * output timings. To do that, we have to set the output
	 * timings, even though this isn't really the right place in
	 * the sequence to do it. Oh well.
	 */
	if (psb_intel_sdvo->is_tv) {
		if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo, mode))
			return false;

		(void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
							     mode,
							     adjusted_mode);
	} else if (psb_intel_sdvo->is_lvds) {
		/* LVDS always drives the panel's fixed mode on the output side. */
		if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo,
							     psb_intel_sdvo->sdvo_lvds_fixed_mode))
			return false;

		(void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
							     mode,
							     adjusted_mode);
	}

	/* Make the CRTC code factor in the SDVO pixel multiplier. The
	 * SDVO device will factor out the multiplier during mode_set.
	 */
	multiplier = psb_intel_sdvo_get_pixel_multiplier(adjusted_mode);
	psb_intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);

	return true;
}
/*
 * Encoder mode_set: program the in/out map, output and input timings,
 * encode mode, TV format and clock multiplier on the SDVO device, then
 * build and write the SDVOB/SDVOC control register value.
 */
static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
				struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
	u32 sdvox;
	struct psb_intel_sdvo_in_out_map in_out;
	struct psb_intel_sdvo_dtd input_dtd;
	int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
	int rate;
	int need_aux = IS_MRST(dev) ? 1 : 0;

	if (!mode)
		return;

	/* First, set the input mapping for the first input to our controlled
	 * output. This is only correct if we're a single-input device, in
	 * which case the first input is the output from the appropriate SDVO
	 * channel on the motherboard.  In a two-input device, the first input
	 * will be SDVOB and the second SDVOC.
	 */
	in_out.in0 = psb_intel_sdvo->attached_output;
	in_out.in1 = 0;

	psb_intel_sdvo_set_value(psb_intel_sdvo,
			     SDVO_CMD_SET_IN_OUT_MAP,
			     &in_out, sizeof(in_out));

	/* Set the output timings to the screen */
	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
					  psb_intel_sdvo->attached_output))
		return;

	/* We have tried to get input timing in mode_fixup, and filled into
	 * adjusted_mode.
	 */
	if (psb_intel_sdvo->is_tv || psb_intel_sdvo->is_lvds) {
		input_dtd = psb_intel_sdvo->input_dtd;
	} else {
		/* Set the output timing to the screen */
		if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
						  psb_intel_sdvo->attached_output))
			return;

		psb_intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
		(void) psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &input_dtd);
	}

	/* Set the input timing to the screen. Assume always input 0. */
	if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
		return;

	/* Choose HDMI vs DVI encoding based on the detected monitor. */
	if (psb_intel_sdvo->has_hdmi_monitor) {
		psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_HDMI);
		psb_intel_sdvo_set_colorimetry(psb_intel_sdvo,
					   SDVO_COLORIMETRY_RGB256);
		psb_intel_sdvo_set_avi_infoframe(psb_intel_sdvo);
	} else
		psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_DVI);

	if (psb_intel_sdvo->is_tv &&
	    !psb_intel_sdvo_set_tv_format(psb_intel_sdvo))
		return;

	(void) psb_intel_sdvo_set_input_timing(psb_intel_sdvo, &input_dtd);

	/* Map the pixel multiplier from mode_fixup onto the device setting. */
	switch (pixel_multiplier) {
	default:
	case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
	case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
	case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
	}
	if (!psb_intel_sdvo_set_clock_rate_mult(psb_intel_sdvo, rate))
		return;

	/* Set the SDVO control regs. */
	if (need_aux)
		sdvox = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
	else
		sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);

	/* Keep only the bits we must preserve for the port being driven. */
	switch (psb_intel_sdvo->sdvo_reg) {
	case SDVOB:
		sdvox &= SDVOB_PRESERVE_MASK;
		break;
	case SDVOC:
		sdvox &= SDVOC_PRESERVE_MASK;
		break;
	}
	sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;

	if (gma_crtc->pipe == 1)
		sdvox |= SDVO_PIPE_B_SELECT;
	if (psb_intel_sdvo->has_hdmi_audio)
		sdvox |= SDVO_AUDIO_ENABLE;

	/* FIXME: Check if this is needed for PSB
	sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
	*/

	/* The device asked for a stall via the preferred-timing response. */
	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
		sdvox |= SDVO_STALL_SELECT;
	psb_intel_sdvo_write_sdvox(psb_intel_sdvo, sdvox);
}
/*
 * Encoder DPMS: on power-down, deactivate the outputs and clear
 * SDVO_ENABLE; on power-up, set SDVO_ENABLE, wait two vblanks for input
 * training, then activate the attached output.
 */
static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
	u32 temp;
	int i;
	int need_aux = IS_MRST(dev) ? 1 : 0;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		DRM_DEBUG("DPMS_ON");
		break;
	case DRM_MODE_DPMS_OFF:
		DRM_DEBUG("DPMS_OFF");
		break;
	default:
		DRM_DEBUG("DPMS: %d", mode);
	}

	if (mode != DRM_MODE_DPMS_ON) {
		psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, 0);
		/* Deliberately disabled: encoder power-state commands are not
		 * used on this hardware. */
		if (0)
			psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);

		if (mode == DRM_MODE_DPMS_OFF) {
			if (need_aux)
				temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
			else
				temp = REG_READ(psb_intel_sdvo->sdvo_reg);

			if ((temp & SDVO_ENABLE) != 0) {
				psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
			}
		}
	} else {
		bool input1, input2;
		bool success;

		if (need_aux)
			temp = REG_READ_AUX(psb_intel_sdvo->sdvo_reg);
		else
			temp = REG_READ(psb_intel_sdvo->sdvo_reg);

		if ((temp & SDVO_ENABLE) == 0)
			psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);

		/* Give the device time to train its inputs. */
		for (i = 0; i < 2; i++)
			gma_wait_for_vblank(dev);

		/* get_trained_inputs() returns a bool; the old code stored it
		 * in a u8 and compared against SDVO_CMD_STATUS_SUCCESS, which
		 * only worked because that status happens to equal 1. */
		success = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo,
							    &input1, &input2);

		/* Warn if the device reported failure to sync.
		 * A lot of SDVO devices fail to notify of sync, but it's
		 * a given it the status is a success, we succeeded.
		 */
		if (success && !input1) {
			DRM_DEBUG_KMS("First %s output reported failure to "
					"sync\n", SDVO_NAME(psb_intel_sdvo));
		}

		if (0)
			psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
		psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, psb_intel_sdvo->attached_output);
	}
}
/*
 * Validate a display mode against the encoder's pixel-clock range and,
 * for LVDS, against the fixed panel mode's visible area.
 */
static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
				 struct drm_display_mode *mode)
{
	struct psb_intel_sdvo *sdvo = intel_attached_sdvo(connector);

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	if (mode->clock < sdvo->pixel_clock_min)
		return MODE_CLOCK_LOW;
	if (mode->clock > sdvo->pixel_clock_max)
		return MODE_CLOCK_HIGH;

	if (sdvo->is_lvds) {
		/* The panel cannot display more than its native size. */
		struct drm_display_mode *fixed = sdvo->sdvo_lvds_fixed_mode;

		if (mode->hdisplay > fixed->hdisplay ||
		    mode->vdisplay > fixed->vdisplay)
			return MODE_PANEL;
	}

	return MODE_OK;
}
/*
 * Fetch the 8-byte SDVO device capability block into *caps and dump it
 * for debugging.  Returns false if the GET_DEVICE_CAPS command fails.
 */
static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdvo, struct psb_intel_sdvo_caps *caps)
{
	/* The wire format of the caps block is exactly 8 bytes. */
	BUILD_BUG_ON(sizeof(*caps) != 8);
	if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
				  SDVO_CMD_GET_DEVICE_CAPS,
				  caps, sizeof(*caps)))
		return false;

	/* Raw field dump, useful during hardware bring-up. */
	DRM_DEBUG_KMS("SDVO capabilities:\n"
		      "  vendor_id: %d\n"
		      "  device_id: %d\n"
		      "  device_rev_id: %d\n"
		      "  sdvo_version_major: %d\n"
		      "  sdvo_version_minor: %d\n"
		      "  sdvo_inputs_mask: %d\n"
		      "  smooth_scaling: %d\n"
		      "  sharp_scaling: %d\n"
		      "  up_scaling: %d\n"
		      "  down_scaling: %d\n"
		      "  stall_support: %d\n"
		      "  output_flags: %d\n",
		      caps->vendor_id,
		      caps->device_id,
		      caps->device_rev_id,
		      caps->sdvo_version_major,
		      caps->sdvo_version_minor,
		      caps->sdvo_inputs_mask,
		      caps->smooth_scaling,
		      caps->sharp_scaling,
		      caps->up_scaling,
		      caps->down_scaling,
		      caps->stall_support,
		      caps->output_flags);

	return true;
}
/*
 * Dead code, compiled out with #if 0: connector lookup and hotplug
 * helpers inherited from the i915-derived SDVO code.  Kept for
 * reference only; nothing in this driver calls them.
 */
#if 0
struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
{
	struct drm_connector *connector = NULL;
	struct psb_intel_sdvo *iout = NULL;
	struct psb_intel_sdvo *sdvo;

	/* find the sdvo connector */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		iout = to_psb_intel_sdvo(connector);

		if (iout->type != INTEL_OUTPUT_SDVO)
			continue;

		sdvo = iout->dev_priv;

		if (sdvo->sdvo_reg == SDVOB && sdvoB)
			return connector;

		if (sdvo->sdvo_reg == SDVOC && !sdvoB)
			return connector;
	}

	return NULL;
}

int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
{
	u8 response[2];
	u8 status;
	struct psb_intel_sdvo *psb_intel_sdvo;
	DRM_DEBUG_KMS("\n");

	if (!connector)
		return 0;

	psb_intel_sdvo = to_psb_intel_sdvo(connector);

	return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
					&response, 2) && response[0];
}

void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
{
	u8 response[2];
	u8 status;
	struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);

	psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
	psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);

	if (on) {
		psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
		status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);

		psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
	} else {
		response[0] = 0;
		response[1] = 0;
		psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
	}

	psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
	psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
}
#endif
/*
 * Report whether this encoder drives more than one type of output.
 *
 * Only the low nibble of output_flags is considered here.  "caps & (caps - 1)"
 * clears the lowest set bit, so the result is non-zero exactly when at least
 * two output bits are set.  The previous expression, "caps & -caps", isolated
 * the lowest set bit and was therefore true whenever *any* output existed,
 * contradicting the stated intent ("more than one type of output?") and the
 * i915 ancestor of this code, which uses hweight16(output_flags) > 1.
 */
static bool
psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
{
	/* Is there more than one type of output? */
	int caps = psb_intel_sdvo->caps.output_flags & 0xf;

	return caps & (caps - 1);
}
/* Read the sink's EDID over the SDVO encoder's own DDC channel. */
static struct edid *
psb_intel_sdvo_get_edid(struct drm_connector *connector)
{
	struct psb_intel_sdvo *encoder = intel_attached_sdvo(connector);

	return drm_get_edid(connector, &encoder->ddc);
}
/* Mac mini hack -- use the same DDC as the analog connector */
static struct edid *
psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
	/* Read the EDID over the CRT DDC pin instead of the SDVO channel. */
	struct drm_psb_private *dev_priv = connector->dev->dev_private;
	struct i2c_adapter *adapter =
		&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter;

	return drm_get_edid(connector, adapter);
}
/*
 * Detect a digital (TMDS/HDMI) sink by EDID.
 *
 * Tries the encoder's own DDC bus first; on a multifunction encoder it
 * then walks the lower-numbered DDC buses, and finally falls back to the
 * analog (CRT) DDC pin.  Connected is only reported when the EDID marks
 * the sink as digital; an analog EDID on the shared bus means this
 * connector is disconnected.
 */
static enum drm_connector_status
psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
{
	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
	enum drm_connector_status status;
	struct edid *edid;

	edid = psb_intel_sdvo_get_edid(connector);

	if (edid == NULL && psb_intel_sdvo_multifunc_encoder(psb_intel_sdvo)) {
		u8 ddc, saved_ddc = psb_intel_sdvo->ddc_bus;

		/*
		 * Don't use the 1 as the argument of DDC bus switch to get
		 * the EDID. It is used for SDVO SPD ROM.
		 */
		for (ddc = psb_intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
			psb_intel_sdvo->ddc_bus = ddc;
			edid = psb_intel_sdvo_get_edid(connector);
			if (edid)
				break;
		}
		/*
		 * If we found the EDID on the other bus,
		 * assume that is the correct DDC bus.
		 */
		if (edid == NULL)
			psb_intel_sdvo->ddc_bus = saved_ddc;
	}

	/*
	 * When there is no edid and no monitor is connected with VGA
	 * port, try to use the CRT ddc to read the EDID for DVI-connector.
	 */
	if (edid == NULL)
		edid = psb_intel_sdvo_get_analog_edid(connector);

	status = connector_status_unknown;
	if (edid != NULL) {
		/* DDC bus is shared, match EDID to connector type */
		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
			status = connector_status_connected;
			if (psb_intel_sdvo->is_hdmi) {
				/* Cache HDMI/audio capability for mode_set. */
				psb_intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
				psb_intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
			}
		} else
			status = connector_status_disconnected;
		kfree(edid);
	}

	if (status == connector_status_connected) {
		struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
		/* A user-forced audio setting overrides EDID detection. */
		if (psb_intel_sdvo_connector->force_audio)
			psb_intel_sdvo->has_hdmi_audio = psb_intel_sdvo_connector->force_audio > 0;
	}

	return status;
}
/*
 * Connector detect hook.
 *
 * Queries the encoder for its attached-displays bitmask, cross-checks it
 * against this connector's output flag, and for TMDS connectors defers to
 * the EDID-based HDMI sink detection.  On success it also refreshes the
 * encoder's is_tv/is_lvds bookkeeping from the response.
 */
static enum drm_connector_status
psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
{
	uint16_t response;
	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
	enum drm_connector_status ret;

	if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
				      SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
		return connector_status_unknown;

	/* add 30ms delay when the output type might be TV */
	/* NOTE(review): mdelay() busy-waits; msleep() would be friendlier
	 * here if this path is always process context -- confirm. */
	if (psb_intel_sdvo->caps.output_flags &
	    (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
		mdelay(30);

	if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2))
		return connector_status_unknown;

	DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
		      response & 0xff, response >> 8,
		      psb_intel_sdvo_connector->output_flag);

	if (response == 0)
		return connector_status_disconnected;

	psb_intel_sdvo->attached_output = response;

	/* Reset cached HDMI state; re-derived below when connected. */
	psb_intel_sdvo->has_hdmi_monitor = false;
	psb_intel_sdvo->has_hdmi_audio = false;

	if ((psb_intel_sdvo_connector->output_flag & response) == 0)
		ret = connector_status_disconnected;
	else if (IS_TMDS(psb_intel_sdvo_connector))
		ret = psb_intel_sdvo_hdmi_sink_detect(connector);
	else {
		struct edid *edid;

		/* if we have an edid check it matches the connection */
		edid = psb_intel_sdvo_get_edid(connector);
		if (edid == NULL)
			edid = psb_intel_sdvo_get_analog_edid(connector);
		if (edid != NULL) {
			/* A digital EDID on a non-TMDS connector means the
			 * shared DDC answered for a different output. */
			if (edid->input & DRM_EDID_INPUT_DIGITAL)
				ret = connector_status_disconnected;
			else
				ret = connector_status_connected;
			kfree(edid);
		} else
			ret = connector_status_connected;
	}

	/* May update encoder flag for like clock for SDVO TV, etc.*/
	if (ret == connector_status_connected) {
		psb_intel_sdvo->is_tv = false;
		psb_intel_sdvo->is_lvds = false;
		psb_intel_sdvo->base.needs_tv_clock = false;

		if (response & SDVO_TV_MASK) {
			psb_intel_sdvo->is_tv = true;
			psb_intel_sdvo->base.needs_tv_clock = true;
		}
		if (response & SDVO_LVDS_MASK)
			psb_intel_sdvo->is_lvds = psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL;
	}

	return ret;
}
/* Probe display modes over DDC, falling back to the analog (CRT) DDC. */
static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
{
	/* set the bus switch and get the modes */
	struct edid *edid = psb_intel_sdvo_get_edid(connector);

	/*
	 * Mac mini hack. On this device, the DVI-I connector shares one DDC
	 * link between analog and digital outputs. So, if the regular SDVO
	 * DDC fails, check to see if the analog output is disconnected, in
	 * which case we'll look there for the digital DDC data.
	 */
	if (!edid)
		edid = psb_intel_sdvo_get_analog_edid(connector);

	if (edid) {
		struct psb_intel_sdvo_connector *sdvo_connector =
			to_psb_intel_sdvo_connector(connector);
		bool monitor_is_digital =
			!!(edid->input & DRM_EDID_INPUT_DIGITAL);
		bool connector_is_digital = !!IS_TMDS(sdvo_connector);

		/* The DDC bus is shared: only accept a matching EDID. */
		if (connector_is_digital == monitor_is_digital) {
			drm_mode_connector_update_edid_property(connector, edid);
			drm_add_edid_modes(connector, edid);
		}

		kfree(edid);
	}
}
/*
 * Set of SDVO TV modes.
 * Note! This is in reply order (see loop in get_tv_modes): bit i of the
 * GET_SDTV_RESOLUTION_SUPPORT reply selects sdvo_tv_modes[i].
 * XXX: all 60Hz refresh?
 */
static const struct drm_display_mode sdvo_tv_modes[] = {
	{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
		   416, 0, 200, 201, 232, 233, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
		   416, 0, 240, 241, 272, 273, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
		   496, 0, 300, 301, 332, 333, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
		   736, 0, 350, 351, 382, 383, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
		   736, 0, 400, 401, 432, 433, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
		   736, 0, 480, 481, 512, 513, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
		   800, 0, 480, 481, 512, 513, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
		   800, 0, 576, 577, 608, 609, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
		   816, 0, 350, 351, 382, 383, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
		   816, 0, 400, 401, 432, 433, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
		   816, 0, 480, 481, 512, 513, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
		   816, 0, 540, 541, 572, 573, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
		   816, 0, 576, 577, 608, 609, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
		   864, 0, 576, 577, 608, 609, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
		   896, 0, 600, 601, 632, 633, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
		   928, 0, 624, 625, 656, 657, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
		   1016, 0, 766, 767, 798, 799, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
		   1120, 0, 768, 769, 800, 801, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
		   1376, 0, 1024, 1025, 1056, 1057, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
/*
 * Ask the encoder which SDTV input resolutions the currently selected
 * TV format supports, and register the corresponding probed modes.
 */
static void psb_intel_sdvo_get_tv_modes(struct drm_connector *connector)
{
	struct psb_intel_sdvo *sdvo = intel_attached_sdvo(connector);
	struct psb_intel_sdvo_sdtv_resolution_request tv_res;
	uint32_t reply = 0, format_map = 0;
	int i;

	/* Build the request from the active TV format's bit. */
	format_map = 1 << sdvo->tv_format_index;
	memcpy(&tv_res, &format_map,
	       min(sizeof(format_map), sizeof(struct psb_intel_sdvo_sdtv_resolution_request)));

	if (!psb_intel_sdvo_set_target_output(sdvo, sdvo->attached_output))
		return;

	BUILD_BUG_ON(sizeof(tv_res) != 3);
	if (!psb_intel_sdvo_write_cmd(sdvo,
				      SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
				      &tv_res, sizeof(tv_res)))
		return;
	if (!psb_intel_sdvo_read_response(sdvo, &reply, 3))
		return;

	/* Each set bit in the reply maps to one sdvo_tv_modes entry. */
	for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++) {
		struct drm_display_mode *nmode;

		if (!(reply & (1 << i)))
			continue;

		nmode = drm_mode_duplicate(connector->dev, &sdvo_tv_modes[i]);
		if (nmode)
			drm_mode_probed_add(connector, nmode);
	}
}
/*
 * Probe LVDS modes: DDC first, then the VBT-provided mode as fallback,
 * and record the first preferred mode as the fixed panel mode.
 */
static void psb_intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
	struct psb_intel_sdvo *sdvo = intel_attached_sdvo(connector);
	struct drm_psb_private *dev_priv = connector->dev->dev_private;
	struct drm_display_mode *newmode;

	/*
	 * Attempt to get the mode list from DDC.
	 * Assume that the preferred modes are
	 * arranged in priority order.
	 */
	psb_intel_ddc_get_modes(connector, sdvo->i2c);

	/* Fall back to the VBT mode only when DDC produced nothing. */
	if (list_empty(&connector->probed_modes) &&
	    dev_priv->sdvo_lvds_vbt_mode != NULL) {
		newmode = drm_mode_duplicate(connector->dev,
					     dev_priv->sdvo_lvds_vbt_mode);
		if (newmode != NULL) {
			/* Guarantee the mode is preferred */
			newmode->type = (DRM_MODE_TYPE_PREFERRED |
					 DRM_MODE_TYPE_DRIVER);
			drm_mode_probed_add(connector, newmode);
		}
	}

	/* The first preferred mode becomes the fixed panel mode. */
	list_for_each_entry(newmode, &connector->probed_modes, head) {
		if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
			sdvo->sdvo_lvds_fixed_mode =
				drm_mode_duplicate(connector->dev, newmode);
			drm_mode_set_crtcinfo(sdvo->sdvo_lvds_fixed_mode, 0);
			sdvo->is_lvds = true;
			break;
		}
	}
}
/* Dispatch mode probing by connector class: TV, LVDS, or plain DDC. */
static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
{
	struct psb_intel_sdvo_connector *sdvo_connector =
		to_psb_intel_sdvo_connector(connector);

	if (IS_TV(sdvo_connector))
		psb_intel_sdvo_get_tv_modes(connector);
	else if (IS_LVDS(sdvo_connector))
		psb_intel_sdvo_get_lvds_modes(connector);
	else
		psb_intel_sdvo_get_ddc_modes(connector);

	/* Non-zero when at least one mode was found. */
	return !list_empty(&connector->probed_modes);
}
/*
 * Connector destroy hook: unregister from userspace, release DRM core
 * state, then free the allocation that embeds the connector.
 */
static void psb_intel_sdvo_destroy(struct drm_connector *connector)
{
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
/*
 * Determine from the sink's EDID whether HDMI audio is supported.
 *
 * Only meaningful when the encoder is in HDMI mode; otherwise returns
 * false without touching the DDC bus.
 */
static bool psb_intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
{
	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
	struct edid *edid;
	bool has_audio = false;

	if (!psb_intel_sdvo->is_hdmi)
		return false;

	edid = psb_intel_sdvo_get_edid(connector);
	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
		has_audio = drm_detect_monitor_audio(edid);
	/*
	 * drm_get_edid() allocates the EDID buffer; the original code
	 * returned without freeing it, leaking ~128+ bytes per call.
	 * kfree(NULL) is a no-op, so no guard is needed.
	 */
	kfree(edid);

	return has_audio;
}
/*
 * Connector set_property hook.
 *
 * Handles the force-audio and broadcast-RGB properties locally, the TV
 * format property by index, the four margin properties (which are kept
 * pairwise symmetric and converted into overscan commands), and the
 * generic TV/LVDS enhancement properties via the CHECK_PROPERTY macro.
 * Any accepted change that affects output triggers a full mode set on
 * the attached CRTC.  Returns 0 on success, -EINVAL for unknown
 * properties or out-of-range values, -EIO on device command failure.
 */
static int
psb_intel_sdvo_set_property(struct drm_connector *connector,
			struct drm_property *property,
			uint64_t val)
{
	struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
	struct drm_psb_private *dev_priv = connector->dev->dev_private;
	uint16_t temp_value;
	uint8_t cmd;
	int ret;

	/* Mirror the new value into the DRM property store first. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == psb_intel_sdvo_connector->force_audio)
			return 0;

		psb_intel_sdvo_connector->force_audio = i;

		/* 0 = auto-detect from EDID, >0 = force on, <0 = force off */
		if (i == 0)
			has_audio = psb_intel_sdvo_detect_hdmi_audio(connector);
		else
			has_audio = i > 0;

		if (has_audio == psb_intel_sdvo->has_hdmi_audio)
			return 0;

		psb_intel_sdvo->has_hdmi_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		if (val == !!psb_intel_sdvo->color_range)
			return 0;

		psb_intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
		goto done;
	}

	/* Generic enhancement property: reject no-op and out-of-range
	 * values, cache the new setting, then send it to the device. */
#define CHECK_PROPERTY(name, NAME) \
	if (psb_intel_sdvo_connector->name == property) { \
		if (psb_intel_sdvo_connector->cur_##name == temp_value) return 0; \
		if (psb_intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
		cmd = SDVO_CMD_SET_##NAME; \
		psb_intel_sdvo_connector->cur_##name = temp_value; \
		goto set_value; \
	}

	if (property == psb_intel_sdvo_connector->tv_format) {
		if (val >= TV_FORMAT_NUM)
			return -EINVAL;

		if (psb_intel_sdvo->tv_format_index ==
		    psb_intel_sdvo_connector->tv_format_supported[val])
			return 0;

		psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[val];
		goto done;
	} else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
		temp_value = val;
		/* Margins are mirrored: setting left also sets right (and
		 * top also sets bottom); the overscan value sent to the
		 * device is the remaining scan width/height. */
		if (psb_intel_sdvo_connector->left == property) {
			drm_object_property_set_value(&connector->base,
							 psb_intel_sdvo_connector->right, val);
			if (psb_intel_sdvo_connector->left_margin == temp_value)
				return 0;

			psb_intel_sdvo_connector->left_margin = temp_value;
			psb_intel_sdvo_connector->right_margin = temp_value;
			temp_value = psb_intel_sdvo_connector->max_hscan -
				psb_intel_sdvo_connector->left_margin;
			cmd = SDVO_CMD_SET_OVERSCAN_H;
			goto set_value;
		} else if (psb_intel_sdvo_connector->right == property) {
			drm_object_property_set_value(&connector->base,
							 psb_intel_sdvo_connector->left, val);
			if (psb_intel_sdvo_connector->right_margin == temp_value)
				return 0;

			psb_intel_sdvo_connector->left_margin = temp_value;
			psb_intel_sdvo_connector->right_margin = temp_value;
			temp_value = psb_intel_sdvo_connector->max_hscan -
				psb_intel_sdvo_connector->left_margin;
			cmd = SDVO_CMD_SET_OVERSCAN_H;
			goto set_value;
		} else if (psb_intel_sdvo_connector->top == property) {
			drm_object_property_set_value(&connector->base,
							 psb_intel_sdvo_connector->bottom, val);
			if (psb_intel_sdvo_connector->top_margin == temp_value)
				return 0;

			psb_intel_sdvo_connector->top_margin = temp_value;
			psb_intel_sdvo_connector->bottom_margin = temp_value;
			temp_value = psb_intel_sdvo_connector->max_vscan -
				psb_intel_sdvo_connector->top_margin;
			cmd = SDVO_CMD_SET_OVERSCAN_V;
			goto set_value;
		} else if (psb_intel_sdvo_connector->bottom == property) {
			drm_object_property_set_value(&connector->base,
							 psb_intel_sdvo_connector->top, val);
			if (psb_intel_sdvo_connector->bottom_margin == temp_value)
				return 0;

			psb_intel_sdvo_connector->top_margin = temp_value;
			psb_intel_sdvo_connector->bottom_margin = temp_value;
			temp_value = psb_intel_sdvo_connector->max_vscan -
				psb_intel_sdvo_connector->top_margin;
			cmd = SDVO_CMD_SET_OVERSCAN_V;
			goto set_value;
		}
		CHECK_PROPERTY(hpos, HPOS)
		CHECK_PROPERTY(vpos, VPOS)
		CHECK_PROPERTY(saturation, SATURATION)
		CHECK_PROPERTY(contrast, CONTRAST)
		CHECK_PROPERTY(hue, HUE)
		CHECK_PROPERTY(brightness, BRIGHTNESS)
		CHECK_PROPERTY(sharpness, SHARPNESS)
		CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
		CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
		CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
		CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
		CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
		CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
	}

	return -EINVAL; /* unknown property */

set_value:
	if (!psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &temp_value, 2))
		return -EIO;
	/* fall through to re-run the mode set with the new setting */

done:
	if (psb_intel_sdvo->base.base.crtc) {
		struct drm_crtc *crtc = psb_intel_sdvo->base.base.crtc;
		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
					 crtc->y, crtc->primary->fb);
	}

	return 0;
#undef CHECK_PROPERTY
}
/* Suspend hook: snapshot the SDVO port register for later restore. */
static void psb_intel_sdvo_save(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;	/* used by REG_READ() */
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
	struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(&gma_encoder->base);

	sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg);
}
/*
 * Resume hook: write back the saved SDVO port register and, if the
 * connector was in use, re-run a full mode set to retrain the link.
 */
static void psb_intel_sdvo_restore(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;	/* used by REG_WRITE() */
	struct drm_encoder *encoder = &gma_attached_encoder(connector)->base;
	struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);
	struct drm_crtc *crtc = encoder->crtc;

	REG_WRITE(sdvo->sdvo_reg, sdvo->saveSDVO);

	/* Force a full mode set on the crtc. We're supposed to have the
	   mode_config lock already. */
	if (connector->status == connector_status_connected)
		drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
					 NULL);
}
/* Encoder helpers: standard DRM modeset hooks for the SDVO encoder. */
static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
	.dpms = psb_intel_sdvo_dpms,
	.mode_fixup = psb_intel_sdvo_mode_fixup,
	.prepare = gma_encoder_prepare,
	.mode_set = psb_intel_sdvo_mode_set,
	.commit = gma_encoder_commit,
};

/* Connector ops: detection, properties, and suspend/resume save state. */
static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.save = psb_intel_sdvo_save,
	.restore = psb_intel_sdvo_restore,
	.detect = psb_intel_sdvo_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = psb_intel_sdvo_set_property,
	.destroy = psb_intel_sdvo_destroy,
};

/* Connector helpers: mode probing, validation, and encoder lookup. */
static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
	.get_modes = psb_intel_sdvo_get_modes,
	.mode_valid = psb_intel_sdvo_mode_valid,
	.best_encoder = gma_best_encoder,
};
/*
 * Encoder destroy hook: release the cached LVDS fixed mode and the DDC
 * adapter before the generic encoder teardown.
 */
static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
	struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);

	if (sdvo->sdvo_lvds_fixed_mode != NULL)
		drm_mode_destroy(encoder->dev, sdvo->sdvo_lvds_fixed_mode);

	i2c_del_adapter(&sdvo->ddc);
	gma_encoder_destroy(encoder);
}
/* Encoder vtable: only the destroy hook is needed. */
static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
	.destroy = psb_intel_sdvo_enc_destroy,
};
/*
 * Pick a DDC bus when the VBT did not provide a mapping.  Currently a
 * hard-coded fallback; the priority-based guess below is compiled out.
 */
static void
psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
{
	/* FIXME: At the moment, ddc_bus = 2 is the only thing that works.
	 * We need to figure out if this is true for all available poulsbo
	 * hardware, or if we need to fiddle with the guessing code above.
	 * The problem might go away if we can parse sdvo mappings from bios */
	sdvo->ddc_bus = 2;

#if 0
	uint16_t mask = 0;
	unsigned int num_bits;

	/* Make a mask of outputs less than or equal to our own priority in the
	 * list.  The cases deliberately fall through to accumulate all
	 * lower-priority bits.
	 */
	switch (sdvo->controlled_output) {
	case SDVO_OUTPUT_LVDS1:
		mask |= SDVO_OUTPUT_LVDS1;
		/* fall through */
	case SDVO_OUTPUT_LVDS0:
		mask |= SDVO_OUTPUT_LVDS0;
		/* fall through */
	case SDVO_OUTPUT_TMDS1:
		mask |= SDVO_OUTPUT_TMDS1;
		/* fall through */
	case SDVO_OUTPUT_TMDS0:
		mask |= SDVO_OUTPUT_TMDS0;
		/* fall through */
	case SDVO_OUTPUT_RGB1:
		mask |= SDVO_OUTPUT_RGB1;
		/* fall through */
	case SDVO_OUTPUT_RGB0:
		mask |= SDVO_OUTPUT_RGB0;
		break;
	}

	/* Count bits to find what number we are in the priority list. */
	mask &= sdvo->caps.output_flags;
	num_bits = hweight16(mask);
	/* If more than 3 outputs, default to DDC bus 3 for now. */
	if (num_bits > 3)
		num_bits = 3;

	/* Corresponds to SDVO_CONTROL_BUS_DDCx */
	sdvo->ddc_bus = 1 << num_bits;
#endif
}
/**
 * Choose the appropriate DDC bus for control bus switch command for this
 * SDVO output based on the controlled output.
 *
 * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
 * outputs, then LVDS outputs.
 */
static void
psb_intel_sdvo_select_ddc_bus(struct drm_psb_private *dev_priv,
			  struct psb_intel_sdvo *sdvo, u32 reg)
{
	/* SDVOB uses mapping slot 0, SDVOC slot 1. */
	struct sdvo_device_mapping *mapping =
		&dev_priv->sdvo_mappings[IS_SDVOB(reg) ? 0 : 1];

	if (mapping->initialized) {
		/* High nibble of the VBT ddc_pin selects the bus bit. */
		sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
	} else {
		psb_intel_sdvo_guess_ddc_bus(sdvo);
	}
}
/*
 * Select the GMBUS adapter (and speed) used to talk to the SDVO device,
 * preferring the pin/speed the BIOS described in the VBT mapping.
 */
static void
psb_intel_sdvo_select_i2c_bus(struct drm_psb_private *dev_priv,
			  struct psb_intel_sdvo *sdvo, u32 reg)
{
	struct sdvo_device_mapping *mapping =
		&dev_priv->sdvo_mappings[IS_SDVOB(reg) ? 0 : 1];
	u8 pin = GMBUS_PORT_DPB;
	u8 speed = GMBUS_RATE_1MHZ >> 8;

	if (mapping->initialized) {
		pin = mapping->i2c_pin;
		speed = mapping->i2c_speed;
	}

	if (pin < GMBUS_NUM_PORTS) {
		sdvo->i2c = &dev_priv->gmbus[pin].adapter;
		gma_intel_gmbus_set_speed(sdvo->i2c, speed);
		gma_intel_gmbus_force_bit(sdvo->i2c, true);
	} else {
		/* Out-of-range pin: fall back to port B unconfigured. */
		sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
	}
}
/*
 * Whether this TMDS function block can operate as an HDMI connector.
 * 'device' is currently unused: HDMI capability is queried from the
 * encoder as a whole, not per TMDS block.
 */
static bool
psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int device)
{
	return psb_intel_sdvo_check_supp_encode(psb_intel_sdvo);
}
/*
 * Work out the I2C slave address of the SDVO device on this port, using
 * the BIOS-provided mapping when available and falling back to the
 * conventional 0x70 (SDVOB) / 0x72 (SDVOC) assignment.
 */
static u8
psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct sdvo_device_mapping *my_mapping, *other_mapping;

	if (IS_SDVOB(sdvo_reg)) {
		my_mapping = &dev_priv->sdvo_mappings[0];
		other_mapping = &dev_priv->sdvo_mappings[1];
	} else {
		my_mapping = &dev_priv->sdvo_mappings[1];
		other_mapping = &dev_priv->sdvo_mappings[0];
	}

	/* If the BIOS described our SDVO device, take advantage of it. */
	if (my_mapping->slave_addr)
		return my_mapping->slave_addr;

	/* If the BIOS only described a different SDVO device, use the
	 * address that it isn't using.
	 */
	if (other_mapping->slave_addr)
		return other_mapping->slave_addr == 0x70 ? 0x72 : 0x70;

	/* No SDVO device info is found for another DVO port,
	 * so use mapping assumption we had before BIOS parsing.
	 */
	return IS_SDVOB(sdvo_reg) ? 0x70 : 0x72;
}
static void
psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
struct psb_intel_sdvo *encoder)
{
drm_connector_init(encoder->base.base.dev,
&connector->base.base,
&psb_intel_sdvo_connector_funcs,
connector->base.base.connector_type);
drm_connector_helper_add(&connector->base.base,
&psb_intel_sdvo_connector_helper_funcs);
connector->base.base.interlace_allowed = 0;
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
gma_connector_attach_encoder(&connector->base, &encoder->base);
drm_connector_register(&connector->base.base);
}
/*
 * Placeholder for HDMI-specific connector properties; the property
 * attachment is commented out because the driver does not support
 * HDMI audio/range properties yet.
 */
static void
psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
{
	/* FIXME: We don't support HDMI at the moment
	struct drm_device *dev = connector->base.base.dev;

	intel_attach_force_audio_property(&connector->base.base);
	intel_attach_broadcast_rgb_property(&connector->base.base);
	*/
}
/*
 * Create a DVI/HDMI connector for TMDS function block 'device' (0 or 1).
 * Returns false only on allocation failure.
 */
static bool
psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
{
	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
	struct drm_connector *connector;
	struct gma_connector *intel_connector;
	struct psb_intel_sdvo_connector *sdvo_connector;

	sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
	if (!sdvo_connector)
		return false;

	/* device 0 drives TMDS0, device 1 drives TMDS1. */
	switch (device) {
	case 0:
		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
		sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
		break;
	case 1:
		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
		sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
		break;
	}

	intel_connector = &sdvo_connector->base;
	connector = &intel_connector->base;
	// connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
	connector->connector_type = DRM_MODE_CONNECTOR_DVID;

	/* Upgrade to HDMI when the encoder supports the HDMI encode mode. */
	if (psb_intel_sdvo_is_hdmi_connector(psb_intel_sdvo, device)) {
		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
		psb_intel_sdvo->is_hdmi = true;
	}

	psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
					   (1 << INTEL_ANALOG_CLONE_BIT));

	psb_intel_sdvo_connector_init(sdvo_connector, psb_intel_sdvo);
	if (psb_intel_sdvo->is_hdmi)
		psb_intel_sdvo_add_hdmi_properties(sdvo_connector);

	return true;
}
/*
 * Create a TV (S-Video or composite) connector for output 'type'.
 * Returns false on allocation or property-creation failure; on the
 * latter the half-built connector is destroyed again.
 */
static bool
psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
{
	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
	struct drm_connector *connector;
	struct gma_connector *intel_connector;
	struct psb_intel_sdvo_connector *sdvo_connector;

	sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
	if (!sdvo_connector)
		return false;

	intel_connector = &sdvo_connector->base;
	connector = &intel_connector->base;
	encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
	connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;

	/* Remember which TV output (SVID/CVBS) this connector drives. */
	psb_intel_sdvo->controlled_output |= type;
	sdvo_connector->output_flag = type;

	psb_intel_sdvo->is_tv = true;
	psb_intel_sdvo->base.needs_tv_clock = true;
	psb_intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;

	psb_intel_sdvo_connector_init(sdvo_connector, psb_intel_sdvo);

	if (!psb_intel_sdvo_tv_create_property(psb_intel_sdvo, sdvo_connector, type))
		goto err;
	if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, sdvo_connector))
		goto err;

	return true;

err:
	psb_intel_sdvo_destroy(connector);
	return false;
}
/*
 * Create a VGA connector for RGB function block 'device' (0 or 1).
 * Returns false only on allocation failure.
 */
static bool
psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
{
	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
	struct drm_connector *connector;
	struct gma_connector *intel_connector;
	struct psb_intel_sdvo_connector *sdvo_connector;

	sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
	if (!sdvo_connector)
		return false;

	intel_connector = &sdvo_connector->base;
	connector = &intel_connector->base;
	/* VGA has no hotplug pin, so poll for connection. */
	connector->polled = DRM_CONNECTOR_POLL_CONNECT;
	encoder->encoder_type = DRM_MODE_ENCODER_DAC;
	connector->connector_type = DRM_MODE_CONNECTOR_VGA;

	/* device 0 drives RGB0, device 1 drives RGB1. */
	switch (device) {
	case 0:
		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
		sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
		break;
	case 1:
		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
		sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
		break;
	}

	psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
					   (1 << INTEL_ANALOG_CLONE_BIT));

	psb_intel_sdvo_connector_init(sdvo_connector, psb_intel_sdvo);
	return true;
}
/*
 * Create an LVDS connector for function block 'device' (0 or 1).
 * Returns false on allocation or enhancement-property failure; on the
 * latter the half-built connector is destroyed again.
 */
static bool
psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
{
	struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
	struct drm_connector *connector;
	struct gma_connector *intel_connector;
	struct psb_intel_sdvo_connector *sdvo_connector;

	sdvo_connector = kzalloc(sizeof(*sdvo_connector), GFP_KERNEL);
	if (!sdvo_connector)
		return false;

	intel_connector = &sdvo_connector->base;
	connector = &intel_connector->base;
	encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
	connector->connector_type = DRM_MODE_CONNECTOR_LVDS;

	/* device 0 drives LVDS0, device 1 drives LVDS1. */
	switch (device) {
	case 0:
		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
		sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
		break;
	case 1:
		psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
		sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
		break;
	}

	psb_intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
					   (1 << INTEL_SDVO_LVDS_CLONE_BIT));

	psb_intel_sdvo_connector_init(sdvo_connector, psb_intel_sdvo);
	if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, sdvo_connector))
		goto err;

	return true;

err:
	psb_intel_sdvo_destroy(connector);
	return false;
}
/*
 * Instantiate a connector for every output the capability flags report.
 * Returns false if any connector creation fails or if no known output
 * type was advertised.
 */
static bool
psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags)
{
	psb_intel_sdvo->is_tv = false;
	psb_intel_sdvo->base.needs_tv_clock = false;
	psb_intel_sdvo->is_lvds = false;

	/* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/

	if (flags & SDVO_OUTPUT_TMDS0)
		if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 0))
			return false;

	if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
		if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 1))
			return false;

	/* TV has no XXX1 function block */
	if (flags & SDVO_OUTPUT_SVID0)
		if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_SVID0))
			return false;

	if (flags & SDVO_OUTPUT_CVBS0)
		if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_CVBS0))
			return false;

	if (flags & SDVO_OUTPUT_RGB0)
		if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 0))
			return false;

	if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
		if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 1))
			return false;

	if (flags & SDVO_OUTPUT_LVDS0)
		if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 0))
			return false;

	if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
		if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 1))
			return false;

	if ((flags & SDVO_OUTPUT_MASK) == 0) {
		unsigned char bytes[2];

		/* No recognised outputs: log the raw flags and give up. */
		psb_intel_sdvo->controlled_output = 0;
		memcpy(bytes, &psb_intel_sdvo->caps.output_flags, 2);
		DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
			      SDVO_NAME(psb_intel_sdvo),
			      bytes[0], bytes[1]);
		return false;
	}

	/* The SDVO encoder can be cloned onto either pipe. */
	psb_intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);

	return true;
}
/*
 * Query the TV formats the SDVO device supports for output @type and
 * expose them to user space as a "mode" enum property on the connector.
 * Returns false if the output cannot be targeted, no format is reported
 * or property allocation fails.
 */
static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
					      struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
					      int type)
{
	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
	struct psb_intel_sdvo_tv_format format;
	uint32_t format_map, i;

	/* Subsequent GET commands refer to this output. */
	if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, type))
		return false;

	/* The reply must match the 6-byte wire format. */
	BUILD_BUG_ON(sizeof(format) != 6);
	if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
				      SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
				      &format, sizeof(format)))
		return false;

	/* Only the first 32 format bits are considered here. */
	memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));

	if (format_map == 0)
		return false;

	/* Collect the indices of all supported formats. */
	psb_intel_sdvo_connector->format_supported_num = 0;
	for (i = 0 ; i < TV_FORMAT_NUM; i++)
		if (format_map & (1 << i))
			psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;

	psb_intel_sdvo_connector->tv_format =
		drm_property_create(dev, DRM_MODE_PROP_ENUM,
				    "mode", psb_intel_sdvo_connector->format_supported_num);
	if (!psb_intel_sdvo_connector->tv_format)
		return false;

	for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++)
		drm_property_add_enum(
				psb_intel_sdvo_connector->tv_format, i,
				i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);

	/* Default to the first supported format. */
	psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
	drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
				   psb_intel_sdvo_connector->tv_format, 0);
	return true;
}
/*
 * Boilerplate for a simple 0..max enhancement control: if the device's
 * enhancements reply advertises <name>, read its maximum and current
 * value, create a matching DRM range property and attach it to the
 * connector.  Expands to "return false" on any failed query or
 * allocation, so it may only be used inside bool-returning functions
 * that have psb_intel_sdvo, psb_intel_sdvo_connector, connector, dev,
 * enhancements, data_value[] and response in scope.
 */
#define ENHANCEMENT(name, NAME) do { \
	if (enhancements.name) { \
		if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
		    !psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
			return false; \
		psb_intel_sdvo_connector->max_##name = data_value[0]; \
		psb_intel_sdvo_connector->cur_##name = response; \
		psb_intel_sdvo_connector->name = \
			drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
		if (!psb_intel_sdvo_connector->name) return false; \
		drm_object_attach_property(&connector->base, \
					   psb_intel_sdvo_connector->name, \
					   psb_intel_sdvo_connector->cur_##name); \
		DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
			      data_value[0], data_value[1], response); \
	} \
} while(0)
/*
 * Expose every TV picture-enhancement control the device supports
 * (overscan margins, position, saturation, filters, ...) as DRM
 * properties on the connector.  @enhancements is the support bitfield
 * previously read from the device.  Returns false on any failed device
 * query or property allocation.
 */
static bool
psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
					  struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
					  struct psb_intel_sdvo_enhancements_reply enhancements)
{
	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
	struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
	uint16_t response, data_value[2];

	/* when horizontal overscan is supported, Add the left/right property */
	if (enhancements.overscan_h) {
		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
					      SDVO_CMD_GET_MAX_OVERSCAN_H,
					      &data_value, 4))
			return false;

		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
					      SDVO_CMD_GET_OVERSCAN_H,
					      &response, 2))
			return false;

		/* The single overscan value maps to symmetric margins. */
		psb_intel_sdvo_connector->max_hscan = data_value[0];
		psb_intel_sdvo_connector->left_margin = data_value[0] - response;
		psb_intel_sdvo_connector->right_margin = psb_intel_sdvo_connector->left_margin;
		psb_intel_sdvo_connector->left =
			drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
		if (!psb_intel_sdvo_connector->left)
			return false;

		drm_object_attach_property(&connector->base,
					   psb_intel_sdvo_connector->left,
					   psb_intel_sdvo_connector->left_margin);

		psb_intel_sdvo_connector->right =
			drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
		if (!psb_intel_sdvo_connector->right)
			return false;

		drm_object_attach_property(&connector->base,
					   psb_intel_sdvo_connector->right,
					   psb_intel_sdvo_connector->right_margin);
		DRM_DEBUG_KMS("h_overscan: max %d, "
			      "default %d, current %d\n",
			      data_value[0], data_value[1], response);
	}

	/* Same scheme vertically: symmetric top/bottom margins. */
	if (enhancements.overscan_v) {
		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
					      SDVO_CMD_GET_MAX_OVERSCAN_V,
					      &data_value, 4))
			return false;

		if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
					      SDVO_CMD_GET_OVERSCAN_V,
					      &response, 2))
			return false;

		psb_intel_sdvo_connector->max_vscan = data_value[0];
		psb_intel_sdvo_connector->top_margin = data_value[0] - response;
		psb_intel_sdvo_connector->bottom_margin = psb_intel_sdvo_connector->top_margin;
		psb_intel_sdvo_connector->top =
			drm_property_create_range(dev, 0, "top_margin", 0, data_value[0]);
		if (!psb_intel_sdvo_connector->top)
			return false;

		drm_object_attach_property(&connector->base,
					   psb_intel_sdvo_connector->top,
					   psb_intel_sdvo_connector->top_margin);

		psb_intel_sdvo_connector->bottom =
			drm_property_create_range(dev, 0, "bottom_margin", 0, data_value[0]);
		if (!psb_intel_sdvo_connector->bottom)
			return false;

		drm_object_attach_property(&connector->base,
					   psb_intel_sdvo_connector->bottom,
					   psb_intel_sdvo_connector->bottom_margin);
		DRM_DEBUG_KMS("v_overscan: max %d, "
			      "default %d, current %d\n",
			      data_value[0], data_value[1], response);
	}

	/* Plain 0..max controls share the ENHANCEMENT boilerplate. */
	ENHANCEMENT(hpos, HPOS);
	ENHANCEMENT(vpos, VPOS);
	ENHANCEMENT(saturation, SATURATION);
	ENHANCEMENT(contrast, CONTRAST);
	ENHANCEMENT(hue, HUE);
	ENHANCEMENT(sharpness, SHARPNESS);
	ENHANCEMENT(brightness, BRIGHTNESS);
	ENHANCEMENT(flicker_filter, FLICKER_FILTER);
	ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
	ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
	ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
	ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);

	/* dot_crawl is a boolean toggle rather than a range control. */
	if (enhancements.dot_crawl) {
		if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
			return false;

		psb_intel_sdvo_connector->max_dot_crawl = 1;
		psb_intel_sdvo_connector->cur_dot_crawl = response & 0x1;
		psb_intel_sdvo_connector->dot_crawl =
			drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
		if (!psb_intel_sdvo_connector->dot_crawl)
			return false;

		drm_object_attach_property(&connector->base,
					   psb_intel_sdvo_connector->dot_crawl,
					   psb_intel_sdvo_connector->cur_dot_crawl);
		DRM_DEBUG_KMS("dot crawl: current %d\n", response);
	}

	return true;
}
/*
 * LVDS panels only expose the brightness enhancement; create and attach
 * its range property when the device supports it.
 */
static bool
psb_intel_sdvo_create_enhance_property_lvds(struct psb_intel_sdvo *psb_intel_sdvo,
					    struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
					    struct psb_intel_sdvo_enhancements_reply enhancements)
{
	struct drm_device *dev = psb_intel_sdvo->base.base.dev;
	struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
	uint16_t response, data_value[2];

	ENHANCEMENT(brightness, BRIGHTNESS);

	return true;
}
#undef ENHANCEMENT
/*
 * Read the device's supported-enhancements bitfield and dispatch to the
 * TV or LVDS specific property setup.  Connectors of other types carry
 * no tunable enhancements, which is not an error.
 */
static bool psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
						   struct psb_intel_sdvo_connector *psb_intel_sdvo_connector)
{
	/* The 2-byte reply doubles as a "nothing supported" indicator. */
	union {
		struct psb_intel_sdvo_enhancements_reply reply;
		uint16_t response;
	} enhancements;

	BUILD_BUG_ON(sizeof(enhancements) != 2);

	enhancements.response = 0;
	psb_intel_sdvo_get_value(psb_intel_sdvo,
				 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
				 &enhancements, sizeof(enhancements));
	if (!enhancements.response) {
		DRM_DEBUG_KMS("No enhancement is supported\n");
		return true;
	}

	if (IS_TV(psb_intel_sdvo_connector))
		return psb_intel_sdvo_create_enhance_property_tv(psb_intel_sdvo,
								 psb_intel_sdvo_connector,
								 enhancements.reply);
	if (IS_LVDS(psb_intel_sdvo_connector))
		return psb_intel_sdvo_create_enhance_property_lvds(psb_intel_sdvo,
								   psb_intel_sdvo_connector,
								   enhancements.reply);

	return true;
}
/*
 * i2c transfer hook for the DDC proxy adapter: select the DDC bus on the
 * SDVO chip, then forward the messages to the real i2c master.
 */
static int psb_intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
					 struct i2c_msg *msgs,
					 int num)
{
	struct psb_intel_sdvo *psb_sdvo = adapter->algo_data;

	if (!psb_intel_sdvo_set_control_bus_switch(psb_sdvo, psb_sdvo->ddc_bus))
		return -EIO;

	return psb_sdvo->i2c->algo->master_xfer(psb_sdvo->i2c, msgs, num);
}
/* Report the functionality of the underlying i2c master unchanged. */
static u32 psb_intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
{
	struct psb_intel_sdvo *psb_sdvo = adapter->algo_data;

	return psb_sdvo->i2c->algo->functionality(psb_sdvo->i2c);
}
/* i2c algorithm that proxies DDC transfers through the SDVO chip. */
static const struct i2c_algorithm psb_intel_sdvo_ddc_proxy = {
	.master_xfer	= psb_intel_sdvo_ddc_proxy_xfer,
	.functionality	= psb_intel_sdvo_ddc_proxy_func
};
static bool
psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
struct drm_device *dev)
{
sdvo->ddc.owner = THIS_MODULE;
sdvo->ddc.class = I2C_CLASS_DDC;
snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
sdvo->ddc.dev.parent = &dev->pdev->dev;
sdvo->ddc.algo_data = sdvo;
sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
return i2c_add_adapter(&sdvo->ddc) == 0;
}
/*
 * Probe and register an SDVO encoder at register @sdvo_reg: set up the
 * DDC proxy adapter, verify the device responds, create connectors for
 * the reported outputs and read the supported pixel clock range.
 * Returns true on success; on failure all partially initialized state
 * is torn down.
 */
bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_encoder *gma_encoder;
	struct psb_intel_sdvo *psb_intel_sdvo;
	int i;

	psb_intel_sdvo = kzalloc(sizeof(struct psb_intel_sdvo), GFP_KERNEL);
	if (!psb_intel_sdvo)
		return false;

	psb_intel_sdvo->sdvo_reg = sdvo_reg;
	/* 8-bit address is stored shifted; keep the 7-bit slave address. */
	psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
	psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
	if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) {
		kfree(psb_intel_sdvo);
		return false;
	}

	/* encoder type will be decided later */
	gma_encoder = &psb_intel_sdvo->base;
	gma_encoder->type = INTEL_OUTPUT_SDVO;
	drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, 0);

	/* Read the regs to test if we can talk to the device */
	for (i = 0; i < 0x40; i++) {
		u8 byte;

		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, i, &byte)) {
			DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
				      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
			goto err;
		}
	}

	if (IS_SDVOB(sdvo_reg))
		dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
	else
		dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;

	drm_encoder_helper_add(&gma_encoder->base, &psb_intel_sdvo_helper_funcs);

	/* In default case sdvo lvds is false */
	if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
		goto err;

	if (psb_intel_sdvo_output_setup(psb_intel_sdvo,
					psb_intel_sdvo->caps.output_flags) != true) {
		DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
			      IS_SDVOB(sdvo_reg) ? 'B' : 'C');
		goto err;
	}

	psb_intel_sdvo_select_ddc_bus(dev_priv, psb_intel_sdvo, sdvo_reg);

	/* Set the input timing to the screen. Assume always input 0. */
	if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
		goto err;

	if (!psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_sdvo,
							&psb_intel_sdvo->pixel_clock_min,
							&psb_intel_sdvo->pixel_clock_max))
		goto err;

	DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
		      "clock range %dMHz - %dMHz, "
		      "input 1: %c, input 2: %c, "
		      "output 1: %c, output 2: %c\n",
		      SDVO_NAME(psb_intel_sdvo),
		      psb_intel_sdvo->caps.vendor_id, psb_intel_sdvo->caps.device_id,
		      psb_intel_sdvo->caps.device_rev_id,
		      psb_intel_sdvo->pixel_clock_min / 1000,
		      psb_intel_sdvo->pixel_clock_max / 1000,
		      (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
		      (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
		      /* check currently supported outputs */
		      psb_intel_sdvo->caps.output_flags &
		      (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
		      psb_intel_sdvo->caps.output_flags &
		      (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');

	return true;

err:
	drm_encoder_cleanup(&gma_encoder->base);
	i2c_del_adapter(&psb_intel_sdvo->ddc);
	kfree(psb_intel_sdvo);
	return false;
}
| gpl-2.0 |
Xmister/linux-sunxi | drivers/net/wireless/hostap/hostap_cs.c | 2269 | 18239 | #define PRISM2_PCCARD
#include <linux/module.h>
#include <linux/init.h>
#include <linux/if.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
#include <asm/io.h>
#include "hostap_wlan.h"
static char *dev_info = "hostap_cs";
MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "
"cards (PC Card).");
MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PC Card)");
MODULE_LICENSE("GPL");
static int ignore_cis_vcc;
module_param(ignore_cis_vcc, int, 0444);
MODULE_PARM_DESC(ignore_cis_vcc, "Ignore broken CIS VCC entry");
/* struct local_info::hw_priv */
struct hostap_cs_priv {
	struct pcmcia_device *link;	/* PCMCIA device carrying this card */
	int sandisk_connectplus;	/* nonzero after SanDisk ConnectPlus
					 * detection; selects vendor-specific
					 * reset/I/O-base handling */
};
#ifdef PRISM2_IO_DEBUG
/*
 * Debug wrappers around the raw port I/O accessors: every hardware
 * access is also recorded in the driver's I/O debug log via
 * prism2_io_debug_add(), under the local data spinlock so the log
 * entries match the actual access order.
 */
static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
{
	struct hostap_interface *iface;
	local_info_t *local;
	unsigned long flags;

	iface = netdev_priv(dev);
	local = iface->local;
	spin_lock_irqsave(&local->lock, flags);
	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
	outb(v, dev->base_addr + a);
	spin_unlock_irqrestore(&local->lock, flags);
}

static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
{
	struct hostap_interface *iface;
	local_info_t *local;
	unsigned long flags;
	u8 v;

	iface = netdev_priv(dev);
	local = iface->local;
	spin_lock_irqsave(&local->lock, flags);
	v = inb(dev->base_addr + a);
	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
	spin_unlock_irqrestore(&local->lock, flags);
	return v;
}

static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
{
	struct hostap_interface *iface;
	local_info_t *local;
	unsigned long flags;

	iface = netdev_priv(dev);
	local = iface->local;
	spin_lock_irqsave(&local->lock, flags);
	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
	outw(v, dev->base_addr + a);
	spin_unlock_irqrestore(&local->lock, flags);
}

static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
{
	struct hostap_interface *iface;
	local_info_t *local;
	unsigned long flags;
	u16 v;

	iface = netdev_priv(dev);
	local = iface->local;
	spin_lock_irqsave(&local->lock, flags);
	v = inw(dev->base_addr + a);
	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
	spin_unlock_irqrestore(&local->lock, flags);
	return v;
}

static inline void hfa384x_outsw_debug(struct net_device *dev, int a,
				       u8 *buf, int wc)
{
	struct hostap_interface *iface;
	local_info_t *local;
	unsigned long flags;

	iface = netdev_priv(dev);
	local = iface->local;
	spin_lock_irqsave(&local->lock, flags);
	/* Only the word count is logged for string I/O, not the data. */
	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTSW, a, wc);
	outsw(dev->base_addr + a, buf, wc);
	spin_unlock_irqrestore(&local->lock, flags);
}

static inline void hfa384x_insw_debug(struct net_device *dev, int a,
				      u8 *buf, int wc)
{
	struct hostap_interface *iface;
	local_info_t *local;
	unsigned long flags;

	iface = netdev_priv(dev);
	local = iface->local;
	spin_lock_irqsave(&local->lock, flags);
	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INSW, a, wc);
	insw(dev->base_addr + a, buf, wc);
	spin_unlock_irqrestore(&local->lock, flags);
}

/* The HFA384X_* macros expect a 'dev' variable in the calling scope. */
#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
#define HFA384X_OUTSW(a, buf, wc) hfa384x_outsw_debug(dev, (a), (buf), (wc))
#define HFA384X_INSW(a, buf, wc) hfa384x_insw_debug(dev, (a), (buf), (wc))

#else /* PRISM2_IO_DEBUG */

/* Fast path: plain port I/O relative to the device's base address. */
#define HFA384X_OUTB(v,a) outb((v), dev->base_addr + (a))
#define HFA384X_INB(a) inb(dev->base_addr + (a))
#define HFA384X_OUTW(v,a) outw((v), dev->base_addr + (a))
#define HFA384X_INW(a) inw(dev->base_addr + (a))
#define HFA384X_INSW(a, buf, wc) insw(dev->base_addr + (a), buf, wc)
#define HFA384X_OUTSW(a, buf, wc) outsw(dev->base_addr + (a), buf, wc)

#endif /* PRISM2_IO_DEBUG */
/*
 * Copy @len bytes from the selected Buffer Access Path data register
 * into @buf: bulk-read whole 16-bit words, then pick up a trailing odd
 * byte if present.  Always returns 0.
 */
static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
			    int len)
{
	u16 reg = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
	u16 *wpos = buf;
	int words = len / 2;

	if (words)
		HFA384X_INSW(reg, buf, words);
	wpos += words;

	if (len & 1)
		*((char *) wpos) = HFA384X_INB(reg);

	return 0;
}
/*
 * Mirror of hfa384x_from_bap(): write @len bytes from @buf to the
 * selected BAP data register, word-wise plus an optional odd tail byte.
 * Always returns 0.
 */
static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
{
	u16 reg = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
	u16 *wpos = buf;
	int words = len / 2;

	if (words)
		HFA384X_OUTSW(reg, buf, words);
	wpos += words;

	if (len & 1)
		HFA384X_OUTB(*((char *) wpos), reg);

	return 0;
}
/* FIX: This might change at some point.. */
#include "hostap_hw.c"
static void prism2_detach(struct pcmcia_device *p_dev);
static void prism2_release(u_long arg);
static int prism2_config(struct pcmcia_device *link);
/*
 * Return 1 while the PCMCIA device backing @local is still plugged in
 * and usable, 0 otherwise (the && chain yields exactly 0 or 1).
 */
static int prism2_pccard_card_present(local_info_t *local)
{
	struct hostap_cs_priv *hw_priv = local->hw_priv;

	return hw_priv != NULL && hw_priv->link != NULL &&
	       pcmcia_dev_present(hw_priv->link);
}
/*
* SanDisk CompactFlash WLAN Flashcard - Product Manual v1.0
* Document No. 20-10-00058, January 2004
* http://www.sandisk.com/pdf/industrial/ProdManualCFWLANv1.0.pdf
*/
#define SANDISK_WLAN_ACTIVATION_OFF 0x40
#define SANDISK_HCR_OFF 0x42
/*
 * Program the SanDisk ConnectPlus vendor config bytes 0x10/0x12 with the
 * low/high byte of the function's I/O base address.  Failures are only
 * logged; there is no meaningful recovery here.
 */
static void sandisk_set_iobase(local_info_t *local)
{
	int res;
	struct hostap_cs_priv *hw_priv = local->hw_priv;

	res = pcmcia_write_config_byte(hw_priv->link, 0x10,
				hw_priv->link->resource[0]->start & 0x00ff);
	if (res != 0) {
		printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 0 -"
		       " res=%d\n", res);
	}
	udelay(10);

	res = pcmcia_write_config_byte(hw_priv->link, 0x12,
				(hw_priv->link->resource[0]->start >> 8) & 0x00ff);
	if (res != 0) {
		printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 1 -"
		       " res=%d\n", res);
	}
}
/*
 * Write the hardware configuration register using the SanDisk
 * ConnectPlus activation sequence (see the product manual referenced
 * above the SANDISK_* defines).
 */
static void sandisk_write_hcr(local_info_t *local, int hcr)
{
	struct net_device *dev = local->dev;
	int i;

	HFA384X_OUTB(0x80, SANDISK_WLAN_ACTIVATION_OFF);
	udelay(50);
	/* Repeated write — presumably required by the vendor sequence. */
	for (i = 0; i < 10; i++) {
		HFA384X_OUTB(hcr, SANDISK_HCR_OFF);
	}
	udelay(55);
	HFA384X_OUTB(0x45, SANDISK_WLAN_ACTIVATION_OFF);
}
/*
 * Detect a multi-function SanDisk ConnectPlus card and, if found, run
 * its vendor-specific activation sequence.  Returns -ENODEV when the
 * card is not a ConnectPlus (the common case); 0 otherwise.
 */
static int sandisk_enable_wireless(struct net_device *dev)
{
	int res, ret = 0;
	struct hostap_interface *iface = netdev_priv(dev);
	local_info_t *local = iface->local;
	struct hostap_cs_priv *hw_priv = local->hw_priv;

	if (resource_size(hw_priv->link->resource[0]) < 0x42) {
		/* Not enough ports to be SanDisk multi-function card */
		ret = -ENODEV;
		goto done;
	}

	if (hw_priv->link->manf_id != 0xd601 || hw_priv->link->card_id != 0x0101) {
		/* No SanDisk manfid found */
		ret = -ENODEV;
		goto done;
	}

	if (hw_priv->link->socket->functions < 2) {
		/* No multi-function links found */
		ret = -ENODEV;
		goto done;
	}

	printk(KERN_DEBUG "%s: Multi-function SanDisk ConnectPlus detected"
	       " - using vendor-specific initialization\n", dev->name);
	hw_priv->sandisk_connectplus = 1;

	/* Soft-reset the function via COR before reprogramming it. */
	res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
				COR_SOFT_RESET);
	if (res != 0) {
		/* NOTE(review): ret stays 0 on this and the next failure,
		 * so COR write errors are reported as success — confirm
		 * whether that is intentional best-effort behavior. */
		printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
		       dev->name, res);
		goto done;
	}
	mdelay(5);

	/*
	 * Do not enable interrupts here to avoid some bogus events. Interrupts
	 * will be enabled during the first cor_sreset call.
	 */
	res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
				(COR_LEVEL_REQ | 0x8 | COR_ADDR_DECODE |
					COR_FUNC_ENA));
	if (res != 0) {
		printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
		       dev->name, res);
		goto done;
	}
	mdelay(5);

	sandisk_set_iobase(local);

	/* Vendor-specific wake-up writes to the activation register. */
	HFA384X_OUTB(0xc5, SANDISK_WLAN_ACTIVATION_OFF);
	udelay(10);
	HFA384X_OUTB(0x4b, SANDISK_WLAN_ACTIVATION_OFF);
	udelay(10);

done:
	return ret;
}
/*
 * Soft-reset the card by toggling COR_SOFT_RESET in the PCMCIA COR
 * register.  SanDisk ConnectPlus cards get longer delays, have IRQs
 * enabled on the way out and need their vendor I/O base reprogrammed.
 */
static void prism2_pccard_cor_sreset(local_info_t *local)
{
	int res;
	u8 val;
	struct hostap_cs_priv *hw_priv = local->hw_priv;

	if (!prism2_pccard_card_present(local))
	       return;

	res = pcmcia_read_config_byte(hw_priv->link, CISREG_COR, &val);
	if (res != 0) {
		printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 1 (%d)\n",
		       res);
		return;
	}
	printk(KERN_DEBUG "prism2_pccard_cor_sreset: original COR %02x\n",
	       val);

	/* Assert the reset bit... */
	val |= COR_SOFT_RESET;
	res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR, val);
	if (res != 0) {
		printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 2 (%d)\n",
		       res);
		return;
	}

	mdelay(hw_priv->sandisk_connectplus ? 5 : 2);

	/* ...then deassert it; ConnectPlus wants IRQs enabled here. */
	val &= ~COR_SOFT_RESET;
	if (hw_priv->sandisk_connectplus)
		val |= COR_IREQ_ENA;
	res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR, val);
	if (res != 0) {
		printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 3 (%d)\n",
		       res);
		return;
	}

	mdelay(hw_priv->sandisk_connectplus ? 5 : 2);

	if (hw_priv->sandisk_connectplus)
		sandisk_set_iobase(local);
}
/*
 * Put the card into Genesis mode with configuration @hcr: soft-reset via
 * COR, write @hcr to CCSR while in reset, then release the reset.
 * ConnectPlus cards use their own vendor sequence instead.
 */
static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
{
	int res;
	u8 old_cor;
	struct hostap_cs_priv *hw_priv = local->hw_priv;

	if (!prism2_pccard_card_present(local))
	       return;

	if (hw_priv->sandisk_connectplus) {
		sandisk_write_hcr(local, hcr);
		return;
	}

	res = pcmcia_read_config_byte(hw_priv->link, CISREG_COR, &old_cor);
	if (res != 0) {
		printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 1 "
		       "(%d)\n", res);
		return;
	}
	printk(KERN_DEBUG "prism2_pccard_genesis_sreset: original COR %02x\n",
	       old_cor);

	res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
				old_cor | COR_SOFT_RESET);
	if (res != 0) {
		printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 2 "
		       "(%d)\n", res);
		return;
	}

	mdelay(10);

	/* Setup Genesis mode */
	res = pcmcia_write_config_byte(hw_priv->link, CISREG_CCSR, hcr);
	if (res != 0) {
		printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 3 "
		       "(%d)\n", res);
		return;
	}
	mdelay(10);

	/* Release the soft reset, restoring the original COR value. */
	res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR,
				old_cor & ~COR_SOFT_RESET);
	if (res != 0) {
		printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 4 "
		       "(%d)\n", res);
		return;
	}

	mdelay(10);
}
/* Hardware-access hooks handed to the bus-independent hostap core. */
static struct prism2_helper_functions prism2_pccard_funcs =
{
	.card_present	= prism2_pccard_card_present,
	.cor_sreset	= prism2_pccard_cor_sreset,
	.genesis_reset	= prism2_pccard_genesis_reset,
	.hw_type	= HOSTAP_HW_PCCARD,
};
/* allocate local data and register with CardServices
* initialize dev_link structure, but do not configure the card yet */
/*
 * PCMCIA probe callback: all real work happens in prism2_config();
 * just log and propagate its result.
 */
static int hostap_cs_probe(struct pcmcia_device *p_dev)
{
	int ret;

	PDEBUG(DEBUG_HW, "%s: setting Vcc=33 (constant)\n", dev_info);

	ret = prism2_config(p_dev);
	if (ret)
		PDEBUG(DEBUG_EXTRA, "prism2_config() failed\n");

	return ret;
}
/*
 * PCMCIA removal callback: quiesce and release the device, then free
 * the net device(s) and the private data allocated in prism2_config().
 */
static void prism2_detach(struct pcmcia_device *link)
{
	PDEBUG(DEBUG_FLOW, "prism2_detach\n");

	prism2_release((u_long)link);

	/* release net devices */
	if (link->priv) {
		struct hostap_cs_priv *hw_priv;
		struct net_device *dev;
		struct hostap_interface *iface;

		dev = link->priv;
		iface = netdev_priv(dev);
		/* Save hw_priv before the local data (its owner) is freed. */
		hw_priv = iface->local->hw_priv;
		prism2_free_local_data(dev);
		kfree(hw_priv);
	}
}
/*
 * pcmcia_loop_config() callback: reject CIS entries without a config
 * index, otherwise try to claim the entry's I/O window.
 */
static int prism2_config_check(struct pcmcia_device *p_dev, void *priv_data)
{
	return p_dev->config_index == 0 ? -EINVAL : pcmcia_request_io(p_dev);
}
/*
 * Configure the PCMCIA device: pick a CIS configuration, allocate the
 * hostap local data/net device, request the IRQ, enable the device and
 * bring the hardware up.  Returns 0 on success or a negative errno; on
 * failure all acquired resources are released.
 */
static int prism2_config(struct pcmcia_device *link)
{
	struct net_device *dev;
	struct hostap_interface *iface;
	local_info_t *local;
	int ret = 1;
	struct hostap_cs_priv *hw_priv;
	unsigned long flags;

	PDEBUG(DEBUG_FLOW, "prism2_config()\n");

	hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL);
	if (hw_priv == NULL) {
		ret = -ENOMEM;
		goto failed;
	}

	/* Look for an appropriate configuration table entry in the CIS */
	link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_AUDIO |
		CONF_AUTO_CHECK_VCC | CONF_AUTO_SET_IO | CONF_ENABLE_IRQ;
	if (ignore_cis_vcc)
		link->config_flags &= ~CONF_AUTO_CHECK_VCC;
	ret = pcmcia_loop_config(link, prism2_config_check, NULL);
	if (ret) {
		if (!ignore_cis_vcc)
			printk(KERN_ERR "GetNextTuple(): No matching "
			       "CIS configuration. Maybe you need the "
			       "ignore_cis_vcc=1 parameter.\n");
		goto failed;
	}

	/* Need to allocate net_device before requesting IRQ handler */
	dev = prism2_init_local_data(&prism2_pccard_funcs, 0,
				     &link->dev);
	if (dev == NULL) {
		/*
		 * Bug fix: ret still holds 0 from the successful
		 * pcmcia_loop_config() call above, so without an explicit
		 * error code this failure would be reported as success.
		 */
		ret = -ENOMEM;
		goto failed;
	}
	link->priv = dev;

	iface = netdev_priv(dev);
	local = iface->local;
	local->hw_priv = hw_priv;
	hw_priv->link = link;

	/*
	 * We enable IRQ here, but IRQ handler will not proceed
	 * until dev->base_addr is set below. This protect us from
	 * receive interrupts when driver is not initialized.
	 */
	ret = pcmcia_request_irq(link, prism2_interrupt);
	if (ret)
		goto failed;

	ret = pcmcia_enable_device(link);
	if (ret)
		goto failed;

	spin_lock_irqsave(&local->irq_init_lock, flags);
	dev->irq = link->irq;
	dev->base_addr = link->resource[0]->start;
	spin_unlock_irqrestore(&local->irq_init_lock, flags);

	local->shutdown = 0;

	/* Best effort: only acts on SanDisk ConnectPlus cards. */
	sandisk_enable_wireless(dev);

	ret = prism2_hw_config(dev, 1);
	if (!ret)
		ret = hostap_hw_ready(dev);

	return ret;

failed:
	kfree(hw_priv);
	prism2_release((u_long)link);
	return ret;
}
/*
 * Quiesce the hardware and release the PCMCIA device resources.  Called
 * both on detach and on configuration failure, so link->priv may still
 * be NULL here.
 */
static void prism2_release(u_long arg)
{
	struct pcmcia_device *link = (struct pcmcia_device *)arg;

	PDEBUG(DEBUG_FLOW, "prism2_release\n");

	if (link->priv) {
		struct net_device *dev = link->priv;
		struct hostap_interface *iface;

		iface = netdev_priv(dev);
		/* Stop the hardware before the I/O window goes away. */
		prism2_hw_shutdown(dev, 0);
		iface->local->shutdown = 1;
	}

	pcmcia_disable_device(link);
	PDEBUG(DEBUG_FLOW, "release - done\n");
}
/*
 * PM suspend callback: detach any open interface from the stack and
 * put the hardware to sleep.
 */
static int hostap_cs_suspend(struct pcmcia_device *link)
{
	struct net_device *dev = (struct net_device *) link->priv;
	struct hostap_interface *iface;
	int dev_open;

	if (!dev)
		return -ENODEV;
	iface = netdev_priv(dev);

	PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_SUSPEND\n", dev_info);

	dev_open = iface && iface->local && iface->local->num_dev_open > 0;
	if (dev_open) {
		netif_stop_queue(dev);
		netif_device_detach(dev);
	}
	prism2_suspend(dev);

	return 0;
}
/*
 * PM resume callback: fully reinitialize the hardware and, if any
 * interface was open before suspend, reattach the net device and
 * restart its queue.
 */
static int hostap_cs_resume(struct pcmcia_device *link)
{
	struct net_device *dev = (struct net_device *) link->priv;
	int dev_open = 0;
	struct hostap_interface *iface = NULL;

	if (!dev)
		return -ENODEV;

	iface = netdev_priv(dev);

	PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_RESUME\n", dev_info);

	if (iface && iface->local)
		dev_open = iface->local->num_dev_open > 0;

	/* Reset, then reconfigure; initial=1 only when nothing was open. */
	prism2_hw_shutdown(dev, 1);
	prism2_hw_config(dev, dev_open ? 0 : 1);
	if (dev_open) {
		netif_device_attach(dev);
		netif_start_queue(dev);
	}

	return 0;
}
/*
 * PCMCIA ID table of known Prism2/2.5/3-based cards, matched by
 * manufacturer/card id and/or CIS product-ID string hashes.
 */
static const struct pcmcia_device_id hostap_cs_ids[] = {
	PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100),
	PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300),
	PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777),
	PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000),
	PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002),
	PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x3301),
	PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002),
	PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030b),
	PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612),
	PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613),
	PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002),
	PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002),
	PCMCIA_DEVICE_MANF_CARD(0x02d2, 0x0001),
	PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x0001),
	PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300),
/*	PCMCIA_DEVICE_MANF_CARD(0xc00f, 0x0000),    conflict with pcnet_cs */
	PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002),
	PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002),
	PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005),
	PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010),
	PCMCIA_DEVICE_MANF_CARD(0x0126, 0x0002),
	PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0xd601, 0x0005, "ADLINK 345 CF",
					 0x2d858104),
	PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "INTERSIL",
					 0x74c5e40d),
	PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil",
					 0x4b801a17),
	PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus",
				    0x7a954bd9, 0x74be00c6),
	PCMCIA_DEVICE_PROD_ID123(
		"Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
		0xe6ec52ce, 0x08649af2, 0x4b74baa0),
	PCMCIA_DEVICE_PROD_ID123(
		"D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02",
		0x71b18589, 0xb6f1b0ab, 0x4b74baa0),
	PCMCIA_DEVICE_PROD_ID123(
		"Instant Wireless ", " Network PC CARD", "Version 01.02",
		0x11d901af, 0x6e9bd926, 0x4b74baa0),
	PCMCIA_DEVICE_PROD_ID123(
		"SMC", "SMC2632W", "Version 01.02",
		0xc4f8b18b, 0x474a1f2a, 0x4b74baa0),
	PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G",
				0x2decece3, 0x82067c18),
	PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card",
				0x54f7c49c, 0x15a75e5b),
	PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE",
				0x74c5e40d, 0xdb472a18),
	PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card",
				0x0733cc81, 0x0c52f395),
	PCMCIA_DEVICE_PROD_ID12(
		"ZoomAir 11Mbps High", "Rate wireless Networking",
		0x273fe3db, 0x32a1eaee),
	PCMCIA_DEVICE_PROD_ID123(
		"Pretec", "CompactWLAN Card 802.11b", "2.5",
		0x1cadd3e5, 0xe697636c, 0x7a5bfcf1),
	PCMCIA_DEVICE_PROD_ID123(
		"U.S. Robotics", "IEEE 802.11b PC-CARD", "Version 01.02",
		0xc7b8df9d, 0x1700d087, 0x4b74baa0),
	PCMCIA_DEVICE_PROD_ID123(
		"Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio",
		"Ver. 1.00",
		0x5cd01705, 0x4271660f, 0x9d08ee12),
	PCMCIA_DEVICE_PROD_ID123(
		"Wireless LAN" , "11Mbps PC Card", "Version 01.02",
		0x4b8870ff, 0x70e946d1, 0x4b74baa0),
	PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
	PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
	PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
	PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
	PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids);
/* PCMCIA driver glue: probe/remove and power-management callbacks. */
static struct pcmcia_driver hostap_driver = {
	.name		= "hostap_cs",
	.probe		= hostap_cs_probe,
	.remove		= prism2_detach,
	.owner		= THIS_MODULE,
	.id_table	= hostap_cs_ids,
	.suspend	= hostap_cs_suspend,
	.resume		= hostap_cs_resume,
};
/* Register the PCMCIA driver on module load. */
static int __init init_prism2_pccard(void)
{
	return pcmcia_register_driver(&hostap_driver);
}

/* Unregister the PCMCIA driver on module unload. */
static void __exit exit_prism2_pccard(void)
{
	pcmcia_unregister_driver(&hostap_driver);
}

module_init(init_prism2_pccard);
module_exit(exit_prism2_pccard);
| gpl-2.0 |
Tesla-Redux-Devices/android_kernel_samsung_trlte | drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c | 2269 | 2736 | /*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <subdev/fb.h>
/* Per-device state for the nv36 FB subdev: just the common base. */
struct nv36_fb_priv {
	struct nouveau_fb base;
};
/*
 * Set up Z compression for tile region @i: allocate tag-RAM entries for
 * the region and encode the compression format plus the tag range into
 * the tile's zcomp value.  If tag allocation fails the region is simply
 * left uncompressed.
 */
static void
nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
		  struct nouveau_fb_tile *tile)
{
	/* One tag per 0x40 bytes, divided across the memory partitions. */
	u32 tiles = DIV_ROUND_UP(size, 0x40);
	u32 tags  = round_up(tiles / pfb->ram.parts, 0x40);
	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
		if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
		else           tile->zcomp |= 0x20000000; /* Z24S8 */
		/* Start and end tag offsets, in units of 64 tags. */
		tile->zcomp |= ((tile->tag->offset           ) >> 6);
		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14;
#ifdef __BIG_ENDIAN
		tile->zcomp |= 0x80000000;
#endif
	}
}
/*
 * Constructor for the nv36 FB subdev: create the common nouveau_fb
 * object and hook up the tile/compression operations, reusing the
 * nv20/nv30 handlers except for the nv36-specific comp setup.
 */
static int
nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	     struct nouveau_oclass *oclass, void *data, u32 size,
	     struct nouveau_object **pobject)
{
	struct nv36_fb_priv *priv;
	int ret;

	ret = nouveau_fb_create(parent, engine, oclass, &priv);
	/* *pobject is published before the error check. */
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	priv->base.memtype_valid = nv04_fb_memtype_valid;
	priv->base.ram.init = nv20_fb_vram_init;
	priv->base.tile.regions = 8;
	priv->base.tile.init = nv30_fb_tile_init;
	priv->base.tile.comp = nv36_fb_tile_comp;
	priv->base.tile.fini = nv20_fb_tile_fini;
	priv->base.tile.prog = nv20_fb_tile_prog;
	return nouveau_fb_preinit(&priv->base);
}
/*
 * Object class for the nv36 FB subdev (handle 0x36): nv36 construction
 * combined with the shared nv30 init and common dtor/fini hooks.
 */
struct nouveau_oclass
nv36_fb_oclass = {
	.handle = NV_SUBDEV(FB, 0x36),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv36_fb_ctor,
		.dtor = _nouveau_fb_dtor,
		.init = nv30_fb_init,
		.fini = _nouveau_fb_fini,
	},
};
| gpl-2.0 |
gsstudios/Dorimanx-SG2-I9100-Kernel | kernel/gcov/gcc_3_4.c | 2269 | 14111 | /*
* This code provides functions to handle gcc's profiling data format
* introduced with gcc 3.4. Future versions of gcc may change the gcov
* format (as happened before), so all format-specific information needs
* to be kept modular and easily exchangeable.
*
* This file is based on gcc-internal definitions. Functions and data
* structures are defined to be compatible with gcc counterparts.
* For a better understanding, refer to gcc source: gcc/gcov-io.h.
*
* Copyright IBM Corp. 2009
* Author(s): Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
*
* Uses gcc-internal data definitions.
*/
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include "gcov.h"
#define GCOV_COUNTERS 5
static struct gcov_info *gcov_info_head;
/**
* struct gcov_fn_info - profiling meta data per function
* @ident: object file-unique function identifier
* @checksum: function checksum
* @n_ctrs: number of values per counter type belonging to this function
*
* This data is generated by gcc during compilation and doesn't change
* at run-time.
*/
struct gcov_fn_info {
unsigned int ident;
unsigned int checksum;
unsigned int n_ctrs[0];
};
/**
* struct gcov_ctr_info - profiling data per counter type
* @num: number of counter values for this type
* @values: array of counter values for this type
* @merge: merge function for counter values of this type (unused)
*
* This data is generated by gcc during compilation and doesn't change
* at run-time with the exception of the values array.
*/
struct gcov_ctr_info {
unsigned int num;
gcov_type *values;
void (*merge)(gcov_type *, unsigned int);
};
/**
* struct gcov_info - profiling data per object file
* @version: gcov version magic indicating the gcc version used for compilation
* @next: list head for a singly-linked list
* @stamp: time stamp
* @filename: name of the associated gcov data file
* @n_functions: number of instrumented functions
* @functions: function data
* @ctr_mask: mask specifying which counter types are active
* @counts: counter data per counter type
*
* This data is generated by gcc during compilation and doesn't change
* at run-time with the exception of the next pointer.
*/
struct gcov_info {
unsigned int version;
struct gcov_info *next;
unsigned int stamp;
const char *filename;
unsigned int n_functions;
const struct gcov_fn_info *functions;
unsigned int ctr_mask;
struct gcov_ctr_info counts[0];
};
/**
* gcov_info_filename - return info filename
* @info: profiling data set
*
* Return: name of the gcov data file associated with @info.
*/
const char *gcov_info_filename(struct gcov_info *info)
{
return info->filename;
}
/**
* gcov_info_version - return info version
* @info: profiling data set
*
* Return: gcov version magic identifying the gcc used for compilation.
*/
unsigned int gcov_info_version(struct gcov_info *info)
{
return info->version;
}
/**
 * gcov_info_next - return next profiling data set
 * @info: profiling data set, or %NULL to fetch the list head
 *
 * Returns the gcov_info following @info, or the first gcov_info in the
 * global chain when @info is %NULL.
 */
struct gcov_info *gcov_info_next(struct gcov_info *info)
{
	return info ? info->next : gcov_info_head;
}
/**
* gcov_info_link - link/add profiling data set to the list
* @info: profiling data set
*
* Pushes @info at the head of the global singly-linked list.
* NOTE(review): no locking here; presumably callers serialize list
* mutations - confirm against call sites.
*/
void gcov_info_link(struct gcov_info *info)
{
info->next = gcov_info_head;
gcov_info_head = info;
}
/**
 * gcov_info_unlink - unlink/remove profiling data set from the list
 * @prev: entry preceding @info in the list, or %NULL if @info is first
 * @info: profiling data set to remove
 */
void gcov_info_unlink(struct gcov_info *prev, struct gcov_info *info)
{
	/* Either patch the predecessor's next pointer or the list head. */
	struct gcov_info **link = prev ? &prev->next : &gcov_info_head;

	*link = info->next;
}
/* Symbolic links to be created for each profiling data file. */
const struct gcov_link gcov_link[] = {
{ OBJ_TREE, "gcno" }, /* Link to .gcno file in $(objtree). */
{ 0, NULL},
};
/*
 * Determine whether a counter type is active, i.e. whether its bit is
 * set in the object file's counter mask. Based on gcc magic. Doesn't
 * change at run-time.
 */
static int counter_active(struct gcov_info *info, unsigned int type)
{
	return info->ctr_mask & (1 << type);
}
/* Determine number of active counter types. Based on gcc magic. */
static unsigned int num_counter_active(struct gcov_info *info)
{
	unsigned int type;
	unsigned int active = 0;

	for (type = 0; type < GCOV_COUNTERS; type++)
		if (counter_active(info, type))
			active++;
	return active;
}
/**
* gcov_info_reset - reset profiling data to zero
* @info: profiling data set
*/
void gcov_info_reset(struct gcov_info *info)
{
unsigned int active = num_counter_active(info);
unsigned int i;
for (i = 0; i < active; i++) {
memset(info->counts[i].values, 0,
info->counts[i].num * sizeof(gcov_type));
}
}
/**
* gcov_info_is_compatible - check if profiling data can be added
* @info1: first profiling data set
* @info2: second profiling data set
*
* Returns non-zero if profiling data can be added, zero otherwise.
* Compatibility is decided solely by the gcc-generated time stamp.
*/
int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2)
{
return (info1->stamp == info2->stamp);
}
/**
 * gcov_info_add - add up profiling data
 * @dest: profiling data set to which data is added
 * @source: profiling data set which is added
 *
 * Adds profiling counts of @source to @dest. The two sets are assumed
 * to be compatible (see gcov_info_is_compatible()).
 */
void gcov_info_add(struct gcov_info *dest, struct gcov_info *source)
{
	/*
	 * num_counter_active() rescans GCOV_COUNTERS mask bits; hoist the
	 * loop-invariant call out of the loop condition instead of
	 * re-evaluating it on every iteration.
	 */
	unsigned int active = num_counter_active(dest);
	unsigned int i;
	unsigned int j;

	for (i = 0; i < active; i++) {
		unsigned int num = dest->counts[i].num;

		for (j = 0; j < num; j++) {
			dest->counts[i].values[j] +=
				source->counts[i].values[j];
		}
	}
}
/* Get size of function info entry. Based on gcc magic. */
static size_t get_fn_size(struct gcov_info *info)
{
size_t size;
/* Header plus one counter-count word per active counter type. */
size = sizeof(struct gcov_fn_info) + num_counter_active(info) *
sizeof(unsigned int);
/* Pad so consecutive entries in the functions table stay aligned. */
if (__alignof__(struct gcov_fn_info) > sizeof(unsigned int))
size = ALIGN(size, __alignof__(struct gcov_fn_info));
return size;
}
/* Get address of the fn-th function info entry. Based on gcc magic. */
static struct gcov_fn_info *get_fn_info(struct gcov_info *info, unsigned int fn)
{
	char *base = (char *) info->functions;

	return (struct gcov_fn_info *) (base + fn * get_fn_size(info));
}
/**
* gcov_info_dup - duplicate profiling data set
* @info: profiling data set to duplicate
*
* Return newly allocated duplicate on success, %NULL on error.
*
* On any allocation failure the partially built duplicate is torn down
* via gcov_info_free(); kzalloc guarantees not-yet-set pointers are
* NULL, which keeps that teardown safe at every stage.
*/
struct gcov_info *gcov_info_dup(struct gcov_info *info)
{
struct gcov_info *dup;
unsigned int i;
unsigned int active;
/* Duplicate gcov_info. */
active = num_counter_active(info);
dup = kzalloc(sizeof(struct gcov_info) +
sizeof(struct gcov_ctr_info) * active, GFP_KERNEL);
if (!dup)
return NULL;
dup->version = info->version;
dup->stamp = info->stamp;
dup->n_functions = info->n_functions;
dup->ctr_mask = info->ctr_mask;
/* Duplicate filename. */
dup->filename = kstrdup(info->filename, GFP_KERNEL);
if (!dup->filename)
goto err_free;
/* Duplicate table of functions. */
dup->functions = kmemdup(info->functions, info->n_functions *
get_fn_size(info), GFP_KERNEL);
if (!dup->functions)
goto err_free;
/* Duplicate counter arrays. */
for (i = 0; i < active ; i++) {
struct gcov_ctr_info *ctr = &info->counts[i];
size_t size = ctr->num * sizeof(gcov_type);
dup->counts[i].num = ctr->num;
dup->counts[i].merge = ctr->merge;
/* vmalloc'ed here, matched by vfree() in gcov_info_free(). */
dup->counts[i].values = vmalloc(size);
if (!dup->counts[i].values)
goto err_free;
memcpy(dup->counts[i].values, ctr->values, size);
}
return dup;
err_free:
gcov_info_free(dup);
return NULL;
}
/**
 * gcov_info_free - release memory for profiling data set duplicate
 * @info: profiling data set duplicate to free
 *
 * Counterpart to gcov_info_dup(): counter value arrays were vmalloc'ed,
 * everything else kmalloc'ed. NULL members are tolerated by vfree/kfree,
 * so a partially constructed duplicate can be torn down as well.
 */
void gcov_info_free(struct gcov_info *info)
{
	unsigned int i = num_counter_active(info);

	while (i--)
		vfree(info->counts[i].values);
	kfree(info->functions);
	kfree(info->filename);
	kfree(info);
}
/**
* struct type_info - iterator helper array
* @ctr_type: counter type
* @offset: index of the first value of the current function for this type
*
* This array is needed to convert the in-memory data format into the in-file
* data format:
*
* In-memory:
* for each counter type
* for each function
* values
*
* In-file:
* for each function
* for each counter type
* values
*
* See gcc source gcc/gcov-io.h for more information on data organization.
*/
struct type_info {
int ctr_type;
unsigned int offset;
};
/**
* struct gcov_iterator - specifies current file position in logical records
* @info: associated profiling data
* @record: record type
* @function: function number
* @type: counter type
* @count: index into values array
* @num_types: number of counter types
* @type_info: helper array to get values-array offset for current function
*/
struct gcov_iterator {
struct gcov_info *info;
int record;
unsigned int function;
unsigned int type;
unsigned int count;
int num_types;
struct type_info type_info[0];
};
/* Return the function info entry for the iterator's current function. */
static struct gcov_fn_info *get_func(struct gcov_iterator *iter)
{
return get_fn_info(iter->info, iter->function);
}
/* Return the type_info helper entry for the current counter type. */
static struct type_info *get_type(struct gcov_iterator *iter)
{
return &iter->type_info[iter->type];
}
/**
* gcov_iter_new - allocate and initialize profiling data iterator
* @info: profiling data set to be iterated
*
* Return file iterator on success, %NULL otherwise.
*/
struct gcov_iterator *gcov_iter_new(struct gcov_info *info)
{
struct gcov_iterator *iter;
iter = kzalloc(sizeof(struct gcov_iterator) +
num_counter_active(info) * sizeof(struct type_info),
GFP_KERNEL);
if (iter)
iter->info = info;
return iter;
}
/**
* gcov_iter_free - release memory for iterator
* @iter: file iterator to free
*
* The iterator owns no nested allocations (type_info is inline),
* so a single kfree suffices.
*/
void gcov_iter_free(struct gcov_iterator *iter)
{
kfree(iter);
}
/**
* gcov_iter_get_info - return profiling data set for given file iterator
* @iter: file iterator
*
* Return: the gcov_info this iterator was created for.
*/
struct gcov_info *gcov_iter_get_info(struct gcov_iterator *iter)
{
return iter->info;
}
/**
* gcov_iter_start - reset file iterator to starting position
* @iter: file iterator
*/
void gcov_iter_start(struct gcov_iterator *iter)
{
int i;
iter->record = 0;
iter->function = 0;
iter->type = 0;
iter->count = 0;
iter->num_types = 0;
for (i = 0; i < GCOV_COUNTERS; i++) {
if (counter_active(iter->info, i)) {
iter->type_info[iter->num_types].ctr_type = i;
iter->type_info[iter->num_types++].offset = 0;
}
}
}
/* Mapping of logical record number to actual file content. */
#define RECORD_FILE_MAGIC 0
#define RECORD_GCOV_VERSION 1
#define RECORD_TIME_STAMP 2
#define RECORD_FUNCTION_TAG 3
#define RECORD_FUNCTON_TAG_LEN 4
#define RECORD_FUNCTION_IDENT 5
#define RECORD_FUNCTION_CHECK 6
#define RECORD_COUNT_TAG 7
#define RECORD_COUNT_LEN 8
#define RECORD_COUNT 9
/**
 * gcov_iter_next - advance file iterator to next logical record
 * @iter: file iterator
 *
 * Return zero if new position is valid, non-zero if iterator has reached end.
 *
 * State machine over the RECORD_* logical record numbers. The cases that
 * jump backwards previously used the magic numbers 9, 7 and 3 which
 * silently duplicated RECORD_COUNT, RECORD_COUNT_TAG and
 * RECORD_FUNCTION_TAG; the named constants are used instead so the
 * mapping table above stays the single source of truth.
 */
int gcov_iter_next(struct gcov_iterator *iter)
{
	switch (iter->record) {
	case RECORD_FILE_MAGIC:
	case RECORD_GCOV_VERSION:
	case RECORD_FUNCTION_TAG:
	case RECORD_FUNCTON_TAG_LEN:
	case RECORD_FUNCTION_IDENT:
	case RECORD_COUNT_TAG:
		/* Advance to next record */
		iter->record++;
		break;
	case RECORD_COUNT:
		/* Advance to next count */
		iter->count++;
		/* fall through */
	case RECORD_COUNT_LEN:
		if (iter->count < get_func(iter)->n_ctrs[iter->type]) {
			iter->record = RECORD_COUNT;
			break;
		}
		/* Advance to next counter type */
		get_type(iter)->offset += iter->count;
		iter->count = 0;
		iter->type++;
		/* fall through */
	case RECORD_FUNCTION_CHECK:
		if (iter->type < iter->num_types) {
			iter->record = RECORD_COUNT_TAG;
			break;
		}
		/* Advance to next function */
		iter->type = 0;
		iter->function++;
		/* fall through */
	case RECORD_TIME_STAMP:
		if (iter->function < iter->info->n_functions)
			iter->record = RECORD_FUNCTION_TAG;
		else
			iter->record = -1;	/* EOF sentinel */
		break;
	}
	/* Check for EOF. */
	if (iter->record == -1)
		return -EINVAL;
	else
		return 0;
}
/**
* seq_write_gcov_u32 - write 32 bit number in gcov format to seq_file
* @seq: seq_file handle
* @v: value to be stored
*
* Number format defined by gcc: numbers are recorded in the 32 bit
* unsigned binary form of the endianness of the machine generating the
* file.
*
* Return: result of seq_write() (zero on success).
*/
static int seq_write_gcov_u32(struct seq_file *seq, u32 v)
{
return seq_write(seq, &v, sizeof(v));
}
/**
 * seq_write_gcov_u64 - write 64 bit number in gcov format to seq_file
 * @seq: seq_file handle
 * @v: value to be stored
 *
 * Number format defined by gcc: numbers are recorded in the 32 bit
 * unsigned binary form of the endianness of the machine generating the
 * file. 64 bit numbers are stored as two 32 bit numbers, the low part
 * first.
 */
static int seq_write_gcov_u64(struct seq_file *seq, u64 v)
{
	u32 halves[2] = { (u32)(v & 0xffffffffUL), (u32)(v >> 32) };

	return seq_write(seq, halves, sizeof(halves));
}
/**
* gcov_iter_write - write data for current pos to seq_file
* @iter: file iterator
* @seq: seq_file handle
*
* Return zero on success, non-zero otherwise. An unknown record number
* intentionally falls through and keeps rc == -EINVAL.
*/
int gcov_iter_write(struct gcov_iterator *iter, struct seq_file *seq)
{
int rc = -EINVAL;
switch (iter->record) {
case RECORD_FILE_MAGIC:
rc = seq_write_gcov_u32(seq, GCOV_DATA_MAGIC);
break;
case RECORD_GCOV_VERSION:
rc = seq_write_gcov_u32(seq, iter->info->version);
break;
case RECORD_TIME_STAMP:
rc = seq_write_gcov_u32(seq, iter->info->stamp);
break;
case RECORD_FUNCTION_TAG:
rc = seq_write_gcov_u32(seq, GCOV_TAG_FUNCTION);
break;
case RECORD_FUNCTON_TAG_LEN:
/* 2 = payload length in words (ident + checksum follow). */
rc = seq_write_gcov_u32(seq, 2);
break;
case RECORD_FUNCTION_IDENT:
rc = seq_write_gcov_u32(seq, get_func(iter)->ident);
break;
case RECORD_FUNCTION_CHECK:
rc = seq_write_gcov_u32(seq, get_func(iter)->checksum);
break;
case RECORD_COUNT_TAG:
rc = seq_write_gcov_u32(seq,
GCOV_TAG_FOR_COUNTER(get_type(iter)->ctr_type));
break;
case RECORD_COUNT_LEN:
/* Length in 32-bit words: two per 64-bit counter value. */
rc = seq_write_gcov_u32(seq,
get_func(iter)->n_ctrs[iter->type] * 2);
break;
case RECORD_COUNT:
/* offset maps the per-function index into the per-type array. */
rc = seq_write_gcov_u64(seq,
iter->info->counts[iter->type].
values[iter->count + get_type(iter)->offset]);
break;
}
return rc;
}
| gpl-2.0 |
M8s-dev/kernel_htc_msm8939 | drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c | 2269 | 2287 | /*
* Copyright (C) 2010 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <subdev/fb.h>
struct nv4e_fb_priv {
struct nouveau_fb base;
};
static int
nv4e_fb_vram_init(struct nouveau_fb *pfb)
{
/* Size read from PFB register 0x10020c, top byte masked; memory is
* reported as stolen system RAM (IGP). NOTE(review): exact register
* field semantics assumed from the mask - confirm against HW docs. */
pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
pfb->ram.type = NV_MEM_TYPE_STOLEN;
return 0;
}
/*
 * nv4e_fb_ctor() - construct the NV4E framebuffer subdev object.
 *
 * Same pattern as the other nvXX FB constructors: allocate via
 * nouveau_fb_create(), publish through @pobject before checking the
 * return code, install the chip-specific hooks, then preinit.
 */
static int
nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	     struct nouveau_oclass *oclass, void *data, u32 size,
	     struct nouveau_object **pobject)
{
	struct nv4e_fb_priv *fb;
	int ret;

	ret = nouveau_fb_create(parent, engine, oclass, &fb);
	*pobject = nv_object(fb);
	if (ret)
		return ret;

	/* NV4E-specific VRAM probe; tiling borrows NV46/NV20/NV44 code. */
	fb->base.memtype_valid = nv04_fb_memtype_valid;
	fb->base.ram.init = nv4e_fb_vram_init;
	fb->base.tile.regions = 12;
	fb->base.tile.init = nv46_fb_tile_init;
	fb->base.tile.fini = nv20_fb_tile_fini;
	fb->base.tile.prog = nv44_fb_tile_prog;

	return nouveau_fb_preinit(&fb->base);
}
/*
 * Object class for the NV4E framebuffer subdev (FB subdev id 0x4e):
 * nv4e_fb_ctor supplies construction; destruction/init/fini use the
 * generic and NV44-family helpers.
 */
struct nouveau_oclass
nv4e_fb_oclass = {
.handle = NV_SUBDEV(FB, 0x4e),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv4e_fb_ctor,
.dtor = _nouveau_fb_dtor,
.init = nv44_fb_init,
.fini = _nouveau_fb_fini,
},
};
| gpl-2.0 |
cargabsj175/android_kernel_alcatel_4012a | drivers/acpi/apei/ghes.c | 2525 | 17171 | /*
* APEI Generic Hardware Error Source support
*
* Generic Hardware Error Source provides a way to report platform
* hardware errors (such as that from chipset). It works in so called
* "Firmware First" mode, that is, hardware errors are reported to
* firmware firstly, then reported to Linux by firmware. This way,
* some non-standard hardware error registers or non-standard hardware
* link can be checked by firmware to produce more hardware error
* information for Linux.
*
* For more information about Generic Hardware Error Source, please
* refer to ACPI Specification version 4.0, section 17.3.2.6
*
* Copyright 2010 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/kdebug.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <acpi/apei.h>
#include <acpi/atomicio.h>
#include <acpi/hed.h>
#include <asm/mce.h>
#include <asm/tlbflush.h>
#include "apei-internal.h"
#define GHES_PFX "GHES: "
#define GHES_ESTATUS_MAX_SIZE 65536
/*
* One struct ghes is created for each generic hardware error source.
* It provides the context for APEI hardware error timer/IRQ/SCI/NMI
* handler.
*
* estatus: memory buffer for error status block, allocated during
* HEST parsing.
*/
#define GHES_TO_CLEAR 0x0001
#define GHES_EXITING 0x0002
struct ghes {
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
u64 buffer_paddr;
unsigned long flags;
union {
struct list_head list;
struct timer_list timer;
unsigned int irq;
};
};
static int ghes_panic_timeout __read_mostly = 30;
/*
* All error sources notified with SCI shares one notifier function,
* so they need to be linked and checked one by one. This is applied
* to NMI too.
*
* RCU is used for these lists, so ghes_list_mutex is only used for
* list changing, not for traversing.
*/
static LIST_HEAD(ghes_sci);
static LIST_HEAD(ghes_nmi);
static DEFINE_MUTEX(ghes_list_mutex);
/*
* NMI may be triggered on any CPU, so ghes_nmi_lock is used for
* mutual exclusion.
*/
static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
/*
* Because the memory area used to transfer hardware error information
* from BIOS to Linux can be determined only in NMI, IRQ or timer
* handler, but general ioremap can not be used in atomic context, so
* a special version of atomic ioremap is implemented for that.
*/
/*
* Two virtual pages are used, one for NMI context, the other for
* IRQ/PROCESS context
*/
#define GHES_IOREMAP_PAGES 2
#define GHES_IOREMAP_NMI_PAGE(base) (base)
#define GHES_IOREMAP_IRQ_PAGE(base) ((base) + PAGE_SIZE)
/* virtual memory area for atomic ioremap */
static struct vm_struct *ghes_ioremap_area;
/*
* These 2 spinlock is used to prevent atomic ioremap virtual memory
* area from being mapped simultaneously.
*/
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
/*
 * Reserve the two-page virtual area used for atomic ioremap (one page
 * for NMI context, one for IRQ/process context). Must run before any
 * error source can be probed.
 */
static int ghes_ioremap_init(void)
{
	struct vm_struct *area;

	area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
			     VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!area) {
		pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
		return -ENOMEM;
	}
	ghes_ioremap_area = area;
	return 0;
}
/* Release the atomic-ioremap virtual area reserved by ghes_ioremap_init(). */
static void ghes_ioremap_exit(void)
{
free_vm_area(ghes_ioremap_area);
}
/*
 * Map the physical page at @pfn into the dedicated NMI slot of the
 * atomic-ioremap area. Caller holds ghes_ioremap_lock_nmi (see
 * ghes_copy_tofrom_phys()) and undoes the mapping with ghes_iounmap_nmi().
 * NOTE(review): ioremap_page_range() return value is ignored - confirm
 * the mapping cannot fail for a single pre-reserved page.
 */
static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
{
unsigned long vaddr;
vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
pfn << PAGE_SHIFT, PAGE_KERNEL);
return (void __iomem *)vaddr;
}
/*
 * Map the physical page at @pfn into the IRQ/process-context slot of
 * the atomic-ioremap area. Caller holds ghes_ioremap_lock_irq (see
 * ghes_copy_tofrom_phys()) and undoes the mapping with ghes_iounmap_irq().
 * NOTE(review): ioremap_page_range() return value is ignored here too.
 */
static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
{
unsigned long vaddr;
vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
pfn << PAGE_SHIFT, PAGE_KERNEL);
return (void __iomem *)vaddr;
}
/*
 * Tear down a mapping made by ghes_ioremap_pfn_nmi(): unmap without a
 * global flush, then flush just the one TLB entry. BUG_ON verifies the
 * pointer really is the NMI slot.
 */
static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
{
unsigned long vaddr = (unsigned long __force)vaddr_ptr;
void *base = ghes_ioremap_area->addr;
BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
__flush_tlb_one(vaddr);
}
/*
 * Tear down a mapping made by ghes_ioremap_pfn_irq(); mirror image of
 * ghes_iounmap_nmi() for the IRQ/process-context slot.
 */
static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
{
unsigned long vaddr = (unsigned long __force)vaddr_ptr;
void *base = ghes_ioremap_area->addr;
BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
__flush_tlb_one(vaddr);
}
/*
 * Allocate and initialize one struct ghes for a HEST generic error
 * source: pre-map its error status register and allocate the status
 * block buffer (clamped to GHES_ESTATUS_MAX_SIZE). Returns an ERR_PTR
 * on failure; resources are unwound via the goto chain.
 */
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
struct ghes *ghes;
unsigned int error_block_length;
int rc;
ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
if (!ghes)
return ERR_PTR(-ENOMEM);
ghes->generic = generic;
rc = acpi_pre_map_gar(&generic->error_status_address);
if (rc)
goto err_free;
error_block_length = generic->error_block_length;
/* Firmware-supplied length is untrusted; clamp oversized values. */
if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
pr_warning(FW_WARN GHES_PFX
"Error status block length is too long: %u for "
"generic hardware error source: %d.\n",
error_block_length, generic->header.source_id);
error_block_length = GHES_ESTATUS_MAX_SIZE;
}
ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
if (!ghes->estatus) {
rc = -ENOMEM;
goto err_unmap;
}
return ghes;
err_unmap:
acpi_post_unmap_gar(&generic->error_status_address);
err_free:
kfree(ghes);
return ERR_PTR(rc);
}
/* Undo ghes_new(): free the status buffer and unmap the status register. */
static void ghes_fini(struct ghes *ghes)
{
kfree(ghes->estatus);
acpi_post_unmap_gar(&ghes->generic->error_status_address);
}
enum {
GHES_SEV_NO = 0x0,
GHES_SEV_CORRECTED = 0x1,
GHES_SEV_RECOVERABLE = 0x2,
GHES_SEV_PANIC = 0x3,
};
/*
 * Map a CPER severity value onto the internal GHES_SEV_* scale.
 * Anything unrecognized is treated as fatal.
 */
static inline int ghes_severity(int severity)
{
	if (severity == CPER_SEV_INFORMATIONAL)
		return GHES_SEV_NO;
	if (severity == CPER_SEV_CORRECTED)
		return GHES_SEV_CORRECTED;
	if (severity == CPER_SEV_RECOVERABLE)
		return GHES_SEV_RECOVERABLE;
	/* CPER_SEV_FATAL and unknown values both escalate to panic. */
	return GHES_SEV_PANIC;
}
/*
 * Copy @len bytes between @buffer and physical address @paddr, one page
 * at a time, using the atomic-ioremap slots. Direction is selected by
 * @from_phys (non-zero: physical -> buffer). NMI context uses the NMI
 * slot under ghes_ioremap_lock_nmi; all other contexts use the IRQ slot
 * under ghes_ioremap_lock_irq with interrupts disabled.
 */
static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
int from_phys)
{
void __iomem *vaddr;
unsigned long flags = 0;
int in_nmi = in_nmi();
u64 offset;
u32 trunk;
while (len > 0) {
/* Byte offset of paddr within its page. */
offset = paddr - (paddr & PAGE_MASK);
if (in_nmi) {
raw_spin_lock(&ghes_ioremap_lock_nmi);
vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
} else {
spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
}
/* trunk = bytes to copy in this page-sized chunk. */
trunk = PAGE_SIZE - offset;
trunk = min(trunk, len);
if (from_phys)
memcpy_fromio(buffer, vaddr + offset, trunk);
else
memcpy_toio(vaddr + offset, buffer, trunk);
len -= trunk;
paddr += trunk;
buffer += trunk;
if (in_nmi) {
ghes_iounmap_nmi(vaddr);
raw_spin_unlock(&ghes_ioremap_lock_nmi);
} else {
ghes_iounmap_irq(vaddr);
spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
}
}
}
/*
 * Read and validate the error status block of @ghes from firmware.
 * @silent suppresses warnings (used from NMI where printk is unsafe).
 *
 * Two-stage read: the fixed-size header first, then - after length and
 * header validation - the variable-size remainder. On success the
 * source's physical address is remembered and GHES_TO_CLEAR is set so
 * ghes_clear_estatus() can acknowledge the block to firmware.
 *
 * Returns 0 on success, -ENOENT if no error is pending, -EIO on
 * read/validation failure.
 */
static int ghes_read_estatus(struct ghes *ghes, int silent)
{
struct acpi_hest_generic *g = ghes->generic;
u64 buf_paddr;
u32 len;
int rc;
rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
if (rc) {
if (!silent && printk_ratelimit())
pr_warning(FW_WARN GHES_PFX
"Failed to read error status block address for hardware error source: %d.\n",
g->header.source_id);
return -EIO;
}
if (!buf_paddr)
return -ENOENT;
/* Stage 1: header only. */
ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
sizeof(*ghes->estatus), 1);
if (!ghes->estatus->block_status)
return -ENOENT;
ghes->buffer_paddr = buf_paddr;
ghes->flags |= GHES_TO_CLEAR;
rc = -EIO;
len = apei_estatus_len(ghes->estatus);
if (len < sizeof(*ghes->estatus))
goto err_read_block;
if (len > ghes->generic->error_block_length)
goto err_read_block;
if (apei_estatus_check_header(ghes->estatus))
goto err_read_block;
/* Stage 2: the rest of the block, now that len is trusted. */
ghes_copy_tofrom_phys(ghes->estatus + 1,
buf_paddr + sizeof(*ghes->estatus),
len - sizeof(*ghes->estatus), 1);
if (apei_estatus_check(ghes->estatus))
goto err_read_block;
rc = 0;
err_read_block:
if (rc && !silent && printk_ratelimit())
pr_warning(FW_WARN GHES_PFX
"Failed to read error status block!\n");
return rc;
}
/*
 * Invalidate the local copy of the status block and, if a block was
 * actually read (GHES_TO_CLEAR set), write the zeroed block_status back
 * to firmware to acknowledge the error.
 */
static void ghes_clear_estatus(struct ghes *ghes)
{
ghes->estatus->block_status = 0;
if (!(ghes->flags & GHES_TO_CLEAR))
return;
ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
sizeof(ghes->estatus->block_status), 0);
ghes->flags &= ~GHES_TO_CLEAR;
}
/*
 * Dispatch the sections of an already-read error status block. Platform
 * memory error sections are forwarded to the MCE subsystem (x86 only);
 * all other section types are currently ignored.
 *
 * The old `processed` local was written but never read (a dead store
 * that trips -Wunused-but-set-variable); it has been removed.
 */
static void ghes_do_proc(struct ghes *ghes)
{
	int sev;
	struct acpi_hest_generic_data *gdata;

	sev = ghes_severity(ghes->estatus->error_severity);
	apei_estatus_for_each_section(ghes->estatus, gdata) {
#ifdef CONFIG_X86_MCE
		/* Memory error payload immediately follows the header. */
		if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				 CPER_SEC_PLATFORM_MEM)) {
			apei_mce_report_mem_error(
				sev == GHES_SEV_CORRECTED,
				(struct cper_sec_mem_err *)(gdata + 1));
		}
#endif
	}
}
/*
 * Print the current status block of @ghes. If @pfx is NULL a log level
 * is chosen from the severity (warning for corrected-or-better, error
 * otherwise). Output is rate limited to avoid log flooding from a
 * constantly firing source.
 */
static void ghes_print_estatus(const char *pfx, struct ghes *ghes)
{
/* Not more than 2 messages every 5 seconds */
static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2);
if (pfx == NULL) {
if (ghes_severity(ghes->estatus->error_severity) <=
GHES_SEV_CORRECTED)
pfx = KERN_WARNING HW_ERR;
else
pfx = KERN_ERR HW_ERR;
}
if (__ratelimit(&ratelimit)) {
printk(
"%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
pfx, ghes->generic->header.source_id);
apei_estatus_print(pfx, ghes->estatus);
}
}
/*
 * Read, report, process and acknowledge one error source.
 *
 * Return: 0 if a status block was read and processed, otherwise the
 * error from ghes_read_estatus() (-ENOENT when no error is pending).
 * Propagating rc matters: ghes_irq_func() returns IRQ_NONE and
 * ghes_notify_sci() keeps NOTIFY_DONE based on this value — the old
 * unconditional `return 0` made those caller-side checks dead code.
 */
static int ghes_proc(struct ghes *ghes)
{
	int rc;

	rc = ghes_read_estatus(ghes, 0);
	if (rc)
		goto out;
	ghes_print_estatus(NULL, ghes);
	ghes_do_proc(ghes);
out:
	ghes_clear_estatus(ghes);
	return rc;
}
/*
 * (Re)arm the polling timer for a POLLED-notify error source using the
 * firmware-specified poll interval; a zero interval disables polling
 * with a warning since the timer would otherwise spin.
 */
static void ghes_add_timer(struct ghes *ghes)
{
struct acpi_hest_generic *g = ghes->generic;
unsigned long expire;
if (!g->notify.poll_interval) {
pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
g->header.source_id);
return;
}
expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
ghes->timer.expires = round_jiffies_relative(expire);
add_timer(&ghes->timer);
}
/*
 * Timer callback for POLLED sources: process the source and re-arm,
 * unless ghes_remove() has set GHES_EXITING to stop the cycle.
 */
static void ghes_poll_func(unsigned long data)
{
struct ghes *ghes = (void *)data;
ghes_proc(ghes);
if (!(ghes->flags & GHES_EXITING))
ghes_add_timer(ghes);
}
/*
 * Interrupt handler for EXTERNAL-notify sources: claim the interrupt
 * only when ghes_proc() succeeded for this source.
 */
static irqreturn_t ghes_irq_func(int irq, void *data)
{
	struct ghes *ghes = data;

	return ghes_proc(ghes) ? IRQ_NONE : IRQ_HANDLED;
}
/*
 * SCI notifier: all SCI-notified sources share this callback, so walk
 * the whole ghes_sci list (RCU-protected) and report NOTIFY_OK if any
 * source had a pending error.
 */
static int ghes_notify_sci(struct notifier_block *this,
unsigned long event, void *data)
{
struct ghes *ghes;
int ret = NOTIFY_DONE;
rcu_read_lock();
list_for_each_entry_rcu(ghes, &ghes_sci, list) {
if (!ghes_proc(ghes))
ret = NOTIFY_OK;
}
rcu_read_unlock();
return ret;
}
/*
 * NMI die notifier: read all NMI-notified sources, find the highest
 * severity, panic immediately for fatal errors, and otherwise process
 * the read blocks. Runs entirely in NMI context under ghes_nmi_lock,
 * hence the silent reads and the "no printk" processing pass below.
 */
static int ghes_notify_nmi(struct notifier_block *this,
unsigned long cmd, void *data)
{
struct ghes *ghes, *ghes_global = NULL;
int sev, sev_global = -1;
int ret = NOTIFY_DONE;
if (cmd != DIE_NMI)
return ret;
raw_spin_lock(&ghes_nmi_lock);
/* Pass 1: read every source, track the worst severity seen. */
list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
if (ghes_read_estatus(ghes, 1)) {
ghes_clear_estatus(ghes);
continue;
}
sev = ghes_severity(ghes->estatus->error_severity);
if (sev > sev_global) {
sev_global = sev;
ghes_global = ghes;
}
ret = NOTIFY_STOP;
}
if (ret == NOTIFY_DONE)
goto out;
if (sev_global >= GHES_SEV_PANIC) {
oops_begin();
ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global);
/* reboot to log the error! */
if (panic_timeout == 0)
panic_timeout = ghes_panic_timeout;
panic("Fatal hardware error!");
}
/* Pass 2: non-fatal case - process and acknowledge each block. */
list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
if (!(ghes->flags & GHES_TO_CLEAR))
continue;
/* Do not print estatus because printk is not NMI safe */
ghes_do_proc(ghes);
ghes_clear_estatus(ghes);
}
out:
raw_spin_unlock(&ghes_nmi_lock);
return ret;
}
static struct notifier_block ghes_notifier_sci = {
.notifier_call = ghes_notify_sci,
};
static struct notifier_block ghes_notifier_nmi = {
.notifier_call = ghes_notify_nmi,
};
/*
 * Platform-device probe for one HEST generic error source: validate the
 * firmware-declared notification type and status block length, allocate
 * the struct ghes, then hook up the source according to its notify type
 * (poll timer, GSI interrupt, SCI notifier list or NMI notifier list).
 */
static int __devinit ghes_probe(struct platform_device *ghes_dev)
{
struct acpi_hest_generic *generic;
struct ghes *ghes = NULL;
int rc = -EINVAL;
generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
if (!generic->enabled)
return -ENODEV;
/* Reject notification types this driver cannot service. */
switch (generic->notify.type) {
case ACPI_HEST_NOTIFY_POLLED:
case ACPI_HEST_NOTIFY_EXTERNAL:
case ACPI_HEST_NOTIFY_SCI:
case ACPI_HEST_NOTIFY_NMI:
break;
case ACPI_HEST_NOTIFY_LOCAL:
pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
generic->header.source_id);
goto err;
default:
pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
generic->notify.type, generic->header.source_id);
goto err;
}
rc = -EIO;
/* The block must at least hold the generic status header. */
if (generic->error_block_length <
sizeof(struct acpi_hest_generic_status)) {
pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
generic->error_block_length,
generic->header.source_id);
goto err;
}
ghes = ghes_new(generic);
if (IS_ERR(ghes)) {
rc = PTR_ERR(ghes);
ghes = NULL;
goto err;
}
/* Wire up the notification mechanism. */
switch (generic->notify.type) {
case ACPI_HEST_NOTIFY_POLLED:
ghes->timer.function = ghes_poll_func;
ghes->timer.data = (unsigned long)ghes;
init_timer_deferrable(&ghes->timer);
ghes_add_timer(ghes);
break;
case ACPI_HEST_NOTIFY_EXTERNAL:
/* External interrupt vector is GSI */
if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) {
pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
generic->header.source_id);
goto err;
}
if (request_irq(ghes->irq, ghes_irq_func,
0, "GHES IRQ", ghes)) {
pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
generic->header.source_id);
goto err;
}
break;
case ACPI_HEST_NOTIFY_SCI:
/* First SCI source registers the shared HED notifier. */
mutex_lock(&ghes_list_mutex);
if (list_empty(&ghes_sci))
register_acpi_hed_notifier(&ghes_notifier_sci);
list_add_rcu(&ghes->list, &ghes_sci);
mutex_unlock(&ghes_list_mutex);
break;
case ACPI_HEST_NOTIFY_NMI:
/* First NMI source registers the shared die notifier. */
mutex_lock(&ghes_list_mutex);
if (list_empty(&ghes_nmi))
register_die_notifier(&ghes_notifier_nmi);
list_add_rcu(&ghes->list, &ghes_nmi);
mutex_unlock(&ghes_list_mutex);
break;
default:
BUG();
}
platform_set_drvdata(ghes_dev, ghes);
return 0;
err:
if (ghes) {
ghes_fini(ghes);
kfree(ghes);
}
return rc;
}
/*
 * Platform-device remove: mark the source as exiting (stops the poll
 * timer from re-arming), undo the notify-type-specific registration
 * done in ghes_probe(), then free the ghes. NMI sources additionally
 * need synchronize_rcu() so the NMI handler cannot still be walking the
 * list entry being freed.
 */
static int __devexit ghes_remove(struct platform_device *ghes_dev)
{
struct ghes *ghes;
struct acpi_hest_generic *generic;
ghes = platform_get_drvdata(ghes_dev);
generic = ghes->generic;
ghes->flags |= GHES_EXITING;
switch (generic->notify.type) {
case ACPI_HEST_NOTIFY_POLLED:
del_timer_sync(&ghes->timer);
break;
case ACPI_HEST_NOTIFY_EXTERNAL:
free_irq(ghes->irq, ghes);
break;
case ACPI_HEST_NOTIFY_SCI:
mutex_lock(&ghes_list_mutex);
list_del_rcu(&ghes->list);
/* Last SCI source unregisters the shared HED notifier. */
if (list_empty(&ghes_sci))
unregister_acpi_hed_notifier(&ghes_notifier_sci);
mutex_unlock(&ghes_list_mutex);
break;
case ACPI_HEST_NOTIFY_NMI:
mutex_lock(&ghes_list_mutex);
list_del_rcu(&ghes->list);
if (list_empty(&ghes_nmi))
unregister_die_notifier(&ghes_notifier_nmi);
mutex_unlock(&ghes_list_mutex);
/*
* To synchronize with NMI handler, ghes can only be
* freed after NMI handler finishes.
*/
synchronize_rcu();
break;
default:
BUG();
break;
}
ghes_fini(ghes);
kfree(ghes);
platform_set_drvdata(ghes_dev, NULL);
return 0;
}
static struct platform_driver ghes_platform_driver = {
.driver = {
.name = "GHES",
.owner = THIS_MODULE,
},
.probe = ghes_probe,
.remove = ghes_remove,
};
/*
 * Module init: bail out when ACPI or HEST is unavailable, reserve the
 * atomic-ioremap area, then register the platform driver. The ioremap
 * area is released again if driver registration fails.
 */
static int __init ghes_init(void)
{
	int rc;

	if (acpi_disabled)
		return -ENODEV;
	if (hest_disable) {
		pr_info(GHES_PFX "HEST is not enabled!\n");
		return -EINVAL;
	}
	rc = ghes_ioremap_init();
	if (rc)
		return rc;
	rc = platform_driver_register(&ghes_platform_driver);
	if (rc) {
		ghes_ioremap_exit();
		return rc;
	}
	return 0;
}
/* Module exit: unregister the driver, then release the ioremap area. */
static void __exit ghes_exit(void)
{
platform_driver_unregister(&ghes_platform_driver);
ghes_ioremap_exit();
}
module_init(ghes_init);
module_exit(ghes_exit);
MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:GHES");
| gpl-2.0 |
Perferom/android_kernel_samsung_msm | drivers/ide/pmac.c | 2781 | 46824 | /*
* Support for IDE interfaces on PowerMacs.
*
* These IDE interfaces are memory-mapped and have a DBDMA channel
* for doing DMA.
*
* Copyright (C) 1998-2003 Paul Mackerras & Ben. Herrenschmidt
* Copyright (C) 2007-2008 Bartlomiej Zolnierkiewicz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Some code taken from drivers/ide/ide-dma.c:
*
* Copyright (c) 1995-1998 Mark Lord
*
* TODO: - Use pre-calculated (kauai) timing tables all the time and
* get rid of the "rounded" tables used previously, so we have the
* same table format for all controllers and can then just have one
* big table
*
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/pci.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/ide.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/mediabay.h>
#define DRV_NAME "ide-pmac"
#undef IDE_PMAC_DEBUG
#define DMA_WAIT_TIMEOUT 50
/*
 * Per-interface state for one PowerMac IDE cell.
 *
 * BUG FIX: kauai_fcr was declared "volatile u32 __iomem * *" (pointer to
 * pointer).  It is assigned the ioremap'ed FCR base directly
 * (pmif->kauai_fcr = base) and handed straight to readl()/writel(), so a
 * single level of indirection is the correct type.
 */
typedef struct pmac_ide_hwif {
	unsigned long			regbase;	/* ioremap'ed taskfile base */
	int				irq;
	int				kind;		/* controller_* enum value */
	int				aapl_bus_id;	/* "AAPL,bus-id" property */
	unsigned			broken_dma : 1;	/* set for OHare cells */
	unsigned			broken_dma_warn : 1;
	struct device_node*		node;
	struct macio_dev		*mdev;		/* NULL for PCI (Kauai) attach */
	u32				timings[4];	/* [0]/[1] master/slave, [2]/[3] ultra regs */
	volatile u32 __iomem *		kauai_fcr;	/* FCR regs, Kauai/PCI only */
	ide_hwif_t			*hwif;

	/* Those fields are duplicating what is in hwif. We currently
	 * can't use the hwif ones because of some assumptions that are
	 * being done by the generic code about the kind of dma controller
	 * and format of the dma table. This will have to be fixed though.
	 */
	volatile struct dbdma_regs __iomem *	dma_regs;
	struct dbdma_cmd*		dma_table_cpu;
} pmac_ide_hwif_t;
/* Controller cell generations handled by this driver.  The values are
 * used as indices into model_name[] below, so keep the two in sync. */
enum {
	controller_ohare,	/* OHare based */
	controller_heathrow,	/* Heathrow/Paddington */
	controller_kl_ata3,	/* KeyLargo ATA-3 */
	controller_kl_ata4,	/* KeyLargo ATA-4 */
	controller_un_ata6,	/* UniNorth2 ATA-6 */
	controller_k2_ata6,	/* K2 ATA-6 */
	controller_sh_ata6,	/* Shasta ATA-6 */
};
/* Human-readable controller names, indexed by the controller_* enum */
static const char* model_name[] = {
	"OHare ATA",		/* OHare based */
	"Heathrow ATA",		/* Heathrow/Paddington */
	"KeyLargo ATA-3",	/* KeyLargo ATA-3 (MDMA only) */
	"KeyLargo ATA-4",	/* KeyLargo ATA-4 (UDMA/66) */
	"UniNorth ATA-6",	/* UniNorth2 ATA-6 (UDMA/100) */
	"K2 ATA-6",		/* K2 ATA-6 (UDMA/100) */
	"Shasta ATA-6",		/* Shasta ATA-6 (UDMA/133) */
};
/*
* Extra registers, both 32-bit little-endian
*/
#define IDE_TIMING_CONFIG 0x200
#define IDE_INTERRUPT 0x300
/* Kauai (U2) ATA has different register setup */
#define IDE_KAUAI_PIO_CONFIG 0x200
#define IDE_KAUAI_ULTRA_CONFIG 0x210
#define IDE_KAUAI_POLL_CONFIG 0x220
/*
* Timing configuration register definitions
*/
/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
#define SYSCLK_TICKS(t) (((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
#define SYSCLK_TICKS_66(t) (((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
#define IDE_SYSCLK_NS 30 /* 33Mhz cell */
#define IDE_SYSCLK_66_NS 15 /* 66Mhz cell */
/* 133Mhz cell, found in shasta.
* See comments about 100 Mhz Uninorth 2...
* Note that PIO_MASK and MDMA_MASK seem to overlap
*/
#define TR_133_PIOREG_PIO_MASK 0xff000fff
#define TR_133_PIOREG_MDMA_MASK 0x00fff800
#define TR_133_UDMAREG_UDMA_MASK 0x0003ffff
#define TR_133_UDMAREG_UDMA_EN 0x00000001
/* 100Mhz cell, found in Uninorth 2. I don't have much infos about
* this one yet, it appears as a pci device (106b/0033) on uninorth
* internal PCI bus and it's clock is controlled like gem or fw. It
* appears to be an evolution of keylargo ATA4 with a timing register
* extended to 2 32bits registers and a similar DBDMA channel. Other
* registers seem to exist but I can't tell much about them.
*
* So far, I'm using pre-calculated tables for this extracted from
* the values used by the MacOS X driver.
*
* The "PIO" register controls PIO and MDMA timings, the "ULTRA"
* register controls the UDMA timings. At least, it seems bit 0
* of this one enables UDMA vs. MDMA, and bits 4..7 are the
* cycle time in units of 10ns. Bits 8..15 are used by I don't
* know their meaning yet
*/
#define TR_100_PIOREG_PIO_MASK 0xff000fff
#define TR_100_PIOREG_MDMA_MASK 0x00fff000
#define TR_100_UDMAREG_UDMA_MASK 0x0000ffff
#define TR_100_UDMAREG_UDMA_EN 0x00000001
/* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on
* 40 connector cable and to 4 on 80 connector one.
* Clock unit is 15ns (66Mhz)
*
* 3 Values can be programmed:
* - Write data setup, which appears to match the cycle time. They
* also call it DIOW setup.
* - Ready to pause time (from spec)
* - Address setup. That one is weird. I don't see where exactly
* it fits in UDMA cycles, I got it's name from an obscure piece
* of commented out code in Darwin. They leave it to 0, we do as
* well, despite a comment that would lead to think it has a
* min value of 45ns.
* Apple also add 60ns to the write data setup (or cycle time ?) on
* reads.
*/
#define TR_66_UDMA_MASK 0xfff00000
#define TR_66_UDMA_EN 0x00100000 /* Enable Ultra mode for DMA */
#define TR_66_UDMA_ADDRSETUP_MASK 0xe0000000 /* Address setup */
#define TR_66_UDMA_ADDRSETUP_SHIFT 29
#define TR_66_UDMA_RDY2PAUS_MASK 0x1e000000 /* Ready 2 pause time */
#define TR_66_UDMA_RDY2PAUS_SHIFT 25
#define TR_66_UDMA_WRDATASETUP_MASK 0x01e00000 /* Write data setup time */
#define TR_66_UDMA_WRDATASETUP_SHIFT 21
#define TR_66_MDMA_MASK 0x000ffc00
#define TR_66_MDMA_RECOVERY_MASK 0x000f8000
#define TR_66_MDMA_RECOVERY_SHIFT 15
#define TR_66_MDMA_ACCESS_MASK 0x00007c00
#define TR_66_MDMA_ACCESS_SHIFT 10
#define TR_66_PIO_MASK 0x000003ff
#define TR_66_PIO_RECOVERY_MASK 0x000003e0
#define TR_66_PIO_RECOVERY_SHIFT 5
#define TR_66_PIO_ACCESS_MASK 0x0000001f
#define TR_66_PIO_ACCESS_SHIFT 0
/* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
* Can do pio & mdma modes, clock unit is 30ns (33Mhz)
*
* The access time and recovery time can be programmed. Some older
* Darwin code base limit OHare to 150ns cycle time. I decided to do
* the same here fore safety against broken old hardware ;)
* The HalfTick bit, when set, adds half a clock (15ns) to the access
* time and removes one from recovery. It's not supported on KeyLargo
* implementation afaik. The E bit appears to be set for PIO mode 0 and
* is used to reach long timings used in this mode.
*/
#define TR_33_MDMA_MASK 0x003ff800
#define TR_33_MDMA_RECOVERY_MASK 0x001f0000
#define TR_33_MDMA_RECOVERY_SHIFT 16
#define TR_33_MDMA_ACCESS_MASK 0x0000f800
#define TR_33_MDMA_ACCESS_SHIFT 11
#define TR_33_MDMA_HALFTICK 0x00200000
#define TR_33_PIO_MASK 0x000007ff
#define TR_33_PIO_E 0x00000400
#define TR_33_PIO_RECOVERY_MASK 0x000003e0
#define TR_33_PIO_RECOVERY_SHIFT 5
#define TR_33_PIO_ACCESS_MASK 0x0000001f
#define TR_33_PIO_ACCESS_SHIFT 0
/*
* Interrupt register definitions
*/
#define IDE_INTR_DMA 0x80000000
#define IDE_INTR_DEVICE 0x40000000
/*
* FCR Register on Kauai. Not sure what bit 0x4 is ...
*/
#define KAUAI_FCR_UATA_MAGIC 0x00000004
#define KAUAI_FCR_UATA_RESET_N 0x00000002
#define KAUAI_FCR_UATA_ENABLE 0x00000001
/* Rounded Multiword DMA timings
*
* I gave up finding a generic formula for all controller
* types and instead, built tables based on timing values
* used by Apple in Darwin's implementation.
*/
/* One row per MDMA mode; all values in nanoseconds.  Tables are ordered
 * from slowest (mode 0) to fastest and terminated by an all-zero row. */
struct mdma_timings_t {
	int accessTime;
	int recoveryTime;
	int cycleTime;
};

/* 33MHz cell (OHare/Heathrow) MDMA timings */
struct mdma_timings_t mdma_timings_33[] =
{
    { 240, 240, 480 },
    { 180, 180, 360 },
    { 135, 135, 270 },
    { 120, 120, 240 },
    { 105, 105, 210 },
    {  90,  90, 180 },
    {  75,  75, 150 },
    {  75,  45, 120 },
    {   0,   0,   0 }
};

/* 33MHz KeyLargo (ATA-3) cell MDMA timings */
struct mdma_timings_t mdma_timings_33k[] =
{
    { 240, 240, 480 },
    { 180, 180, 360 },
    { 150, 150, 300 },
    { 120, 120, 240 },
    {  90, 120, 210 },
    {  90,  90, 180 },
    {  90,  60, 150 },
    {  90,  30, 120 },
    {   0,   0,   0 }
};

/* 66MHz KeyLargo (ATA-4) cell MDMA timings */
struct mdma_timings_t mdma_timings_66[] =
{
    { 240, 240, 480 },
    { 180, 180, 360 },
    { 135, 135, 270 },
    { 120, 120, 240 },
    { 105, 105, 210 },
    {  90,  90, 180 },
    {  90,  75, 165 },
    {  75,  45, 120 },
    {   0,   0,   0 }
};

/* KeyLargo ATA-4 Ultra DMA timings (rounded), indexed by UDMA mode 0-4;
 * values in nanoseconds */
struct {
	int addrSetup; /* ??? */
	int rdy2pause;
	int wrDataSetup;
} kl66_udma_timings[] =
{
    {   0, 180,  120 },	/* Mode 0 */
    {   0, 150,  90 },	/*      1 */
    {   0, 120,  60 },	/*      2 */
    {   0, 90,   45 },	/*      3 */
    {   0, 90,   30 }	/*      4 */
};
/* UniNorth 2 ATA/100 timings */
/* UniNorth 2 ATA/100 timings: pre-computed register values keyed by
 * cycle time in ns.  Tables are ordered slowest-first and terminated by
 * a zero cycle_time; lookup is done by kauai_lookup_timing() below. */
struct kauai_timing {
	int	cycle_time;	/* in ns, 0 terminates the table */
	u32	timing_reg;	/* raw value for the timing register */
};

/* Kauai / UniNorth 2 (ATA/100) PIO timings */
static struct kauai_timing	kauai_pio_timings[] =
{
	{ 930	, 0x08000fff },
	{ 600	, 0x08000a92 },
	{ 383	, 0x0800060f },
	{ 360	, 0x08000492 },
	{ 330	, 0x0800048f },
	{ 300	, 0x080003cf },
	{ 270	, 0x080003cc },
	{ 240	, 0x0800038b },
	{ 239	, 0x0800030c },
	{ 180	, 0x05000249 },
	{ 120	, 0x04000148 },
	{ 0	, 0 },
};

/* Kauai / UniNorth 2 (ATA/100) MDMA timings */
static struct kauai_timing	kauai_mdma_timings[] =
{
	{ 1260	, 0x00fff000 },
	{ 480	, 0x00618000 },
	{ 360	, 0x00492000 },
	{ 270	, 0x0038e000 },
	{ 240	, 0x0030c000 },
	{ 210	, 0x002cb000 },
	{ 180	, 0x00249000 },
	{ 150	, 0x00209000 },
	{ 120	, 0x00148000 },
	{ 0	, 0 },
};

/* Kauai / UniNorth 2 (ATA/100) UDMA timings */
static struct kauai_timing	kauai_udma_timings[] =
{
	{ 120	, 0x000070c0 },
	{ 90	, 0x00005d80 },
	{ 60	, 0x00004a60 },
	{ 45	, 0x00003a50 },
	{ 30	, 0x00002a30 },
	{ 20	, 0x00002921 },
	{ 0	, 0 },
};

/* Shasta (ATA/133) PIO timings */
static struct kauai_timing	shasta_pio_timings[] =
{
	{ 930	, 0x08000fff },
	{ 600	, 0x0A000c97 },
	{ 383	, 0x07000712 },
	{ 360	, 0x040003cd },
	{ 330	, 0x040003cd },
	{ 300	, 0x040003cd },
	{ 270	, 0x040003cd },
	{ 240	, 0x040003cd },
	{ 239	, 0x040003cd },
	{ 180	, 0x0400028b },
	{ 120	, 0x0400010a },
	{ 0	, 0 },
};

/* Shasta (ATA/133) MDMA timings */
static struct kauai_timing	shasta_mdma_timings[] =
{
	{ 1260	, 0x00fff000 },
	{ 480	, 0x00820800 },
	{ 360	, 0x00820800 },
	{ 270	, 0x00820800 },
	{ 240	, 0x00820800 },
	{ 210	, 0x00820800 },
	{ 180	, 0x00820800 },
	{ 150	, 0x0028b000 },
	{ 120	, 0x001ca000 },
	{ 0	, 0 },
};

/* Shasta (ATA/133) UDMA timings */
static struct kauai_timing	shasta_udma133_timings[] =
{
	{ 120   , 0x00035901, },
	{ 90    , 0x000348b1, },
	{ 60    , 0x00033881, },
	{ 45    , 0x00033861, },
	{ 30    , 0x00033841, },
	{ 20    , 0x00033031, },
	{ 15    , 0x00033021, },
	{ 0	, 0 },
};
/*
 * Return the pre-computed timing register value for the slowest table
 * entry that is still faster than (or equal to) the requested cycle
 * time, i.e. the first entry whose successor's cycle_time drops below
 * cycle_time.  The tables end in a zero sentinel; a request slower than
 * everything in the table is a programming error and BUG()s.
 */
static inline u32
kauai_lookup_timing(struct kauai_timing* table, int cycle_time)
{
	int idx = 0;

	while (table[idx].cycle_time) {
		if (cycle_time > table[idx + 1].cycle_time)
			return table[idx].timing_reg;
		idx++;
	}
	BUG();
	return 0;
}
/* allow up to 256 DBDMA commands per xfer */
#define MAX_DCMDS 256
/*
* Wait 1s for disk to answer on IDE bus after a hard reset
* of the device (via GPIO/FCR).
*
* Some devices seem to "pollute" the bus even after dropping
* the BSY bit (typically some combo drives slave on the UDMA
* bus) after a hard reset. Since we hard reset all drives on
* KeyLargo ATA66, we have to keep that delay around. I may end
* up not hard resetting anymore on these and keep the delay only
* for older interfaces instead (we have to reset when coming
* from MacOS...) --BenH.
*/
#define IDE_WAKEUP_DELAY (1*HZ)
static int pmac_ide_init_dma(ide_hwif_t *, const struct ide_port_info *);
#define PMAC_IDE_REG(x) \
((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
/*
* Apply the timings of the proper unit (master/slave) to the shared
* timing register when selecting that unit. This version is for
* ASICs with a single timing register
*/
/*
 * Apply the timings of the proper unit (master/slave) to the shared
 * timing register when selecting that unit.  This version is for ASICs
 * with a single timing register: slot 0 holds the master timings, slot
 * 1 the slave's.
 */
static void pmac_ide_apply_timings(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);

	writel(pmif->timings[drive->dn & 1],
	       PMAC_IDE_REG(IDE_TIMING_CONFIG));
	/* Read back — same pattern used after every timing write in this
	 * driver, presumably to flush the posted MMIO write */
	(void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
}
/*
* Apply the timings of the proper unit (master/slave) to the shared
* timing register when selecting that unit. This version is for
* ASICs with a dual timing register (Kauai)
*/
/*
 * Apply the timings of the proper unit (master/slave) to the shared
 * timing registers when selecting that unit.  This version is for ASICs
 * with a dual timing register (Kauai): timings[0]/[1] feed the PIO
 * config register, timings[2]/[3] the ULTRA config register.
 */
static void pmac_ide_kauai_apply_timings(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
	unsigned int unit = drive->dn & 1;

	writel(pmif->timings[unit], PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
	writel(pmif->timings[unit + 2], PMAC_IDE_REG(IDE_KAUAI_ULTRA_CONFIG));
	/* Read back to flush the writes (pattern used driver-wide) */
	(void)readl(PMAC_IDE_REG(IDE_KAUAI_PIO_CONFIG));
}
/*
* Force an update of controller timing values for a given drive
*/
/*
 * Force an update of controller timing values for a given drive,
 * dispatching to the single- or dual-register variant depending on
 * which controller cell this interface has.
 */
static void
pmac_ide_do_update_timings(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);

	switch (pmif->kind) {
	case controller_sh_ata6:
	case controller_un_ata6:
	case controller_k2_ata6:
		/* ATA-6 cells have the dual (Kauai) register layout */
		pmac_ide_kauai_apply_timings(drive);
		break;
	default:
		pmac_ide_apply_timings(drive);
		break;
	}
}
/* dev_select hook (single timing register cells): reprogram the shared
 * timing register for the selected unit, then write the device select
 * byte to the taskfile. */
static void pmac_dev_select(ide_drive_t *drive)
{
	pmac_ide_apply_timings(drive);

	writeb(drive->select | ATA_DEVICE_OBS,
	       (void __iomem *)drive->hwif->io_ports.device_addr);
}
/* dev_select hook (dual timing register / Kauai cells): same as
 * pmac_dev_select() but using the Kauai timing update path. */
static void pmac_kauai_dev_select(ide_drive_t *drive)
{
	pmac_ide_kauai_apply_timings(drive);

	writeb(drive->select | ATA_DEVICE_OBS,
	       (void __iomem *)drive->hwif->io_ports.device_addr);
}
/* exec_command hook: write the taskfile command, then read the timing
 * config register — presumably to flush the posted MMIO write (the same
 * read-back pattern is used after timing writes in this driver). */
static void pmac_exec_command(ide_hwif_t *hwif, u8 cmd)
{
	writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
	(void)readl((void __iomem *)(hwif->io_ports.data_addr
				     + IDE_TIMING_CONFIG));
}
/* write_devctl hook: write the device control register, with the same
 * read-back-to-flush pattern as pmac_exec_command(). */
static void pmac_write_devctl(ide_hwif_t *hwif, u8 ctl)
{
	writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
	(void)readl((void __iomem *)(hwif->io_ports.data_addr
				     + IDE_TIMING_CONFIG));
}
/*
* Old tuning functions (called on hdparm -p), sets up drive PIO timings
*/
/*
 * set_pio_mode hook: compute and apply the PIO timing register value
 * for the requested mode.  ATA-6 cells use the pre-computed lookup
 * tables; the 66MHz and 33MHz cells compute access/recovery tick counts
 * from the generic ide_timing data, clamped to the fields' limits.
 * Only the shadow word for this unit (timings[dn & 1]) is modified,
 * then pushed to the hardware via pmac_ide_do_update_timings().
 */
static void pmac_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
	const u8 pio = drive->pio_mode - XFER_PIO_0;
	struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
	u32 *timings, t;
	unsigned accessTicks, recTicks;
	unsigned accessTime, recTime;
	unsigned int cycle_time;

	/* which drive is it ? */
	timings = &pmif->timings[drive->dn & 1];
	t = *timings;

	cycle_time = ide_pio_cycle_time(drive, pio);

	switch (pmif->kind) {
	case controller_sh_ata6: {
		/* 133Mhz cell */
		u32 tr = kauai_lookup_timing(shasta_pio_timings, cycle_time);
		t = (t & ~TR_133_PIOREG_PIO_MASK) | tr;
		break;
		}
	case controller_un_ata6:
	case controller_k2_ata6: {
		/* 100Mhz cell */
		u32 tr = kauai_lookup_timing(kauai_pio_timings, cycle_time);
		t = (t & ~TR_100_PIOREG_PIO_MASK) | tr;
		break;
		}
	case controller_kl_ata4:
		/* 66Mhz cell */
		recTime = cycle_time - tim->active - tim->setup;
		recTime = max(recTime, 150U);
		accessTime = tim->active;
		accessTime = max(accessTime, 150U);
		accessTicks = SYSCLK_TICKS_66(accessTime);
		accessTicks = min(accessTicks, 0x1fU);
		recTicks = SYSCLK_TICKS_66(recTime);
		recTicks = min(recTicks, 0x1fU);
		t = (t & ~TR_66_PIO_MASK) |
			(accessTicks << TR_66_PIO_ACCESS_SHIFT) |
			(recTicks << TR_66_PIO_RECOVERY_SHIFT);
		break;
	default: {
		/* 33Mhz cell */
		int ebit = 0;
		recTime = cycle_time - tim->active - tim->setup;
		recTime = max(recTime, 150U);
		accessTime = tim->active;
		accessTime = max(accessTime, 150U);
		accessTicks = SYSCLK_TICKS(accessTime);
		accessTicks = min(accessTicks, 0x1fU);
		accessTicks = max(accessTicks, 4U);
		recTicks = SYSCLK_TICKS(recTime);
		recTicks = min(recTicks, 0x1fU);
		recTicks = max(recTicks, 5U) - 4;
		if (recTicks > 9) {
			recTicks--; /* guess, but it's only for PIO0, so... */
			ebit = 1;
		}
		t = (t & ~TR_33_PIO_MASK) |
			(accessTicks << TR_33_PIO_ACCESS_SHIFT) |
			(recTicks << TR_33_PIO_RECOVERY_SHIFT);
		if (ebit)
			t |= TR_33_PIO_E;
		break;
		}
	}

#ifdef IDE_PMAC_DEBUG
	printk(KERN_ERR "%s: Set PIO timing for mode %d, reg: 0x%08x\n",
		drive->name, pio,  *timings);
#endif

	*timings = t;
	pmac_ide_do_update_timings(drive);
}
/*
* Calculate KeyLargo ATA/66 UDMA timings
*/
/*
 * Calculate KeyLargo ATA/66 UDMA timings.
 *
 * Computes tick counts from the kl66_udma_timings[] table for the given
 * UDMA mode, merges them into *timings (clearing both the UDMA and MDMA
 * fields) and sets the UDMA-enable bit.
 *
 * Returns 0 on success, 1 if the mode exceeds this cell's maximum
 * (UDMA4).
 */
static int
set_timings_udma_ata4(u32 *timings, u8 speed)
{
	unsigned rdyToPauseTicks, wrDataSetupTicks, addrTicks;

	if (speed > XFER_UDMA_4)
		return 1;

	rdyToPauseTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].rdy2pause);
	wrDataSetupTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].wrDataSetup);
	addrTicks = SYSCLK_TICKS_66(kl66_udma_timings[speed & 0xf].addrSetup);

	*timings = ((*timings) & ~(TR_66_UDMA_MASK | TR_66_MDMA_MASK)) |
			(wrDataSetupTicks << TR_66_UDMA_WRDATASETUP_SHIFT) |
			(rdyToPauseTicks << TR_66_UDMA_RDY2PAUS_SHIFT) |
			(addrTicks <<TR_66_UDMA_ADDRSETUP_SHIFT) |
			TR_66_UDMA_EN;
#ifdef IDE_PMAC_DEBUG
	printk(KERN_ERR "ide_pmac: Set UDMA timing for mode %d, reg: 0x%08x\n",
		speed & 0xf,  *timings);
#endif

	return 0;
}
/*
* Calculate Kauai ATA/100 UDMA timings
*/
/*
 * Calculate Kauai ATA/100 UDMA timings: look up the pre-computed
 * register value for the mode's UDMA cycle time, merge it into the
 * ultra timing word and turn on the UDMA-enable bit.  pio_timings is
 * unused here but kept for signature parity with the other helpers.
 *
 * Returns 0 on success, 1 if the mode is above UDMA5 or unknown.
 */
static int
set_timings_udma_ata6(u32 *pio_timings, u32 *ultra_timings, u8 speed)
{
	struct ide_timing *t = ide_timing_find_mode(speed);
	u32 tr;

	if (speed > XFER_UDMA_5 || t == NULL)
		return 1;
	tr = kauai_lookup_timing(kauai_udma_timings, (int)t->udma);
	*ultra_timings = ((*ultra_timings) & ~TR_100_UDMAREG_UDMA_MASK)
			 | tr | TR_100_UDMAREG_UDMA_EN;

	return 0;
}
/*
* Calculate Shasta ATA/133 UDMA timings
*/
/*
 * Calculate Shasta ATA/133 UDMA timings: same scheme as the Kauai
 * helper above but with the 133MHz lookup table, masks and a UDMA6
 * ceiling.
 *
 * Returns 0 on success, 1 if the mode is above UDMA6 or unknown.
 */
static int
set_timings_udma_shasta(u32 *pio_timings, u32 *ultra_timings, u8 speed)
{
	struct ide_timing *t = ide_timing_find_mode(speed);
	u32 tr;

	if (speed > XFER_UDMA_6 || t == NULL)
		return 1;
	tr = kauai_lookup_timing(shasta_udma133_timings, (int)t->udma);
	*ultra_timings = ((*ultra_timings) & ~TR_133_UDMAREG_UDMA_MASK)
			 | tr | TR_133_UDMAREG_UDMA_EN;

	return 0;
}
/*
* Calculate MDMA timings for all cells
*/
/*
 * Calculate MDMA timings for all cells.
 *
 * Determines the effective cycle time (mode default, possibly slowed
 * down by the drive's advertised DMA cycle time and the OHare minimum),
 * looks up matching access/recovery times for the non-ATA-6 cells, then
 * encodes the result into the controller-specific timing word(s).
 *
 * BUG FIX: the controller_sh_ata6 case was missing its "break" and fell
 * through into the 100MHz Kauai case, which immediately overwrote the
 * just-computed Shasta (133MHz) MDMA timings with 100MHz-cell values.
 */
static void
set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
		 u8 speed)
{
	u16 *id = drive->id;
	int cycleTime, accessTime = 0, recTime = 0;
	unsigned accessTicks, recTicks;
	struct mdma_timings_t* tm = NULL;
	int i;

	/* Get default cycle time for mode */
	switch(speed & 0xf) {
		case 0: cycleTime = 480; break;
		case 1: cycleTime = 150; break;
		case 2: cycleTime = 120; break;
		default:
			BUG();
			break;
	}

	/* Check if drive provides explicit DMA cycle time */
	if ((id[ATA_ID_FIELD_VALID] & 2) && id[ATA_ID_EIDE_DMA_TIME])
		cycleTime = max_t(int, id[ATA_ID_EIDE_DMA_TIME], cycleTime);

	/* OHare limits according to some old Apple sources */
	if ((intf_type == controller_ohare) && (cycleTime < 150))
		cycleTime = 150;

	/* Get the proper timing array for this controller.  ATA-6 cells
	 * use the pre-computed kauai tables instead (tm stays NULL). */
	switch(intf_type) {
	case controller_sh_ata6:
	case controller_un_ata6:
	case controller_k2_ata6:
		break;
	case controller_kl_ata4:
		tm = mdma_timings_66;
		break;
	case controller_kl_ata3:
		tm = mdma_timings_33k;
		break;
	default:
		tm = mdma_timings_33;
		break;
	}
	if (tm != NULL) {
		/* Lookup matching access & recovery times */
		i = -1;
		for (;;) {
			if (tm[i+1].cycleTime < cycleTime)
				break;
			i++;
		}
		cycleTime = tm[i].cycleTime;
		accessTime = tm[i].accessTime;
		recTime = tm[i].recoveryTime;

#ifdef IDE_PMAC_DEBUG
		printk(KERN_ERR "%s: MDMA, cycleTime: %d, accessTime: %d, recTime: %d\n",
			drive->name, cycleTime, accessTime, recTime);
#endif
	}
	switch(intf_type) {
	case controller_sh_ata6: {
		/* 133Mhz cell */
		u32 tr = kauai_lookup_timing(shasta_mdma_timings, cycleTime);
		*timings = ((*timings) & ~TR_133_PIOREG_MDMA_MASK) | tr;
		*timings2 = (*timings2) & ~TR_133_UDMAREG_UDMA_EN;
		break;		/* was missing: fell through into the 100MHz case */
		}
	case controller_un_ata6:
	case controller_k2_ata6: {
		/* 100Mhz cell */
		u32 tr = kauai_lookup_timing(kauai_mdma_timings, cycleTime);
		*timings = ((*timings) & ~TR_100_PIOREG_MDMA_MASK) | tr;
		*timings2 = (*timings2) & ~TR_100_UDMAREG_UDMA_EN;
		}
		break;
	case controller_kl_ata4:
		/* 66Mhz cell */
		accessTicks = SYSCLK_TICKS_66(accessTime);
		accessTicks = min(accessTicks, 0x1fU);
		accessTicks = max(accessTicks, 0x1U);
		recTicks = SYSCLK_TICKS_66(recTime);
		recTicks = min(recTicks, 0x1fU);
		recTicks = max(recTicks, 0x3U);
		/* Clear out mdma bits and disable udma */
		*timings = ((*timings) & ~(TR_66_MDMA_MASK | TR_66_UDMA_MASK)) |
			(accessTicks << TR_66_MDMA_ACCESS_SHIFT) |
			(recTicks << TR_66_MDMA_RECOVERY_SHIFT);
		break;
	case controller_kl_ata3:
		/* 33Mhz cell on KeyLargo */
		accessTicks = SYSCLK_TICKS(accessTime);
		accessTicks = max(accessTicks, 1U);
		accessTicks = min(accessTicks, 0x1fU);
		accessTime = accessTicks * IDE_SYSCLK_NS;
		recTicks = SYSCLK_TICKS(recTime);
		recTicks = max(recTicks, 1U);
		recTicks = min(recTicks, 0x1fU);
		*timings = ((*timings) & ~TR_33_MDMA_MASK) |
				(accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
				(recTicks << TR_33_MDMA_RECOVERY_SHIFT);
		break;
	default: {
		/* 33Mhz cell on others */
		int halfTick = 0;
		int origAccessTime = accessTime;
		int origRecTime = recTime;

		accessTicks = SYSCLK_TICKS(accessTime);
		accessTicks = max(accessTicks, 1U);
		accessTicks = min(accessTicks, 0x1fU);
		accessTime = accessTicks * IDE_SYSCLK_NS;
		recTicks = SYSCLK_TICKS(recTime);
		recTicks = max(recTicks, 2U) - 1;
		recTicks = min(recTicks, 0x1fU);
		recTime = (recTicks + 1) * IDE_SYSCLK_NS;
		/* Use a half tick to squeeze closer to the requested
		 * timings when both fields still meet their originals */
		if ((accessTicks > 1) &&
		    ((accessTime - IDE_SYSCLK_NS/2) >= origAccessTime) &&
		    ((recTime - IDE_SYSCLK_NS/2) >= origRecTime)) {
			halfTick = 1;
			accessTicks--;
		}
		*timings = ((*timings) & ~TR_33_MDMA_MASK) |
				(accessTicks << TR_33_MDMA_ACCESS_SHIFT) |
				(recTicks << TR_33_MDMA_RECOVERY_SHIFT);
		if (halfTick)
			*timings |= TR_33_MDMA_HALFTICK;
		}
	}
#ifdef IDE_PMAC_DEBUG
	printk(KERN_ERR "%s: Set MDMA timing for mode %d, reg: 0x%08x\n",
		drive->name, speed & 0xf,  *timings);
#endif
}
/*
 * set_dma_mode hook: compute UDMA or MDMA timings for the requested
 * mode into a local copy of this unit's two timing words, and only
 * commit them to the shadow registers and the hardware if the helper
 * succeeded (so a rejected mode leaves the current timings untouched).
 */
static void pmac_ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
	int ret = 0;
	u32 *timings, *timings2, tl[2];
	u8 unit = drive->dn & 1;
	const u8 speed = drive->dma_mode;

	timings = &pmif->timings[unit];
	timings2 = &pmif->timings[unit+2];

	/* Copy timings to local image */
	tl[0] = *timings;
	tl[1] = *timings2;

	if (speed >= XFER_UDMA_0) {
		if (pmif->kind == controller_kl_ata4)
			ret = set_timings_udma_ata4(&tl[0], speed);
		else if (pmif->kind == controller_un_ata6
			 || pmif->kind == controller_k2_ata6)
			ret = set_timings_udma_ata6(&tl[0], &tl[1], speed);
		else if (pmif->kind == controller_sh_ata6)
			ret = set_timings_udma_shasta(&tl[0], &tl[1], speed);
		else
			ret = -1;	/* UDMA requested on a non-UDMA cell */
	} else
		set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);

	if (ret)
		return;

	/* Apply timings to controller */
	*timings = tl[0];
	*timings2 = tl[1];

	pmac_ide_do_update_timings(drive);
}
/*
* Blast some well known "safe" values to the timing registers at init or
* wakeup from sleep time, before we do real calculation
*/
/*
 * Blast some well known "safe" values to the timing registers at init
 * or wakeup from sleep time, before we do real calculation.  The raw
 * values are per-cell conservative defaults; value2 (the ultra word)
 * only exists on the ATA-6 cells and stays 0 elsewhere.
 */
static void
sanitize_timings(pmac_ide_hwif_t *pmif)
{
	unsigned int value, value2 = 0;

	switch(pmif->kind) {
		case controller_sh_ata6:
			value = 0x0a820c97;
			value2 = 0x00033031;
			break;
		case controller_un_ata6:
		case controller_k2_ata6:
			value = 0x08618a92;
			value2 = 0x00002921;
			break;
		case controller_kl_ata4:
			value = 0x0008438c;
			break;
		case controller_kl_ata3:
			value = 0x00084526;
			break;
		case controller_heathrow:
		case controller_ohare:
		default:
			value = 0x00074526;
			break;
	}
	/* Same defaults for master and slave until real tuning happens */
	pmif->timings[0] = pmif->timings[1] = value;
	pmif->timings[2] = pmif->timings[3] = value2;
}
/* True when this interface lives in a macio media bay */
static int on_media_bay(pmac_ide_hwif_t *pmif)
{
	if (pmif->mdev == NULL)
		return 0;
	return pmif->mdev->media_bay != NULL;
}
/* Suspend call back, should be called after the child devices
* have actually been suspended
*/
/* Suspend call back, should be called after the child devices
 * have actually been suspended.  Clears the timing shadows, masks the
 * interrupt and powers the cell/bus down (unless a media bay manages
 * it).  Always returns 0. */
static int pmac_ide_do_suspend(pmac_ide_hwif_t *pmif)
{
	/* We clear the timings */
	pmif->timings[0] = 0;
	pmif->timings[1] = 0;

	disable_irq(pmif->irq);

	/* The media bay will handle itself just fine */
	if (on_media_bay(pmif))
		return 0;

	/* Kauai has bus control FCRs directly here */
	if (pmif->kauai_fcr) {
		u32 fcr = readl(pmif->kauai_fcr);
		fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
		writel(fcr, pmif->kauai_fcr);
	}

	/* Disable the bus on older machines and the cell on kauai */
	ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id,
			    0);

	return 0;
}
/* Resume call back, should be called before the child devices
* are resumed
*/
/* Resume call back, should be called before the child devices
 * are resumed.  Hard-resets and re-enables the cell (skipped when a
 * media bay manages the interface), restores safe default timings and
 * unmasks the interrupt.  Always returns 0. */
static int pmac_ide_do_resume(pmac_ide_hwif_t *pmif)
{
	/* Hard reset & re-enable controller (do we really need to reset ? -BenH) */
	if (!on_media_bay(pmif)) {
		ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 1);
		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, pmif->node, pmif->aapl_bus_id, 1);
		msleep(10);
		ppc_md.feature_call(PMAC_FTR_IDE_RESET, pmif->node, pmif->aapl_bus_id, 0);

		/* Kauai has it different */
		if (pmif->kauai_fcr) {
			u32 fcr = readl(pmif->kauai_fcr);
			fcr |= KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE;
			writel(fcr, pmif->kauai_fcr);
		}

		/* Let the disk settle after the hard reset (see comment
		 * at the IDE_WAKEUP_DELAY definition) */
		msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
	}

	/* Sanitize drive timings */
	sanitize_timings(pmif);

	enable_irq(pmif->irq);

	return 0;
}
/*
 * cable_detect hook: decide 40- vs 80-conductor from the device-tree
 * "cable-type" property, with a PowerBook special case and a G5
 * workaround.
 *
 * BUG FIX: of_get_property() returns NULL when the property (or the
 * root node) is missing; "model" was passed to strncmp() unchecked,
 * which would dereference NULL on such systems.  Guard with "model &&".
 */
static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
{
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
	struct device_node *np = pmif->node;
	const char *cable = of_get_property(np, "cable-type", NULL);
	struct device_node *root = of_find_node_by_path("/");
	const char *model = of_get_property(root, "model", NULL);

	/* Get cable type from device-tree. */
	if (cable && !strncmp(cable, "80-", 3)) {
		/* Some drives fail to detect 80c cable in PowerBook */
		/* These machine use proprietary short IDE cable anyway */
		if (model && !strncmp(model, "PowerBook", 9))
			return ATA_CBL_PATA40_SHORT;
		else
			return ATA_CBL_PATA80;
	}

	/*
	 * G5's seem to have incorrect cable type in device-tree.
	 * Let's assume they have a 80 conductor cable, this seem
	 * to be always the case unless the user mucked around.
	 */
	if (of_device_is_compatible(np, "K2-UATA") ||
	    of_device_is_compatible(np, "shasta-ata"))
		return ATA_CBL_PATA80;

	return ATA_CBL_PATA40;
}
/* init_dev hook: on a media-bay interface, only allow probing when the
 * bay currently holds a CD device; otherwise mark the drive NOPROBE.
 * Non-media-bay interfaces are left untouched. */
static void pmac_ide_init_dev(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);

	if (on_media_bay(pmif)) {
		if (check_media_bay(pmif->mdev->media_bay) == MB_CD) {
			drive->dev_flags &= ~IDE_DFLAG_NOPROBE;
			return;
		}
		drive->dev_flags |= IDE_DFLAG_NOPROBE;
	}
}
/* Taskfile ops for single-timing-register cells: generic helpers plus
 * the pmac-specific command/devctl writes and dev_select. */
static const struct ide_tp_ops pmac_tp_ops = {
	.exec_command		= pmac_exec_command,
	.read_status		= ide_read_status,
	.read_altstatus		= ide_read_altstatus,
	.write_devctl		= pmac_write_devctl,

	.dev_select		= pmac_dev_select,
	.tf_load		= ide_tf_load,
	.tf_read		= ide_tf_read,

	.input_data		= ide_input_data,
	.output_data		= ide_output_data,
};
/* Taskfile ops for ATA-6 (Kauai/Shasta/K2) cells: identical to
 * pmac_tp_ops except for the dual-register dev_select. */
static const struct ide_tp_ops pmac_ata6_tp_ops = {
	.exec_command		= pmac_exec_command,
	.read_status		= ide_read_status,
	.read_altstatus		= ide_read_altstatus,
	.write_devctl		= pmac_write_devctl,

	.dev_select		= pmac_kauai_dev_select,
	.tf_load		= ide_tf_load,
	.tf_read		= ide_tf_read,

	.input_data		= ide_input_data,
	.output_data		= ide_output_data,
};
/* Port ops for cells with 80-wire cable support (ATA-4 and up): adds
 * cable detection on top of the common hooks. */
static const struct ide_port_ops pmac_ide_ata4_port_ops = {
	.init_dev		= pmac_ide_init_dev,
	.set_pio_mode		= pmac_ide_set_pio_mode,
	.set_dma_mode		= pmac_ide_set_dma_mode,
	.cable_detect		= pmac_ide_cable_detect,
};

/* Port ops for the older cells — no cable detection hook */
static const struct ide_port_ops pmac_ide_port_ops = {
	.init_dev		= pmac_ide_init_dev,
	.set_pio_mode		= pmac_ide_set_pio_mode,
	.set_dma_mode		= pmac_ide_set_dma_mode,
};
/* Defined later in the file (DBDMA-based DMA engine hooks) */
static const struct ide_dma_ops pmac_dma_ops;

/* Baseline port description; pmac_ide_setup_device() copies this and
 * overrides tp_ops/port_ops/udma_mask per detected controller cell. */
static const struct ide_port_info pmac_port_info = {
	.name			= DRV_NAME,
	.init_dma		= pmac_ide_init_dma,
	.chipset		= ide_pmac,
	.tp_ops			= &pmac_tp_ops,
	.port_ops		= &pmac_ide_port_ops,
	.dma_ops		= &pmac_dma_ops,
	.host_flags		= IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
				  IDE_HFLAG_POST_SET_MODE |
				  IDE_HFLAG_MMIO |
				  IDE_HFLAG_UNMASK_IRQS,
	.pio_mask		= ATA_PIO4,
	.mwdma_mask		= ATA_MWDMA2,
};
/*
* Setup, register & probe an IDE channel driven by this driver, this is
* called by one of the 2 probe functions (macio or PCI).
*/
/*
 * Setup, register & probe an IDE channel driven by this driver, this
 * is called by one of the 2 probe functions (macio or PCI).
 *
 * Identifies the controller cell from the device-tree compatible
 * property (adjusting tp_ops/port_ops/udma_mask in a local copy of
 * pmac_port_info), programs the Kauai FCR and safe timings, powers up
 * the cell, then allocates and registers the IDE host.
 *
 * Returns 0 on success or a negative errno; on failure the allocated
 * host (if any) is freed here.
 */
static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif,
					   struct ide_hw *hw)
{
	struct device_node *np = pmif->node;
	const int *bidp;
	struct ide_host *host;
	ide_hwif_t *hwif;
	struct ide_hw *hws[] = { hw };
	struct ide_port_info d = pmac_port_info;
	int rc;

	pmif->broken_dma = pmif->broken_dma_warn = 0;
	if (of_device_is_compatible(np, "shasta-ata")) {
		pmif->kind = controller_sh_ata6;
		d.tp_ops = &pmac_ata6_tp_ops;
		d.port_ops = &pmac_ide_ata4_port_ops;
		d.udma_mask = ATA_UDMA6;
	} else if (of_device_is_compatible(np, "kauai-ata")) {
		pmif->kind = controller_un_ata6;
		d.tp_ops = &pmac_ata6_tp_ops;
		d.port_ops = &pmac_ide_ata4_port_ops;
		d.udma_mask = ATA_UDMA5;
	} else if (of_device_is_compatible(np, "K2-UATA")) {
		pmif->kind = controller_k2_ata6;
		d.tp_ops = &pmac_ata6_tp_ops;
		d.port_ops = &pmac_ide_ata4_port_ops;
		d.udma_mask = ATA_UDMA5;
	} else if (of_device_is_compatible(np, "keylargo-ata")) {
		if (strcmp(np->name, "ata-4") == 0) {
			pmif->kind = controller_kl_ata4;
			d.port_ops = &pmac_ide_ata4_port_ops;
			d.udma_mask = ATA_UDMA4;
		} else
			pmif->kind = controller_kl_ata3;
	} else if (of_device_is_compatible(np, "heathrow-ata")) {
		pmif->kind = controller_heathrow;
	} else {
		pmif->kind = controller_ohare;
		pmif->broken_dma = 1;
	}

	bidp = of_get_property(np, "AAPL,bus-id", NULL);
	pmif->aapl_bus_id =  bidp ? *bidp : 0;

	/* On Kauai-type controllers, we make sure the FCR is correct */
	if (pmif->kauai_fcr)
		writel(KAUAI_FCR_UATA_MAGIC |
		       KAUAI_FCR_UATA_RESET_N |
		       KAUAI_FCR_UATA_ENABLE, pmif->kauai_fcr);

	/* Make sure we have sane timings */
	sanitize_timings(pmif);

	/* If we are on a media bay, wait for it to settle and lock it */
	if (pmif->mdev)
		lock_media_bay(pmif->mdev->media_bay);

	host = ide_host_alloc(&d, hws, 1);
	if (host == NULL) {
		rc = -ENOMEM;
		goto bail;
	}
	hwif = pmif->hwif = host->ports[0];

	if (on_media_bay(pmif)) {
		/* Fixup bus ID for media bay */
		if (!bidp)
			pmif->aapl_bus_id = 1;
	} else if (pmif->kind == controller_ohare) {
		/* The code below is having trouble on some ohare machines
		 * (timing related ?). Until I can put my hand on one of these
		 * units, I keep the old way
		 */
		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, 0, 1);
	} else {
 		/* This is necessary to enable IDE when net-booting */
		ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 1);
		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, np, pmif->aapl_bus_id, 1);
		msleep(10);
		ppc_md.feature_call(PMAC_FTR_IDE_RESET, np, pmif->aapl_bus_id, 0);
		msleep(jiffies_to_msecs(IDE_WAKEUP_DELAY));
	}

	printk(KERN_INFO DRV_NAME ": Found Apple %s controller (%s), "
			 "bus ID %d%s, irq %d\n", model_name[pmif->kind],
			 pmif->mdev ? "macio" : "PCI", pmif->aapl_bus_id,
			 on_media_bay(pmif) ? " (mediabay)" : "", hw->irq);

	rc = ide_host_register(host, &d, hws);
	if (rc)
		pmif->hwif = NULL;

	if (pmif->mdev)
		unlock_media_bay(pmif->mdev->media_bay);

 bail:
	if (rc && host)
		ide_host_free(host);
	return rc;
}
/* Fill in the 8 taskfile register addresses (spaced 0x10 apart from
 * base) plus the control register at base + 0x160. */
static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base)
{
	unsigned long reg = base;
	int i;

	for (i = 0; i < 8; ++i) {
		hw->io_ports_array[i] = reg;
		reg += 0x10;
	}
	hw->io_ports.ctl_addr = base + 0x160;
}
/*
* Attach to a macio probed interface
*/
/*
 * Attach to a macio probed interface: allocate per-interface state,
 * claim and map the register (and optional DBDMA) resources, resolve
 * the interrupt, then hand off to pmac_ide_setup_device().  On setup
 * failure all resources acquired here are released again.
 *
 * NOTE(review): the ioremap() result is used without a NULL check —
 * a failed mapping would be stored as regbase 0; confirm whether this
 * can happen on the supported machines.
 */
static int __devinit
pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
	void __iomem *base;
	unsigned long regbase;
	pmac_ide_hwif_t *pmif;
	int irq, rc;
	struct ide_hw hw;

	pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
	if (pmif == NULL)
		return -ENOMEM;

	if (macio_resource_count(mdev) == 0) {
		printk(KERN_WARNING "ide-pmac: no address for %s\n",
				    mdev->ofdev.dev.of_node->full_name);
		rc = -ENXIO;
		goto out_free_pmif;
	}

	/* Request memory resource for IO ports */
	if (macio_request_resource(mdev, 0, "ide-pmac (ports)")) {
		printk(KERN_ERR "ide-pmac: can't request MMIO resource for "
				"%s!\n", mdev->ofdev.dev.of_node->full_name);
		rc = -EBUSY;
		goto out_free_pmif;
	}

	/* XXX This is bogus. Should be fixed in the registry by checking
	 * the kind of host interrupt controller, a bit like gatwick
	 * fixes in irq.c. That works well enough for the single case
	 * where that happens though...
	 */
	if (macio_irq_count(mdev) == 0) {
		printk(KERN_WARNING "ide-pmac: no intrs for device %s, using "
				    "13\n", mdev->ofdev.dev.of_node->full_name);
		irq = irq_create_mapping(NULL, 13);
	} else
		irq = macio_irq(mdev, 0);

	base = ioremap(macio_resource_start(mdev, 0), 0x400);
	regbase = (unsigned long) base;

	pmif->mdev = mdev;
	pmif->node = mdev->ofdev.dev.of_node;
	pmif->regbase = regbase;
	pmif->irq = irq;
	pmif->kauai_fcr = NULL;	/* macio cells have no FCR */

	/* Second resource, if present, is the DBDMA channel */
	if (macio_resource_count(mdev) >= 2) {
		if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
			printk(KERN_WARNING "ide-pmac: can't request DMA "
					    "resource for %s!\n",
					    mdev->ofdev.dev.of_node->full_name);
		else
			pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
	} else
		pmif->dma_regs = NULL;

	dev_set_drvdata(&mdev->ofdev.dev, pmif);

	memset(&hw, 0, sizeof(hw));
	pmac_ide_init_ports(&hw, pmif->regbase);
	hw.irq = irq;
	hw.dev = &mdev->bus->pdev->dev;
	hw.parent = &mdev->ofdev.dev;

	rc = pmac_ide_setup_device(pmif, &hw);
	if (rc != 0) {
		/* The inteface is released to the common IDE layer */
		dev_set_drvdata(&mdev->ofdev.dev, NULL);
		iounmap(base);
		if (pmif->dma_regs) {
			iounmap(pmif->dma_regs);
			macio_release_resource(mdev, 1);
		}
		macio_release_resource(mdev, 0);
		kfree(pmif);
	}

	return rc;

out_free_pmif:
	kfree(pmif);
	return rc;
}
/* macio bus suspend hook: suspend the interface only on a real sleep
 * transition (state actually changing and a sleep-class event), and
 * record the new power state on success. */
static int
pmac_ide_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
{
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
	int rc = 0;

	if (mesg.event != mdev->ofdev.dev.power.power_state.event
			&& (mesg.event & PM_EVENT_SLEEP)) {
		rc = pmac_ide_do_suspend(pmif);
		if (rc == 0)
			mdev->ofdev.dev.power.power_state = mesg;
	}

	return rc;
}
static int
pmac_ide_macio_resume(struct macio_dev *mdev)
{
pmac_ide_hwif_t *pmif =
(pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
int rc = 0;
if (mdev->ofdev.dev.power.power_state.event != PM_EVENT_ON) {
rc = pmac_ide_do_resume(pmif);
if (rc == 0)
mdev->ofdev.dev.power.power_state = PMSG_ON;
}
return rc;
}
/*
* Attach to a PCI probed interface
*/
/*
 * Attach to a PCI probed interface (Kauai/K2 style controllers).
 *
 * Maps the controller registers (register block at +0x2000, DBDMA at
 * +0x1000, FCR at the base), then hands the interface to the common
 * IDE layer.  Returns 0 on success or a negative errno; all resources
 * acquired here are released on every failure path.
 *
 * Fix: the ioremap() return value was previously unchecked; a mapping
 * failure would have left bogus regbase/dma_regs pointers behind.
 */
static int __devinit
pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device_node *np;
	pmac_ide_hwif_t *pmif;
	void __iomem *base;
	unsigned long rbase, rlen;
	int rc;
	struct ide_hw hw;

	np = pci_device_to_OF_node(pdev);
	if (np == NULL) {
		printk(KERN_ERR "ide-pmac: cannot find MacIO node for Kauai ATA interface\n");
		return -ENODEV;
	}
	pmif = kzalloc(sizeof(*pmif), GFP_KERNEL);
	if (pmif == NULL)
		return -ENOMEM;

	if (pci_enable_device(pdev)) {
		printk(KERN_WARNING "ide-pmac: Can't enable PCI device for "
				    "%s\n", np->full_name);
		rc = -ENXIO;
		goto out_free_pmif;
	}
	pci_set_master(pdev);

	if (pci_request_regions(pdev, "Kauai ATA")) {
		printk(KERN_ERR "ide-pmac: Cannot obtain PCI resources for "
				"%s\n", np->full_name);
		rc = -ENXIO;
		goto out_free_pmif;
	}

	pmif->mdev = NULL;
	pmif->node = np;

	rbase = pci_resource_start(pdev, 0);
	rlen = pci_resource_len(pdev, 0);

	base = ioremap(rbase, rlen);
	if (base == NULL) {
		/* Previously unchecked: bail out cleanly on mapping failure */
		printk(KERN_ERR "ide-pmac: Cannot map PCI registers for "
				"%s\n", np->full_name);
		rc = -ENOMEM;
		goto out_release_regions;
	}
	pmif->regbase = (unsigned long) base + 0x2000;
	pmif->dma_regs = base + 0x1000;
	pmif->kauai_fcr = base;
	pmif->irq = pdev->irq;

	pci_set_drvdata(pdev, pmif);

	memset(&hw, 0, sizeof(hw));
	pmac_ide_init_ports(&hw, pmif->regbase);
	hw.irq = pdev->irq;
	hw.dev = &pdev->dev;

	rc = pmac_ide_setup_device(pmif, &hw);
	if (rc != 0) {
		/* The interface is released to the common IDE layer */
		pci_set_drvdata(pdev, NULL);
		iounmap(base);
		pci_release_regions(pdev);
		kfree(pmif);
	}
	return rc;

out_release_regions:
	pci_release_regions(pdev);
out_free_pmif:
	kfree(pmif);
	return rc;
}
/*
 * PCI suspend hook: mirror of the MacIO variant, keyed off the
 * pci_dev's recorded power state.
 */
static int
pmac_ide_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev);
	int err;

	/* Skip events that are not sleep transitions or that leave the
	 * recorded power state unchanged.
	 */
	if (mesg.event == pdev->dev.power.power_state.event ||
	    !(mesg.event & PM_EVENT_SLEEP))
		return 0;

	err = pmac_ide_do_suspend(pmif);
	if (err == 0)
		pdev->dev.power.power_state = mesg;
	return err;
}
/*
 * PCI resume hook: bring the interface back up unless it is already on.
 */
static int
pmac_ide_pci_resume(struct pci_dev *pdev)
{
	pmac_ide_hwif_t *pmif = pci_get_drvdata(pdev);
	int err;

	if (pdev->dev.power.power_state.event == PM_EVENT_ON)
		return 0;

	err = pmac_ide_do_resume(pmif);
	if (err == 0)
		pdev->dev.power.power_state = PMSG_ON;
	return err;
}
#ifdef CONFIG_PMAC_MEDIABAY
/*
 * Media-bay event callback: scan the port when a CD drive appears in
 * the bay, unregister its devices for any other (removal) state.
 */
static void pmac_ide_macio_mb_event(struct macio_dev* mdev, int mb_state)
{
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(&mdev->ofdev.dev);
	ide_hwif_t *hwif = pmif->hwif;

	if (mb_state == MB_CD) {
		if (!hwif->present)
			ide_port_scan(hwif);
	} else if (hwif->present) {
		ide_port_unregister_devices(hwif);
	}
}
#endif /* CONFIG_PMAC_MEDIABAY */
/* Open Firmware node names/types handled by the MacIO driver below */
static struct of_device_id pmac_ide_macio_match[] =
{
	{
	.name 		= "IDE",
	},
	{
	.name 		= "ATA",
	},
	{
	.type		= "ide",
	},
	{
	.type		= "ata",
	},
	{},	/* sentinel */
};
/* MacIO bus driver glue: probe/suspend/resume plus media-bay events */
static struct macio_driver pmac_ide_macio_driver =
{
	.driver = {
		.name 		= "ide-pmac",
		.owner		= THIS_MODULE,
		.of_match_table	= pmac_ide_macio_match,
	},
	.probe		= pmac_ide_macio_attach,
	.suspend	= pmac_ide_macio_suspend,
	.resume		= pmac_ide_macio_resume,
#ifdef CONFIG_PMAC_MEDIABAY
	.mediabay_event	= pmac_ide_macio_mb_event,
#endif
};
/* Apple PCI ATA controllers (Kauai family) handled by the PCI driver */
static const struct pci_device_id pmac_ide_pci_match[] = {
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA),	0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100),	0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100),	0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA),	0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA),	0 },
	{},	/* sentinel */
};
/* PCI driver glue for the Kauai-style controllers listed above */
static struct pci_driver pmac_ide_pci_driver = {
	.name		= "ide-pmac",
	.id_table	= pmac_ide_pci_match,
	.probe		= pmac_ide_pci_attach,
	.suspend	= pmac_ide_pci_suspend,
	.resume		= pmac_ide_pci_resume,
};
MODULE_DEVICE_TABLE(pci, pmac_ide_pci_match);
/*
 * Module entry point: register the MacIO and PCI drivers.  The config
 * option only changes the registration order (which controller's ports
 * get probed first); on failure of the second registration the first
 * is rolled back.
 */
int __init pmac_ide_probe(void)
{
	int rc;

	if (!machine_is(powermac))
		return -ENODEV;

#ifdef CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST
	rc = pci_register_driver(&pmac_ide_pci_driver);
	if (rc)
		return rc;
	rc = macio_register_driver(&pmac_ide_macio_driver);
	if (rc)
		pci_unregister_driver(&pmac_ide_pci_driver);
#else
	rc = macio_register_driver(&pmac_ide_macio_driver);
	if (rc)
		return rc;
	rc = pci_register_driver(&pmac_ide_pci_driver);
	if (rc)
		macio_unregister_driver(&pmac_ide_macio_driver);
#endif
	return rc;
}
/*
 * pmac_ide_build_dmatable builds the DBDMA command list
 * for a transfer and sets the DBDMA channel to point to it.
 *
 * Returns 1 on success (descriptor list built and channel command
 * pointer written) and 0 on failure, in which case the caller falls
 * back to PIO for this request.
 */
static int pmac_ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
	struct dbdma_cmd *table;
	volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
	struct scatterlist *sg;
	int wr = !!(cmd->tf_flags & IDE_TFLAG_WRITE);
	int i = cmd->sg_nents, count = 0;

	/* DMA table is already aligned */
	table = (struct dbdma_cmd *) pmif->dma_table_cpu;

	/* Make sure DMA controller is stopped (necessary ?) */
	writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma->control);
	while (readl(&dma->status) & RUN)
		udelay(1);

	/* Build DBDMA commands list */
	sg = hwif->sg_table;
	while (i && sg_dma_len(sg)) {
		u32 cur_addr;
		u32 cur_len;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		/* Ohare chipsets cannot DMA from cache-line-unaligned
		 * addresses: punt the whole request to PIO (warn once).
		 */
		if (pmif->broken_dma && cur_addr & (L1_CACHE_BYTES - 1)) {
			if (pmif->broken_dma_warn == 0) {
				printk(KERN_WARNING "%s: DMA on non aligned address, "
				       "switching to PIO on Ohare chipset\n", drive->name);
				pmif->broken_dma_warn = 1;
			}
			return 0;
		}
		while (cur_len) {
			/* Each DBDMA descriptor moves at most 0xfe00 bytes */
			unsigned int tc = (cur_len < 0xfe00)? cur_len: 0xfe00;

			if (count++ >= MAX_DCMDS) {
				printk(KERN_WARNING "%s: DMA table too small\n",
				       drive->name);
				return 0;
			}
			/* Descriptor fields are little-endian in memory */
			st_le16(&table->command, wr? OUTPUT_MORE: INPUT_MORE);
			st_le16(&table->req_count, tc);
			st_le32(&table->phy_addr, cur_addr);
			table->cmd_dep = 0;
			table->xfer_status = 0;
			table->res_count = 0;
			cur_addr += tc;
			cur_len -= tc;
			++table;
		}
		sg = sg_next(sg);
		i--;
	}

	/* convert the last command to an input/output last command */
	if (count) {
		st_le16(&table[-1].command, wr? OUTPUT_LAST: INPUT_LAST);
		/* add the stop command to the end of the list */
		memset(table, 0, sizeof(struct dbdma_cmd));
		st_le16(&table->command, DBDMA_STOP);
		/* Descriptors must be visible before the channel is pointed
		 * at them.
		 */
		mb();
		writel(hwif->dmatable_dma, &dma->cmdptr);
		return 1;
	}

	printk(KERN_DEBUG "%s: empty DMA table?\n", drive->name);

	return 0; /* revert to PIO for this request */
}
/*
 * Prepare a DMA transfer. We build the DMA table, adjust the timings for
 * a read on KeyLargo ATA/66 and mark us as waiting for DMA completion
 *
 * Note the inverted return convention of this hook: 0 means success,
 * 1 tells the IDE core that DMA setup failed (fall back to PIO).
 */
static int pmac_ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
	u8 unit = drive->dn & 1, ata4 = (pmif->kind == controller_kl_ata4);
	u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);

	if (pmac_ide_build_dmatable(drive, cmd) == 0)
		return 1;

	/* Apple adds 60ns to wrDataSetup on reads */
	if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
		writel(pmif->timings[unit] + (write ? 0 : 0x00800000UL),
			PMAC_IDE_REG(IDE_TIMING_CONFIG));
		/* read back to flush the posted write */
		(void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
	}

	return 0;
}
/*
 * Kick the DMA controller into life after the DMA command has been
 * issued to the drive: set the channel's RUN bit (DBDMA control writes
 * carry a bit mask in the upper 16 bits and the values in the lower 16).
 */
static void
pmac_ide_dma_start(ide_drive_t *drive)
{
	pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)
		dev_get_drvdata(drive->hwif->gendev.parent);
	volatile struct dbdma_regs __iomem *chan = pmif->dma_regs;

	writel((RUN << 16) | RUN, &chan->control);
	/* Read back to flush the posted write to the controller */
	(void)readl(&chan->control);
}
/*
 * After a DMA transfer, make sure the controller is stopped
 *
 * Returns 0 when the DMA status looks sane, non-zero on error.
 */
static int
pmac_ide_dma_end (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
	volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
	u32 dstat;

	dstat = readl(&dma->status);
	/* Clear RUN, WAKE and DEAD (mask in upper half, values zeroed) */
	writel(((RUN|WAKE|DEAD) << 16), &dma->control);

	/* verify good dma status. we don't check for ACTIVE being 0. We should...
	 * in theory, but with ATAPI devices doing buffer underruns, that would
	 * cause us to disable DMA, which isn't what we want
	 */
	return (dstat & (RUN|DEAD)) != RUN;
}
/*
 * Check out that the interrupt we got was for us. We can't always know this
 * for sure with those Apple interfaces (well, we could on the recent ones but
 * that's not implemented yet), on the other hand, we don't have shared interrupts
 * so it's not really a problem
 *
 * Always returns 1 ("the interrupt was ours"); the body only makes sure
 * the DBDMA fifo is flushed before the caller reads the transferred data.
 */
static int
pmac_ide_dma_test_irq (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	pmac_ide_hwif_t *pmif =
		(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
	volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
	unsigned long status, timeout;

	/* We have two things to deal with here:
	 *
	 * - The dbdma won't stop if the command was started
	 * but completed with an error without transferring all
	 * data. This happens when bad blocks are met during
	 * a multi-block transfer.
	 *
	 * - The dbdma fifo hasn't yet finished flushing to
	 * to system memory when the disk interrupt occurs.
	 *
	 */

	/* If ACTIVE is cleared, the STOP command have passed and
	 * transfer is complete.
	 */
	status = readl(&dma->status);
	if (!(status & ACTIVE))
		return 1;

	/* If dbdma didn't execute the STOP command yet, the
	 * active bit is still set. We consider that we aren't
	 * sharing interrupts (which is hopefully the case with
	 * those controllers) and so we just try to flush the
	 * channel for pending data in the fifo
	 */
	udelay(1);
	writel((FLUSH << 16) | FLUSH, &dma->control);
	timeout = 0;
	/* Poll (~100us max) until the controller clears FLUSH */
	for (;;) {
		udelay(1);
		status = readl(&dma->status);
		if ((status & FLUSH) == 0)
			break;
		if (++timeout > 100) {
			printk(KERN_WARNING "ide%d, ide_dma_test_irq timeout flushing channel\n",
			       hwif->index);
			break;
		}
	}
	return 1;
}
/* No host-level enable/disable needed on this hardware; the hook must
 * still exist to satisfy the ide_dma_ops interface.
 */
static void pmac_ide_dma_host_set(ide_drive_t *drive, int on)
{
}
/*
 * Lost-interrupt hook: dump the DBDMA channel status to aid debugging.
 */
static void
pmac_ide_dma_lost_irq (ide_drive_t *drive)
{
	pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)
		dev_get_drvdata(drive->hwif->gendev.parent);
	volatile struct dbdma_regs __iomem *chan = pmif->dma_regs;
	unsigned long stat;

	stat = readl(&chan->status);
	printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", stat);
}
/* DMA operations exported to the common IDE layer */
static const struct ide_dma_ops pmac_dma_ops = {
	.dma_host_set		= pmac_ide_dma_host_set,
	.dma_setup		= pmac_ide_dma_setup,
	.dma_start		= pmac_ide_dma_start,
	.dma_end		= pmac_ide_dma_end,
	.dma_test_irq		= pmac_ide_dma_test_irq,
	.dma_lost_irq		= pmac_ide_dma_lost_irq,
};
/*
 * Allocate the data structures needed for using DMA with an interface
 * and fill the proper list of functions pointers
 */
static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
				       const struct ide_port_info *d)
{
	pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)
		dev_get_drvdata(hwif->gendev.parent);
	struct pci_dev *dev = to_pci_dev(hwif->dev);

	/* We won't need pci_dev if we switch to generic consistent
	 * DMA routines ...
	 */
	if (dev == NULL || pmif->dma_regs == NULL)
		return -ENODEV;

	/*
	 * Allocate space for the DBDMA commands.  The +2 leaves room
	 * for the stop command and for aligning the start address to a
	 * multiple of 16 bytes.
	 */
	pmif->dma_table_cpu = pci_alloc_consistent(dev,
			(MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
			&hwif->dmatable_dma);
	if (!pmif->dma_table_cpu) {
		printk(KERN_ERR "%s: unable to allocate DMA command list\n",
		       hwif->name);
		return -ENOMEM;
	}

	hwif->sg_max_nents = MAX_DCMDS;
	return 0;
}
module_init(pmac_ide_probe);
MODULE_LICENSE("GPL");
| gpl-2.0 |
AzraelsKiss/android_kernel_samsung_smdk4412 | lib/uuid.c | 4317 | 1398 | /*
* Unified UUID/GUID definition
*
* Copyright (C) 2009, Intel Corp.
* Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uuid.h>
#include <linux/random.h>
/*
 * Fill b[0..15] with random bytes and set the UUID variant bits;
 * the version bits are set by the callers (uuid_le_gen/uuid_be_gen).
 */
static void __uuid_gen_common(__u8 b[16])
{
	int i;
	u32 r;

	for (i = 0; i < 4; i++) {
		r = random32();
		memcpy(b + i * 4, &r, 4);
	}
	/* variant bits: 0b10xxxxxx in octet 8 */
	b[8] = (b[8] & 0x3F) | 0x80;
}
/* Generate a random (version 4) UUID in little-endian field order. */
void uuid_le_gen(uuid_le *lu)
{
	__uuid_gen_common(lu->b);
	/* version 4 : random generation */
	lu->b[7] = (lu->b[7] & 0x0F) | 0x40;
}
EXPORT_SYMBOL_GPL(uuid_le_gen);
/* Generate a random (version 4) UUID in big-endian field order. */
void uuid_be_gen(uuid_be *bu)
{
	__uuid_gen_common(bu->b);
	/* version 4 : random generation */
	bu->b[6] = (bu->b[6] & 0x0F) | 0x40;
}
EXPORT_SYMBOL_GPL(uuid_be_gen);
| gpl-2.0 |
crystalfontz/linux-2.6 | arch/mips/mipssim/sim_console.c | 9437 | 1218 | /*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
* Copyright (C) 2007 MIPS Technologies, Inc.
* written by Ralf Baechle
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/serial_reg.h>
/* Read a register of the simulator's UART at legacy I/O base 0x3f8. */
static inline unsigned int serial_in(int offset)
{
	return inb(0x3f8 + offset);
}
/* Write a register of the simulator's UART at legacy I/O base 0x3f8. */
static inline void serial_out(int offset, int value)
{
	outb(value, 0x3f8 + offset);
}
/*
 * Early console output: busy-wait for the transmit holding register
 * to empty, then emit one character.
 */
void __init prom_putchar(char c)
{
	for (;;) {
		if (serial_in(UART_LSR) & UART_LSR_THRE)
			break;
	}
	serial_out(UART_TX, c);
}
| gpl-2.0 |
R-M-S/DragunKorr_V.12-MAX-3.0.42 | fs/squashfs/export.c | 11485 | 4728 | /*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
* Phillip Lougher <phillip@squashfs.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2,
* or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* export.c
*/
/*
* This file implements code to make Squashfs filesystems exportable (NFS etc.)
*
* The export code uses an inode lookup table to map inode numbers passed in
* filehandles to an inode location on disk. This table is stored compressed
* into metadata blocks. A second index table is used to locate these. This
* second index table for speed of access (and because it is small) is read at
* mount time and cached in memory.
*
* The inode lookup table is used only by the export code, inode disk
* locations are directly encoded in directories, enabling direct access
* without an intermediate lookup for all operations except the export ops.
*/
#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/dcache.h>
#include <linux/exportfs.h>
#include <linux/slab.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs_fs_i.h"
#include "squashfs.h"
/*
 * Look-up inode number (ino) in table, returning the inode location.
 *
 * Returns the 64-bit on-disk inode location, or a negative errno from
 * squashfs_read_metadata() on read failure.
 */
static long long squashfs_inode_lookup(struct super_block *sb, int ino_num)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	/* ino_num is 1-based; index the lookup table from 0 */
	int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
	int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
	u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]);
	__le64 ino;
	int err;

	TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);

	err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
	if (err < 0)
		return err;

	TRACE("squashfs_inode_lookup, inode = 0x%llx\n",
		(u64) le64_to_cpu(ino));

	return le64_to_cpu(ino);
}
/*
 * Map an inode number to a dentry: resolve the on-disk location via
 * the lookup table, read the inode and obtain an alias for it.
 */
static struct dentry *squashfs_export_iget(struct super_block *sb,
	unsigned int ino_num)
{
	long long ino;

	TRACE("Entered squashfs_export_iget\n");

	ino = squashfs_inode_lookup(sb, ino_num);
	if (ino < 0)
		return ERR_PTR(-ENOENT);

	return d_obtain_alias(squashfs_iget(sb, ino, ino_num));
}
/*
 * NFS export op: decode a filehandle into the dentry of the file it
 * refers to.  Only 32-bit inode handles of sufficient length are valid.
 */
static struct dentry *squashfs_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	int type_ok = fh_type == FILEID_INO32_GEN ||
		fh_type == FILEID_INO32_GEN_PARENT;

	if (!type_ok || fh_len < 2)
		return NULL;

	return squashfs_export_iget(sb, fid->i32.ino);
}
/*
 * NFS export op: decode a filehandle into the dentry of the parent
 * directory; requires the parent-carrying handle type and length.
 */
static struct dentry *squashfs_fh_to_parent(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	if (fh_len < 4 || fh_type != FILEID_INO32_GEN_PARENT)
		return NULL;

	return squashfs_export_iget(sb, fid->i32.parent_ino);
}
/*
 * NFS export op: look up the parent of a directory dentry via the
 * parent inode number stored in the squashfs inode.
 */
static struct dentry *squashfs_get_parent(struct dentry *child)
{
	struct inode *dir = child->d_inode;

	return squashfs_export_iget(dir->i_sb, squashfs_i(dir)->parent);
}
/*
 * Read uncompressed inode lookup table indexes off disk into memory
 *
 * Returns the kmalloc'ed index table, or an ERR_PTR on invalid layout
 * or read failure.  The caller owns (and must kfree) the table.
 */
__le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
		u64 lookup_table_start, u64 next_table, unsigned int inodes)
{
	unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
	__le64 *table;

	TRACE("In read_inode_lookup_table, length %d\n", length);

	/* Sanity check values */

	/* there should always be at least one inode */
	if (inodes == 0)
		return ERR_PTR(-EINVAL);

	/* length bytes should not extend into the next table - this check
	 * also traps instances where lookup_table_start is incorrectly larger
	 * than the next table start
	 */
	if (lookup_table_start + length > next_table)
		return ERR_PTR(-EINVAL);

	table = squashfs_read_table(sb, lookup_table_start, length);

	/*
	 * table[0] points to the first inode lookup table metadata block,
	 * this should be less than lookup_table_start
	 */
	if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) {
		kfree(table);
		return ERR_PTR(-EINVAL);
	}

	return table;
}
/* Export operations wired into the squashfs superblock for NFS etc. */
const struct export_operations squashfs_export_ops = {
	.fh_to_dentry = squashfs_fh_to_dentry,
	.fh_to_parent = squashfs_fh_to_parent,
	.get_parent = squashfs_get_parent
};
| gpl-2.0 |
Muyiafan/android_kernel_oneplus_msm8994 | drivers/power/qpnp-charger.c | 222 | 152822 | /* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/spmi.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/radix-tree.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/qpnp/qpnp-adc.h>
#include <linux/power_supply.h>
#include <linux/bitops.h>
#include <linux/ratelimit.h>
#include <linux/wakelock.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
#include <linux/of_batterydata.h>
#include <linux/qpnp-revid.h>
#include <linux/alarmtimer.h>
#include <linux/time.h>
#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/qpnp/pin.h>
/* Interrupt offsets */
#define INT_RT_STS(base) (base + 0x10)
#define INT_SET_TYPE(base) (base + 0x11)
#define INT_POLARITY_HIGH(base) (base + 0x12)
#define INT_POLARITY_LOW(base) (base + 0x13)
#define INT_LATCHED_CLR(base) (base + 0x14)
#define INT_EN_SET(base) (base + 0x15)
#define INT_EN_CLR(base) (base + 0x16)
#define INT_LATCHED_STS(base) (base + 0x18)
#define INT_PENDING_STS(base) (base + 0x19)
#define INT_MID_SEL(base) (base + 0x1A)
#define INT_PRIORITY(base) (base + 0x1B)
/* Peripheral register offsets */
#define CHGR_CHG_OPTION 0x08
#define CHGR_ATC_STATUS 0x0A
#define CHGR_VBAT_STATUS 0x0B
#define CHGR_IBAT_BMS 0x0C
#define CHGR_IBAT_STS 0x0D
#define CHGR_VDD_MAX 0x40
#define CHGR_VDD_SAFE 0x41
#define CHGR_VDD_MAX_STEP 0x42
#define CHGR_IBAT_MAX 0x44
#define CHGR_IBAT_SAFE 0x45
#define CHGR_VIN_MIN 0x47
#define CHGR_VIN_MIN_STEP 0x48
#define CHGR_CHG_CTRL 0x49
#define CHGR_CHG_FAILED 0x4A
#define CHGR_ATC_CTRL 0x4B
#define CHGR_ATC_FAILED 0x4C
#define CHGR_VBAT_TRKL 0x50
#define CHGR_VBAT_WEAK 0x52
#define CHGR_IBAT_ATC_A 0x54
#define CHGR_IBAT_ATC_B 0x55
#define CHGR_IBAT_TERM_CHGR 0x5B
#define CHGR_IBAT_TERM_BMS 0x5C
#define CHGR_VBAT_DET 0x5D
#define CHGR_TTRKL_MAX_EN 0x5E
#define CHGR_TTRKL_MAX 0x5F
#define CHGR_TCHG_MAX_EN 0x60
#define CHGR_TCHG_MAX 0x61
#define CHGR_CHG_WDOG_TIME 0x62
#define CHGR_CHG_WDOG_DLY 0x63
#define CHGR_CHG_WDOG_PET 0x64
#define CHGR_CHG_WDOG_EN 0x65
#define CHGR_IR_DROP_COMPEN 0x67
#define CHGR_I_MAX_REG 0x44
#define CHGR_USB_USB_SUSP 0x47
#define CHGR_USB_USB_OTG_CTL 0x48
#define CHGR_USB_ENUM_T_STOP 0x4E
#define CHGR_USB_TRIM 0xF1
#define CHGR_CHG_TEMP_THRESH 0x66
#define CHGR_BAT_IF_PRES_STATUS 0x08
#define CHGR_STATUS 0x09
#define CHGR_BAT_IF_VCP 0x42
#define CHGR_BAT_IF_BATFET_CTRL1 0x90
#define CHGR_BAT_IF_BATFET_CTRL4 0x93
#define CHGR_BAT_IF_SPARE 0xDF
#define CHGR_MISC_BOOT_DONE 0x42
#define CHGR_BUCK_PSTG_CTRL 0x73
#define CHGR_BUCK_COMPARATOR_OVRIDE_1 0xEB
#define CHGR_BUCK_COMPARATOR_OVRIDE_2 0xEC
#define CHGR_BUCK_COMPARATOR_OVRIDE_3 0xED
#define CHG_OVR0 0xED
#define CHG_TRICKLE_CLAMP 0xE3
#define CHGR_BUCK_BCK_VBAT_REG_MODE 0x74
#define MISC_REVISION2 0x01
#define USB_OVP_CTL 0x42
#define USB_CHG_GONE_REV_BST 0xED
#define BUCK_VCHG_OV 0x77
#define BUCK_TEST_SMBC_MODES 0xE6
#define BUCK_CTRL_TRIM1 0xF1
#define BUCK_CTRL_TRIM3 0xF3
#define SEC_ACCESS 0xD0
#define BAT_IF_VREF_BAT_THM_CTRL 0x4A
#define BAT_IF_BPD_CTRL 0x48
#define BOOST_VSET 0x41
#define BOOST_ENABLE_CONTROL 0x46
#define COMP_OVR1 0xEA
#define BAT_IF_COMP_OVR0 0xE5
#define BAT_IF_BTC_CTRL 0x49
#define BAT_IF_BAT_TEMP_STATUS 0x09
#define USB_OCP_THR 0x52
#define USB_OCP_CLR 0x53
#define BAT_IF_TEMP_STATUS 0x09
#define BOOST_ILIM 0x78
#define USB_SPARE 0xDF
#define DC_COMP_OVR1 0xE9
#define CHGR_COMP_OVR1 0xEE
#define USB_CHGPTH_CTL 0x40
#define REG_OFFSET_PERP_SUBTYPE 0x05
/* SMBB peripheral subtype values */
#define SMBB_CHGR_SUBTYPE 0x01
#define SMBB_BUCK_SUBTYPE 0x02
#define SMBB_BAT_IF_SUBTYPE 0x03
#define SMBB_USB_CHGPTH_SUBTYPE 0x04
#define SMBB_DC_CHGPTH_SUBTYPE 0x05
#define SMBB_BOOST_SUBTYPE 0x06
#define SMBB_MISC_SUBTYPE 0x07
/* SMBB peripheral subtype values */
#define SMBBP_CHGR_SUBTYPE 0x31
#define SMBBP_BUCK_SUBTYPE 0x32
#define SMBBP_BAT_IF_SUBTYPE 0x33
#define SMBBP_USB_CHGPTH_SUBTYPE 0x34
#define SMBBP_BOOST_SUBTYPE 0x36
#define SMBBP_MISC_SUBTYPE 0x37
/* SMBCL peripheral subtype values */
#define SMBCL_CHGR_SUBTYPE 0x41
#define SMBCL_BUCK_SUBTYPE 0x42
#define SMBCL_BAT_IF_SUBTYPE 0x43
#define SMBCL_USB_CHGPTH_SUBTYPE 0x44
#define SMBCL_MISC_SUBTYPE 0x47
#define QPNP_CHARGER_DEV_NAME "qcom,qpnp-charger"
/* Status bits and masks */
#define CHGR_BOOT_DONE BIT(7)
#define CHGR_CHG_EN BIT(7)
#define CHGR_ON_BAT_FORCE_BIT BIT(0)
#define USB_VALID_DEB_20MS 0x03
#define BUCK_VBAT_REG_NODE_SEL_BIT BIT(0)
#define VREF_BATT_THERM_FORCE_ON 0xC0
#define BAT_IF_BPD_CTRL_SEL 0x03
#define VREF_BAT_THM_ENABLED_FSM 0x80
#define REV_BST_DETECTED BIT(0)
#define BAT_THM_EN BIT(1)
#define BAT_ID_EN BIT(0)
#define BOOST_PWR_EN BIT(7)
#define OCP_CLR_BIT BIT(7)
#define OCP_THR_MASK 0x03
#define OCP_THR_900_MA 0x02
#define OCP_THR_500_MA 0x01
#define OCP_THR_200_MA 0x00
#define DC_HIGHER_PRIORITY BIT(7)
#define BATT_TEMP_HOT BIT(6)
#define BATT_TEMP_OK BIT(7)
/* Interrupt definitions */
/* smbb_chg_interrupts */
#define CHG_DONE_IRQ BIT(7)
#define CHG_FAILED_IRQ BIT(6)
#define FAST_CHG_ON_IRQ BIT(5)
#define TRKL_CHG_ON_IRQ BIT(4)
#define STATE_CHANGE_ON_IR BIT(3)
#define CHGWDDOG_IRQ BIT(2)
#define VBAT_DET_HI_IRQ BIT(1)
#define VBAT_DET_LOW_IRQ BIT(0)
/* smbb_buck_interrupts */
#define VDD_LOOP_IRQ BIT(6)
#define IBAT_LOOP_IRQ BIT(5)
#define ICHG_LOOP_IRQ BIT(4)
#define VCHG_LOOP_IRQ BIT(3)
#define OVERTEMP_IRQ BIT(2)
#define VREF_OV_IRQ BIT(1)
#define VBAT_OV_IRQ BIT(0)
/* smbb_bat_if_interrupts */
#define PSI_IRQ BIT(4)
#define VCP_ON_IRQ BIT(3)
#define BAT_FET_ON_IRQ BIT(2)
#define BAT_TEMP_OK_IRQ BIT(1)
#define BATT_PRES_IRQ BIT(0)
/* smbb_usb_interrupts */
#define CHG_GONE_IRQ BIT(2)
#define USBIN_VALID_IRQ BIT(1)
#define COARSE_DET_USB_IRQ BIT(0)
/* smbb_dc_interrupts */
#define DCIN_VALID_IRQ BIT(1)
#define COARSE_DET_DC_IRQ BIT(0)
/* smbb_boost_interrupts */
#define LIMIT_ERROR_IRQ BIT(1)
#define BOOST_PWR_OK_IRQ BIT(0)
/* smbb_misc_interrupts */
#define TFTWDOG_IRQ BIT(0)
/* SMBB types */
#define SMBB BIT(1)
#define SMBBP BIT(2)
#define SMBCL BIT(3)
/* Workaround flags */
#define CHG_FLAGS_VCP_WA BIT(0)
#define BOOST_FLASH_WA BIT(1)
#define POWER_STAGE_WA BIT(2)
/* Per-interrupt bookkeeping: Linux irq number plus disable/wake state. */
struct qpnp_chg_irq {
	int		irq;
	unsigned long		disabled;	/* nesting count for disables */
	unsigned long		wake_enable;	/* nesting count for wake enables */
	bool			is_wake;	/* currently configured as wake irq */
};

/* Pairs a regulator descriptor with its registered regulator device. */
struct qpnp_chg_regulator {
	struct regulator_desc			rdesc;
	struct regulator_dev			*rdev;
};
/**
* struct qpnp_chg_chip - device information
* @dev: device pointer to access the parent
* @spmi: spmi pointer to access spmi information
* @chgr_base: charger peripheral base address
* @buck_base: buck peripheral base address
* @bat_if_base: battery interface peripheral base address
* @usb_chgpth_base: USB charge path peripheral base address
* @dc_chgpth_base: DC charge path peripheral base address
* @boost_base: boost peripheral base address
* @misc_base: misc peripheral base address
* @freq_base: freq peripheral base address
* @bat_is_cool: indicates that battery is cool
* @bat_is_warm: indicates that battery is warm
* @chg_done: indicates that charging is completed
* @usb_present: present status of usb
* @dc_present: present status of dc
* @batt_present: present status of battery
* @use_default_batt_values: flag to report default battery properties
* @btc_disabled Flag to disable btc (disables hot and cold irqs)
* @max_voltage_mv: the max volts the batt should be charged up to
* @min_voltage_mv: min battery voltage before turning the FET on
* @batt_weak_voltage_mv: Weak battery voltage threshold
 * @vbatdet_max_err_mv resume voltage hysteresis
* @max_bat_chg_current: maximum battery charge current in mA
* @warm_bat_chg_ma: warm battery maximum charge current in mA
* @cool_bat_chg_ma: cool battery maximum charge current in mA
* @warm_bat_mv: warm temperature battery target voltage
* @cool_bat_mv: cool temperature battery target voltage
* @resume_delta_mv: voltage delta at which battery resumes charging
* @term_current: the charging based term current
* @safe_current: battery safety current setting
* @maxinput_usb_ma: Maximum Input current USB
* @maxinput_dc_ma: Maximum Input current DC
* @hot_batt_p Hot battery threshold setting
* @cold_batt_p Cold battery threshold setting
* @warm_bat_decidegc Warm battery temperature in degree Celsius
* @cool_bat_decidegc Cool battery temperature in degree Celsius
* @revision: PMIC revision
* @type: SMBB type
* @tchg_mins maximum allowed software initiated charge time
* @thermal_levels amount of thermal mitigation levels
* @thermal_mitigation thermal mitigation level values
* @therm_lvl_sel thermal mitigation level selection
* @dc_psy power supply to export information to userspace
* @usb_psy power supply to export information to userspace
* @bms_psy power supply to export information to userspace
* @batt_psy: power supply to export information to userspace
* @flags: flags to activate specific workarounds
* throughout the driver
*
*/
/* Runtime state for one QPNP/SMBB charger instance; the fields are
 * documented in the kernel-doc block above this struct.
 */
struct qpnp_chg_chip {
	struct device			*dev;
	struct spmi_device		*spmi;
	/* SPMI peripheral base addresses */
	u16				chgr_base;
	u16				buck_base;
	u16				bat_if_base;
	u16				usb_chgpth_base;
	u16				dc_chgpth_base;
	u16				boost_base;
	u16				misc_base;
	u16				freq_base;
	/* Interrupt handles */
	struct qpnp_chg_irq		usbin_valid;
	struct qpnp_chg_irq		usb_ocp;
	struct qpnp_chg_irq		dcin_valid;
	struct qpnp_chg_irq		chg_gone;
	struct qpnp_chg_irq		chg_fastchg;
	struct qpnp_chg_irq		chg_trklchg;
	struct qpnp_chg_irq		chg_failed;
	struct qpnp_chg_irq		chg_vbatdet_lo;
	struct qpnp_chg_irq		batt_pres;
	struct qpnp_chg_irq		batt_temp_ok;
	struct qpnp_chg_irq		coarse_det_usb;
	/* Runtime status flags */
	bool				bat_is_cool;
	bool				bat_is_warm;
	bool				chg_done;
	bool				charger_monitor_checked;
	bool				usb_present;
	u8				usbin_health;
	bool				usb_coarse_det;
	bool				dc_present;
	bool				batt_present;
	bool				charging_disabled;
	bool				ovp_monitor_enable;
	bool				usb_valid_check_ovp;
	bool				btc_disabled;
	bool				use_default_batt_values;
	bool				duty_cycle_100p;
	bool				ibat_calibration_enabled;
	bool				aicl_settled;
	bool				use_external_rsense;
	bool				fastchg_on;
	bool				parallel_ovp_mode;
	/* Configuration, largely populated from device tree */
	unsigned int			bpd_detection;
	unsigned int			max_bat_chg_current;
	unsigned int			warm_bat_chg_ma;
	unsigned int			cool_bat_chg_ma;
	unsigned int			safe_voltage_mv;
	unsigned int			max_voltage_mv;
	unsigned int			min_voltage_mv;
	unsigned int			batt_weak_voltage_mv;
	unsigned int			vbatdet_max_err_mv;
	int				prev_usb_max_ma;
	int				set_vddmax_mv;
	int				delta_vddmax_mv;
	u8				trim_center;
	unsigned int			warm_bat_mv;
	unsigned int			cool_bat_mv;
	unsigned int			resume_delta_mv;
	int				insertion_ocv_uv;
	int				term_current;
	int				soc_resume_limit;
	bool				resuming_charging;
	unsigned int			maxinput_usb_ma;
	unsigned int			maxinput_dc_ma;
	unsigned int			hot_batt_p;
	unsigned int			cold_batt_p;
	int				warm_bat_decidegc;
	int				cool_bat_decidegc;
	int				fake_battery_soc;
	unsigned int			safe_current;
	unsigned int			revision;
	unsigned int			type;
	unsigned int			tchg_mins;
	/* Thermal mitigation table and current level */
	unsigned int			thermal_levels;
	unsigned int			therm_lvl_sel;
	unsigned int			*thermal_mitigation;
	/* Power supplies exported to userspace */
	struct power_supply		dc_psy;
	struct power_supply		*usb_psy;
	struct power_supply		*bms_psy;
	struct power_supply		batt_psy;
	uint32_t			flags;
	/* ADC parameters and deferred work items */
	struct qpnp_adc_tm_btm_param	adc_param;
	struct work_struct		adc_measure_work;
	struct work_struct		adc_disable_work;
	struct delayed_work		arb_stop_work;
	struct delayed_work		eoc_work;
	struct delayed_work		usbin_health_check;
	struct work_struct		soc_check_work;
	struct delayed_work		aicl_check_work;
	struct work_struct		insertion_ocv_work;
	struct work_struct		ocp_clear_work;
	struct work_struct		btc_hot_irq_debounce_work;
	/* Regulators exposed by the charger block */
	struct qpnp_chg_regulator	flash_wa_vreg;
	struct qpnp_chg_regulator	otg_vreg;
	struct qpnp_chg_regulator	boost_vreg;
	struct qpnp_chg_regulator	batfet_vreg;
	bool				batfet_ext_en;
	struct work_struct		batfet_lcl_work;
	/* ADC device handles and locking */
	struct qpnp_vadc_chip		*vadc_dev;
	struct qpnp_iadc_chip		*iadc_dev;
	struct qpnp_adc_tm_chip		*adc_tm_dev;
	struct mutex			jeita_configure_lock;
	struct mutex			batfet_vreg_lock;
	spinlock_t			usbin_health_monitor_lock;
	/* Power-stage reduction workaround state */
	struct alarm			reduce_power_stage_alarm;
	struct work_struct		reduce_power_stage_work;
	bool				power_stage_workaround_running;
	bool				power_stage_workaround_enable;
	bool				is_flash_wa_reg_enabled;
	/* External OVP current-sense GPIO state */
	bool				ext_ovp_ic_gpio_enabled;
	unsigned int			ext_ovp_isns_gpio;
	unsigned int			usb_trim_default;
	u8				chg_temp_thresh_default;
};
/* Forward declaration; defined later in this file. */
static void
qpnp_chg_set_appropriate_battery_current(struct qpnp_chg_chip *chip);

/* Device-tree compatible strings matched by this driver */
static struct of_device_id qpnp_charger_match_table[] = {
	{ .compatible = QPNP_CHARGER_DEV_NAME, },
	{}
};
enum bpd_type {
BPD_TYPE_BAT_ID,
BPD_TYPE_BAT_THM,
BPD_TYPE_BAT_THM_BAT_ID,
};
static const char * const bpd_label[] = {
[BPD_TYPE_BAT_ID] = "bpd_id",
[BPD_TYPE_BAT_THM] = "bpd_thm",
[BPD_TYPE_BAT_THM_BAT_ID] = "bpd_thm_id",
};
/*
 * Battery temperature comparator thresholds, expressed as a percentage
 * (the enum value doubles as the percentage and as an index into
 * btc_value[] below).
 */
enum btc_type {
HOT_THD_25_PCT = 25,
HOT_THD_35_PCT = 35,
COLD_THD_70_PCT = 70,
COLD_THD_80_PCT = 80,
};
/*
 * Register field values for each BTC threshold. Deliberately sparse:
 * indexed directly by the percentage constants from enum btc_type, so
 * most of the 81 entries are unused zero padding.
 */
static u8 btc_value[] = {
[HOT_THD_25_PCT] = 0x0,
[HOT_THD_35_PCT] = BIT(0),
[COLD_THD_70_PCT] = 0x0,
[COLD_THD_80_PCT] = BIT(1),
};
/* USB input health as derived from coarse-det/OVP status bits. */
enum usbin_health {
USBIN_UNKNOW,
USBIN_OK,
USBIN_OVP,
};
/* Set at module load when an external OVP current-sense IC is populated. */
static int ext_ovp_isns_present;
module_param(ext_ovp_isns_present, int, 0444);
/*
 * Sense resistance used by ext_ovp_isns_read() to convert the measured
 * voltage into a current; units presumably milli-ohm — TODO confirm
 * against the board files that set this parameter.
 */
static int ext_ovp_isns_r;
module_param(ext_ovp_isns_r, int, 0444);
/* Runtime enable flag and last reported current for the external OVP ISNS. */
static bool ext_ovp_isns_online;
static long ext_ovp_isns_ua;
/* Buffer size for the "%d\n" current string; worst case ~9A in mA. */
#define MAX_CURRENT_LENGTH_9A 10
#define ISNS_CURRENT_RATIO 2500
/*
 * ext_ovp_isns_read() - module-param getter reporting the external OVP
 * sense current, derived from the P_MUX7_1_1 VADC voltage and the
 * ext_ovp_isns_r sense resistance.
 *
 * Fixes vs. original:
 *  - the pr_debug format had the newline in the middle ("%d\n mA");
 *  - ext_ovp_isns_r / ISNS_CURRENT_RATIO could be zero (unset or small
 *    resistance), causing a division by zero;
 *  - guard against a not-yet-registered "battery" power supply.
 *
 * Returns the number of characters written into @buffer, or 0 when the
 * feature is absent or the measurement cannot be made.
 */
static int ext_ovp_isns_read(char *buffer, const struct kernel_param *kp)
{
	int rc;
	int divisor;
	struct qpnp_vadc_result results;
	struct power_supply *batt_psy;
	struct qpnp_chg_chip *chip;

	if (!ext_ovp_isns_present)
		return 0;

	/* Integer division: a resistance below ISNS_CURRENT_RATIO yields 0 */
	divisor = ext_ovp_isns_r / ISNS_CURRENT_RATIO;
	if (divisor <= 0) {
		pr_err("Invalid ext_ovp_isns_r=%d\n", ext_ovp_isns_r);
		return 0;
	}

	batt_psy = power_supply_get_by_name("battery");
	if (!batt_psy)
		return 0;
	chip = container_of(batt_psy, struct qpnp_chg_chip, batt_psy);

	rc = qpnp_vadc_read(chip->vadc_dev, P_MUX7_1_1, &results);
	if (rc) {
		pr_err("Unable to read vbat rc=%d\n", rc);
		return 0;
	}

	pr_debug("voltage %lld uV, current: %d mA\n", results.physical,
			((int)results.physical / divisor));

	return snprintf(buffer, MAX_CURRENT_LENGTH_9A, "%d\n",
			((int)results.physical / divisor));
}
static int ext_ovp_isns_enable(const char *val, const struct kernel_param *kp)
{
int rc;
struct power_supply *batt_psy = power_supply_get_by_name("battery");
struct qpnp_chg_chip *chip = container_of(batt_psy,
struct qpnp_chg_chip, batt_psy);
rc = param_set_bool(val, kp);
if (rc) {
pr_err("Unable to set gpio en: %d\n", rc);
return rc;
}
if (*(bool *)kp->arg) {
gpio_direction_output(
chip->ext_ovp_isns_gpio, 1);
chip->ext_ovp_ic_gpio_enabled = 1;
pr_debug("enabled GPIO\n");
} else {
gpio_direction_output(
chip->ext_ovp_isns_gpio, 0);
chip->ext_ovp_ic_gpio_enabled = 0;
pr_debug("disabled GPIO\n");
}
return rc;
}
/*
 * ext_ovp_isns_ua is effectively read-only via this ops structure: no
 * .set is provided even though the 0644 permission allows writes.
 * NOTE(review): confirm writes are rejected rather than mishandled by
 * the param core when .set is NULL.
 */
static struct kernel_param_ops ext_ovp_isns_ops = {
.get = ext_ovp_isns_read,
};
module_param_cb(ext_ovp_isns_ua, &ext_ovp_isns_ops, &ext_ovp_isns_ua, 0644);
/* Boolean param that toggles the external OVP ISNS GPIO on write. */
static struct kernel_param_ops ext_ovp_en_ops = {
.set = ext_ovp_isns_enable,
.get = param_get_bool,
};
module_param_cb(ext_ovp_isns_online, &ext_ovp_en_ops,
&ext_ovp_isns_online, 0664);
/*
 * get_bpd() - map a devicetree bpd string to its bpd_type index.
 *
 * Returns the matching enum bpd_type value, or -EINVAL when @name is
 * not one of the known labels.
 */
static inline int
get_bpd(const char *name)
{
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(bpd_label); idx++)
		if (!strcmp(bpd_label[idx], name))
			return idx;

	return -EINVAL;
}
/*
 * is_within_range() - test whether @value lies inside the closed interval
 * spanned by @left and @right, which may be given in either order.
 */
static bool
is_within_range(int value, int left, int right)
{
	int lo = (left < right) ? left : right;
	int hi = (left < right) ? right : left;

	return value >= lo && value <= hi;
}
/*
 * qpnp_chg_read() - read @count bytes from PMIC register @base over SPMI
 * into @val. A zero base means the peripheral was never mapped.
 *
 * Returns 0 on success, -EINVAL for a zero base, or the SPMI error code.
 */
static int
qpnp_chg_read(struct qpnp_chg_chip *chip, u8 *val,
			u16 base, int count)
{
	struct spmi_device *spmi = chip->spmi;
	int rc = 0;

	if (!base) {
		pr_err("base cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
			base, spmi->sid, rc);
		return -EINVAL;
	}

	rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, base, val, count);
	if (!rc)
		return 0;

	pr_err("SPMI read failed base=0x%02x sid=0x%02x rc=%d\n", base,
			spmi->sid, rc);
	return rc;
}
/*
 * qpnp_chg_write() - write @count bytes from @val to PMIC register @base
 * over SPMI. A zero base means the peripheral was never mapped.
 *
 * Returns 0 on success, -EINVAL for a zero base, or the SPMI error code.
 */
static int
qpnp_chg_write(struct qpnp_chg_chip *chip, u8 *val,
			u16 base, int count)
{
	struct spmi_device *spmi = chip->spmi;
	int rc = 0;

	if (!base) {
		pr_err("base cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
			base, spmi->sid, rc);
		return -EINVAL;
	}

	rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, base, val, count);
	if (!rc)
		return 0;

	pr_err("write failed base=0x%02x sid=0x%02x rc=%d\n",
			base, spmi->sid, rc);
	return rc;
}
/*
 * qpnp_chg_masked_write() - read-modify-write: set the @mask bits of the
 * register at @base to @val, leaving the other bits untouched.
 *
 * Returns 0 on success or the error from the underlying read/write.
 */
static int
qpnp_chg_masked_write(struct qpnp_chg_chip *chip, u16 base,
						u8 mask, u8 val, int count)
{
	u8 shadow;
	int rc;

	rc = qpnp_chg_read(chip, &shadow, base, count);
	if (rc) {
		pr_err("spmi read failed: addr=%03X, rc=%d\n", base, rc);
		return rc;
	}
	pr_debug("addr = 0x%x read 0x%x\n", base, shadow);

	shadow = (shadow & ~mask) | (val & mask);

	pr_debug("Writing 0x%x\n", shadow);
	rc = qpnp_chg_write(chip, &shadow, base, count);
	if (rc) {
		pr_err("spmi write failed: addr=%03X, rc=%d\n", base, rc);
		return rc;
	}
	return 0;
}
/*
 * qpnp_chg_enable_irq() - re-enable a charger interrupt, and its wake
 * capability when the irq is marked wake-capable. The disabled/wake bits
 * make both operations idempotent.
 */
static void
qpnp_chg_enable_irq(struct qpnp_chg_irq *irq)
{
	if (__test_and_clear_bit(0, &irq->disabled)) {
		pr_debug("number = %d\n", irq->irq);
		enable_irq(irq->irq);
	}

	if (!irq->is_wake)
		return;

	if (!__test_and_set_bit(0, &irq->wake_enable)) {
		pr_debug("enable wake, number = %d\n", irq->irq);
		enable_irq_wake(irq->irq);
	}
}
/*
 * qpnp_chg_disable_irq() - disable a charger interrupt (non-syncing) and
 * drop its wake capability when it was wake-enabled. Idempotent via the
 * disabled/wake bookkeeping bits.
 */
static void
qpnp_chg_disable_irq(struct qpnp_chg_irq *irq)
{
	if (!__test_and_set_bit(0, &irq->disabled)) {
		pr_debug("number = %d\n", irq->irq);
		disable_irq_nosync(irq->irq);
	}

	if (!irq->is_wake)
		return;

	if (__test_and_clear_bit(0, &irq->wake_enable)) {
		pr_debug("disable wake, number = %d\n", irq->irq);
		disable_irq_wake(irq->irq);
	}
}
static void
qpnp_chg_irq_wake_enable(struct qpnp_chg_irq *irq)
{
if (!__test_and_set_bit(0, &irq->wake_enable)) {
pr_debug("number = %d\n", irq->irq);
enable_irq_wake(irq->irq);
}
irq->is_wake = true;
}
static void
qpnp_chg_irq_wake_disable(struct qpnp_chg_irq *irq)
{
if (__test_and_clear_bit(0, &irq->wake_enable)) {
pr_debug("number = %d\n", irq->irq);
disable_irq_wake(irq->irq);
}
irq->is_wake = false;
}
#define USB_OTG_EN_BIT	BIT(0)
/*
 * qpnp_chg_is_otg_en_set() - report whether USB OTG (host) mode is enabled.
 *
 * Fix vs. original: the error message printed the CHGR_STATUS address even
 * though the failing read targets CHGR_USB_USB_OTG_CTL; report the address
 * that was actually read.
 *
 * Returns 1 if OTG is enabled, 0 if not, or a negative errno on read failure.
 */
static int
qpnp_chg_is_otg_en_set(struct qpnp_chg_chip *chip)
{
	u8 usb_otg_en;
	int rc;

	rc = qpnp_chg_read(chip, &usb_otg_en,
		chip->usb_chgpth_base + CHGR_USB_USB_OTG_CTL,
		1);
	if (rc) {
		pr_err("spmi read failed: addr=%03X, rc=%d\n",
			chip->usb_chgpth_base + CHGR_USB_USB_OTG_CTL, rc);
		return rc;
	}
	pr_debug("usb otg en 0x%x\n", usb_otg_en);

	return (usb_otg_en & USB_OTG_EN_BIT) ? 1 : 0;
}
/*
 * qpnp_chg_is_boost_en_set() - report whether the boost regulator is
 * enabled (BOOST_PWR_EN in BOOST_ENABLE_CONTROL).
 *
 * Returns 1/0, or a negative errno on read failure.
 */
static int
qpnp_chg_is_boost_en_set(struct qpnp_chg_chip *chip)
{
	int rc;
	u8 ctl = 0;

	rc = qpnp_chg_read(chip, &ctl,
		chip->boost_base + BOOST_ENABLE_CONTROL, 1);
	if (rc) {
		pr_err("spmi read failed: addr=%03X, rc=%d\n",
			chip->boost_base + BOOST_ENABLE_CONTROL, rc);
		return rc;
	}
	pr_debug("boost en 0x%x\n", ctl);

	return !!(ctl & BOOST_PWR_EN);
}
/*
 * qpnp_chg_is_batt_temp_ok() - real-time battery temperature status from
 * the BAT_IF interrupt status register.
 *
 * Returns 1 when temperature is OK, 0 otherwise, negative errno on failure.
 */
static int
qpnp_chg_is_batt_temp_ok(struct qpnp_chg_chip *chip)
{
	int rc;
	u8 rt_sts = 0;

	rc = qpnp_chg_read(chip, &rt_sts,
		INT_RT_STS(chip->bat_if_base), 1);
	if (rc) {
		pr_err("spmi read failed: addr=%03X, rc=%d\n",
			INT_RT_STS(chip->bat_if_base), rc);
		return rc;
	}

	return !!(rt_sts & BAT_TEMP_OK_IRQ);
}
/*
 * qpnp_chg_is_batt_present() - real-time battery presence from the BAT_IF
 * interrupt status register.
 *
 * Returns 1 when a battery is present, 0 otherwise, negative errno on failure.
 */
static int
qpnp_chg_is_batt_present(struct qpnp_chg_chip *chip)
{
	int rc;
	u8 rt_sts = 0;

	rc = qpnp_chg_read(chip, &rt_sts,
		INT_RT_STS(chip->bat_if_base), 1);
	if (rc) {
		pr_err("spmi read failed: addr=%03X, rc=%d\n",
			INT_RT_STS(chip->bat_if_base), rc);
		return rc;
	}

	return !!(rt_sts & BATT_PRES_IRQ);
}
/*
 * qpnp_chg_is_batfet_closed() - real-time BATFET state from the BAT_IF
 * interrupt status register.
 *
 * Returns 1 when the BATFET is closed (on), 0 otherwise, negative errno
 * on read failure.
 */
static int
qpnp_chg_is_batfet_closed(struct qpnp_chg_chip *chip)
{
	int rc;
	u8 rt_sts = 0;

	rc = qpnp_chg_read(chip, &rt_sts,
		INT_RT_STS(chip->bat_if_base), 1);
	if (rc) {
		pr_err("spmi read failed: addr=%03X, rc=%d\n",
			INT_RT_STS(chip->bat_if_base), rc);
		return rc;
	}

	return !!(rt_sts & BAT_FET_ON_IRQ);
}
/*
 * qpnp_chg_is_usb_chg_plugged_in() - report valid USB input presence from
 * the USB charge-path real-time interrupt status.
 *
 * Returns 1/0, or a negative errno on read failure.
 */
static int
qpnp_chg_is_usb_chg_plugged_in(struct qpnp_chg_chip *chip)
{
	int rc;
	u8 rt_sts = 0;

	rc = qpnp_chg_read(chip, &rt_sts,
		INT_RT_STS(chip->usb_chgpth_base), 1);
	if (rc) {
		pr_err("spmi read failed: addr=%03X, rc=%d\n",
			INT_RT_STS(chip->usb_chgpth_base), rc);
		return rc;
	}
	pr_debug("chgr usb sts 0x%x\n", rt_sts);

	return !!(rt_sts & USBIN_VALID_IRQ);
}
/*
 * qpnp_is_dc_higher_prio() - report whether the DC input is configured
 * with higher priority than USB.
 *
 * Fix vs. original: the guard was written "if (!chip->type == SMBB)",
 * which negates chip->type *before* the comparison (operator precedence)
 * and so almost never tests what was intended. The intent — only SMBB
 * hardware supports this priority bit — requires "chip->type != SMBB".
 *
 * Returns true when DC has priority; false otherwise or on read failure.
 */
static bool
qpnp_is_dc_higher_prio(struct qpnp_chg_chip *chip)
{
	int rc;
	u8 usb_ctl;

	if (chip->type != SMBB)
		return false;

	rc = qpnp_chg_read(chip, &usb_ctl,
			chip->usb_chgpth_base + USB_CHGPTH_CTL, 1);
	if (rc) {
		pr_err("failed to read usb ctl rc=%d\n", rc);
		return 0;
	}

	return !!(usb_ctl & DC_HIGHER_PRIORITY);
}
/*
 * qpnp_chg_is_ibat_loop_active() - report whether the buck's battery
 * current regulation loop is active (IBAT_LOOP_IRQ in buck RT status).
 *
 * Returns true/false; false on read failure.
 */
static bool
qpnp_chg_is_ibat_loop_active(struct qpnp_chg_chip *chip)
{
	u8 sts = 0;
	int rc;

	rc = qpnp_chg_read(chip, &sts,
			INT_RT_STS(chip->buck_base), 1);
	if (rc) {
		pr_err("failed to read buck RT status rc=%d\n", rc);
		return 0;
	}

	return (sts & IBAT_LOOP_IRQ) != 0;
}
#define USB_VALID_MASK 0xC0
#define USB_VALID_IN_MASK BIT(7)
#define USB_COARSE_DET 0x10
#define USB_VALID_OVP_VALUE	0x40
/*
 * qpnp_chg_check_usb_coarse_det() - report the coarse-detect bit of the
 * USB charge path status register.
 *
 * Returns 1/0, or a negative errno on read failure.
 */
static int
qpnp_chg_check_usb_coarse_det(struct qpnp_chg_chip *chip)
{
	int rc;
	u8 sts = 0;

	rc = qpnp_chg_read(chip, &sts,
		chip->usb_chgpth_base + CHGR_STATUS , 1);
	if (rc) {
		pr_err("spmi read failed: addr=%03X, rc=%d\n",
			chip->usb_chgpth_base + CHGR_STATUS, rc);
		return rc;
	}

	return !!(sts & USB_COARSE_DET);
}
/*
 * qpnp_chg_check_usbin_health() - classify the USB input as OK, OVP, or
 * unknown from the charge-path status and RT interrupt registers.
 *
 * Returns a value from enum usbin_health, or a negative errno if either
 * register read fails.
 */
static int
qpnp_chg_check_usbin_health(struct qpnp_chg_chip *chip)
{
u8 usbin_chg_rt_sts, usb_chgpth_rt_sts;
u8 usbin_health = 0;
int rc;
rc = qpnp_chg_read(chip, &usbin_chg_rt_sts,
chip->usb_chgpth_base + CHGR_STATUS , 1);
if (rc) {
pr_err("spmi read failed: addr=%03X, rc=%d\n",
chip->usb_chgpth_base + CHGR_STATUS, rc);
return rc;
}
rc = qpnp_chg_read(chip, &usb_chgpth_rt_sts,
INT_RT_STS(chip->usb_chgpth_base) , 1);
if (rc) {
pr_err("spmi read failed: addr=%03X, rc=%d\n",
INT_RT_STS(chip->usb_chgpth_base), rc);
return rc;
}
pr_debug("chgr usb sts 0x%x, chgpth rt sts 0x%x\n",
usbin_chg_rt_sts, usb_chgpth_rt_sts);
/* Coarse-det set means something is attached; then distinguish an
 * over-voltage source (valid bits == OVP pattern) from a good one
 * (USBIN_VALID asserted). Neither set leaves health at 0 (USBIN_UNKNOW). */
if ((usbin_chg_rt_sts & USB_COARSE_DET) == USB_COARSE_DET) {
if ((usbin_chg_rt_sts & USB_VALID_MASK)
== USB_VALID_OVP_VALUE) {
usbin_health = USBIN_OVP;
pr_err("Over voltage charger inserted\n");
} else if ((usb_chgpth_rt_sts & USBIN_VALID_IRQ) != 0) {
usbin_health = USBIN_OK;
pr_debug("Valid charger inserted\n");
}
} else {
usbin_health = USBIN_UNKNOW;
pr_debug("Charger plug out\n");
}
return usbin_health;
}
/*
 * qpnp_chg_is_dc_chg_plugged_in() - report valid DC input presence.
 * Hardware without a DC charge path (no dc_chgpth_base) reports 0.
 *
 * Returns 1/0, or a negative errno on read failure.
 */
static int
qpnp_chg_is_dc_chg_plugged_in(struct qpnp_chg_chip *chip)
{
	int rc;
	u8 rt_sts = 0;

	if (!chip->dc_chgpth_base)
		return 0;

	rc = qpnp_chg_read(chip, &rt_sts,
		INT_RT_STS(chip->dc_chgpth_base), 1);
	if (rc) {
		pr_err("spmi read failed: addr=%03X, rc=%d\n",
			INT_RT_STS(chip->dc_chgpth_base), rc);
		return rc;
	}

	return !!(rt_sts & DCIN_VALID_IRQ);
}
/*
 * qpnp_chg_is_ichg_loop_active() - report whether the charge current
 * regulation loop is active (ICHG_LOOP_IRQ in buck RT status).
 *
 * Returns 1/0, or a negative errno on read failure.
 */
static int
qpnp_chg_is_ichg_loop_active(struct qpnp_chg_chip *chip)
{
	int rc;
	u8 sts = 0;

	rc = qpnp_chg_read(chip, &sts, INT_RT_STS(chip->buck_base), 1);
	if (rc) {
		pr_err("spmi read failed: addr=%03X, rc=%d\n",
			INT_RT_STS(chip->buck_base), rc);
		return rc;
	}
	pr_debug("buck usb sts 0x%x\n", sts);

	return !!(sts & ICHG_LOOP_IRQ);
}
#define QPNP_CHG_I_MAX_MIN_100		100
#define QPNP_CHG_I_MAX_MIN_150		150
#define QPNP_CHG_I_MAX_MIN_MA		200
#define QPNP_CHG_I_MAX_MAX_MA		2500
#define QPNP_CHG_I_MAXSTEP_MA		100
/*
 * qpnp_chg_idcmax_set() - program the DC input current limit in mA.
 * 100 mA and 150 mA have dedicated register codes; everything else is
 * encoded in 100 mA steps.
 *
 * Returns 0 on success, -EINVAL for an out-of-range request, or the
 * register write error.
 */
static int
qpnp_chg_idcmax_set(struct qpnp_chg_chip *chip, int mA)
{
	u8 code;

	if (mA < QPNP_CHG_I_MAX_MIN_100 || mA > QPNP_CHG_I_MAX_MAX_MA) {
		pr_err("bad mA=%d asked to set\n", mA);
		return -EINVAL;
	}

	if (mA == QPNP_CHG_I_MAX_MIN_100) {
		code = 0x00;
		pr_debug("current=%d setting %02x\n", mA, code);
	} else if (mA == QPNP_CHG_I_MAX_MIN_150) {
		code = 0x01;
		pr_debug("current=%d setting %02x\n", mA, code);
	} else {
		code = mA / QPNP_CHG_I_MAXSTEP_MA;
		pr_debug("current=%d setting 0x%x\n", mA, code);
	}

	return qpnp_chg_write(chip, &code,
		chip->dc_chgpth_base + CHGR_I_MAX_REG, 1);
}
/*
 * qpnp_chg_iusb_trim_get() - read the current USB trim register value.
 *
 * Returns the trim byte, or 0 when the read fails (errors are logged,
 * not propagated).
 */
static int
qpnp_chg_iusb_trim_get(struct qpnp_chg_chip *chip)
{
	u8 trim_val;
	int rc;

	rc = qpnp_chg_read(chip, &trim_val,
		chip->usb_chgpth_base + CHGR_USB_TRIM, 1);
	if (rc) {
		pr_err("failed to read USB_TRIM rc=%d\n", rc);
		return 0;
	}

	return trim_val;
}
/*
 * qpnp_chg_iusb_trim_set() - write @trim to the USB trim register.
 * The trim register is secure: 0xA5 must be written to SEC_ACCESS
 * immediately before the trim write, so the two-write order is fixed.
 *
 * Returns 0 on success or the first failing write's error code.
 */
static int
qpnp_chg_iusb_trim_set(struct qpnp_chg_chip *chip, int trim)
{
int rc = 0;
rc = qpnp_chg_masked_write(chip,
chip->usb_chgpth_base + SEC_ACCESS,
0xFF,
0xA5, 1);
if (rc) {
pr_err("failed to write SEC_ACCESS rc=%d\n", rc);
return rc;
}
rc = qpnp_chg_masked_write(chip,
chip->usb_chgpth_base + CHGR_USB_TRIM,
0xFF,
trim, 1);
if (rc) {
pr_err("failed to write USB TRIM rc=%d\n", rc);
return rc;
}
return rc;
}
#define IOVP_USB_WALL_TRSH_MA   150
/*
 * qpnp_chg_iusbmax_set() - program the USB input current limit in mA.
 * 100 mA (and below) and 150 mA use dedicated codes; other values use
 * 100 mA steps, clamped to the devicetree maxinput_usb_ma limit.
 *
 * On parts with the VCP workaround flag, comparator override bits in the
 * buck are set around the IUSB_MAX write (SEC_ACCESS unlock, override on,
 * write, 200 us delay, override off) — the statement order is part of the
 * workaround and must not be rearranged.
 *
 * Returns 0 on success, -EINVAL for an out-of-range request, or the last
 * register write's error code.
 */
static int
qpnp_chg_iusbmax_set(struct qpnp_chg_chip *chip, int mA)
{
int rc = 0;
u8 usb_reg = 0, temp = 8;
if (mA < 0 || mA > QPNP_CHG_I_MAX_MAX_MA) {
pr_err("bad mA=%d asked to set\n", mA);
return -EINVAL;
}
if (mA <= QPNP_CHG_I_MAX_MIN_100) {
usb_reg = 0x00;
pr_debug("current=%d setting %02x\n", mA, usb_reg);
return qpnp_chg_write(chip, &usb_reg,
chip->usb_chgpth_base + CHGR_I_MAX_REG, 1);
} else if (mA == QPNP_CHG_I_MAX_MIN_150) {
usb_reg = 0x01;
pr_debug("current=%d setting %02x\n", mA, usb_reg);
return qpnp_chg_write(chip, &usb_reg,
chip->usb_chgpth_base + CHGR_I_MAX_REG, 1);
}
/* Impose input current limit */
if (chip->maxinput_usb_ma)
mA = (chip->maxinput_usb_ma) <= mA ? chip->maxinput_usb_ma : mA;
usb_reg = mA / QPNP_CHG_I_MAXSTEP_MA;
if (chip->flags & CHG_FLAGS_VCP_WA) {
temp = 0xA5;
rc = qpnp_chg_write(chip, &temp,
chip->buck_base + SEC_ACCESS, 1);
rc = qpnp_chg_masked_write(chip,
chip->buck_base + CHGR_BUCK_COMPARATOR_OVRIDE_3,
0x0C, 0x0C, 1);
}
pr_debug("current=%d setting 0x%x\n", mA, usb_reg);
rc = qpnp_chg_write(chip, &usb_reg,
chip->usb_chgpth_base + CHGR_I_MAX_REG, 1);
if (chip->flags & CHG_FLAGS_VCP_WA) {
temp = 0xA5;
udelay(200);
rc = qpnp_chg_write(chip, &temp,
chip->buck_base + SEC_ACCESS, 1);
rc = qpnp_chg_masked_write(chip,
chip->buck_base + CHGR_BUCK_COMPARATOR_OVRIDE_3,
0x0C, 0x00, 1);
}
return rc;
}
#define QPNP_CHG_VINMIN_MIN_MV		4000
#define QPNP_CHG_VINMIN_HIGH_MIN_MV	5600
#define QPNP_CHG_VINMIN_HIGH_MIN_VAL	0x2B
#define QPNP_CHG_VINMIN_MAX_MV		9600
#define QPNP_CHG_VINMIN_STEP_MV		50
#define QPNP_CHG_VINMIN_STEP_HIGH_MV	200
#define QPNP_CHG_VINMIN_MASK		0x3F
#define QPNP_CHG_VINMIN_MIN_VAL	0x0C
/*
 * qpnp_chg_vinmin_set() - program the minimum input voltage (VIN_MIN).
 * Two encodings: 50 mV steps from 4000 mV, 200 mV steps from 5600 mV.
 *
 * Returns 0 on success, -EINVAL for an out-of-range voltage, or the
 * register write error.
 */
static int
qpnp_chg_vinmin_set(struct qpnp_chg_chip *chip, int voltage)
{
	u8 code;

	if (voltage < QPNP_CHG_VINMIN_MIN_MV ||
			voltage > QPNP_CHG_VINMIN_MAX_MV) {
		pr_err("bad mV=%d asked to set\n", voltage);
		return -EINVAL;
	}

	if (voltage >= QPNP_CHG_VINMIN_HIGH_MIN_MV)
		code = QPNP_CHG_VINMIN_HIGH_MIN_VAL +
			(voltage - QPNP_CHG_VINMIN_HIGH_MIN_MV) /
				QPNP_CHG_VINMIN_STEP_HIGH_MV;
	else
		code = QPNP_CHG_VINMIN_MIN_VAL +
			(voltage - QPNP_CHG_VINMIN_MIN_MV) /
				QPNP_CHG_VINMIN_STEP_MV;

	pr_debug("voltage=%d setting %02x\n", voltage, code);
	return qpnp_chg_masked_write(chip,
			chip->chgr_base + CHGR_VIN_MIN,
			QPNP_CHG_VINMIN_MASK, code, 1);
}
/*
 * qpnp_chg_vinmin_get() - read back VIN_MIN and decode it to millivolts
 * (inverse of qpnp_chg_vinmin_set()'s dual-range encoding).
 *
 * Returns the voltage in mV, or 0 when the register read fails.
 */
static int
qpnp_chg_vinmin_get(struct qpnp_chg_chip *chip)
{
	u8 code;
	int rc, mv;

	rc = qpnp_chg_read(chip, &code, chip->chgr_base + CHGR_VIN_MIN, 1);
	if (rc) {
		pr_err("failed to read VIN_MIN rc=%d\n", rc);
		return 0;
	}

	if (code == 0)
		/* NOTE(review): reuses a current constant (100) as mV —
		 * matches the original; confirm intent. */
		mv = QPNP_CHG_I_MAX_MIN_100;
	else if (code >= QPNP_CHG_VINMIN_HIGH_MIN_VAL)
		mv = QPNP_CHG_VINMIN_HIGH_MIN_MV +
			(code - QPNP_CHG_VINMIN_HIGH_MIN_VAL) *
				QPNP_CHG_VINMIN_STEP_HIGH_MV;
	else
		mv = QPNP_CHG_VINMIN_MIN_MV +
			(code - QPNP_CHG_VINMIN_MIN_VAL) *
				QPNP_CHG_VINMIN_STEP_MV;

	pr_debug("vin_min= 0x%02x, ma = %d\n", code, mv);
	return mv;
}
#define QPNP_CHG_VBATWEAK_MIN_MV	2100
#define QPNP_CHG_VBATWEAK_MAX_MV	3600
#define QPNP_CHG_VBATWEAK_STEP_MV	100
/*
 * qpnp_chg_vbatweak_set() - program the weak-battery threshold in mV
 * (100 mV steps from 2100 mV).
 *
 * Returns 0 on success, -EINVAL out of range, or the write error.
 */
static int
qpnp_chg_vbatweak_set(struct qpnp_chg_chip *chip, int vbatweak_mv)
{
	u8 code;

	if (vbatweak_mv < QPNP_CHG_VBATWEAK_MIN_MV ||
			vbatweak_mv > QPNP_CHG_VBATWEAK_MAX_MV)
		return -EINVAL;

	code = (vbatweak_mv - QPNP_CHG_VBATWEAK_MIN_MV) /
			QPNP_CHG_VBATWEAK_STEP_MV;
	pr_debug("voltage=%d setting %02x\n", vbatweak_mv, code);
	return qpnp_chg_write(chip, &code,
		chip->chgr_base + CHGR_VBAT_WEAK, 1);
}
/*
 * qpnp_chg_usb_iusbmax_get() - read back the USB input current limit and
 * decode it to mA (inverse of qpnp_chg_iusbmax_set()'s encoding).
 *
 * Returns the limit in mA, or 0 when the register read fails.
 */
static int
qpnp_chg_usb_iusbmax_get(struct qpnp_chg_chip *chip)
{
	u8 code;
	int rc, ma;

	rc = qpnp_chg_read(chip, &code,
		chip->usb_chgpth_base + CHGR_I_MAX_REG, 1);
	if (rc) {
		pr_err("failed to read IUSB_MAX rc=%d\n", rc);
		return 0;
	}

	switch (code) {
	case 0x00:
		ma = QPNP_CHG_I_MAX_MIN_100;
		break;
	case 0x01:
		ma = QPNP_CHG_I_MAX_MIN_150;
		break;
	default:
		ma = code * QPNP_CHG_I_MAXSTEP_MA;
		break;
	}

	pr_debug("iusbmax = 0x%02x, ma = %d\n", code, ma);
	return ma;
}
#define ILIMIT_OVR_0	0x02
/*
 * override_dcin_ilimit() - force (or release) the DC input current-limit
 * comparator override. DC_COMP_OVR1 is a secure register: SEC_ACCESS must
 * be unlocked immediately before the write, so the order is fixed.
 *
 * Returns 0 on success or a non-zero OR of the two writes' error codes.
 */
static int
override_dcin_ilimit(struct qpnp_chg_chip *chip, bool override)
{
int rc;
pr_debug("override %d\n", override);
rc = qpnp_chg_masked_write(chip,
chip->dc_chgpth_base + SEC_ACCESS,
0xA5,
0xA5, 1);
rc |= qpnp_chg_masked_write(chip,
chip->dc_chgpth_base + DC_COMP_OVR1,
0xFF,
override ? ILIMIT_OVR_0 : 0, 1);
if (rc) {
pr_err("Failed to override dc ilimit rc = %d\n", rc);
return rc;
}
return rc;
}
#define DUAL_PATH_EN	BIT(7)
/*
 * switch_parallel_ovp_mode() - enable/disable dual-path (parallel OVP)
 * operation. Sequence is order-critical: force the DC ilimit override,
 * wait 10 us, unlock SEC_ACCESS, flip DUAL_PATH_EN in USB_SPARE, and only
 * when enabling release the ilimit override afterwards.
 *
 * No-op (returns 0) when either charge path is absent. Returns 0 on
 * success or a register write error.
 */
static int
switch_parallel_ovp_mode(struct qpnp_chg_chip *chip, bool enable)
{
int rc = 0;
if (!chip->usb_chgpth_base || !chip->dc_chgpth_base)
return rc;
pr_debug("enable %d\n", enable);
rc = override_dcin_ilimit(chip, 1);
udelay(10);
/* enable/disable dual path mode */
rc = qpnp_chg_masked_write(chip,
chip->usb_chgpth_base + SEC_ACCESS,
0xA5,
0xA5, 1);
rc |= qpnp_chg_masked_write(chip,
chip->usb_chgpth_base + USB_SPARE,
0xFF,
enable ? DUAL_PATH_EN : 0, 1);
if (rc) {
pr_err("Failed to turn on usb ovp rc = %d\n", rc);
return rc;
}
if (enable)
rc = override_dcin_ilimit(chip, 0);
return rc;
}
#define USB_SUSPEND_BIT BIT(0)
static int
qpnp_chg_usb_suspend_enable(struct qpnp_chg_chip *chip, int enable)
{
/* Turn off DC OVP FET when going into USB suspend */
if (chip->parallel_ovp_mode && enable)
switch_parallel_ovp_mode(chip, 0);
return qpnp_chg_masked_write(chip,
chip->usb_chgpth_base + CHGR_USB_USB_SUSP,
USB_SUSPEND_BIT,
enable ? USB_SUSPEND_BIT : 0, 1);
}
static int
qpnp_chg_charge_en(struct qpnp_chg_chip *chip, int enable)
{
if (chip->insertion_ocv_uv == 0 && enable) {
pr_debug("Battery not present, skipping\n");
return 0;
}
pr_debug("charging %s\n", enable ? "enabled" : "disabled");
return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
CHGR_CHG_EN,
enable ? CHGR_CHG_EN : 0, 1);
}
static int
qpnp_chg_force_run_on_batt(struct qpnp_chg_chip *chip, int disable)
{
/* Don't run on battery for batteryless hardware */
if (chip->use_default_batt_values)
return 0;
/* Don't force on battery if battery is not present */
if (!qpnp_chg_is_batt_present(chip))
return 0;
/* This bit forces the charger to run off of the battery rather
* than a connected charger */
return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_CHG_CTRL,
CHGR_ON_BAT_FORCE_BIT,
disable ? CHGR_ON_BAT_FORCE_BIT : 0, 1);
}
#define BUCK_DUTY_MASK_100P	0x30
/*
 * qpnp_buck_set_100_duty_cycle_enable() - allow (or disallow) the buck to
 * run at 100% duty cycle. The mode register is secure, so SEC_ACCESS is
 * unlocked first; that ordering must be preserved.
 *
 * Returns 0 on success or the first failing write's error code.
 */
static int
qpnp_buck_set_100_duty_cycle_enable(struct qpnp_chg_chip *chip, int enable)
{
	int rc;

	pr_debug("enable: %d\n", enable);

	rc = qpnp_chg_masked_write(chip,
		chip->buck_base + SEC_ACCESS, 0xA5, 0xA5, 1);
	if (rc) {
		pr_debug("failed to write sec access rc=%d\n", rc);
		return rc;
	}

	rc = qpnp_chg_masked_write(chip,
		chip->buck_base + BUCK_TEST_SMBC_MODES,
		BUCK_DUTY_MASK_100P, enable ? 0x00 : 0x10, 1);
	if (rc)
		pr_debug("failed enable 100p duty cycle rc=%d\n", rc);

	return rc;
}
#define COMPATATOR_OVERRIDE_0	0x80
/*
 * qpnp_chg_toggle_chg_done_logic() - enable normal charge-done detection
 * or override the comparator. The override register is secure, so
 * SEC_ACCESS is unlocked first; that ordering must be preserved.
 *
 * Returns 0 on success or the first failing write's error code.
 */
static int
qpnp_chg_toggle_chg_done_logic(struct qpnp_chg_chip *chip, int enable)
{
	int rc;

	pr_debug("toggle: %d\n", enable);

	rc = qpnp_chg_masked_write(chip,
		chip->buck_base + SEC_ACCESS, 0xA5, 0xA5, 1);
	if (rc) {
		pr_debug("failed to write sec access rc=%d\n", rc);
		return rc;
	}

	rc = qpnp_chg_masked_write(chip,
		chip->buck_base + CHGR_BUCK_COMPARATOR_OVRIDE_1,
		0xC0, enable ? 0x00 : COMPATATOR_OVERRIDE_0, 1);
	if (rc)
		pr_debug("failed to toggle chg done override rc=%d\n", rc);

	return rc;
}
#define QPNP_CHG_VBATDET_MIN_MV	3240
#define QPNP_CHG_VBATDET_MAX_MV	5780
#define QPNP_CHG_VBATDET_STEP_MV	20
/*
 * qpnp_chg_vbatdet_set() - program the resume-charging detect voltage in
 * mV (20 mV steps from 3240 mV).
 *
 * Returns 0 on success, -EINVAL out of range, or the write error.
 */
static int
qpnp_chg_vbatdet_set(struct qpnp_chg_chip *chip, int vbatdet_mv)
{
	u8 code;

	if (vbatdet_mv < QPNP_CHG_VBATDET_MIN_MV ||
			vbatdet_mv > QPNP_CHG_VBATDET_MAX_MV) {
		pr_err("bad mV=%d asked to set\n", vbatdet_mv);
		return -EINVAL;
	}

	code = (vbatdet_mv - QPNP_CHG_VBATDET_MIN_MV) /
			QPNP_CHG_VBATDET_STEP_MV;
	pr_debug("voltage=%d setting %02x\n", vbatdet_mv, code);
	return qpnp_chg_write(chip, &code,
		chip->chgr_base + CHGR_VBAT_DET, 1);
}
/*
 * qpnp_chg_set_appropriate_vbatdet() - pick the resume-detect voltage for
 * the current JEITA/resume state and program it. Cool/warm thresholds
 * take priority over the resuming-charging case.
 */
static void
qpnp_chg_set_appropriate_vbatdet(struct qpnp_chg_chip *chip)
{
	int target_mv;

	if (chip->bat_is_cool)
		target_mv = chip->cool_bat_mv - chip->resume_delta_mv;
	else if (chip->bat_is_warm)
		target_mv = chip->warm_bat_mv - chip->resume_delta_mv;
	else if (chip->resuming_charging)
		target_mv = chip->max_voltage_mv + chip->resume_delta_mv;
	else
		target_mv = chip->max_voltage_mv - chip->resume_delta_mv;

	qpnp_chg_vbatdet_set(chip, target_mv);
}
/*
 * qpnp_arb_stop_work() - delayed work that restores charging state after
 * the reverse-boost (ARB) workaround: re-enable charging unless a charge
 * cycle already completed, then restore the run-on-battery setting.
 */
static void
qpnp_arb_stop_work(struct work_struct *work)
{
	struct delayed_work *delayed = to_delayed_work(work);
	struct qpnp_chg_chip *chip = container_of(delayed,
			struct qpnp_chg_chip, arb_stop_work);

	if (!chip->chg_done)
		qpnp_chg_charge_en(chip, !chip->charging_disabled);
	qpnp_chg_force_run_on_batt(chip, chip->charging_disabled);
}
/*
 * qpnp_bat_if_adc_measure_work() - kick off an ADC threshold measurement
 * using the chip's cached adc_param; failures are only logged.
 */
static void
qpnp_bat_if_adc_measure_work(struct work_struct *work)
{
	struct qpnp_chg_chip *chip = container_of(work,
			struct qpnp_chg_chip, adc_measure_work);
	int err = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
			&chip->adc_param);

	if (err)
		pr_err("request ADC error\n");
}
/*
 * qpnp_bat_if_adc_disable_work() - stop the ADC threshold measurement
 * previously requested with the chip's adc_param.
 */
static void
qpnp_bat_if_adc_disable_work(struct work_struct *work)
{
	struct qpnp_chg_chip *chip = container_of(work,
			struct qpnp_chg_chip, adc_disable_work);

	qpnp_adc_tm_disable_chan_meas(chip->adc_tm_dev, &chip->adc_param);
}
#define EOC_CHECK_PERIOD_MS	10000
/*
 * qpnp_chg_vbatdet_lo_irq_handler() - battery voltage fell below the
 * resume-detect threshold. If fast charge is running, arm the periodic
 * end-of-charge check (holding a wakeup reference), disable this one-shot
 * irq, and notify all registered power supplies.
 */
static irqreturn_t
qpnp_chg_vbatdet_lo_irq_handler(int irq, void *_chip)
{
struct qpnp_chg_chip *chip = _chip;
u8 chg_sts = 0;
int rc;
pr_debug("vbatdet-lo triggered\n");
rc = qpnp_chg_read(chip, &chg_sts, INT_RT_STS(chip->chgr_base), 1);
if (rc)
pr_err("failed to read chg_sts rc=%d\n", rc);
pr_debug("chg_done chg_sts: 0x%x triggered\n", chg_sts);
if (!chip->charging_disabled && (chg_sts & FAST_CHG_ON_IRQ)) {
schedule_delayed_work(&chip->eoc_work,
msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
pm_stay_awake(chip->dev);
}
/* One-shot: re-enabled elsewhere once the condition is handled */
qpnp_chg_disable_irq(&chip->chg_vbatdet_lo);
pr_debug("psy changed usb_psy\n");
power_supply_changed(chip->usb_psy);
if (chip->dc_chgpth_base) {
pr_debug("psy changed dc_psy\n");
power_supply_changed(&chip->dc_psy);
}
if (chip->bat_if_base) {
pr_debug("psy changed batt_psy\n");
power_supply_changed(&chip->batt_psy);
}
return IRQ_HANDLED;
}
#define ARB_STOP_WORK_MS	1000
/*
 * qpnp_chg_usb_chg_gone_irq_handler() - "charger gone" fired while an
 * input is still reported present: the reverse-boost/ARB condition.
 * Work around it by disabling charging and forcing run-on-battery, then
 * restore state from arb_stop_work after ARB_STOP_WORK_MS.
 */
static irqreturn_t
qpnp_chg_usb_chg_gone_irq_handler(int irq, void *_chip)
{
struct qpnp_chg_chip *chip = _chip;
u8 usb_sts;
int rc;
rc = qpnp_chg_read(chip, &usb_sts,
INT_RT_STS(chip->usb_chgpth_base), 1);
if (rc)
pr_err("failed to read usb_chgpth_sts rc=%d\n", rc);
pr_debug("chg_gone triggered\n");
if ((qpnp_chg_is_usb_chg_plugged_in(chip)
|| qpnp_chg_is_dc_chg_plugged_in(chip))
&& (usb_sts & CHG_GONE_IRQ)) {
if (ext_ovp_isns_present) {
pr_debug("EXT OVP IC ISNS disabled due to ARB WA\n");
gpio_direction_output(chip->ext_ovp_isns_gpio, 0);
}
qpnp_chg_charge_en(chip, 0);
qpnp_chg_force_run_on_batt(chip, 1);
schedule_delayed_work(&chip->arb_stop_work,
msecs_to_jiffies(ARB_STOP_WORK_MS));
}
return IRQ_HANDLED;
}
/*
 * qpnp_chg_usb_usb_ocp_irq_handler() - USB over-current detected; defer
 * the clearing/recovery sequence to process context via ocp_clear_work.
 */
static irqreturn_t
qpnp_chg_usb_usb_ocp_irq_handler(int irq, void *_chip)
{
	struct qpnp_chg_chip *chip = _chip;

	pr_debug("usb-ocp triggered\n");
	schedule_work(&chip->ocp_clear_work);

	return IRQ_HANDLED;
}
#define BOOST_ILIMIT_MIN	0x07
#define BOOST_ILIMIT_DEF	0x02
#define BOOST_ILIMT_MASK	0xFF
/*
 * qpnp_chg_ocp_clear_work() - recover from a USB OCP event. The hardware
 * sequence is order-critical:
 *  1. on SMBBP, drop the boost current limit to minimum first;
 *  2. clear the OCP latch;
 *  3. force the USB OVP FET off (OTG enable bit);
 *  4. on SMBBP, wait 100 ms for the OCP circuitry, then either restore
 *     the default boost ilim (coarse-det present) or warn about a likely
 *     USB short to ground.
 */
static void
qpnp_chg_ocp_clear_work(struct work_struct *work)
{
int rc;
u8 usb_sts;
struct qpnp_chg_chip *chip = container_of(work,
struct qpnp_chg_chip, ocp_clear_work);
if (chip->type == SMBBP) {
rc = qpnp_chg_masked_write(chip,
chip->boost_base + BOOST_ILIM,
BOOST_ILIMT_MASK,
BOOST_ILIMIT_MIN, 1);
if (rc) {
pr_err("Failed to turn configure ilim rc = %d\n", rc);
return;
}
}
rc = qpnp_chg_masked_write(chip,
chip->usb_chgpth_base + USB_OCP_CLR,
OCP_CLR_BIT,
OCP_CLR_BIT, 1);
if (rc)
pr_err("Failed to clear OCP bit rc = %d\n", rc);
/* force usb ovp fet off */
rc = qpnp_chg_masked_write(chip,
chip->usb_chgpth_base + CHGR_USB_USB_OTG_CTL,
USB_OTG_EN_BIT,
USB_OTG_EN_BIT, 1);
if (rc)
pr_err("Failed to turn off usb ovp rc = %d\n", rc);
if (chip->type == SMBBP) {
/* Wait for OCP circuitry to be powered up */
msleep(100);
rc = qpnp_chg_read(chip, &usb_sts,
INT_RT_STS(chip->usb_chgpth_base), 1);
if (rc) {
pr_err("failed to read interrupt sts %d\n", rc);
return;
}
if (usb_sts & COARSE_DET_USB_IRQ) {
rc = qpnp_chg_masked_write(chip,
chip->boost_base + BOOST_ILIM,
BOOST_ILIMT_MASK,
BOOST_ILIMIT_DEF, 1);
if (rc) {
pr_err("Failed to set ilim rc = %d\n", rc);
return;
}
} else {
pr_warn_ratelimited("USB short to GND detected!\n");
}
}
}
#define QPNP_CHG_VDDMAX_MIN	3400
#define QPNP_CHG_V_MIN_MV	3240
#define QPNP_CHG_V_MAX_MV	4500
#define QPNP_CHG_V_STEP_MV	10
#define QPNP_CHG_BUCK_TRIM1_STEP	10
#define QPNP_CHG_BUCK_VDD_TRIM_MASK	0xF0
/*
 * qpnp_chg_vddmax_and_trim_set() - program the maximum charge voltage
 * (10 mV steps from 3240 mV) plus a buck VDD trim offset. The trim
 * register is secure, so SEC_ACCESS is unlocked immediately before the
 * trim write; the trim code is clamped to the 4-bit field around the
 * factory trim_center.
 *
 * Returns 0 on success, -EINVAL out of range, or a register write error.
 */
static int
qpnp_chg_vddmax_and_trim_set(struct qpnp_chg_chip *chip,
int voltage, int trim_mv)
{
int rc, trim_set;
u8 vddmax = 0, trim = 0;
if (voltage < QPNP_CHG_VDDMAX_MIN
|| voltage > QPNP_CHG_V_MAX_MV) {
pr_err("bad mV=%d asked to set\n", voltage);
return -EINVAL;
}
vddmax = (voltage - QPNP_CHG_V_MIN_MV) / QPNP_CHG_V_STEP_MV;
rc = qpnp_chg_write(chip, &vddmax, chip->chgr_base + CHGR_VDD_MAX, 1);
if (rc) {
pr_err("Failed to write vddmax: %d\n", rc);
return rc;
}
rc = qpnp_chg_masked_write(chip,
chip->buck_base + SEC_ACCESS,
0xFF,
0xA5, 1);
if (rc) {
pr_err("failed to write SEC_ACCESS rc=%d\n", rc);
return rc;
}
/* Clamp the signed trim adjustment into the 4-bit trim field */
trim_set = clamp((int)chip->trim_center
+ (trim_mv / QPNP_CHG_BUCK_TRIM1_STEP),
0, 0xF);
trim = (u8)trim_set << 4;
rc = qpnp_chg_masked_write(chip,
chip->buck_base + BUCK_CTRL_TRIM1,
QPNP_CHG_BUCK_VDD_TRIM_MASK,
trim, 1);
if (rc) {
pr_err("Failed to write buck trim1: %d\n", rc);
return rc;
}
pr_debug("voltage=%d+%d setting vddmax: %02x, trim: %02x\n",
voltage, trim_mv, vddmax, trim);
return 0;
}
/*
 * qpnp_chg_vddmax_get() - read back CHGR_VDD_MAX and decode it to mV.
 *
 * Fix vs. original: the error message said "Failed to write vddmax" for
 * what is a register *read*.
 *
 * Returns the voltage in mV, or a negative errno on read failure
 * (callers treating the result as mV must check for negative values).
 */
static int
qpnp_chg_vddmax_get(struct qpnp_chg_chip *chip)
{
	int rc;
	u8 vddmax = 0;

	rc = qpnp_chg_read(chip, &vddmax, chip->chgr_base + CHGR_VDD_MAX, 1);
	if (rc) {
		pr_err("Failed to read vddmax: %d\n", rc);
		return rc;
	}

	return QPNP_CHG_V_MIN_MV + (int)vddmax * QPNP_CHG_V_STEP_MV;
}
/* JEITA compliance logic */
/*
 * qpnp_chg_set_appropriate_vddmax() - choose the charge-voltage ceiling
 * for the current JEITA state (cool/warm override the normal maximum)
 * and program it together with the cached vddmax trim delta.
 */
static void
qpnp_chg_set_appropriate_vddmax(struct qpnp_chg_chip *chip)
{
	int target_mv;

	if (chip->bat_is_cool)
		target_mv = chip->cool_bat_mv;
	else if (chip->bat_is_warm)
		target_mv = chip->warm_bat_mv;
	else
		target_mv = chip->max_voltage_mv;

	qpnp_chg_vddmax_and_trim_set(chip, target_mv, chip->delta_vddmax_mv);
}
#define BATFET_LPM_MASK		0xC0
#define BATFET_LPM		0x40
#define BATFET_NO_LPM		0x00
/*
 * qpnp_chg_regulator_batfet_set() - switch the BATFET between low-power
 * and normal mode. The control register location differs by chip type
 * (SPARE on SMBB, BATFET_CTRL4 otherwise). No-op when charging is
 * disabled or the BAT_IF block is absent.
 *
 * Returns 0 on success (or skip) or the register write error.
 */
static int
qpnp_chg_regulator_batfet_set(struct qpnp_chg_chip *chip, bool enable)
{
	u16 addr;

	if (chip->charging_disabled || !chip->bat_if_base)
		return 0;

	if (chip->type == SMBB)
		addr = chip->bat_if_base + CHGR_BAT_IF_SPARE;
	else
		addr = chip->bat_if_base + CHGR_BAT_IF_BATFET_CTRL4;

	return qpnp_chg_masked_write(chip, addr, BATFET_LPM_MASK,
			enable ? BATFET_NO_LPM : BATFET_LPM, 1);
}
/*
 * qpnp_usbin_health_check_work() - delayed work (scheduled after the
 * usbin debounce time from the coarse-det irq) that re-evaluates USB
 * input health, publishes any change to the usb power supply, and then
 * re-arms OVP monitoring in the usbin-valid irq path.
 */
static void
qpnp_usbin_health_check_work(struct work_struct *work)
{
int usbin_health = 0;
u8 psy_health_sts = 0;
struct delayed_work *dwork = to_delayed_work(work);
struct qpnp_chg_chip *chip = container_of(dwork,
struct qpnp_chg_chip, usbin_health_check);
usbin_health = qpnp_chg_check_usbin_health(chip);
spin_lock(&chip->usbin_health_monitor_lock);
if (chip->usbin_health != usbin_health) {
pr_debug("health_check_work: pr_usbin_health = %d, usbin_health = %d",
chip->usbin_health, usbin_health);
chip->usbin_health = usbin_health;
if (usbin_health == USBIN_OVP)
psy_health_sts = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
else if (usbin_health == USBIN_OK)
psy_health_sts = POWER_SUPPLY_HEALTH_GOOD;
power_supply_set_health_state(chip->usb_psy, psy_health_sts);
power_supply_changed(chip->usb_psy);
}
/* enable OVP monitor in usb valid after coarse-det complete */
chip->usb_valid_check_ovp = true;
spin_unlock(&chip->usbin_health_monitor_lock);
return;
}
#define USB_VALID_DEBOUNCE_TIME_MASK	0x3
#define USB_DEB_BYPASS		0x0
#define USB_DEB_5MS		0x1
#define USB_DEB_10MS		0x2
#define USB_DEB_20MS		0x3
/*
 * qpnp_chg_coarse_det_usb_irq_handler() - coarse-detect edge on the USB
 * input. On a rising edge, schedule usbin_health_check after the
 * configured usbin-valid debounce time; on a falling edge, mark the usb
 * psy health unknown. OVP monitoring in the usbin-valid handler is
 * suppressed until the health-check work re-enables it.
 *
 * Fix vs. original: on a register read failure the handler returned the
 * negative errno, but an irq handler must return an irqreturn_t; return
 * IRQ_HANDLED after logging instead.
 */
static irqreturn_t
qpnp_chg_coarse_det_usb_irq_handler(int irq, void *_chip)
{
	struct qpnp_chg_chip *chip = _chip;
	int host_mode, rc = 0;
	int debounce[] = {
		[USB_DEB_BYPASS] = 0,
		[USB_DEB_5MS] = 5,
		[USB_DEB_10MS] = 10,
		[USB_DEB_20MS] = 20 };
	u8 ovp_ctl;
	bool usb_coarse_det;

	host_mode = qpnp_chg_is_otg_en_set(chip);
	usb_coarse_det = qpnp_chg_check_usb_coarse_det(chip);
	pr_debug("usb coarse-det triggered: %d host_mode: %d\n",
			usb_coarse_det, host_mode);
	if (host_mode)
		return IRQ_HANDLED;
	/* ignore to monitor OVP in usbin valid irq handler
	 * if the coarse-det fired first, do the OVP state monitor
	 * in the usbin_health_check work, and after the work,
	 * enable monitor OVP in usbin valid irq handler */
	chip->usb_valid_check_ovp = false;
	if (chip->usb_coarse_det ^ usb_coarse_det) {
		chip->usb_coarse_det = usb_coarse_det;
		if (usb_coarse_det) {
			/* usb coarse-det rising edge, check the usbin_valid
			 * debounce time setting, and start a delay work to
			 * check the OVP status */
			rc = qpnp_chg_read(chip, &ovp_ctl,
					chip->usb_chgpth_base + USB_OVP_CTL,
					1);
			if (rc) {
				pr_err("spmi read failed: addr=%03X, rc=%d\n",
					chip->usb_chgpth_base + USB_OVP_CTL,
					rc);
				return IRQ_HANDLED;
			}
			ovp_ctl = ovp_ctl & USB_VALID_DEBOUNCE_TIME_MASK;
			schedule_delayed_work(&chip->usbin_health_check,
					msecs_to_jiffies(debounce[ovp_ctl]));
		} else {
			/* usb coarse-det falling edge, set the usb psy health
			 * status to unknown */
			pr_debug("usb coarse det clear, set usb health to unknown\n");
			chip->usbin_health = USBIN_UNKNOW;
			power_supply_set_health_state(chip->usb_psy,
				POWER_SUPPLY_HEALTH_UNKNOWN);
			power_supply_changed(chip->usb_psy);
		}
	}
	return IRQ_HANDLED;
}
#define USB_WALL_THRESHOLD_MA	500
#define ENUM_T_STOP_BIT		BIT(0)
#define USB_5V_UV	5000000
#define USB_9V_UV	9000000
/*
 * qpnp_chg_usb_usbin_valid_irq_handler() - USB input validity changed.
 * On removal: optionally record an OVP health transition, reset charge
 * state, restore the DC input limit (unless DC has priority), leave USB
 * suspend, and drop the USB limit/trim to defaults. On insertion:
 * optionally record an OK health transition and kick off the EOC and SOC
 * check work. Either way, publish presence and refresh the BATFET mode.
 */
static irqreturn_t
qpnp_chg_usb_usbin_valid_irq_handler(int irq, void *_chip)
{
struct qpnp_chg_chip *chip = _chip;
int usb_present, host_mode, usbin_health;
u8 psy_health_sts;
usb_present = qpnp_chg_is_usb_chg_plugged_in(chip);
host_mode = qpnp_chg_is_otg_en_set(chip);
pr_debug("usbin-valid triggered: %d host_mode: %d\n",
usb_present, host_mode);
/* In host mode notifications come from USB supply */
if (host_mode)
return IRQ_HANDLED;
if (chip->usb_present ^ usb_present) {
chip->aicl_settled = false;
chip->usb_present = usb_present;
if (!usb_present) {
/* when a valid charger inserted, and increase the
 * charger voltage to OVP threshold, then
 * usb_in_valid falling edge interrupt triggers.
 * So we handle the OVP monitor here, and ignore
 * other health state changes */
if (chip->ovp_monitor_enable &&
(chip->usb_valid_check_ovp)) {
usbin_health =
qpnp_chg_check_usbin_health(chip);
if ((chip->usbin_health != usbin_health)
&& (usbin_health == USBIN_OVP)) {
chip->usbin_health = usbin_health;
psy_health_sts =
POWER_SUPPLY_HEALTH_OVERVOLTAGE;
power_supply_set_health_state(
chip->usb_psy,
psy_health_sts);
power_supply_changed(chip->usb_psy);
}
}
if (!qpnp_chg_is_dc_chg_plugged_in(chip))
chip->chg_done = false;
if (!qpnp_is_dc_higher_prio(chip))
qpnp_chg_idcmax_set(chip, chip->maxinput_dc_ma);
qpnp_chg_usb_suspend_enable(chip, 0);
qpnp_chg_iusbmax_set(chip, QPNP_CHG_I_MAX_MIN_100);
qpnp_chg_iusb_trim_set(chip, chip->usb_trim_default);
chip->prev_usb_max_ma = -EINVAL;
} else {
/* when OVP clamped usbin, and then decrease
 * the charger voltage to lower than the OVP
 * threshold, a usbin_valid rising edge
 * interrupt triggered. So we change the usb
 * psy health state back to good */
if (chip->ovp_monitor_enable &&
(chip->usb_valid_check_ovp)) {
usbin_health =
qpnp_chg_check_usbin_health(chip);
if ((chip->usbin_health != usbin_health)
&& (usbin_health == USBIN_OK)) {
chip->usbin_health = usbin_health;
psy_health_sts =
POWER_SUPPLY_HEALTH_GOOD;
power_supply_set_health_state(
chip->usb_psy,
psy_health_sts);
power_supply_changed(chip->usb_psy);
}
}
schedule_delayed_work(&chip->eoc_work,
msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
schedule_work(&chip->soc_check_work);
}
power_supply_set_present(chip->usb_psy, chip->usb_present);
schedule_work(&chip->batfet_lcl_work);
}
return IRQ_HANDLED;
}
#define BUCK_VIN_LOOP_CMP_OVRD_MASK	0x30
/*
 * qpnp_chg_bypass_vchg_loop_debouncer() - bypass (or restore) the VCHG
 * loop comparator debouncer via the buck override register. The override
 * register is secure, so SEC_ACCESS is unlocked first.
 *
 * Returns 0 on success or the first failing write's error code.
 */
static int
qpnp_chg_bypass_vchg_loop_debouncer(struct qpnp_chg_chip *chip, bool bypass)
{
	u8 ovrd_val = bypass ? 0x10 : 0;
	int rc;

	pr_debug("bypass vchg_loop debouncer: %d\n", bypass);

	rc = qpnp_chg_masked_write(chip, chip->buck_base + SEC_ACCESS,
					0xFF, 0xA5, 1);
	if (rc) {
		pr_err("failed to write SEC_ACCESS register, rc = %d\n", rc);
		return rc;
	}

	rc = qpnp_chg_masked_write(chip,
			chip->buck_base + CHGR_BUCK_COMPARATOR_OVRIDE_2,
			BUCK_VIN_LOOP_CMP_OVRD_MASK, ovrd_val, 1);
	if (rc)
		pr_err("failed to write BUCK_COMP_OVRIDE_2, rc = %d\n", rc);

	return rc;
}
/*
 * Read back the VCHG loop debouncer bypass setting.
 *
 * Returns the masked BUCK_VIN_LOOP_CMP_OVRD bits (non-zero when bypass is
 * active), or 0 if the register read fails — read errors are reported but
 * deliberately mapped to "not bypassed" rather than propagated.
 */
static int
qpnp_chg_vchg_loop_debouncer_setting_get(struct qpnp_chg_chip *chip)
{
	int rc;
	u8 value;

	rc = qpnp_chg_read(chip, &value,
			chip->buck_base + CHGR_BUCK_COMPARATOR_OVRIDE_2, 1);
	if (rc) {
		pr_err("failed to read BUCK_CMP_OVERIDE_2, rc = %d\n", rc);
		return 0;
	}

	return value & BUCK_VIN_LOOP_CMP_OVRD_MASK;
}
#define BAT_TOO_HOT_BYPASS 0x04
/*
 * Bypass (or restore) the battery-temperature-comparator HOT trip.
 *
 * @bypass: true writes BAT_TOO_HOT_BYPASS into BAT_IF_COMP_OVR0, false
 *          clears the override register.
 *
 * SEC_ACCESS is written first to unlock the secured override register.
 * Both write results are OR-ed together so a failure in either step is
 * reported and returned.
 */
static int
bypass_btc_hot_comparator(struct qpnp_chg_chip *chip, bool bypass)
{
	int rc;

	pr_debug("bypass %d\n", bypass);
	rc = qpnp_chg_masked_write(chip,
		chip->bat_if_base + SEC_ACCESS, 0xA5, 0xA5, 1);
	rc |= qpnp_chg_masked_write(chip,
		chip->bat_if_base + BAT_IF_COMP_OVR0, 0xFF,
		bypass ? BAT_TOO_HOT_BYPASS : 0, 1);
	if (rc)
		pr_err("Failed to bypass BAT_TOO_HOT rc = %d\n", rc);

	return rc;
}
#define TEST_EN_SMBC_LOOP 0xE5
#define IBAT_REGULATION_DISABLE BIT(2)
#define BATT_TEMP_STAT_MASK (BIT(6) | BIT(7))
#define BATT_TEMP_COLD 0
/*
 * Battery temperature status IRQ handler.
 *
 * Re-reads the BTC temperature status; when the HOT threshold tripped it
 * kicks off a debounce worker to double-check the thermistor voltage.
 * While a battery is present it also enables/disables IBAT regulation in
 * the buck loop according to whether the temperature is OK.
 *
 * Fix: the original returned the raw negative errno (`return rc;`) from a
 * function declared irqreturn_t — a negative value is not a valid
 * irqreturn_t and confuses the genirq core. Error paths now log and
 * return IRQ_HANDLED.
 */
static irqreturn_t
qpnp_chg_bat_if_batt_temp_irq_handler(int irq, void *_chip)
{
	struct qpnp_chg_chip *chip = _chip;
	int batt_temp_good, batt_present, rc;
	u8 batt_temp, batt_hot_sts;

	batt_temp_good = qpnp_chg_is_batt_temp_ok(chip);
	pr_debug("batt-temp triggered: %d\n", batt_temp_good);

	/* Read battery temp status */
	rc = qpnp_chg_read(chip, &batt_temp,
			chip->bat_if_base + BAT_IF_BAT_TEMP_STATUS, 1);
	if (rc) {
		pr_err("failed to read BAT TEMP status rc=%d\n", rc);
		return IRQ_HANDLED;
	}
	batt_hot_sts = batt_temp & BATT_TEMP_STAT_MASK;

	/*
	 * If BTC is triggered at HOT_THD, start a work to double check the
	 * battery thermal voltage
	 */
	if (batt_hot_sts == BATT_TEMP_HOT)
		schedule_work(&chip->btc_hot_irq_debounce_work);

	batt_present = qpnp_chg_is_batt_present(chip);
	if (batt_present) {
		/* Unlock the secured TEST_EN_SMBC_LOOP register. */
		rc = qpnp_chg_masked_write(chip,
			chip->buck_base + SEC_ACCESS,
			0xFF,
			0xA5, 1);
		if (rc) {
			pr_err("failed to write SEC_ACCESS rc=%d\n", rc);
			return IRQ_HANDLED;
		}

		/* Disable IBAT regulation while temperature is out of spec. */
		rc = qpnp_chg_masked_write(chip,
			chip->buck_base + TEST_EN_SMBC_LOOP,
			IBAT_REGULATION_DISABLE,
			batt_temp_good ? 0 : IBAT_REGULATION_DISABLE, 1);
		if (rc) {
			pr_err("failed to write COMP_OVR1 rc=%d\n", rc);
			return IRQ_HANDLED;
		}
	}

	pr_debug("psy changed batt_psy\n");
	power_supply_changed(&chip->batt_psy);
	return IRQ_HANDLED;
}
/*
 * Battery presence IRQ handler.
 *
 * On insertion: configures IBAT regulation per current temperature state
 * and schedules the insertion-OCV worker. On removal: clears the
 * regulation override, resets the stored insertion OCV and disables
 * charging. In either case the power-supply clients are notified and the
 * ADC temperature-notification workers are toggled when warm/cool
 * thresholds are configured.
 *
 * Fix: the original returned negative errnos (`return rc;`) from a
 * function declared irqreturn_t, which is not a valid irqreturn_t value.
 * Error paths now log and return IRQ_HANDLED.
 */
static irqreturn_t
qpnp_chg_bat_if_batt_pres_irq_handler(int irq, void *_chip)
{
	struct qpnp_chg_chip *chip = _chip;
	int batt_present, batt_temp_good, rc;

	batt_present = qpnp_chg_is_batt_present(chip);
	pr_debug("batt-pres triggered: %d\n", batt_present);

	/* Act only on an actual presence transition. */
	if (chip->batt_present ^ batt_present) {
		if (batt_present) {
			batt_temp_good = qpnp_chg_is_batt_temp_ok(chip);
			/* Unlock the secured TEST_EN_SMBC_LOOP register. */
			rc = qpnp_chg_masked_write(chip,
				chip->buck_base + SEC_ACCESS,
				0xFF,
				0xA5, 1);
			if (rc) {
				pr_err("failed to write SEC_ACCESS: %d\n", rc);
				return IRQ_HANDLED;
			}

			rc = qpnp_chg_masked_write(chip,
				chip->buck_base + TEST_EN_SMBC_LOOP,
				IBAT_REGULATION_DISABLE,
				batt_temp_good
				? 0 : IBAT_REGULATION_DISABLE, 1);
			if (rc) {
				pr_err("failed to write COMP_OVR1 rc=%d\n", rc);
				return IRQ_HANDLED;
			}
			schedule_work(&chip->insertion_ocv_work);
		} else {
			rc = qpnp_chg_masked_write(chip,
				chip->buck_base + SEC_ACCESS,
				0xFF,
				0xA5, 1);
			if (rc) {
				pr_err("failed to write SEC_ACCESS: %d\n", rc);
				return IRQ_HANDLED;
			}

			/* Battery gone: re-enable IBAT regulation. */
			rc = qpnp_chg_masked_write(chip,
				chip->buck_base + TEST_EN_SMBC_LOOP,
				IBAT_REGULATION_DISABLE,
				0, 1);
			if (rc) {
				pr_err("failed to write COMP_OVR1 rc=%d\n", rc);
				return IRQ_HANDLED;
			}
			chip->insertion_ocv_uv = 0;
			qpnp_chg_charge_en(chip, 0);
		}
		chip->batt_present = batt_present;
		pr_debug("psy changed batt_psy\n");
		power_supply_changed(&chip->batt_psy);
		pr_debug("psy changed usb_psy\n");
		power_supply_changed(chip->usb_psy);

		if ((chip->cool_bat_decidegc || chip->warm_bat_decidegc)
						&& batt_present) {
			pr_debug("enabling vadc notifications\n");
			schedule_work(&chip->adc_measure_work);
		} else if ((chip->cool_bat_decidegc || chip->warm_bat_decidegc)
				&& !batt_present) {
			schedule_work(&chip->adc_disable_work);
			pr_debug("disabling vadc notifications\n");
		}
	}

	return IRQ_HANDLED;
}
/*
 * DC input (DCIN) valid IRQ handler.
 *
 * On a presence transition it: mirrors the new state into chip->dc_present,
 * forces run-on-battery while OTG is active, schedules EOC/SOC work when a
 * charger remains, and — when DC has configured priority over USB — clamps
 * the USB input current while DC is attached. Ends by notifying the dc and
 * battery power supplies and scheduling the BATFET latch worker.
 */
static irqreturn_t
qpnp_chg_dc_dcin_valid_irq_handler(int irq, void *_chip)
{
	struct qpnp_chg_chip *chip = _chip;
	int dc_present;

	dc_present = qpnp_chg_is_dc_chg_plugged_in(chip);
	pr_debug("dcin-valid triggered: %d\n", dc_present);

	/* Only act on an actual state change. */
	if (chip->dc_present ^ dc_present) {
		chip->dc_present = dc_present;
		/* While OTG boost is on, keep the system on battery. */
		if (qpnp_chg_is_otg_en_set(chip))
			qpnp_chg_force_run_on_batt(chip, !dc_present ? 1 : 0);
		if (!dc_present && (!qpnp_chg_is_usb_chg_plugged_in(chip) ||
					qpnp_chg_is_otg_en_set(chip))) {
			chip->chg_done = false;
		} else {
			/* A charger is still attached: track end-of-charge. */
			schedule_delayed_work(&chip->eoc_work,
				msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
			schedule_work(&chip->soc_check_work);
		}

		if (qpnp_is_dc_higher_prio(chip)) {
			pr_debug("dc has higher priority\n");
			if (dc_present) {
				/* Throttle USB input while DC charges. */
				qpnp_chg_iusbmax_set(chip,
						QPNP_CHG_I_MAX_MIN_100);
				power_supply_set_voltage_limit(chip->usb_psy,
						USB_5V_UV);
			} else {
				chip->aicl_settled = false;
				qpnp_chg_iusbmax_set(chip,
						USB_WALL_THRESHOLD_MA);
				power_supply_set_voltage_limit(chip->usb_psy,
						USB_9V_UV);
			}
		}

		pr_debug("psy changed dc_psy\n");
		power_supply_changed(&chip->dc_psy);
		pr_debug("psy changed batt_psy\n");
		power_supply_changed(&chip->batt_psy);
		schedule_work(&chip->batfet_lcl_work);
	}

	return IRQ_HANDLED;
}
#define CHGR_CHG_FAILED_BIT BIT(7)
/*
 * Charge-failed IRQ handler (e.g. safety timer expiry).
 *
 * Writes the CHG_FAILED clear bit back to acknowledge the latched failure,
 * then notifies every registered supply so user space sees the state
 * change. Peripheral-presence checks guard the optional bat_if/dc blocks.
 */
static irqreturn_t
qpnp_chg_chgr_chg_failed_irq_handler(int irq, void *_chip)
{
	struct qpnp_chg_chip *chip = _chip;
	int rc;

	pr_debug("chg_failed triggered\n");

	/* Writing the bit back clears the latched failure condition. */
	rc = qpnp_chg_masked_write(chip,
		chip->chgr_base + CHGR_CHG_FAILED,
		CHGR_CHG_FAILED_BIT,
		CHGR_CHG_FAILED_BIT, 1);
	if (rc)
		pr_err("Failed to write chg_fail clear bit!\n");

	if (chip->bat_if_base) {
		pr_debug("psy changed batt_psy\n");
		power_supply_changed(&chip->batt_psy);
	}
	pr_debug("psy changed usb_psy\n");
	power_supply_changed(chip->usb_psy);
	if (chip->dc_chgpth_base) {
		pr_debug("psy changed dc_psy\n");
		power_supply_changed(&chip->dc_psy);
	}
	return IRQ_HANDLED;
}
/*
 * Trickle-charge IRQ handler.
 *
 * Entering trickle charge means a fresh charge cycle has begun, so the
 * "charge done" latch is cleared and the battery supply is notified.
 */
static irqreturn_t
qpnp_chg_chgr_chg_trklchg_irq_handler(int irq, void *_chip)
{
	struct qpnp_chg_chip *chip = _chip;

	pr_debug("TRKL IRQ triggered\n");

	chip->chg_done = false;
	if (chip->bat_if_base) {
		pr_debug("psy changed batt_psy\n");
		power_supply_changed(&chip->batt_psy);
	}

	return IRQ_HANDLED;
}
/*
 * Report whether the fast-charge state machine is active.
 *
 * Returns 1 when FAST_CHG_ON_IRQ is set in the charger's real-time status,
 * 0 when it is not, or the negative read error code.
 *
 * NOTE(review): on a read failure the negative rc propagates to callers
 * that assign the result to a bool (see the fastchg IRQ handler), where it
 * evaluates as true — confirm whether that is intended.
 */
static int qpnp_chg_is_fastchg_on(struct qpnp_chg_chip *chip)
{
	u8 chgr_sts;
	int rc;

	rc = qpnp_chg_read(chip, &chgr_sts, INT_RT_STS(chip->chgr_base), 1);
	if (rc) {
		pr_err("failed to read interrupt status %d\n", rc);
		return rc;
	}
	pr_debug("chgr_sts 0x%x\n", chgr_sts);
	return (chgr_sts & FAST_CHG_ON_IRQ) ? 1 : 0;
}
#define VBATDET_BYPASS 0x01
/*
 * Bypass (or restore) the VBATDET comparator.
 *
 * @bypass: true writes VBATDET_BYPASS into CHGR_COMP_OVR1, false clears
 *          the override.
 *
 * SEC_ACCESS is written first to unlock the secured override register;
 * both write results are OR-ed so a failure in either step is reported.
 *
 * Fix: removed the redundant `if (rc) { ...; return rc; } return rc;`
 * structure — both paths returned rc, so the early return was dead.
 */
static int
bypass_vbatdet_comp(struct qpnp_chg_chip *chip, bool bypass)
{
	int rc;

	pr_debug("bypass %d\n", bypass);
	rc = qpnp_chg_masked_write(chip,
			chip->chgr_base + SEC_ACCESS,
			0xA5,
			0xA5, 1);
	rc |= qpnp_chg_masked_write(chip,
			chip->chgr_base + CHGR_COMP_OVR1,
			0xFF,
			bypass ? VBATDET_BYPASS : 0, 1);
	if (rc)
		pr_err("Failed to bypass vbatdet comp rc = %d\n", rc);

	return rc;
}
/*
 * Fast-charge IRQ handler.
 *
 * Tracks fast-charge entry/exit. On entry: bypasses the VBATDET
 * comparator, reapplies warm/cool voltage and current limits, restores
 * VBATDET after a resume, schedules end-of-charge monitoring and enables
 * the parallel-OVP / external-OVP paths where configured. On exit it
 * undoes the OVP mode and (when temperature is normal) the VBATDET
 * bypass. Always re-enables the vbatdet-lo IRQ on the way out.
 */
static irqreturn_t
qpnp_chg_chgr_chg_fastchg_irq_handler(int irq, void *_chip)
{
	struct qpnp_chg_chip *chip = _chip;
	bool fastchg_on = false;

	/* One-shot wake source: drop it until re-armed by resume logic. */
	qpnp_chg_irq_wake_disable(&chip->chg_fastchg);

	fastchg_on = qpnp_chg_is_fastchg_on(chip);

	pr_debug("FAST_CHG IRQ triggered, fastchg_on: %d\n", fastchg_on);

	/* Only act on an actual fast-charge state transition. */
	if (chip->fastchg_on ^ fastchg_on) {
		chip->fastchg_on = fastchg_on;
		if (chip->bat_if_base) {
			pr_debug("psy changed batt_psy\n");
			power_supply_changed(&chip->batt_psy);
		}

		pr_debug("psy changed usb_psy\n");
		power_supply_changed(chip->usb_psy);

		if (chip->dc_chgpth_base) {
			pr_debug("psy changed dc_psy\n");
			power_supply_changed(&chip->dc_psy);
		}

		if (fastchg_on) {
			chip->chg_done = false;
			bypass_vbatdet_comp(chip, 1);
			if (chip->bat_is_warm || chip->bat_is_cool) {
				qpnp_chg_set_appropriate_vddmax(chip);
				qpnp_chg_set_appropriate_battery_current(chip);
			}

			if (chip->resuming_charging) {
				chip->resuming_charging = false;
				qpnp_chg_set_appropriate_vbatdet(chip);
			}

			if (!chip->charging_disabled) {
				schedule_delayed_work(&chip->eoc_work,
					msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
				pm_stay_awake(chip->dev);
			}
			if (chip->parallel_ovp_mode)
				switch_parallel_ovp_mode(chip, 1);

			if (ext_ovp_isns_present &&
					chip->ext_ovp_ic_gpio_enabled) {
				pr_debug("EXT OVP IC ISNS enabled\n");
				gpio_direction_output(
						chip->ext_ovp_isns_gpio, 1);
			}
		} else {
			if (chip->parallel_ovp_mode)
				switch_parallel_ovp_mode(chip, 0);
			if (!chip->bat_is_warm && !chip->bat_is_cool)
				bypass_vbatdet_comp(chip, 0);
		}
	}

	qpnp_chg_enable_irq(&chip->chg_vbatdet_lo);

	return IRQ_HANDLED;
}
/*
 * Tell the power-supply core which DC supply properties user space may
 * write. Only the input current limit is settable.
 */
static int
qpnp_dc_property_is_writeable(struct power_supply *psy,
						enum power_supply_property psp)
{
	if (psp == POWER_SUPPLY_PROP_CURRENT_MAX)
		return 1;

	return 0;
}
/*
 * Tell the power-supply core which battery supply properties user space
 * may write: charge enable, thermal level, input current limit/trim,
 * AICL-settled, VCHG debouncer bypass, VIN_MIN, warm/cool thresholds and
 * (fake) capacity.
 */
static int
qpnp_batt_property_is_writeable(struct power_supply *psy,
						enum power_supply_property psp)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
	case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
	case POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM:
	case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
	case POWER_SUPPLY_PROP_VCHG_LOOP_DBC_BYPASS:
	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
	case POWER_SUPPLY_PROP_COOL_TEMP:
	case POWER_SUPPLY_PROP_WARM_TEMP:
	case POWER_SUPPLY_PROP_CAPACITY:
		return 1;
	default:
		return 0;
	}
}
/*
 * Enable or disable buck charging.
 *
 * Honors the user's charging_disabled override (silently ignores an
 * enable request while charging is disabled). Pairs the charge-enable bit
 * with the complementary force-run-on-battery bit.
 *
 * Returns 0 on success or a negative write error.
 */
static int
qpnp_chg_buck_control(struct qpnp_chg_chip *chip, int enable)
{
	int rc;

	if (chip->charging_disabled && enable) {
		pr_debug("Charging disabled\n");
		return 0;
	}

	rc = qpnp_chg_charge_en(chip, enable);
	if (rc) {
		pr_err("Failed to control charging %d\n", rc);
		return rc;
	}

	/* Run-on-battery is the logical inverse of charging. */
	rc = qpnp_chg_force_run_on_batt(chip, !enable);
	if (rc)
		pr_err("Failed to control charging %d\n", rc);

	return rc;
}
/*
 * Leave USB OTG (host) mode and return to charge mode.
 *
 * No-op when OTG is not currently enabled. On SMBBP parts the boost
 * current limit is restored to its default first, then the USB OVP FET is
 * re-enabled by clearing USB_OTG_EN_BIT, and finally run-on-battery is
 * restored to match the user's charging_disabled setting.
 *
 * Returns 0 on success or the first negative write error.
 */
static int
switch_usb_to_charge_mode(struct qpnp_chg_chip *chip)
{
	int rc;

	pr_debug("switch to charge mode\n");
	if (!qpnp_chg_is_otg_en_set(chip))
		return 0;

	if (chip->type == SMBBP) {
		/* Restore default boost ILIM before leaving host mode. */
		rc = qpnp_chg_masked_write(chip,
			chip->boost_base + BOOST_ILIM,
			BOOST_ILIMT_MASK,
			BOOST_ILIMIT_DEF, 1);
		if (rc) {
			pr_err("Failed to set ilim rc = %d\n", rc);
			return rc;
		}
	}

	/* enable usb ovp fet */
	rc = qpnp_chg_masked_write(chip,
			chip->usb_chgpth_base + CHGR_USB_USB_OTG_CTL,
			USB_OTG_EN_BIT,
			0, 1);
	if (rc) {
		pr_err("Failed to turn on usb ovp rc = %d\n", rc);
		return rc;
	}

	rc = qpnp_chg_force_run_on_batt(chip, chip->charging_disabled);
	if (rc) {
		pr_err("Failed re-enable charging rc = %d\n", rc);
		return rc;
	}

	return 0;
}
/*
 * Enter USB OTG (host) mode.
 *
 * No-op when OTG is already enabled. Sequence: disable parallel OVP,
 * (SMBBP) drop boost ILIM to minimum while the OCP circuit powers up,
 * force run-on-battery unless DC is charging, then force the USB OVP FET
 * off via USB_OTG_EN_BIT. On SMBBP, after a 100 ms OCP power-up delay,
 * the ILIM is restored to default only if COARSE_DET_USB shows a valid
 * VBUS level — otherwise a USB short to ground is reported.
 *
 * Returns 0 on success or the first negative read/write error.
 */
static int
switch_usb_to_host_mode(struct qpnp_chg_chip *chip)
{
	int rc;
	u8 usb_sts;

	pr_debug("switch to host mode\n");
	if (qpnp_chg_is_otg_en_set(chip))
		return 0;

	if (chip->parallel_ovp_mode)
		switch_parallel_ovp_mode(chip, 0);

	if (chip->type == SMBBP) {
		/* Keep boost ILIM minimal until OCP is confirmed alive. */
		rc = qpnp_chg_masked_write(chip,
			chip->boost_base + BOOST_ILIM,
			BOOST_ILIMT_MASK,
			BOOST_ILIMIT_MIN, 1);
		if (rc) {
			pr_err("Failed to turn configure ilim rc = %d\n", rc);
			return rc;
		}
	}

	if (!qpnp_chg_is_dc_chg_plugged_in(chip)) {
		rc = qpnp_chg_force_run_on_batt(chip, 1);
		if (rc) {
			pr_err("Failed to disable charging rc = %d\n", rc);
			return rc;
		}
	}

	/* force usb ovp fet off */
	rc = qpnp_chg_masked_write(chip,
			chip->usb_chgpth_base + CHGR_USB_USB_OTG_CTL,
			USB_OTG_EN_BIT,
			USB_OTG_EN_BIT, 1);
	if (rc) {
		pr_err("Failed to turn off usb ovp rc = %d\n", rc);
		return rc;
	}

	if (chip->type == SMBBP) {
		/* Wait for OCP circuitry to be powered up */
		msleep(100);
		rc = qpnp_chg_read(chip, &usb_sts,
				INT_RT_STS(chip->usb_chgpth_base), 1);
		if (rc) {
			pr_err("failed to read interrupt sts %d\n", rc);
			return rc;
		}

		if (usb_sts & COARSE_DET_USB_IRQ) {
			rc = qpnp_chg_masked_write(chip,
				chip->boost_base + BOOST_ILIM,
				BOOST_ILIMT_MASK,
				BOOST_ILIMIT_DEF, 1);
			if (rc) {
				pr_err("Failed to set ilim rc = %d\n", rc);
				return rc;
			}
		} else {
			pr_warn_ratelimited("USB short to GND detected!\n");
		}
	}

	return 0;
}
/* Properties exported by the DC ("mains") power supply. */
static enum power_supply_property pm_power_props_mains[] = {
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_CURRENT_MAX,
};
/* Properties exported by the battery power supply. */
static enum power_supply_property msm_batt_power_props[] = {
	POWER_SUPPLY_PROP_CHARGING_ENABLED,
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_CHARGE_TYPE,
	POWER_SUPPLY_PROP_HEALTH,
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_TECHNOLOGY,
	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CAPACITY,
	POWER_SUPPLY_PROP_CURRENT_NOW,
	POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
	POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM,
	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
	POWER_SUPPLY_PROP_VCHG_LOOP_DBC_BYPASS,
	POWER_SUPPLY_PROP_VOLTAGE_MIN,
	POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
	POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
	POWER_SUPPLY_PROP_CHARGE_FULL,
	POWER_SUPPLY_PROP_TEMP,
	POWER_SUPPLY_PROP_COOL_TEMP,
	POWER_SUPPLY_PROP_WARM_TEMP,
	POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
	POWER_SUPPLY_PROP_CYCLE_COUNT,
	POWER_SUPPLY_PROP_VOLTAGE_OCV,
};
/* The DC supply powers the battery supply... */
static char *pm_power_supplied_to[] = {
	"battery",
};

/* ...and the battery supply feeds the BMS. */
static char *pm_batt_supplied_to[] = {
	"bms",
};
/* Set by a user-space charger monitor daemon; gates AICL behavior. */
static int charger_monitor;
module_param(charger_monitor, int, 0644);

/* Read-only flag: board has an external OVP device in the USB path. */
static int ext_ovp_present;
module_param(ext_ovp_present, int, 0444);
#define OVP_USB_WALL_TRSH_MA 200
/*
 * get_property callback for the DC ("mains") power supply.
 *
 * PRESENT/ONLINE report the live DCIN plugged-in state (forced to 0 while
 * charging is disabled); CURRENT_MAX reports the configured DC input
 * limit in uA.
 */
static int
qpnp_power_get_property_mains(struct power_supply *psy,
				  enum power_supply_property psp,
				  union power_supply_propval *val)
{
	struct qpnp_chg_chip *chip = container_of(psy, struct qpnp_chg_chip,
								dc_psy);

	switch (psp) {
	case POWER_SUPPLY_PROP_PRESENT:
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = 0;
		if (chip->charging_disabled)
			return 0;

		val->intval = qpnp_chg_is_dc_chg_plugged_in(chip);
		break;
	case POWER_SUPPLY_PROP_CURRENT_MAX:
		/* mA -> uA */
		val->intval = chip->maxinput_dc_ma * 1000;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Delayed worker: if no user-space charger monitor showed up and USB is
 * plugged in, raise IUSB_MAX to the USB supply's advertised CURRENT_MAX
 * (only when above the wall-charger threshold). Marks the check done so
 * later input-current decisions know whether a monitor exists.
 */
static void
qpnp_aicl_check_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct qpnp_chg_chip *chip = container_of(dwork,
				struct qpnp_chg_chip, aicl_check_work);
	union power_supply_propval ret = {0,};

	if (!charger_monitor && qpnp_chg_is_usb_chg_plugged_in(chip)) {
		chip->usb_psy->get_property(chip->usb_psy,
			  POWER_SUPPLY_PROP_CURRENT_MAX, &ret);
		/* ret.intval is in uA; iusbmax takes mA. */
		if ((ret.intval / 1000) > USB_WALL_THRESHOLD_MA) {
			pr_debug("no charger_monitor present set iusbmax %d\n",
					ret.intval / 1000);
			qpnp_chg_iusbmax_set(chip, ret.intval / 1000);
		}
	} else {
		pr_debug("charger_monitor is present\n");
	}
	chip->charger_monitor_checked = true;
}
/*
 * Read the instantaneous battery voltage (uV) through the VADC.
 *
 * Returns 0 on unsupported silicon (SMBB rev 1.0) or on a VADC read
 * failure; otherwise the measured physical value.
 */
static int
get_prop_battery_voltage_now(struct qpnp_chg_chip *chip)
{
	int rc = 0;
	struct qpnp_vadc_result results;

	/* VBAT sensing is not available on SMBB v1.0 parts. */
	if (chip->revision == 0 && chip->type == SMBB) {
		pr_err("vbat reading not supported for 1.0 rc=%d\n", rc);
		return 0;
	}

	rc = qpnp_vadc_read(chip->vadc_dev, VBAT_SNS, &results);
	if (rc) {
		pr_err("Unable to read vbat rc=%d\n", rc);
		return 0;
	}

	return results.physical;
}
#define BATT_PRES_BIT BIT(7)
/*
 * Report battery presence from the BAT_IF presence status register.
 *
 * Returns 1 when BATT_PRES_BIT is set, 0 when clear or when the register
 * read fails (failure is logged and treated as "absent").
 *
 * Fix: removed the stray semicolon after the if-block's closing brace
 * (an empty statement left over from editing).
 */
static int
get_prop_batt_present(struct qpnp_chg_chip *chip)
{
	u8 batt_present;
	int rc;

	rc = qpnp_chg_read(chip, &batt_present,
				chip->bat_if_base + CHGR_BAT_IF_PRES_STATUS, 1);
	if (rc) {
		pr_err("Couldn't read battery status read failed rc=%d\n", rc);
		return 0;
	}
	return (batt_present & BATT_PRES_BIT) ? 1 : 0;
}
/*
 * Map the BTC temperature status into a power-supply health value.
 *
 * BATT_TEMP_OK -> GOOD, BATT_TEMP_HOT -> OVERHEAT, otherwise COLD
 * (when neither OK nor HOT is flagged the battery is below the cold
 * threshold). A read failure returns HEALTH_UNKNOWN.
 *
 * Fix: removed the stray semicolon after the if-block's closing brace.
 */
static int
get_prop_batt_health(struct qpnp_chg_chip *chip)
{
	u8 batt_health;
	int rc;

	rc = qpnp_chg_read(chip, &batt_health,
				chip->bat_if_base + CHGR_STATUS, 1);
	if (rc) {
		pr_err("Couldn't read battery health read failed rc=%d\n", rc);
		return POWER_SUPPLY_HEALTH_UNKNOWN;
	}

	if (BATT_TEMP_OK & batt_health)
		return POWER_SUPPLY_HEALTH_GOOD;
	if (BATT_TEMP_HOT & batt_health)
		return POWER_SUPPLY_HEALTH_OVERHEAT;
	else
		return POWER_SUPPLY_HEALTH_COLD;
}
/*
 * Report the active charge phase from the charger's real-time status.
 *
 * Returns TRICKLE or FAST when the corresponding IRQ status bit is set;
 * NONE when no battery is present, when neither bit is set, or when the
 * status read fails.
 */
static int
get_prop_charge_type(struct qpnp_chg_chip *chip)
{
	int rc;
	u8 chgr_sts;

	if (!get_prop_batt_present(chip))
		return POWER_SUPPLY_CHARGE_TYPE_NONE;

	rc = qpnp_chg_read(chip, &chgr_sts,
				INT_RT_STS(chip->chgr_base), 1);
	if (rc) {
		pr_err("failed to read interrupt sts %d\n", rc);
		return POWER_SUPPLY_CHARGE_TYPE_NONE;
	}

	if (chgr_sts & TRKL_CHG_ON_IRQ)
		return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
	if (chgr_sts & FAST_CHG_ON_IRQ)
		return POWER_SUPPLY_CHARGE_TYPE_FAST;

	return POWER_SUPPLY_CHARGE_TYPE_NONE;
}
#define DEFAULT_CAPACITY 50
static int
get_batt_capacity(struct qpnp_chg_chip *chip)
{
union power_supply_propval ret = {0,};
if (chip->fake_battery_soc >= 0)
return chip->fake_battery_soc;
if (chip->use_default_batt_values || !get_prop_batt_present(chip))
return DEFAULT_CAPACITY;
if (chip->bms_psy) {
chip->bms_psy->get_property(chip->bms_psy,
POWER_SUPPLY_PROP_CAPACITY, &ret);
return ret.intval;
}
return DEFAULT_CAPACITY;
}
static int
get_prop_batt_status(struct qpnp_chg_chip *chip)
{
int rc;
u8 chgr_sts, bat_if_sts;
rc = qpnp_chg_read(chip, &chgr_sts, INT_RT_STS(chip->chgr_base), 1);
if (rc) {
pr_err("failed to read interrupt sts %d\n", rc);
return POWER_SUPPLY_CHARGE_TYPE_NONE;
}
rc = qpnp_chg_read(chip, &bat_if_sts, INT_RT_STS(chip->bat_if_base), 1);
if (rc) {
pr_err("failed to read bat_if sts %d\n", rc);
return POWER_SUPPLY_CHARGE_TYPE_NONE;
}
if ((chgr_sts & TRKL_CHG_ON_IRQ) && !(bat_if_sts & BAT_FET_ON_IRQ))
return POWER_SUPPLY_STATUS_CHARGING;
if (chgr_sts & FAST_CHG_ON_IRQ && bat_if_sts & BAT_FET_ON_IRQ)
return POWER_SUPPLY_STATUS_CHARGING;
/*
* Report full if state of charge is 100 or chg_done is true
* when a charger is connected and boost is disabled
*/
if ((qpnp_chg_is_usb_chg_plugged_in(chip) ||
qpnp_chg_is_dc_chg_plugged_in(chip)) &&
(chip->chg_done || get_batt_capacity(chip) == 100)
&& qpnp_chg_is_boost_en_set(chip) == 0) {
return POWER_SUPPLY_STATUS_FULL;
}
return POWER_SUPPLY_STATUS_DISCHARGING;
}
static int
get_prop_current_now(struct qpnp_chg_chip *chip)
{
union power_supply_propval ret = {0,};
if (chip->bms_psy) {
chip->bms_psy->get_property(chip->bms_psy,
POWER_SUPPLY_PROP_CURRENT_NOW, &ret);
return ret.intval;
} else {
pr_debug("No BMS supply registered return 0\n");
}
return 0;
}
static int
get_prop_full_design(struct qpnp_chg_chip *chip)
{
union power_supply_propval ret = {0,};
if (chip->bms_psy) {
chip->bms_psy->get_property(chip->bms_psy,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, &ret);
return ret.intval;
} else {
pr_debug("No BMS supply registered return 0\n");
}
return 0;
}
static int
get_prop_charge_full(struct qpnp_chg_chip *chip)
{
union power_supply_propval ret = {0,};
if (chip->bms_psy) {
chip->bms_psy->get_property(chip->bms_psy,
POWER_SUPPLY_PROP_CHARGE_FULL, &ret);
return ret.intval;
} else {
pr_debug("No BMS supply registered return 0\n");
}
return 0;
}
/*
 * Report the battery state of charge in percent, additionally resuming
 * charging when the SOC has drained to the configured resume limit while
 * a charger is attached and nothing (thermal state, user disable, an
 * in-flight resume) forbids it.
 *
 * Fix: the "Battery 0, CHG absent" warning tested
 * !qpnp_chg_is_usb_chg_plugged_in() twice; per the message it must check
 * that BOTH USB and DC chargers are absent — the second test now checks
 * the DC path.
 */
static int
get_prop_capacity(struct qpnp_chg_chip *chip)
{
	union power_supply_propval ret = {0,};
	int battery_status, bms_status, soc, charger_in;

	if (chip->fake_battery_soc >= 0)
		return chip->fake_battery_soc;

	if (chip->use_default_batt_values || !get_prop_batt_present(chip))
		return DEFAULT_CAPACITY;

	if (chip->bms_psy) {
		chip->bms_psy->get_property(chip->bms_psy,
				POWER_SUPPLY_PROP_CAPACITY, &ret);
		soc = ret.intval;
		battery_status = get_prop_batt_status(chip);
		chip->bms_psy->get_property(chip->bms_psy,
				POWER_SUPPLY_PROP_STATUS, &ret);
		bms_status = ret.intval;
		charger_in = qpnp_chg_is_usb_chg_plugged_in(chip) ||
			qpnp_chg_is_dc_chg_plugged_in(chip);

		/* Restart charging once SOC drains to the resume limit. */
		if (battery_status != POWER_SUPPLY_STATUS_CHARGING
				&& bms_status != POWER_SUPPLY_STATUS_CHARGING
				&& charger_in
				&& !chip->bat_is_cool
				&& !chip->bat_is_warm
				&& !chip->resuming_charging
				&& !chip->charging_disabled
				&& chip->soc_resume_limit
				&& soc <= chip->soc_resume_limit) {
			pr_debug("resuming charging at %d%% soc\n", soc);
			chip->resuming_charging = true;
			qpnp_chg_irq_wake_enable(&chip->chg_fastchg);
			qpnp_chg_set_appropriate_vbatdet(chip);
			qpnp_chg_charge_en(chip, !chip->charging_disabled);
		}
		if (soc == 0) {
			if (!qpnp_chg_is_usb_chg_plugged_in(chip)
				&& !qpnp_chg_is_dc_chg_plugged_in(chip))
				pr_warn_ratelimited("Battery 0, CHG absent\n");
		}
		return soc;
	} else {
		pr_debug("No BMS supply registered return 50\n");
	}

	/* return default capacity to avoid userspace
	 * from shutting down unecessarily */
	return DEFAULT_CAPACITY;
}
#define DEFAULT_TEMP 250
#define MAX_TOLERABLE_BATT_TEMP_DDC 680
/*
 * Read the battery temperature (decidegC) through the VADC thermistor
 * channel.
 *
 * Returns DEFAULT_TEMP in default-values mode or with no battery
 * present, and 0 when the VADC read fails.
 */
static int
get_prop_batt_temp(struct qpnp_chg_chip *chip)
{
	int rc = 0;
	struct qpnp_vadc_result results;

	if (chip->use_default_batt_values || !get_prop_batt_present(chip))
		return DEFAULT_TEMP;

	rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM, &results);
	if (rc) {
		pr_debug("Unable to read batt temperature rc=%d\n", rc);
		return 0;
	}
	pr_debug("get_bat_temp %d, %lld\n",
		results.adc_code, results.physical);

	return (int)results.physical;
}
static int get_prop_cycle_count(struct qpnp_chg_chip *chip)
{
union power_supply_propval ret = {0,};
if (chip->bms_psy)
chip->bms_psy->get_property(chip->bms_psy,
POWER_SUPPLY_PROP_CYCLE_COUNT, &ret);
return ret.intval;
}
/*
 * Report whether the VCHG (input voltage regulation) loop is active.
 *
 * Returns 1/0 from the buck's real-time status, or the negative read
 * error. NOTE(review): the negative rc is surfaced directly through the
 * INPUT_VOLTAGE_REGULATION property — confirm callers tolerate it.
 */
static int get_prop_vchg_loop(struct qpnp_chg_chip *chip)
{
	u8 buck_sts;
	int rc;

	rc = qpnp_chg_read(chip, &buck_sts, INT_RT_STS(chip->buck_base), 1);

	if (rc) {
		pr_err("spmi read failed: addr=%03X, rc=%d\n",
				INT_RT_STS(chip->buck_base), rc);
		return rc;
	}
	pr_debug("buck usb sts 0x%x\n", buck_sts);

	return (buck_sts & VCHG_LOOP_IRQ) ? 1 : 0;
}
static int get_prop_online(struct qpnp_chg_chip *chip)
{
return qpnp_chg_is_batfet_closed(chip);
}
#define USB_SUSPEND_UA 2000
/*
 * external_power_changed callback for the battery supply.
 *
 * Re-evaluates the USB input current limit whenever the USB supply
 * changes. Honors (in order): the 2 mA USB-suspend request, DC priority,
 * the wall-charger threshold with external-OVP variants, and otherwise
 * the enumerated CURRENT_MAX. Also kicks the power-stage workaround when
 * a wall charger appears. Skips all of it when CURRENT_MAX is unchanged.
 */
static void
qpnp_batt_external_power_changed(struct power_supply *psy)
{
	struct qpnp_chg_chip *chip = container_of(psy, struct qpnp_chg_chip,
								batt_psy);
	union power_supply_propval ret = {0,};

	/* BMS may register after us; pick it up lazily. */
	if (!chip->bms_psy)
		chip->bms_psy = power_supply_get_by_name("bms");

	chip->usb_psy->get_property(chip->usb_psy,
			  POWER_SUPPLY_PROP_ONLINE, &ret);

	/* Only honour requests while USB is present */
	if (qpnp_chg_is_usb_chg_plugged_in(chip)) {
		chip->usb_psy->get_property(chip->usb_psy,
			  POWER_SUPPLY_PROP_CURRENT_MAX, &ret);

		/* No change in the requested limit: nothing to do. */
		if (chip->prev_usb_max_ma == ret.intval)
			goto skip_set_iusb_max;

		chip->prev_usb_max_ma = ret.intval;

		/* <= 2000 uA means the host put the port in suspend. */
		if (ret.intval <= USB_SUSPEND_UA &&
			!chip->use_default_batt_values &&
			get_prop_batt_present(chip)) {
			if (ret.intval == USB_SUSPEND_UA)
				qpnp_chg_usb_suspend_enable(chip, 1);
			qpnp_chg_iusbmax_set(chip, QPNP_CHG_I_MAX_MIN_100);
		} else {
			qpnp_chg_usb_suspend_enable(chip, 0);
			if (qpnp_is_dc_higher_prio(chip)
				&& qpnp_chg_is_dc_chg_plugged_in(chip)) {
					pr_debug("dc has higher priority\n");
					qpnp_chg_iusbmax_set(chip,
							QPNP_CHG_I_MAX_MIN_100);
			} else if (((ret.intval / 1000) > USB_WALL_THRESHOLD_MA)
					&& (charger_monitor ||
					!chip->charger_monitor_checked)) {
				/* Wall charger: pick limit per OVP variant. */
				if (!qpnp_is_dc_higher_prio(chip))
					qpnp_chg_idcmax_set(chip,
							QPNP_CHG_I_MAX_MIN_100);
				if (unlikely(ext_ovp_present)) {
					qpnp_chg_iusbmax_set(chip,
							OVP_USB_WALL_TRSH_MA);
				} else if (unlikely(
						ext_ovp_isns_present)) {
					qpnp_chg_iusb_trim_set(chip,
						chip->usb_trim_default);
					qpnp_chg_iusbmax_set(chip,
						IOVP_USB_WALL_TRSH_MA);
				} else {
					qpnp_chg_iusbmax_set(chip,
						USB_WALL_THRESHOLD_MA);
				}
			} else {
				qpnp_chg_iusbmax_set(chip, ret.intval / 1000);
			}

			if ((chip->flags & POWER_STAGE_WA)
			&& ((ret.intval / 1000) > USB_WALL_THRESHOLD_MA)
			&& !chip->power_stage_workaround_running
			&& chip->power_stage_workaround_enable) {
				chip->power_stage_workaround_running = true;
				pr_debug("usb wall chg inserted starting power stage workaround charger_monitor = %d\n",
						charger_monitor);
				schedule_work(&chip->reduce_power_stage_work);
			}
		}
	}

skip_set_iusb_max:
	pr_debug("end of power supply changed\n");
	pr_debug("psy changed batt_psy\n");
	power_supply_changed(&chip->batt_psy);
}
/*
 * get_property callback for the battery supply.
 *
 * Pure dispatch: each property maps to one of the get_prop_* helpers, a
 * cached chip field, or a hardware register getter. Voltage/current
 * values are converted mV/mA -> uV/uA where noted. Unknown properties
 * return -EINVAL.
 */
static int
qpnp_batt_power_get_property(struct power_supply *psy,
				       enum power_supply_property psp,
				       union power_supply_propval *val)
{
	struct qpnp_chg_chip *chip = container_of(psy, struct qpnp_chg_chip,
								batt_psy);

	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		val->intval = get_prop_batt_status(chip);
		break;
	case POWER_SUPPLY_PROP_CHARGE_TYPE:
		val->intval = get_prop_charge_type(chip);
		break;
	case POWER_SUPPLY_PROP_HEALTH:
		val->intval = get_prop_batt_health(chip);
		break;
	case POWER_SUPPLY_PROP_PRESENT:
		val->intval = get_prop_batt_present(chip);
		break;
	case POWER_SUPPLY_PROP_TECHNOLOGY:
		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
		/* mV -> uV */
		val->intval = chip->max_voltage_mv * 1000;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
		val->intval = chip->min_voltage_mv * 1000;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		val->intval = get_prop_battery_voltage_now(chip);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
		/* OCV sampled at the last battery insertion. */
		val->intval = chip->insertion_ocv_uv;
		break;
	case POWER_SUPPLY_PROP_TEMP:
		val->intval = get_prop_batt_temp(chip);
		break;
	case POWER_SUPPLY_PROP_COOL_TEMP:
		val->intval = chip->cool_bat_decidegc;
		break;
	case POWER_SUPPLY_PROP_WARM_TEMP:
		val->intval = chip->warm_bat_decidegc;
		break;
	case POWER_SUPPLY_PROP_CAPACITY:
		val->intval = get_prop_capacity(chip);
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		val->intval = get_prop_current_now(chip);
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
		val->intval = get_prop_full_design(chip);
		break;
	case POWER_SUPPLY_PROP_CHARGE_FULL:
		val->intval = get_prop_charge_full(chip);
		break;
	case POWER_SUPPLY_PROP_CHARGING_ENABLED:
		val->intval = !(chip->charging_disabled);
		break;
	case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
		val->intval = chip->therm_lvl_sel;
		break;
	case POWER_SUPPLY_PROP_CYCLE_COUNT:
		val->intval = get_prop_cycle_count(chip);
		break;
	case POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION:
		val->intval = get_prop_vchg_loop(chip);
		break;
	case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
		/* mA -> uA */
		val->intval = qpnp_chg_usb_iusbmax_get(chip) * 1000;
		break;
	case POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM:
		val->intval = qpnp_chg_iusb_trim_get(chip);
		break;
	case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
		val->intval = chip->aicl_settled;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
		/* mV -> uV */
		val->intval = qpnp_chg_vinmin_get(chip) * 1000;
		break;
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = get_prop_online(chip);
		break;
	case POWER_SUPPLY_PROP_VCHG_LOOP_DBC_BYPASS:
		val->intval = qpnp_chg_vchg_loop_debouncer_setting_get(chip);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
#define BTC_CONFIG_ENABLED BIT(7)
#define BTC_COLD BIT(1)
#define BTC_HOT BIT(0)
/*
 * Program the battery temperature comparator (BTC) control register.
 *
 * Builds up the config and write mask from the DT-provided hot/cold
 * thermistor thresholds; only recognized threshold values are applied.
 * When btc_disabled is set the CONFIG_ENABLED bit is included in the
 * mask with a zero value, disabling the comparator. No-op when the
 * bat_if peripheral is absent.
 */
static int
qpnp_chg_bat_if_configure_btc(struct qpnp_chg_chip *chip)
{
	u8 btc_cfg = 0, mask = 0;

	/* Do nothing if battery peripheral not present */
	if (!chip->bat_if_base)
		return 0;

	if ((chip->hot_batt_p == HOT_THD_25_PCT)
			|| (chip->hot_batt_p == HOT_THD_35_PCT)) {
		btc_cfg |= btc_value[chip->hot_batt_p];
		mask |= BTC_HOT;
	}

	if ((chip->cold_batt_p == COLD_THD_70_PCT) ||
			(chip->cold_batt_p == COLD_THD_80_PCT)) {
		btc_cfg |= btc_value[chip->cold_batt_p];
		mask |= BTC_COLD;
	}

	if (chip->btc_disabled)
		mask |= BTC_CONFIG_ENABLED;

	return qpnp_chg_masked_write(chip,
			chip->bat_if_base + BAT_IF_BTC_CTRL,
			mask, btc_cfg, 1);
}
#define QPNP_CHG_IBATSAFE_MIN_MA 100
#define QPNP_CHG_IBATSAFE_MAX_MA 3250
#define QPNP_CHG_I_STEP_MA 50
#define QPNP_CHG_I_MIN_MA 100
#define QPNP_CHG_I_MASK 0x3F
/*
 * Program the IBAT_SAFE limit register.
 *
 * @safe_current: limit in mA; must lie in
 *                [QPNP_CHG_IBATSAFE_MIN_MA, QPNP_CHG_IBATSAFE_MAX_MA].
 *
 * The register encodes the limit in 50 mA steps. Returns -EINVAL for an
 * out-of-range request, otherwise the write result.
 */
static int
qpnp_chg_ibatsafe_set(struct qpnp_chg_chip *chip, int safe_current)
{
	u8 reg_val;

	if (safe_current < QPNP_CHG_IBATSAFE_MIN_MA ||
			safe_current > QPNP_CHG_IBATSAFE_MAX_MA) {
		pr_err("bad mA=%d asked to set\n", safe_current);
		return -EINVAL;
	}

	reg_val = safe_current / QPNP_CHG_I_STEP_MA;
	return qpnp_chg_masked_write(chip,
			chip->chgr_base + CHGR_IBAT_SAFE,
			QPNP_CHG_I_MASK, reg_val, 1);
}
#define QPNP_CHG_ITERM_MIN_MA 100
#define QPNP_CHG_ITERM_MAX_MA 250
#define QPNP_CHG_ITERM_STEP_MA 50
#define QPNP_CHG_ITERM_MASK 0x03
/*
 * Program the charge termination current register.
 *
 * @term_current: termination current in mA; must lie in
 *                [QPNP_CHG_ITERM_MIN_MA, QPNP_CHG_ITERM_MAX_MA].
 *
 * Encoded as (mA - 100) / 50. Returns -EINVAL for an out-of-range
 * request, otherwise the write result.
 */
static int
qpnp_chg_ibatterm_set(struct qpnp_chg_chip *chip, int term_current)
{
	u8 reg_val;

	if (term_current < QPNP_CHG_ITERM_MIN_MA ||
			term_current > QPNP_CHG_ITERM_MAX_MA) {
		pr_err("bad mA=%d asked to set\n", term_current);
		return -EINVAL;
	}

	reg_val = (term_current - QPNP_CHG_ITERM_MIN_MA)
				/ QPNP_CHG_ITERM_STEP_MA;
	return qpnp_chg_masked_write(chip,
			chip->chgr_base + CHGR_IBAT_TERM_CHGR,
			QPNP_CHG_ITERM_MASK, reg_val, 1);
}
#define QPNP_CHG_IBATMAX_MIN 50
#define QPNP_CHG_IBATMAX_MAX 3250
/*
 * Program the IBAT_MAX (fast-charge current) register.
 *
 * @chg_current: limit in mA; must lie in
 *               [QPNP_CHG_IBATMAX_MIN, QPNP_CHG_IBATMAX_MAX].
 *
 * Encoded in 50 mA steps. Returns -EINVAL for an out-of-range request,
 * otherwise the write result.
 */
static int
qpnp_chg_ibatmax_set(struct qpnp_chg_chip *chip, int chg_current)
{
	u8 reg_val;

	if (chg_current < QPNP_CHG_IBATMAX_MIN ||
			chg_current > QPNP_CHG_IBATMAX_MAX) {
		pr_err("bad mA=%d asked to set\n", chg_current);
		return -EINVAL;
	}

	reg_val = chg_current / QPNP_CHG_I_STEP_MA;
	return qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_IBAT_MAX,
			QPNP_CHG_I_MASK, reg_val, 1);
}
/*
 * Read back the programmed IBAT_MAX limit.
 *
 * @chg_current: out parameter, set to the limit in mA (0 on failure).
 *
 * Returns 0 on success or the negative read error.
 */
static int
qpnp_chg_ibatmax_get(struct qpnp_chg_chip *chip, int *chg_current)
{
	u8 reg_val;
	int rc;

	*chg_current = 0;
	rc = qpnp_chg_read(chip, &reg_val, chip->chgr_base + CHGR_IBAT_MAX, 1);
	if (rc) {
		pr_err("failed read ibat_max rc=%d\n", rc);
		return rc;
	}

	/* Register holds the limit in 50 mA steps. */
	*chg_current = ((reg_val & QPNP_CHG_I_MASK) * QPNP_CHG_I_STEP_MA);
	return 0;
}
#define QPNP_CHG_TCHG_MASK 0x7F
#define QPNP_CHG_TCHG_EN_MASK 0x80
#define QPNP_CHG_TCHG_MIN 4
#define QPNP_CHG_TCHG_MAX 512
#define QPNP_CHG_TCHG_STEP 4
/*
 * Program the maximum total charge time (safety timer).
 *
 * @minutes: timeout in [QPNP_CHG_TCHG_MIN, QPNP_CHG_TCHG_MAX], encoded
 *           in 4-minute steps.
 *
 * Sequence matters: the timer enable bit is cleared before the new value
 * is written, then re-set, so the running timer restarts cleanly with
 * the new period.
 *
 * Returns 0 on success, -EINVAL for a bad range, or a write error.
 */
static int qpnp_chg_tchg_max_set(struct qpnp_chg_chip *chip, int minutes)
{
	u8 temp;
	int rc;

	if (minutes < QPNP_CHG_TCHG_MIN || minutes > QPNP_CHG_TCHG_MAX) {
		pr_err("bad max minutes =%d asked to set\n", minutes);
		return -EINVAL;
	}

	/* Disable the timer before changing its period. */
	rc = qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_TCHG_MAX_EN,
			QPNP_CHG_TCHG_EN_MASK, 0, 1);
	if (rc) {
		pr_err("failed write tchg_max_en rc=%d\n", rc);
		return rc;
	}

	/* Encode: value = minutes / 4 - 1. */
	temp = minutes / QPNP_CHG_TCHG_STEP - 1;

	rc = qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_TCHG_MAX,
			QPNP_CHG_TCHG_MASK, temp, 1);
	if (rc) {
		pr_err("failed write tchg_max_en rc=%d\n", rc);
		return rc;
	}

	/* Re-enable the timer with the new period. */
	rc = qpnp_chg_masked_write(chip, chip->chgr_base + CHGR_TCHG_MAX_EN,
			QPNP_CHG_TCHG_EN_MASK, QPNP_CHG_TCHG_EN_MASK, 1);
	if (rc) {
		pr_err("failed write tchg_max_en rc=%d\n", rc);
		return rc;
	}

	return 0;
}
/*
 * Apply the most restrictive battery charge-current limit.
 *
 * Starts from the configured maximum and clamps it by the cool/warm
 * thermal limits and the active thermal-mitigation level, then writes
 * the result into IBAT_MAX.
 */
static void
qpnp_chg_set_appropriate_battery_current(struct qpnp_chg_chip *chip)
{
	unsigned int limit_ma = chip->max_bat_chg_current;

	if (chip->bat_is_cool)
		limit_ma = min(limit_ma, chip->cool_bat_chg_ma);

	if (chip->bat_is_warm)
		limit_ma = min(limit_ma, chip->warm_bat_chg_ma);

	if (chip->therm_lvl_sel != 0 && chip->thermal_mitigation)
		limit_ma = min(limit_ma,
			chip->thermal_mitigation[chip->therm_lvl_sel]);

	pr_debug("setting %d mA\n", limit_ma);
	qpnp_chg_ibatmax_set(chip, limit_ma);
}
/*
 * Program the VDD_SAFE (maximum battery voltage) register.
 *
 * @voltage: target in mV; must lie in [QPNP_CHG_V_MIN_MV,
 *           QPNP_CHG_V_MAX_MV]. Encoded as (mV - min) / step.
 *
 * Returns -EINVAL for an out-of-range request, otherwise the write
 * result.
 */
static int
qpnp_chg_vddsafe_set(struct qpnp_chg_chip *chip, int voltage)
{
	u8 reg_val;

	if (voltage < QPNP_CHG_V_MIN_MV ||
			voltage > QPNP_CHG_V_MAX_MV) {
		pr_err("bad mV=%d asked to set\n", voltage);
		return -EINVAL;
	}

	reg_val = (voltage - QPNP_CHG_V_MIN_MV) / QPNP_CHG_V_STEP_MV;
	pr_debug("voltage=%d setting %02x\n", voltage, reg_val);
	return qpnp_chg_write(chip, &reg_val,
		chip->chgr_base + CHGR_VDD_SAFE, 1);
}
#define IBAT_TRIM_TGT_MA 500
#define IBAT_TRIM_OFFSET_MASK 0x7F
#define IBAT_TRIM_GOOD_BIT BIT(7)
#define IBAT_TRIM_LOW_LIM 20
#define IBAT_TRIM_HIGH_LIM 114
#define IBAT_TRIM_MEAN 64
/*
 * One calibration step for the IBAT trim offset.
 *
 * Measures the actual battery current via the IADC and, while the ibat
 * regulation loop is active, nudges the trim toward the 500 mA target
 * (one code per 20 mA of error, only when the error exceeds 50 mA).
 * An adjusted trim outside [IBAT_TRIM_LOW_LIM, IBAT_TRIM_HIGH_LIM] is
 * rejected. The final value is written with IBAT_TRIM_GOOD_BIT set so
 * later runs know calibration completed; SMBBP parts need a SEC_ACCESS
 * unlock before the trim write.
 */
static void
qpnp_chg_trim_ibat(struct qpnp_chg_chip *chip, u8 ibat_trim)
{
	int ibat_now_ma, ibat_diff_ma, rc;
	struct qpnp_iadc_result i_result;
	enum qpnp_iadc_channels iadc_channel;

	iadc_channel = chip->use_external_rsense ?
				EXTERNAL_RSENSE : INTERNAL_RSENSE;
	rc = qpnp_iadc_read(chip->iadc_dev, iadc_channel, &i_result);
	if (rc) {
		pr_err("Unable to read bat rc=%d\n", rc);
		return;
	}

	ibat_now_ma = i_result.result_ua / 1000;

	if (qpnp_chg_is_ibat_loop_active(chip)) {
		ibat_diff_ma = ibat_now_ma - IBAT_TRIM_TGT_MA;

		/* Adjust only for errors beyond +/-50 mA. */
		if (abs(ibat_diff_ma) > 50) {
			ibat_trim += (ibat_diff_ma / 20);
			ibat_trim &= IBAT_TRIM_OFFSET_MASK;
			/* reject new ibat_trim if it is outside limits */
			if (!is_within_range(ibat_trim, IBAT_TRIM_LOW_LIM,
						IBAT_TRIM_HIGH_LIM))
				return;
		}

		if (chip->type == SMBBP) {
			/* Unlock the secured trim register. */
			rc = qpnp_chg_masked_write(chip,
					chip->buck_base + SEC_ACCESS,
					0xFF, 0xA5, 1);
			if (rc) {
				pr_err("failed to write SEC_ACCESS: %d\n", rc);
				return;
			}
		}

		ibat_trim |= IBAT_TRIM_GOOD_BIT;
		rc = qpnp_chg_write(chip, &ibat_trim,
				chip->buck_base + BUCK_CTRL_TRIM3, 1);
		if (rc)
			pr_err("failed to set IBAT_TRIM rc=%d\n", rc);

		pr_debug("ibat_now=%dmA, itgt=%dmA, ibat_diff=%dmA, ibat_trim=%x\n",
					ibat_now_ma, IBAT_TRIM_TGT_MA,
					ibat_diff_ma, ibat_trim);
	} else {
		pr_debug("ibat loop not active - cannot calibrate ibat\n");
	}
}
/*
 * qpnp_chg_input_current_settled() - AICL-settled hook: run the one-time
 * IBAT trim calibration on SMBB/SMBBP parts.
 *
 * Returns 0 when there is nothing to do (USB removed, calibration
 * disabled or unsupported, trim already marked good, not in fast charge)
 * and a negative errno on register access failure.
 *
 * Sequence: sanity-check the current trim (centering it if bogus), save
 * IBATMAX, force it to IBAT_TRIM_TGT_MA, let the loop settle, trim up to
 * three times via qpnp_chg_trim_ibat(), then restore the saved IBATMAX.
 */
static int
qpnp_chg_input_current_settled(struct qpnp_chg_chip *chip)
{
int rc, ibat_max_ma;
u8 reg, chgr_sts, ibat_trim, i;
bool usb_present = qpnp_chg_is_usb_chg_plugged_in(chip);
if (!usb_present) {
pr_debug("Ignoring AICL settled, since USB is removed\n");
return 0;
}
chip->aicl_settled = true;
/*
* Perform the ibat calibration.
* This is for devices which have a IBAT_TRIM error
* which can show IBAT_MAX out of spec.
*/
if (!chip->ibat_calibration_enabled)
return 0;
if (chip->type != SMBB && chip->type != SMBBP)
return 0;
/* NOTE(review): "®" below is mojibake for "&reg" - restore before build */
rc = qpnp_chg_read(chip, ®,
chip->buck_base + BUCK_CTRL_TRIM3, 1);
if (rc) {
pr_err("failed to read BUCK_CTRL_TRIM3 rc=%d\n", rc);
return rc;
}
/* GOOD bit set means a previous boot/cycle already calibrated. */
if (reg & IBAT_TRIM_GOOD_BIT) {
pr_debug("IBAT_TRIM_GOOD bit already set. Quitting!\n");
return 0;
}
ibat_trim = reg & IBAT_TRIM_OFFSET_MASK;
/* An out-of-window trim is garbage - recenter before calibrating. */
if (!is_within_range(ibat_trim, IBAT_TRIM_LOW_LIM,
IBAT_TRIM_HIGH_LIM)) {
pr_debug("Improper ibat_trim value=%x setting to value=%x\n",
ibat_trim, IBAT_TRIM_MEAN);
ibat_trim = IBAT_TRIM_MEAN;
/* SMBBP needs a SEC_ACCESS unlock before touching the trim. */
if (chip->type == SMBBP) {
rc = qpnp_chg_masked_write(chip,
chip->buck_base + SEC_ACCESS,
0xFF, 0xA5, 1);
if (rc) {
pr_err("failed to write SEC_ACCESS: %d\n", rc);
return rc;
}
}
rc = qpnp_chg_masked_write(chip,
chip->buck_base + BUCK_CTRL_TRIM3,
IBAT_TRIM_OFFSET_MASK, ibat_trim, 1);
if (rc) {
pr_err("failed to set ibat_trim to %x rc=%d\n",
IBAT_TRIM_MEAN, rc);
return rc;
}
}
rc = qpnp_chg_read(chip, &chgr_sts,
INT_RT_STS(chip->chgr_base), 1);
if (rc) {
pr_err("failed to read interrupt sts rc=%d\n", rc);
return rc;
}
/* Calibration is only meaningful while fast-charging. */
if (!(chgr_sts & FAST_CHG_ON_IRQ)) {
pr_debug("Not in fastchg\n");
return rc;
}
/* save the ibat_max to restore it later */
rc = qpnp_chg_ibatmax_get(chip, &ibat_max_ma);
if (rc) {
pr_debug("failed to save ibatmax rc=%d\n", rc);
return rc;
}
rc = qpnp_chg_ibatmax_set(chip, IBAT_TRIM_TGT_MA);
if (rc) {
pr_err("failed to set ibatmax rc=%d\n", rc);
return rc;
}
for (i = 0; i < 3; i++) {
/*
* ibat settling delay - to make sure the BMS controller
* has sufficient time to sample ibat for the configured
* ibat_max
*/
msleep(20);
if (qpnp_chg_is_ibat_loop_active(chip))
qpnp_chg_trim_ibat(chip, ibat_trim);
else
pr_debug("ibat loop not active\n");
/* read the adjusted ibat_trim for further adjustments */
rc = qpnp_chg_read(chip, &ibat_trim,
chip->buck_base + BUCK_CTRL_TRIM3, 1);
if (rc) {
pr_err("failed to read BUCK_CTRL_TRIM3 rc=%d\n", rc);
break;
}
}
/* restore IBATMAX */
rc = qpnp_chg_ibatmax_set(chip, ibat_max_ma);
if (rc)
pr_err("failed to restore ibatmax rc=%d\n", rc);
return rc;
}
#define BOOST_MIN_UV 4200000
#define BOOST_MAX_UV 5500000
#define BOOST_STEP_UV 50000
#define BOOST_MIN 16
#define N_BOOST_V ((BOOST_MAX_UV - BOOST_MIN_UV) / BOOST_STEP_UV + 1)
static int
qpnp_boost_vset(struct qpnp_chg_chip *chip, int voltage)
{
u8 reg = 0;
if (voltage < BOOST_MIN_UV || voltage > BOOST_MAX_UV) {
pr_err("invalid voltage requested %d uV\n", voltage);
return -EINVAL;
}
reg = DIV_ROUND_UP(voltage - BOOST_MIN_UV, BOOST_STEP_UV) + BOOST_MIN;
pr_debug("voltage=%d setting %02x\n", voltage, reg);
return qpnp_chg_write(chip, ®, chip->boost_base + BOOST_VSET, 1);
}
/*
 * Read back the boost voltage in microvolts, or a negative errno if the
 * register cannot be read or holds an impossible (< BOOST_MIN) encoding.
 */
static int
qpnp_boost_vget_uv(struct qpnp_chg_chip *chip)
{
	u8 vset;
	int rc;

	rc = qpnp_chg_read(chip, &vset, chip->boost_base + BOOST_VSET, 1);
	if (rc) {
		pr_err("failed to read BOOST_VSET rc=%d\n", rc);
		return rc;
	}

	if (vset < BOOST_MIN) {
		pr_err("Invalid reading from 0x%x\n", vset);
		return -EINVAL;
	}

	return BOOST_MIN_UV + (vset - BOOST_MIN) * BOOST_STEP_UV;
}
/*
 * Apply a thermal mitigation level: the top level stops charging via the
 * buck, any lower level re-enables the buck and re-derives the charge
 * current for the new level.
 */
static void
qpnp_batt_system_temp_level_set(struct qpnp_chg_chip *chip, int lvl_sel)
{
	if (lvl_sel < 0 || lvl_sel >= chip->thermal_levels) {
		pr_err("Unsupported level selected %d\n", lvl_sel);
		return;
	}

	chip->therm_lvl_sel = lvl_sel;
	if (lvl_sel == chip->thermal_levels - 1) {
		/* disable charging if highest value selected */
		qpnp_chg_buck_control(chip, 0);
	} else {
		qpnp_chg_buck_control(chip, 1);
		qpnp_chg_set_appropriate_battery_current(chip);
	}
}
/*
 * Reprogram the SMBB/SMBBP charger overtemp threshold.  Used to raise it
 * to 150C while the flash (and/or torch on PM8x26) fires with the bharger
 * as the power source, and to restore the default afterwards.
 */
static int
qpnp_chg_temp_threshold_set(struct qpnp_chg_chip *chip, u8 value)
{
	int rc;

	rc = qpnp_chg_masked_write(chip,
			chip->chgr_base + CHGR_CHG_TEMP_THRESH,
			0xFF, value, 1);
	if (rc)
		pr_err("set CHG_TEMP_THRESH_Flash failed, value = %d, rc = %d\n",
				value, rc);

	return rc;
}
#define CHG_TEMP_THRESH_FOR_FLASH 0xFD
#define CHG_TEMP_THRESH_DEFAULT 0x94
/*
 * Enable hook for the flash-workaround "regulator": raise the charger
 * overtemp threshold on parts that need BOOST_FLASH_WA, then record the
 * enabled state.
 */
static int
qpnp_chg_regulator_flash_wa_enable(struct regulator_dev *rdev)
{
	struct qpnp_chg_chip *chip = rdev_get_drvdata(rdev);
	int rc;

	if (!(chip->flags & BOOST_FLASH_WA)) {
		chip->is_flash_wa_reg_enabled = true;
		return 0;
	}

	rc = qpnp_chg_temp_threshold_set(chip, CHG_TEMP_THRESH_FOR_FLASH);
	if (rc) {
		pr_err("set chg temp threshold failed rc = %d\n", rc);
		return rc;
	}

	chip->is_flash_wa_reg_enabled = true;
	return 0;
}
/*
 * Disable hook for the flash-workaround "regulator": restore the default
 * overtemp threshold where it was raised, then record the disabled state.
 */
static int
qpnp_chg_regulator_flash_wa_disable(struct regulator_dev *rdev)
{
	struct qpnp_chg_chip *chip = rdev_get_drvdata(rdev);
	int rc;

	if (!(chip->flags & BOOST_FLASH_WA)) {
		chip->is_flash_wa_reg_enabled = false;
		return 0;
	}

	rc = qpnp_chg_temp_threshold_set(chip, chip->chg_temp_thresh_default);
	if (rc) {
		pr_err("set chg temp threshold failed rc = %d\n", rc);
		return rc;
	}

	chip->is_flash_wa_reg_enabled = false;
	return 0;
}
/* Report the cached software state of the flash-workaround regulator. */
static int
qpnp_chg_regulator_flash_wa_is_enabled(struct regulator_dev *rdev)
{
	struct qpnp_chg_chip *wa_chip = rdev_get_drvdata(rdev);

	return wa_chip->is_flash_wa_reg_enabled;
}
/* OTG regulator operations */

/* Enabling the OTG "regulator" hands the USB connector over to host mode. */
static int
qpnp_chg_regulator_otg_enable(struct regulator_dev *rdev)
{
	struct qpnp_chg_chip *otg_chip = rdev_get_drvdata(rdev);

	return switch_usb_to_host_mode(otg_chip);
}
/* Disabling the OTG "regulator" returns the USB connector to charge mode. */
static int
qpnp_chg_regulator_otg_disable(struct regulator_dev *rdev)
{
	struct qpnp_chg_chip *otg_chip = rdev_get_drvdata(rdev);

	return switch_usb_to_charge_mode(otg_chip);
}
/* OTG state is read straight from the hardware enable bit. */
static int
qpnp_chg_regulator_otg_is_enabled(struct regulator_dev *rdev)
{
	struct qpnp_chg_chip *otg_chip = rdev_get_drvdata(rdev);

	return qpnp_chg_is_otg_en_set(otg_chip);
}
/*
 * qpnp_chg_regulator_boost_enable() - turn on the boost rail.
 *
 * With a USB charger attached on parts flagged BOOST_FLASH_WA, the USB
 * input is suspended first and the USB comparator overridden (SEC_ACCESS
 * unlock, then 0x2F to COMP_OVR1) so the boost can run while the charge
 * path is parked.  Finally BOOST_PWR_EN is set, and the battery supply is
 * notified if the pack was already full.
 */
static int
qpnp_chg_regulator_boost_enable(struct regulator_dev *rdev)
{
struct qpnp_chg_chip *chip = rdev_get_drvdata(rdev);
int usb_present = qpnp_chg_is_usb_chg_plugged_in(chip);
int rc;
if (usb_present && (chip->flags & BOOST_FLASH_WA)) {
/* Park the external OVP current sense while boosting. */
if (ext_ovp_isns_present && chip->ext_ovp_ic_gpio_enabled) {
pr_debug("EXT OVP IC ISNS disabled\n");
gpio_direction_output(chip->ext_ovp_isns_gpio, 0);
}
qpnp_chg_usb_suspend_enable(chip, 1);
/* SEC_ACCESS (0xA5) unlocks the following protected write. */
rc = qpnp_chg_masked_write(chip,
chip->usb_chgpth_base + SEC_ACCESS,
0xFF,
0xA5, 1);
if (rc) {
pr_err("failed to write SEC_ACCESS rc=%d\n", rc);
return rc;
}
rc = qpnp_chg_masked_write(chip,
chip->usb_chgpth_base + COMP_OVR1,
0xFF,
0x2F, 1);
if (rc) {
pr_err("failed to write COMP_OVR1 rc=%d\n", rc);
return rc;
}
}
rc = qpnp_chg_masked_write(chip,
chip->boost_base + BOOST_ENABLE_CONTROL,
BOOST_PWR_EN,
BOOST_PWR_EN, 1);
if (rc) {
pr_err("failed to enable boost rc = %d\n", rc);
return rc;
}
/*
* update battery status when charger is connected and state is full
*/
if (usb_present && (chip->chg_done
|| (get_batt_capacity(chip) == 100)
|| (get_prop_batt_status(chip) ==
POWER_SUPPLY_STATUS_FULL)))
power_supply_changed(&chip->batt_psy);
return rc;
}
/* Boost regulator operations */
#define ABOVE_VBAT_WEAK BIT(1)
/*
 * qpnp_chg_regulator_boost_disable() - turn off the boost rail and undo
 * the BOOST_FLASH_WA comparator overrides applied at enable time.
 *
 * Order matters: the boost is disabled first, then (on weak batteries)
 * the charger comparator is pulsed via COMP_OVR1 (0x20 then 0x00, with a
 * 2 ms settle), then the USB comparator override is cleared and USB
 * unsuspended, and finally charging resume / status notification and the
 * external OVP sense GPIO are handled.  Each protected write is preceded
 * by a SEC_ACCESS (0xA5) unlock.
 */
static int
qpnp_chg_regulator_boost_disable(struct regulator_dev *rdev)
{
struct qpnp_chg_chip *chip = rdev_get_drvdata(rdev);
int rc;
u8 vbat_sts;
rc = qpnp_chg_masked_write(chip,
chip->boost_base + BOOST_ENABLE_CONTROL,
BOOST_PWR_EN,
0, 1);
if (rc) {
pr_err("failed to disable boost rc=%d\n", rc);
return rc;
}
rc = qpnp_chg_read(chip, &vbat_sts,
chip->chgr_base + CHGR_VBAT_STATUS, 1);
if (rc) {
pr_err("failed to read bat sts rc=%d\n", rc);
return rc;
}
/* Weak battery: briefly override the charger comparator. */
if (!(vbat_sts & ABOVE_VBAT_WEAK) && (chip->flags & BOOST_FLASH_WA)) {
rc = qpnp_chg_masked_write(chip,
chip->chgr_base + SEC_ACCESS,
0xFF,
0xA5, 1);
if (rc) {
pr_err("failed to write SEC_ACCESS rc=%d\n", rc);
return rc;
}
rc = qpnp_chg_masked_write(chip,
chip->chgr_base + COMP_OVR1,
0xFF,
0x20, 1);
if (rc) {
pr_err("failed to write COMP_OVR1 rc=%d\n", rc);
return rc;
}
/* let the override take effect before clearing it */
usleep(2000);
rc = qpnp_chg_masked_write(chip,
chip->chgr_base + SEC_ACCESS,
0xFF,
0xA5, 1);
if (rc) {
pr_err("failed to write SEC_ACCESS rc=%d\n", rc);
return rc;
}
rc = qpnp_chg_masked_write(chip,
chip->chgr_base + COMP_OVR1,
0xFF,
0x00, 1);
if (rc) {
pr_err("failed to write COMP_OVR1 rc=%d\n", rc);
return rc;
}
}
/* Undo the USB-path override installed by boost_enable(). */
if (qpnp_chg_is_usb_chg_plugged_in(chip)
&& (chip->flags & BOOST_FLASH_WA)) {
rc = qpnp_chg_masked_write(chip,
chip->usb_chgpth_base + SEC_ACCESS,
0xFF,
0xA5, 1);
if (rc) {
pr_err("failed to write SEC_ACCESS rc=%d\n", rc);
return rc;
}
rc = qpnp_chg_masked_write(chip,
chip->usb_chgpth_base + COMP_OVR1,
0xFF,
0x00, 1);
if (rc) {
pr_err("failed to write COMP_OVR1 rc=%d\n", rc);
return rc;
}
usleep(1000);
qpnp_chg_usb_suspend_enable(chip, 0);
}
/*
* When a charger is connected,if state of charge is not full
* resumeing charging else update battery status
*/
if (qpnp_chg_is_usb_chg_plugged_in(chip)) {
if (get_batt_capacity(chip) < 100 || !chip->chg_done) {
chip->chg_done = false;
chip->resuming_charging = true;
qpnp_chg_set_appropriate_vbatdet(chip);
} else if (chip->chg_done) {
power_supply_changed(&chip->batt_psy);
}
}
if (ext_ovp_isns_present && chip->ext_ovp_ic_gpio_enabled) {
pr_debug("EXT OVP IC ISNS enable\n");
gpio_direction_output(chip->ext_ovp_isns_gpio, 1);
}
return rc;
}
/* Query the boost enable bit directly from hardware. */
static int
qpnp_chg_regulator_boost_is_enabled(struct regulator_dev *rdev)
{
	struct qpnp_chg_chip *boost_chip = rdev_get_drvdata(rdev);

	return qpnp_chg_is_boost_en_set(boost_chip);
}
/*
 * Regulator set_voltage callback: pick the lowest boost setpoint that
 * satisfies [min_uV, max_uV], report its selector, and program it.
 */
static int
qpnp_chg_regulator_boost_set_voltage(struct regulator_dev *rdev,
		int min_uV, int max_uV, unsigned *selector)
{
	struct qpnp_chg_chip *chip = rdev_get_drvdata(rdev);
	int target_uV = min_uV;

	/* Snap a too-low request up to the boost floor when permitted. */
	if (target_uV < BOOST_MIN_UV && max_uV >= BOOST_MIN_UV)
		target_uV = BOOST_MIN_UV;

	if (target_uV < BOOST_MIN_UV || target_uV > BOOST_MAX_UV) {
		pr_err("request %d uV is out of bounds\n", target_uV);
		return -EINVAL;
	}

	*selector = DIV_ROUND_UP(target_uV - BOOST_MIN_UV, BOOST_STEP_UV);
	/* Rounding up must not push the setpoint past the allowed maximum. */
	if (*selector * BOOST_STEP_UV + BOOST_MIN_UV > max_uV) {
		pr_err("no available setpoint [%d, %d] uV\n", min_uV, max_uV);
		return -EINVAL;
	}

	return qpnp_boost_vset(chip, target_uV);
}
/* Regulator get_voltage callback: read back the programmed boost uV. */
static int
qpnp_chg_regulator_boost_get_voltage(struct regulator_dev *rdev)
{
	struct qpnp_chg_chip *boost_chip = rdev_get_drvdata(rdev);

	return qpnp_boost_vget_uv(boost_chip);
}
/*
 * Regulator list_voltage callback: selectors map linearly onto
 * [BOOST_MIN_UV, BOOST_MAX_UV]; anything beyond the table yields 0.
 */
static int
qpnp_chg_regulator_boost_list_voltage(struct regulator_dev *rdev,
		unsigned selector)
{
	return selector < N_BOOST_V
		? BOOST_MIN_UV + selector * BOOST_STEP_UV : 0;
}
/* Callbacks for the flash overtemp-workaround pseudo-regulator. */
static struct regulator_ops qpnp_chg_flash_wa_reg_ops = {
.enable = qpnp_chg_regulator_flash_wa_enable,
.disable = qpnp_chg_regulator_flash_wa_disable,
.is_enabled = qpnp_chg_regulator_flash_wa_is_enabled,
};
/* Callbacks for the OTG (USB host mode) pseudo-regulator. */
static struct regulator_ops qpnp_chg_otg_reg_ops = {
.enable = qpnp_chg_regulator_otg_enable,
.disable = qpnp_chg_regulator_otg_disable,
.is_enabled = qpnp_chg_regulator_otg_is_enabled,
};
/* Callbacks for the 5V boost regulator, including voltage selection. */
static struct regulator_ops qpnp_chg_boost_reg_ops = {
.enable = qpnp_chg_regulator_boost_enable,
.disable = qpnp_chg_regulator_boost_disable,
.is_enabled = qpnp_chg_regulator_boost_is_enabled,
.set_voltage = qpnp_chg_regulator_boost_set_voltage,
.get_voltage = qpnp_chg_regulator_boost_get_voltage,
.list_voltage = qpnp_chg_regulator_boost_list_voltage,
};
static int
qpnp_chg_bat_if_batfet_reg_enabled(struct qpnp_chg_chip *chip)
{
int rc = 0;
u8 reg = 0;
if (!chip->bat_if_base)
return rc;
if (chip->type == SMBB)
rc = qpnp_chg_read(chip, ®,
chip->bat_if_base + CHGR_BAT_IF_SPARE, 1);
else
rc = qpnp_chg_read(chip, ®,
chip->bat_if_base + CHGR_BAT_IF_BATFET_CTRL4, 1);
if (rc) {
pr_err("failed to read batt_if rc=%d\n", rc);
return rc;
}
if ((reg & BATFET_LPM_MASK) == BATFET_NO_LPM)
return 1;
return 0;
}
/*
 * BATFET regulator enable: take the FET out of LPM (skipping the write
 * if hardware already shows it enabled) and latch the external request.
 */
static int
qpnp_chg_regulator_batfet_enable(struct regulator_dev *rdev)
{
	struct qpnp_chg_chip *chip = rdev_get_drvdata(rdev);
	int rc = 0;

	mutex_lock(&chip->batfet_vreg_lock);
	if (!qpnp_chg_bat_if_batfet_reg_enabled(chip)) {
		/* hardware not yet out of LPM - do the write now */
		rc = qpnp_chg_regulator_batfet_set(chip, 1);
		if (rc)
			pr_err("failed to write to batt_if rc=%d\n", rc);
	}
	chip->batfet_ext_en = true;
	mutex_unlock(&chip->batfet_vreg_lock);

	return rc;
}
/*
 * BATFET regulator disable: drop the FET into LPM only when no charger
 * is attached, and clear the external request either way.
 */
static int
qpnp_chg_regulator_batfet_disable(struct regulator_dev *rdev)
{
	struct qpnp_chg_chip *chip = rdev_get_drvdata(rdev);
	int rc = 0;

	mutex_lock(&chip->batfet_vreg_lock);
	if (!qpnp_chg_is_usb_chg_plugged_in(chip) &&
			!qpnp_chg_is_dc_chg_plugged_in(chip)) {
		/* no charger present, safe to enter LPM */
		rc = qpnp_chg_regulator_batfet_set(chip, 0);
		if (rc)
			pr_err("failed to write to batt_if rc=%d\n", rc);
	}
	chip->batfet_ext_en = false;
	mutex_unlock(&chip->batfet_vreg_lock);

	return rc;
}
/* Report the externally requested BATFET regulator state. */
static int
qpnp_chg_regulator_batfet_is_enabled(struct regulator_dev *rdev)
{
	struct qpnp_chg_chip *batfet_chip = rdev_get_drvdata(rdev);

	return batfet_chip->batfet_ext_en;
}
/* Callbacks for the BATFET low-power-mode pseudo-regulator. */
static struct regulator_ops qpnp_chg_batfet_vreg_ops = {
.enable = qpnp_chg_regulator_batfet_enable,
.disable = qpnp_chg_regulator_batfet_disable,
.is_enabled = qpnp_chg_regulator_batfet_is_enabled,
};
#define MIN_DELTA_MV_TO_INCREASE_VDD_MAX 8
#define MAX_DELTA_VDD_MAX_MV 80
#define VDD_MAX_CENTER_OFFSET 4
/*
 * Nudge the programmed VDD_MAX so the measured vbat lands on the intended
 * float voltage, compensating for board/sense offsets.  The accumulated
 * correction is clamped to +/-MAX_DELTA_VDD_MAX_MV.
 */
static void
qpnp_chg_adjust_vddmax(struct qpnp_chg_chip *chip, int vbat_mv)
{
	int err_mv, rounded_mv, sign;

	err_mv = chip->max_voltage_mv - VDD_MAX_CENTER_OFFSET - vbat_mv;
	if (err_mv > 0 && err_mv < MIN_DELTA_MV_TO_INCREASE_VDD_MAX) {
		pr_debug("vbat is not low enough to increase vdd\n");
		return;
	}

	/* round the error to the nearest trim step (sign-aware) */
	sign = err_mv > 0 ? 1 : -1;
	rounded_mv = ((err_mv + sign * QPNP_CHG_BUCK_TRIM1_STEP / 2)
			/ QPNP_CHG_BUCK_TRIM1_STEP) * QPNP_CHG_BUCK_TRIM1_STEP;

	pr_debug("max_voltage = %d, vbat_mv = %d, delta_mv = %d, closest = %d\n",
			chip->max_voltage_mv, vbat_mv, err_mv, rounded_mv);

	chip->delta_vddmax_mv = clamp(chip->delta_vddmax_mv + rounded_mv,
			-MAX_DELTA_VDD_MAX_MV, MAX_DELTA_VDD_MAX_MV);
	pr_debug("using delta_vddmax_mv = %d\n", chip->delta_vddmax_mv);
	qpnp_chg_set_appropriate_vddmax(chip);
}
#define CONSECUTIVE_COUNT 3
#define VBATDET_MAX_ERR_MV 50
/*
 * qpnp_eoc_work() - periodic end-of-charge (EOC) detection.
 *
 * Re-armed every EOC_CHECK_PERIOD_MS while a charger is present.
 * Declares end of charge after CONSECUTIVE_COUNT consecutive samples in
 * CV mode with battery current below the termination threshold.  The
 * static counters debounce across reschedules and are reset whenever the
 * work stops.
 *
 * Fix: the register-read error paths previously returned straight after
 * pm_stay_awake(), leaking the held wakeup source and leaving the static
 * counters stale; they now exit through stop_eoc, which resets the
 * counters and calls pm_relax().
 */
static void
qpnp_eoc_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct qpnp_chg_chip *chip = container_of(dwork,
			struct qpnp_chg_chip, eoc_work);
	/* debounce counters persisting across reschedules */
	static int count;
	static int vbat_low_count;
	int ibat_ma, vbat_mv, rc = 0;
	u8 batt_sts = 0, buck_sts = 0, chg_sts = 0;
	bool vbat_lower_than_vbatdet;

	pm_stay_awake(chip->dev);
	qpnp_chg_charge_en(chip, !chip->charging_disabled);

	rc = qpnp_chg_read(chip, &batt_sts, INT_RT_STS(chip->bat_if_base), 1);
	if (rc) {
		pr_err("failed to read batt_if rc=%d\n", rc);
		goto stop_eoc;
	}

	rc = qpnp_chg_read(chip, &buck_sts, INT_RT_STS(chip->buck_base), 1);
	if (rc) {
		pr_err("failed to read buck rc=%d\n", rc);
		goto stop_eoc;
	}

	rc = qpnp_chg_read(chip, &chg_sts, INT_RT_STS(chip->chgr_base), 1);
	if (rc) {
		pr_err("failed to read chg_sts rc=%d\n", rc);
		goto stop_eoc;
	}

	pr_debug("chgr: 0x%x, bat_if: 0x%x, buck: 0x%x\n",
			chg_sts, batt_sts, buck_sts);

	if (!qpnp_chg_is_usb_chg_plugged_in(chip) &&
			!qpnp_chg_is_dc_chg_plugged_in(chip)) {
		pr_debug("no chg connected, stopping\n");
		goto stop_eoc;
	}

	if ((batt_sts & BAT_FET_ON_IRQ) && (chg_sts & FAST_CHG_ON_IRQ
					|| chg_sts & TRKL_CHG_ON_IRQ)) {
		ibat_ma = get_prop_current_now(chip) / 1000;
		vbat_mv = get_prop_battery_voltage_now(chip) / 1000;

		pr_debug("ibat_ma = %d vbat_mv = %d term_current_ma = %d\n",
				ibat_ma, vbat_mv, chip->term_current);

		/* vbatdet may trip before vbat is really near resume level */
		vbat_lower_than_vbatdet = !(chg_sts & VBAT_DET_LOW_IRQ);
		if (vbat_lower_than_vbatdet && vbat_mv <
				(chip->max_voltage_mv - chip->resume_delta_mv
				 - chip->vbatdet_max_err_mv)) {
			vbat_low_count++;
			pr_debug("woke up too early vbat_mv = %d, max_mv = %d, resume_mv = %d tolerance_mv = %d low_count = %d\n",
					vbat_mv, chip->max_voltage_mv,
					chip->resume_delta_mv,
					chip->vbatdet_max_err_mv,
					vbat_low_count);
			if (vbat_low_count >= CONSECUTIVE_COUNT) {
				pr_debug("woke up too early stopping\n");
				qpnp_chg_enable_irq(&chip->chg_vbatdet_lo);
				goto stop_eoc;
			} else {
				goto check_again_later;
			}
		} else {
			vbat_low_count = 0;
		}

		if (buck_sts & VDD_LOOP_IRQ)
			qpnp_chg_adjust_vddmax(chip, vbat_mv);

		if (!(buck_sts & VDD_LOOP_IRQ)) {
			pr_debug("Not in CV\n");
			count = 0;
		} else if ((ibat_ma * -1) > chip->term_current) {
			pr_debug("Not at EOC, battery current too high\n");
			count = 0;
		} else if (ibat_ma > 0) {
			pr_debug("Charging but system demand increased\n");
			count = 0;
		} else {
			if (count == CONSECUTIVE_COUNT) {
				if (!chip->bat_is_cool && !chip->bat_is_warm) {
					pr_info("End of Charging\n");
					chip->chg_done = true;
				} else {
					pr_info("stop charging: battery is %s, vddmax = %d reached\n",
						chip->bat_is_cool
							? "cool" : "warm",
						qpnp_chg_vddmax_get(chip));
				}
				qpnp_chg_charge_en(chip, 0);
				/* pause 2 s before re-enabling charging */
				msleep(2000);
				qpnp_chg_charge_en(chip,
						!chip->charging_disabled);
				pr_debug("psy changed batt_psy\n");
				power_supply_changed(&chip->batt_psy);
				qpnp_chg_enable_irq(&chip->chg_vbatdet_lo);
				goto stop_eoc;
			} else {
				count += 1;
				pr_debug("EOC count = %d\n", count);
			}
		}
	} else {
		pr_debug("not charging\n");
		goto stop_eoc;
	}

check_again_later:
	schedule_delayed_work(&chip->eoc_work,
		msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
	return;

stop_eoc:
	vbat_low_count = 0;
	count = 0;
	pm_relax(chip->dev);
}
/* BATT_THERM voltage above which the pack is considered genuinely hot. */
#define BATT_HOT_MV 630
/*
 * qpnp_chg_btc_hot_irq_debounce_work() - debounce spurious BTC HOT trips.
 *
 * If the HOT threshold is at 35%% but the measured battery-therm voltage
 * says the pack is not actually hot, temporarily drop the threshold to
 * 25%% and bypass the BAT_TOO_HOT comparator for 2 s so charging resumes,
 * then restore the 35%% threshold.  Holds a wakeup source for the whole
 * sequence; all exits funnel through the relax label.
 */
static void
qpnp_chg_btc_hot_irq_debounce_work(struct work_struct *work)
{
struct qpnp_chg_chip *chip = container_of(work,
struct qpnp_chg_chip,
btc_hot_irq_debounce_work);
struct qpnp_vadc_result results;
bool hot_thd_35_pct = false;
int rc, bat_therm_volt;
u8 reg;
pm_stay_awake(chip->dev);
/* Get current BTC HOT_THD settings */
/* NOTE(review): "®" below is mojibake for "&reg" - restore before build */
rc = qpnp_chg_read(chip, ®,
chip->bat_if_base + BAT_IF_BTC_CTRL, 1);
if (rc) {
pr_err("failed to read BTC_CTRL rc=%d\n", rc);
goto relax;
}
hot_thd_35_pct = (reg & BTC_HOT) ? true : false;
/* Read battery temperature by using VADC */
rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX1_BATT_THERM,
&results);
if (rc) {
pr_err("Unable to read batt temperature rc=%d\n",
rc);
goto relax;
}
bat_therm_volt = results.measurement;
pr_debug("hot_thd_35_pct = %d, bat_therm_volt = %dmV\n",
hot_thd_35_pct, bat_therm_volt);
/* higher therm voltage here means the pack is actually cooler */
if (hot_thd_35_pct && (bat_therm_volt > BATT_HOT_MV)) {
rc = qpnp_chg_masked_write(chip,
chip->bat_if_base + BAT_IF_BTC_CTRL,
BTC_HOT, btc_value[HOT_THD_25_PCT], 1);
if (rc) {
pr_err("failed to change HOT_THD to 25%% rc=%d\n",
rc);
goto relax;
}
bypass_btc_hot_comparator(chip, 1);
/*
* Wait for 2s to take charging back. Clear
* override BAT_TOO_HOT comparator, and restore
* HOT_THD to 35%.
*/
msleep(2000);
bypass_btc_hot_comparator(chip, 0);
rc = qpnp_chg_masked_write(chip,
chip->bat_if_base + BAT_IF_BTC_CTRL,
BTC_HOT, btc_value[HOT_THD_35_PCT], 1);
if (rc) {
pr_err("failed to change HOT_THD to 35%% rc=%d\n",
rc);
goto relax;
}
} else {
pr_debug("BAT temp status is not HOT\n");
goto relax;
}
relax:
pm_relax(chip->dev);
return;
}
/*
 * qpnp_chg_insertion_ocv_work() - capture the battery's open-circuit
 * voltage right after insertion, then re-enable charging.
 *
 * Records vbat as the insertion OCV, logs the batfet status and charge
 * control registers for debugging (read failures are logged but not
 * fatal), restores the charge-enable state, and notifies the
 * power-supply framework.
 */
static void
qpnp_chg_insertion_ocv_work(struct work_struct *work)
{
	struct qpnp_chg_chip *chip = container_of(work,
			struct qpnp_chg_chip, insertion_ocv_work);
	u8 bat_if_sts = 0, charge_en = 0;
	int rc;

	chip->insertion_ocv_uv = get_prop_battery_voltage_now(chip);

	rc = qpnp_chg_read(chip, &bat_if_sts, INT_RT_STS(chip->bat_if_base), 1);
	if (rc)
		pr_err("failed to read bat_if sts %d\n", rc);

	rc = qpnp_chg_read(chip, &charge_en,
			chip->chgr_base + CHGR_CHG_CTRL, 1);
	if (rc)
		/* fixed copy/paste: this read is CHGR_CHG_CTRL, not bat_if */
		pr_err("failed to read chg ctrl %d\n", rc);

	pr_debug("batfet sts = %02x, charge_en = %02x ocv = %d\n",
			bat_if_sts, charge_en, chip->insertion_ocv_uv);
	qpnp_chg_charge_en(chip, !chip->charging_disabled);
	pr_debug("psy changed batt_psy\n");
	power_supply_changed(&chip->batt_psy);
}
/* Deferred capacity poll; get_prop_capacity() performs any notification. */
static void
qpnp_chg_soc_check_work(struct work_struct *work)
{
	struct qpnp_chg_chip *chip = container_of(work,
			struct qpnp_chg_chip, soc_check_work);

	get_prop_capacity(chip);
}
/* JEITA zone-transition hysteresis, in decidegrees Celsius. */
#define HYSTERISIS_DECIDEGC 20
/*
 * qpnp_chg_adc_notification() - ADC threshold-monitor callback for JEITA.
 * @state: ADC_TM_WARM_STATE when the high threshold tripped; any other
 *         valid state means the low/cool threshold tripped.
 * @ctx:   the qpnp_chg_chip registered with the ADC TM driver.
 *
 * Classifies the battery as warm/normal/cool with HYSTERISIS_DECIDEGC of
 * hysteresis, reprograms the next thresholds to watch, adjusts charge
 * voltage/current for the new zone, and re-arms the ADC measurement.
 */
static void
qpnp_chg_adc_notification(enum qpnp_tm_state state, void *ctx)
{
struct qpnp_chg_chip *chip = ctx;
bool bat_warm = 0, bat_cool = 0;
int temp;
if (state >= ADC_TM_STATE_NUM) {
pr_err("invalid notification %d\n", state);
return;
}
temp = get_prop_batt_temp(chip);
pr_debug("temp = %d state = %s\n", temp,
state == ADC_TM_WARM_STATE ? "warm" : "cool");
if (state == ADC_TM_WARM_STATE) {
if (temp >= chip->warm_bat_decidegc) {
/* Normal to warm */
bat_warm = true;
bat_cool = false;
chip->adc_param.low_temp =
chip->warm_bat_decidegc - HYSTERISIS_DECIDEGC;
chip->adc_param.state_request =
ADC_TM_COOL_THR_ENABLE;
} else if (temp >=
chip->cool_bat_decidegc + HYSTERISIS_DECIDEGC){
/* Cool to normal */
bat_warm = false;
bat_cool = false;
chip->adc_param.low_temp = chip->cool_bat_decidegc;
chip->adc_param.high_temp = chip->warm_bat_decidegc;
chip->adc_param.state_request =
ADC_TM_HIGH_LOW_THR_ENABLE;
}
} else {
if (temp <= chip->cool_bat_decidegc) {
/* Normal to cool */
bat_warm = false;
bat_cool = true;
chip->adc_param.high_temp =
chip->cool_bat_decidegc + HYSTERISIS_DECIDEGC;
chip->adc_param.state_request =
ADC_TM_WARM_THR_ENABLE;
} else if (temp <=
chip->warm_bat_decidegc - HYSTERISIS_DECIDEGC){
/* Warm to normal */
bat_warm = false;
bat_cool = false;
chip->adc_param.low_temp = chip->cool_bat_decidegc;
chip->adc_param.high_temp = chip->warm_bat_decidegc;
chip->adc_param.state_request =
ADC_TM_HIGH_LOW_THR_ENABLE;
}
}
/* Only act when the warm/cool classification actually changed. */
if (chip->bat_is_cool ^ bat_cool || chip->bat_is_warm ^ bat_warm) {
chip->bat_is_cool = bat_cool;
chip->bat_is_warm = bat_warm;
/**
* set appropriate voltages and currents.
*
* Note that when the battery is hot or cold, the charger
* driver will not resume with SoC. Only vbatdet is used to
* determine resume of charging.
*/
if (bat_cool || bat_warm) {
chip->resuming_charging = false;
qpnp_chg_set_appropriate_vbatdet(chip);
/* To avoid ARB, only vbatdet is configured in
* warm/cold zones. Once vbat < vbatdet the
* appropriate vddmax/ibatmax adjustments will
* be made in the fast charge interrupt. */
bypass_vbatdet_comp(chip, 1);
/*
* NOTE(review): charging is toggled off then back on here,
* presumably to make the charger state machine re-evaluate
* with the new vbatdet setting - confirm intent.
*/
qpnp_chg_charge_en(chip, !chip->charging_disabled);
qpnp_chg_charge_en(chip, chip->charging_disabled);
qpnp_chg_charge_en(chip, !chip->charging_disabled);
} else {
bypass_vbatdet_comp(chip, 0);
/* restore normal parameters */
qpnp_chg_set_appropriate_vbatdet(chip);
qpnp_chg_set_appropriate_vddmax(chip);
qpnp_chg_set_appropriate_battery_current(chip);
}
}
pr_debug("warm %d, cool %d, low = %d deciDegC, high = %d deciDegC\n",
chip->bat_is_warm, chip->bat_is_cool,
chip->adc_param.low_temp, chip->adc_param.high_temp);
if (qpnp_adc_tm_channel_measure(chip->adc_tm_dev, &chip->adc_param))
pr_err("request ADC error\n");
}
#define MIN_COOL_TEMP -300
#define MAX_WARM_TEMP 1000
/*
 * qpnp_chg_configure_jeita() - update the JEITA cool or warm threshold.
 * @chip:      charger chip instance
 * @psp:       POWER_SUPPLY_PROP_COOL_TEMP or POWER_SUPPLY_PROP_WARM_TEMP
 * @temp_degc: new threshold in decidegrees Celsius
 *
 * Validates the request against the opposite threshold plus hysteresis,
 * adjusts the active ADC band if the battery currently sits in the
 * affected zone, stores the new threshold, and schedules a measurement
 * to arm the new band.  Returns 0 or -EINVAL.
 */
static int
qpnp_chg_configure_jeita(struct qpnp_chg_chip *chip,
		enum power_supply_property psp, int temp_degc)
{
	int rc = 0;

	if ((temp_degc < MIN_COOL_TEMP) || (temp_degc > MAX_WARM_TEMP)) {
		pr_err("Bad temperature request %d\n", temp_degc);
		return -EINVAL;
	}

	mutex_lock(&chip->jeita_configure_lock);
	switch (psp) {
	case POWER_SUPPLY_PROP_COOL_TEMP:
		if (temp_degc >=
			(chip->warm_bat_decidegc - HYSTERISIS_DECIDEGC)) {
			pr_err("Can't set cool %d higher than warm %d - hysterisis %d\n",
					temp_degc, chip->warm_bat_decidegc,
					HYSTERISIS_DECIDEGC);
			rc = -EINVAL;
			goto mutex_unlock;
		}
		if (chip->bat_is_cool)
			chip->adc_param.high_temp =
				temp_degc + HYSTERISIS_DECIDEGC;
		else if (!chip->bat_is_warm)
			chip->adc_param.low_temp = temp_degc;

		chip->cool_bat_decidegc = temp_degc;
		break;
	case POWER_SUPPLY_PROP_WARM_TEMP:
		if (temp_degc <=
			(chip->cool_bat_decidegc + HYSTERISIS_DECIDEGC)) {
			/*
			 * Fixed message: the rejection is for warm being too
			 * LOW relative to cool, and the cool threshold is
			 * the value being compared against.
			 */
			pr_err("Can't set warm %d lower than cool %d + hysterisis %d\n",
					temp_degc, chip->cool_bat_decidegc,
					HYSTERISIS_DECIDEGC);
			rc = -EINVAL;
			goto mutex_unlock;
		}
		if (chip->bat_is_warm)
			chip->adc_param.low_temp =
				temp_degc - HYSTERISIS_DECIDEGC;
		else if (!chip->bat_is_cool)
			chip->adc_param.high_temp = temp_degc;

		chip->warm_bat_decidegc = temp_degc;
		break;
	default:
		rc = -EINVAL;
		goto mutex_unlock;
	}

	schedule_work(&chip->adc_measure_work);

mutex_unlock:
	mutex_unlock(&chip->jeita_configure_lock);
	return rc;
}
#define POWER_STAGE_REDUCE_CHECK_PERIOD_NS (20LL * NSEC_PER_SEC)
#define POWER_STAGE_REDUCE_MAX_VBAT_UV 3900000
#define POWER_STAGE_REDUCE_MIN_VCHG_UV 4800000
#define POWER_STAGE_SEL_MASK 0x0F
#define POWER_STAGE_REDUCED 0x01
#define POWER_STAGE_DEFAULT 0x0F
static bool
qpnp_chg_is_power_stage_reduced(struct qpnp_chg_chip *chip)
{
int rc;
u8 reg;
rc = qpnp_chg_read(chip, ®,
chip->buck_base + CHGR_BUCK_PSTG_CTRL,
1);
if (rc) {
pr_err("Error %d reading power stage register\n", rc);
return false;
}
if ((reg & POWER_STAGE_SEL_MASK) == POWER_STAGE_DEFAULT)
return false;
return true;
}
/*
 * Select reduced or default buck power stages.  The PSTG control register
 * is protected, so it must be unlocked with a SEC_ACCESS (0xA5) write
 * immediately beforehand.
 */
static int
qpnp_chg_power_stage_set(struct qpnp_chg_chip *chip, bool reduce)
{
	u8 val = 0xA5;
	int rc;

	rc = qpnp_chg_write(chip, &val, chip->buck_base + SEC_ACCESS, 1);
	if (rc) {
		pr_err("Error %d writing 0xA5 to buck's 0x%x reg\n",
				rc, SEC_ACCESS);
		return rc;
	}

	val = reduce ? POWER_STAGE_REDUCED : POWER_STAGE_DEFAULT;
	rc = qpnp_chg_write(chip, &val,
			chip->buck_base + CHGR_BUCK_PSTG_CTRL, 1);
	if (rc)
		pr_err("Error %d writing 0x%x power stage register\n", rc, val);

	return rc;
}
/*
 * Read the instantaneous USB input voltage (uV) via the VADC USBIN
 * channel.  Returns 0 on read failure so averaging callers degrade
 * gracefully instead of mixing in error codes.
 */
static int
qpnp_chg_get_vusbin_uv(struct qpnp_chg_chip *chip)
{
	int rc = 0;
	struct qpnp_vadc_result results;

	rc = qpnp_vadc_read(chip->vadc_dev, USBIN, &results);
	if (rc) {
		/* fixed copy/paste: the channel read here is USBIN, not vbat */
		pr_err("Unable to read usbin rc=%d\n", rc);
		return 0;
	}
	return results.physical;
}
/*
 * Average sample_count USB input voltage readings (uV).
 * The count is clamped to [1, 256]: the lower bound prevents a
 * divide-by-zero, the upper bound keeps the int accumulator from
 * overflowing.
 */
static
int get_vusb_averaged(struct qpnp_chg_chip *chip, int sample_count)
{
	int vusb_uv = 0;
	int i;

	if (sample_count < 1)
		sample_count = 1;
	/* avoid overflows */
	if (sample_count > 256)
		sample_count = 256;

	for (i = 0; i < sample_count; i++)
		vusb_uv += qpnp_chg_get_vusbin_uv(chip);

	return vusb_uv / sample_count;
}
/*
 * Average sample_count battery voltage readings (uV).
 * The count is clamped to [1, 256]: the lower bound prevents a
 * divide-by-zero, the upper bound keeps the int accumulator from
 * overflowing.
 */
static
int get_vbat_averaged(struct qpnp_chg_chip *chip, int sample_count)
{
	int vbat_uv = 0;
	int i;

	if (sample_count < 1)
		sample_count = 1;
	/* avoid overflows */
	if (sample_count > 256)
		sample_count = 256;

	for (i = 0; i < sample_count; i++)
		vbat_uv += get_prop_battery_voltage_now(chip);

	return vbat_uv / sample_count;
}
/*
 * qpnp_chg_reduce_power_stage() - periodic power-stage workaround policy.
 *
 * Decides whether the buck should run with reduced power stages based on
 * averaged vbat/vusb, fast-charge state, and the vchg/ichg regulation
 * loops.  The static counters debounce the decision: 2 consecutive
 * "reduce" verdicts switch to reduced stages, 6 consecutive "restore"
 * verdicts (or charger removal / low input current) switch back.  While
 * a high-current charger remains attached the check is re-armed via the
 * reduce_power_stage alarm every POWER_STAGE_REDUCE_CHECK_PERIOD_NS.
 */
static void
qpnp_chg_reduce_power_stage(struct qpnp_chg_chip *chip)
{
ktime_t kt;
bool power_stage_reduced_in_hw = qpnp_chg_is_power_stage_reduced(chip);
bool reduce_power_stage = false;
int vbat_uv = get_vbat_averaged(chip, 16);
int vusb_uv = get_vusb_averaged(chip, 16);
bool fast_chg =
(get_prop_charge_type(chip) == POWER_SUPPLY_CHARGE_TYPE_FAST);
/* hysteresis counters persisting across invocations */
static int count_restore_power_stage;
static int count_reduce_power_stage;
bool vchg_loop = get_prop_vchg_loop(chip);
bool ichg_loop = qpnp_chg_is_ichg_loop_active(chip);
bool usb_present = qpnp_chg_is_usb_chg_plugged_in(chip);
bool usb_ma_above_wall =
(qpnp_chg_usb_iusbmax_get(chip) > USB_WALL_THRESHOLD_MA);
bool target_usb_ma_above_wall =
(chip->prev_usb_max_ma > USB_WALL_THRESHOLD_MA);
/* fast charge with low vbat but high vusb: reduce stages */
if (fast_chg
&& usb_present
&& usb_ma_above_wall
&& vbat_uv < POWER_STAGE_REDUCE_MAX_VBAT_UV
&& vusb_uv > POWER_STAGE_REDUCE_MIN_VCHG_UV)
reduce_power_stage = true;
/* input limited by a regulation loop: also reduce stages */
if ((usb_present && usb_ma_above_wall)
&& (vchg_loop || ichg_loop))
reduce_power_stage = true;
if (power_stage_reduced_in_hw && !reduce_power_stage) {
count_restore_power_stage++;
count_reduce_power_stage = 0;
} else if (!power_stage_reduced_in_hw && reduce_power_stage) {
count_reduce_power_stage++;
count_restore_power_stage = 0;
} else if (power_stage_reduced_in_hw == reduce_power_stage) {
count_restore_power_stage = 0;
count_reduce_power_stage = 0;
}
pr_debug("power_stage_hw = %d reduce_power_stage = %d usb_present = %d usb_ma_above_wall = %d vbat_uv(16) = %d vusb_uv(16) = %d fast_chg = %d , ichg = %d, vchg = %d, restore,reduce = %d, %d\n",
power_stage_reduced_in_hw, reduce_power_stage,
usb_present, usb_ma_above_wall,
vbat_uv, vusb_uv, fast_chg,
ichg_loop, vchg_loop,
count_restore_power_stage, count_reduce_power_stage);
if (!power_stage_reduced_in_hw && reduce_power_stage) {
if (count_reduce_power_stage >= 2) {
qpnp_chg_power_stage_set(chip, true);
power_stage_reduced_in_hw = true;
}
}
if (power_stage_reduced_in_hw && !reduce_power_stage) {
if (count_restore_power_stage >= 6
|| (!usb_present || !usb_ma_above_wall)) {
qpnp_chg_power_stage_set(chip, false);
power_stage_reduced_in_hw = false;
}
}
/* keep polling while a high-current charger is the target */
if (usb_present && target_usb_ma_above_wall) {
kt = ns_to_ktime(POWER_STAGE_REDUCE_CHECK_PERIOD_NS);
alarm_start_relative(&chip->reduce_power_stage_alarm, kt);
} else {
pr_debug("stopping power stage workaround\n");
chip->power_stage_workaround_running = false;
}
}
static void
qpnp_chg_batfet_lcl_work(struct work_struct *work)
{
struct qpnp_chg_chip *chip = container_of(work,
struct qpnp_chg_chip, batfet_lcl_work);
mutex_lock(&chip->batfet_vreg_lock);
if (qpnp_chg_is_usb_chg_plugged_in(chip) ||
qpnp_chg_is_dc_chg_plugged_in(chip)) {
qpnp_chg_regulator_batfet_set(chip, 1);
pr_debug("disabled ULPM\n");
} else if (!chip->batfet_ext_en && !qpnp_chg_is_usb_chg_plugged_in(chip)
&& !qpnp_chg_is_dc_chg_plugged_in(chip)) {
qpnp_chg_regulator_batfet_set(chip, 0);
pr_debug("enabled ULPM\n");
}
mutex_unlock(&chip->batfet_vreg_lock);
}
/* Workqueue shim: run the power-stage workaround in process context. */
static void
qpnp_chg_reduce_power_stage_work(struct work_struct *work)
{
	struct qpnp_chg_chip *chip = container_of(work,
			struct qpnp_chg_chip, reduce_power_stage_work);

	qpnp_chg_reduce_power_stage(chip);
}
/*
 * Alarm handler for the power-stage workaround: defer the real work to
 * the workqueue and let qpnp_chg_reduce_power_stage() re-arm the alarm.
 */
static enum alarmtimer_restart
qpnp_chg_reduce_power_stage_callback(struct alarm *alarm, ktime_t now)
{
	struct qpnp_chg_chip *chip = container_of(alarm, struct qpnp_chg_chip,
			reduce_power_stage_alarm);

	schedule_work(&chip->reduce_power_stage_work);

	return ALARMTIMER_NORESTART;
}
/*
 * Writable-property handler for the DC power supply.  Only CURRENT_MAX is
 * supported; a zero request is silently ignored, and any successful change
 * is announced to the power-supply framework.
 */
static int
qpnp_dc_power_set_property(struct power_supply *psy,
		enum power_supply_property psp,
		const union power_supply_propval *val)
{
	struct qpnp_chg_chip *chip = container_of(psy, struct qpnp_chg_chip,
			dc_psy);
	int rc = 0;
	int ma;

	switch (psp) {
	case POWER_SUPPLY_PROP_CURRENT_MAX:
		if (!val->intval)
			break;

		ma = val->intval / 1000;
		rc = qpnp_chg_idcmax_set(chip, ma);
		if (rc) {
			pr_err("Error setting idcmax property %d\n", rc);
			return rc;
		}
		chip->maxinput_dc_ma = ma;
		break;
	default:
		return -EINVAL;
	}

	pr_debug("psy changed dc_psy\n");
	power_supply_changed(&chip->dc_psy);
	return rc;
}
/*
 * qpnp_batt_power_set_property() - writable-property handler for the
 * battery power supply.
 *
 * Dispatches each supported property to the matching charger helper
 * (JEITA thresholds, fake SoC, charge enable, thermal level, input
 * current limits/trim, AICL settled flag, VIN_MIN, vchg debouncer
 * bypass) and then notifies the power-supply framework.  Unsupported
 * properties return -EINVAL without a notification.
 */
static int
qpnp_batt_power_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *val)
{
struct qpnp_chg_chip *chip = container_of(psy, struct qpnp_chg_chip,
batt_psy);
int rc = 0;
switch (psp) {
case POWER_SUPPLY_PROP_COOL_TEMP:
rc = qpnp_chg_configure_jeita(chip, psp, val->intval);
break;
case POWER_SUPPLY_PROP_WARM_TEMP:
rc = qpnp_chg_configure_jeita(chip, psp, val->intval);
break;
case POWER_SUPPLY_PROP_CAPACITY:
/* debug/test override for the reported state of charge */
chip->fake_battery_soc = val->intval;
power_supply_changed(&chip->batt_psy);
break;
case POWER_SUPPLY_PROP_CHARGING_ENABLED:
chip->charging_disabled = !(val->intval);
if (chip->charging_disabled) {
/* disable charging */
qpnp_chg_charge_en(chip, !chip->charging_disabled);
qpnp_chg_force_run_on_batt(chip,
chip->charging_disabled);
} else {
/* enable charging */
qpnp_chg_force_run_on_batt(chip,
chip->charging_disabled);
qpnp_chg_charge_en(chip, !chip->charging_disabled);
}
break;
case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
qpnp_batt_system_temp_level_set(chip, val->intval);
break;
case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
/* DC input wins when it has priority and is present */
if (qpnp_chg_is_usb_chg_plugged_in(chip) &&
!(qpnp_is_dc_higher_prio(chip)
&& qpnp_chg_is_dc_chg_plugged_in(chip)))
qpnp_chg_iusbmax_set(chip, val->intval / 1000);
break;
case POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM:
qpnp_chg_iusb_trim_set(chip, val->intval);
break;
case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
if (val->intval)
qpnp_chg_input_current_settled(chip);
else
chip->aicl_settled = false;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN:
qpnp_chg_vinmin_set(chip, val->intval / 1000);
break;
case POWER_SUPPLY_PROP_VCHG_LOOP_DBC_BYPASS:
rc = qpnp_chg_bypass_vchg_loop_debouncer(chip, !!val->intval);
break;
default:
return -EINVAL;
}
pr_debug("psy changed batt_psy\n");
power_supply_changed(&chip->batt_psy);
return rc;
}
/*
 * qpnp_chg_setup_flags() - derive per-PMIC workaround flags from the
 * charger subtype and, for SMBBP parts, the PMIC silicon revision.
 * Returns 0 on success or a negative errno when the revid data needed
 * for SMBBP cannot be obtained.
 */
static int
qpnp_chg_setup_flags(struct qpnp_chg_chip *chip)
{
	struct device_node *revid_node;
	struct pmic_revid_data *revid;

	if (chip->type == SMBB) {
		if (chip->revision > 0)
			chip->flags |= CHG_FLAGS_VCP_WA;
		chip->flags |= BOOST_FLASH_WA;
		return 0;
	}

	if (chip->type != SMBBP)
		return 0;

	chip->flags |= BOOST_FLASH_WA;

	revid_node = of_parse_phandle(chip->spmi->dev.of_node,
			"qcom,pmic-revid", 0);
	if (!revid_node) {
		pr_err("Missing qcom,pmic-revid property\n");
		return -EINVAL;
	}

	revid = get_revid_data(revid_node);
	if (IS_ERR(revid)) {
		pr_err("Couldnt get revid data rc = %ld\n", PTR_ERR(revid));
		return PTR_ERR(revid);
	}

	/* power-stage WA applies up to and including PM8226 v2.1 */
	if (revid->rev4 < PM8226_V2P1_REV4
			|| ((revid->rev4 == PM8226_V2P1_REV4)
				&& (revid->rev3 <= PM8226_V2P1_REV3)))
		chip->flags |= POWER_STAGE_WA;

	return 0;
}
/*
 * qpnp_chg_request_irqs() - look up and request all charger interrupts.
 *
 * Walks every SPMI container device, reads its peripheral subtype and
 * requests/configures the interrupts belonging to that peripheral
 * (charger, battery interface, USB charge path, DC charge path).
 * Interrupts are requested with devm_* so they are released automatically.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fixes relative to the previous version:
 *  - error paths used to "return rc" while rc was still 0 (or negate a
 *    zero rc with "return -rc"), silently reporting success on a missing
 *    resource or irq; they now return a real negative error code
 *  - devm_request_irq() results were accumulated with "rc |= ...", which
 *    corrupts negative errno values; each request is now checked directly
 *  - copy-pasted error strings corrected (vbat-det-lo, usb-ocp)
 */
static int
qpnp_chg_request_irqs(struct qpnp_chg_chip *chip)
{
	int rc = 0;
	struct resource *resource;
	struct spmi_resource *spmi_resource;
	u8 subtype;
	struct spmi_device *spmi = chip->spmi;

	spmi_for_each_container_dev(spmi_resource, chip->spmi) {
		if (!spmi_resource) {
			pr_err("qpnp_chg: spmi resource absent\n");
			return -ENXIO;
		}

		resource = spmi_get_resource(spmi, spmi_resource,
						IORESOURCE_MEM, 0);
		if (!(resource && resource->start)) {
			pr_err("node %s IO resource absent!\n",
				spmi->dev.of_node->full_name);
			return -ENXIO;
		}

		rc = qpnp_chg_read(chip, &subtype,
				resource->start + REG_OFFSET_PERP_SUBTYPE, 1);
		if (rc) {
			pr_err("Peripheral subtype read failed rc=%d\n", rc);
			return rc;
		}

		switch (subtype) {
		case SMBB_CHGR_SUBTYPE:
		case SMBBP_CHGR_SUBTYPE:
		case SMBCL_CHGR_SUBTYPE:
			chip->chg_fastchg.irq = spmi_get_irq_byname(spmi,
						spmi_resource, "fast-chg-on");
			if (chip->chg_fastchg.irq < 0) {
				pr_err("Unable to get fast-chg-on irq\n");
				return chip->chg_fastchg.irq;
			}

			chip->chg_trklchg.irq = spmi_get_irq_byname(spmi,
						spmi_resource, "trkl-chg-on");
			if (chip->chg_trklchg.irq < 0) {
				pr_err("Unable to get trkl-chg-on irq\n");
				return chip->chg_trklchg.irq;
			}

			chip->chg_failed.irq = spmi_get_irq_byname(spmi,
						spmi_resource, "chg-failed");
			if (chip->chg_failed.irq < 0) {
				pr_err("Unable to get chg-failed irq\n");
				return chip->chg_failed.irq;
			}

			chip->chg_vbatdet_lo.irq = spmi_get_irq_byname(spmi,
						spmi_resource, "vbat-det-lo");
			if (chip->chg_vbatdet_lo.irq < 0) {
				pr_err("Unable to get vbat-det-lo irq\n");
				return chip->chg_vbatdet_lo.irq;
			}

			rc = devm_request_irq(chip->dev, chip->chg_failed.irq,
				qpnp_chg_chgr_chg_failed_irq_handler,
				IRQF_TRIGGER_RISING, "chg-failed", chip);
			if (rc < 0) {
				pr_err("Can't request %d chg-failed: %d\n",
						chip->chg_failed.irq, rc);
				return rc;
			}

			rc = devm_request_irq(chip->dev, chip->chg_fastchg.irq,
					qpnp_chg_chgr_chg_fastchg_irq_handler,
					IRQF_TRIGGER_RISING |
					IRQF_TRIGGER_FALLING,
					"fast-chg-on", chip);
			if (rc < 0) {
				pr_err("Can't request %d fast-chg-on: %d\n",
						chip->chg_fastchg.irq, rc);
				return rc;
			}

			rc = devm_request_irq(chip->dev, chip->chg_trklchg.irq,
				qpnp_chg_chgr_chg_trklchg_irq_handler,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				"trkl-chg-on", chip);
			if (rc < 0) {
				pr_err("Can't request %d trkl-chg-on: %d\n",
						chip->chg_trklchg.irq, rc);
				return rc;
			}

			rc = devm_request_irq(chip->dev,
				chip->chg_vbatdet_lo.irq,
				qpnp_chg_vbatdet_lo_irq_handler,
				IRQF_TRIGGER_RISING,
				"vbat-det-lo", chip);
			if (rc < 0) {
				pr_err("Can't request %d vbat-det-lo: %d\n",
						chip->chg_vbatdet_lo.irq, rc);
				return rc;
			}

			/* Keep key charger irqs armed across suspend;
			 * vbat-det-lo stays masked until charging resumes. */
			qpnp_chg_irq_wake_enable(&chip->chg_trklchg);
			qpnp_chg_irq_wake_enable(&chip->chg_failed);
			qpnp_chg_irq_wake_enable(&chip->chg_vbatdet_lo);
			qpnp_chg_disable_irq(&chip->chg_vbatdet_lo);
			break;
		case SMBB_BAT_IF_SUBTYPE:
		case SMBBP_BAT_IF_SUBTYPE:
		case SMBCL_BAT_IF_SUBTYPE:
			chip->batt_pres.irq = spmi_get_irq_byname(spmi,
						spmi_resource, "batt-pres");
			if (chip->batt_pres.irq < 0) {
				pr_err("Unable to get batt-pres irq\n");
				return chip->batt_pres.irq;
			}
			rc = devm_request_irq(chip->dev, chip->batt_pres.irq,
				qpnp_chg_bat_if_batt_pres_irq_handler,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
				| IRQF_SHARED | IRQF_ONESHOT,
				"batt-pres", chip);
			if (rc < 0) {
				pr_err("Can't request %d batt-pres irq: %d\n",
						chip->batt_pres.irq, rc);
				return rc;
			}

			qpnp_chg_irq_wake_enable(&chip->batt_pres);

			chip->batt_temp_ok.irq = spmi_get_irq_byname(spmi,
						spmi_resource, "bat-temp-ok");
			if (chip->batt_temp_ok.irq < 0) {
				pr_err("Unable to get bat-temp-ok irq\n");
				return chip->batt_temp_ok.irq;
			}
			rc = devm_request_irq(chip->dev, chip->batt_temp_ok.irq,
				qpnp_chg_bat_if_batt_temp_irq_handler,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				"bat-temp-ok", chip);
			if (rc < 0) {
				pr_err("Can't request %d bat-temp-ok irq: %d\n",
						chip->batt_temp_ok.irq, rc);
				return rc;
			}
			/* Seed the current battery-temp status once */
			qpnp_chg_bat_if_batt_temp_irq_handler(0, chip);

			qpnp_chg_irq_wake_enable(&chip->batt_temp_ok);
			break;
		case SMBB_BUCK_SUBTYPE:
		case SMBBP_BUCK_SUBTYPE:
		case SMBCL_BUCK_SUBTYPE:
			/* No interrupts used on the buck peripheral */
			break;
		case SMBB_USB_CHGPTH_SUBTYPE:
		case SMBBP_USB_CHGPTH_SUBTYPE:
		case SMBCL_USB_CHGPTH_SUBTYPE:
			if (chip->ovp_monitor_enable) {
				chip->coarse_det_usb.irq =
					spmi_get_irq_byname(spmi,
					spmi_resource, "coarse-det-usb");
				if (chip->coarse_det_usb.irq < 0) {
					pr_err("Can't get coarse-det irq\n");
					return chip->coarse_det_usb.irq;
				}
				rc = devm_request_irq(chip->dev,
					chip->coarse_det_usb.irq,
					qpnp_chg_coarse_det_usb_irq_handler,
					IRQF_TRIGGER_RISING |
					IRQF_TRIGGER_FALLING,
					"coarse-det-usb", chip);
				if (rc < 0) {
					pr_err("Can't req %d coarse-det: %d\n",
						chip->coarse_det_usb.irq, rc);
					return rc;
				}
			}

			chip->usbin_valid.irq = spmi_get_irq_byname(spmi,
						spmi_resource, "usbin-valid");
			if (chip->usbin_valid.irq < 0) {
				pr_err("Unable to get usbin irq\n");
				return chip->usbin_valid.irq;
			}
			rc = devm_request_irq(chip->dev, chip->usbin_valid.irq,
				qpnp_chg_usb_usbin_valid_irq_handler,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					"usbin-valid", chip);
			if (rc < 0) {
				pr_err("Can't request %d usbin-valid: %d\n",
						chip->usbin_valid.irq, rc);
				return rc;
			}

			chip->chg_gone.irq = spmi_get_irq_byname(spmi,
						spmi_resource, "chg-gone");
			if (chip->chg_gone.irq < 0) {
				pr_err("Unable to get chg-gone irq\n");
				return chip->chg_gone.irq;
			}
			rc = devm_request_irq(chip->dev, chip->chg_gone.irq,
				qpnp_chg_usb_chg_gone_irq_handler,
				IRQF_TRIGGER_RISING,
					"chg-gone", chip);
			if (rc < 0) {
				pr_err("Can't request %d chg-gone: %d\n",
						chip->chg_gone.irq, rc);
				return rc;
			}

			/* OCP irq exists only on SMBBP/SMBCL USB paths */
			if ((subtype == SMBBP_USB_CHGPTH_SUBTYPE) ||
				(subtype == SMBCL_USB_CHGPTH_SUBTYPE)) {
				chip->usb_ocp.irq = spmi_get_irq_byname(spmi,
						spmi_resource, "usb-ocp");
				if (chip->usb_ocp.irq < 0) {
					pr_err("Unable to get usb-ocp irq\n");
					return chip->usb_ocp.irq;
				}
				rc = devm_request_irq(chip->dev,
					chip->usb_ocp.irq,
					qpnp_chg_usb_usb_ocp_irq_handler,
					IRQF_TRIGGER_RISING, "usb-ocp", chip);
				if (rc < 0) {
					pr_err("Can't request %d usb-ocp: %d\n",
						chip->usb_ocp.irq, rc);
					return rc;
				}

				qpnp_chg_irq_wake_enable(&chip->usb_ocp);
			}

			qpnp_chg_irq_wake_enable(&chip->usbin_valid);
			qpnp_chg_irq_wake_enable(&chip->chg_gone);
			break;
		case SMBB_DC_CHGPTH_SUBTYPE:
			chip->dcin_valid.irq = spmi_get_irq_byname(spmi,
					spmi_resource, "dcin-valid");
			if (chip->dcin_valid.irq < 0) {
				pr_err("Unable to get dcin irq\n");
				return chip->dcin_valid.irq;
			}
			rc = devm_request_irq(chip->dev, chip->dcin_valid.irq,
				qpnp_chg_dc_dcin_valid_irq_handler,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
					"dcin-valid", chip);
			if (rc < 0) {
				pr_err("Can't request %d dcin-valid: %d\n",
						chip->dcin_valid.irq, rc);
				return rc;
			}

			qpnp_chg_irq_wake_enable(&chip->dcin_valid);
			break;
		}
	}

	return 0;
}
/*
 * qpnp_chg_load_battery_data() - load per-battery charging parameters.
 *
 * If a "qcom,battery-data" node exists, reads the battery ID via VADC,
 * looks up the matching profile and overrides max_voltage_mv,
 * safe_voltage_mv and term_current from it.  Absence of the node is not
 * an error.  Returns 0 on success or a negative errno.
 */
static int
qpnp_chg_load_battery_data(struct qpnp_chg_chip *chip)
{
	struct bms_battery_data batt_data;
	struct device_node *node;
	struct qpnp_vadc_result result;
	int rc;

	node = of_find_node_by_name(chip->spmi->dev.of_node,
			"qcom,battery-data");
	if (!node)
		return 0;

	memset(&batt_data, 0, sizeof(struct bms_battery_data));

	/* Battery ID resistor selects the profile inside the data node */
	rc = qpnp_vadc_read(chip->vadc_dev, LR_MUX2_BAT_ID, &result);
	if (rc) {
		pr_err("error reading batt id channel = %d, rc = %d\n",
					LR_MUX2_BAT_ID, rc);
		return rc;
	}

	/* -1 marks "not provided by the profile" */
	batt_data.max_voltage_uv = -1;
	batt_data.iterm_ua = -1;
	rc = of_batterydata_read_data(node, &batt_data, result.physical);
	if (rc) {
		pr_err("failed to read battery data: %d\n", rc);
		return rc;
	}

	if (batt_data.max_voltage_uv >= 0) {
		chip->max_voltage_mv = batt_data.max_voltage_uv / 1000;
		chip->safe_voltage_mv = chip->max_voltage_mv
						+ MAX_DELTA_VDD_MAX_MV;
	}
	if (batt_data.iterm_ua >= 0)
		chip->term_current = batt_data.iterm_ua / 1000;

	return 0;
}
#define WDOG_EN_BIT	BIT(7)

/*
 * qpnp_chg_hwinit() - one-time hardware init for a single peripheral.
 *
 * Dispatched on the peripheral subtype: programs charger voltage/current
 * limits and workaround registers, selects the battery-presence detection
 * scheme, registers the flash-wa/batfet/OTG/boost regulators and latches
 * BOOT_DONE on the misc block.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fixes relative to the previous version:
 *  - the BAT_IF case now checks of_get_regulator_init_data() for NULL
 *    before dereferencing it, matching every sibling case
 *  - a handled CHGR_CHG_TEMP_THRESH read failure no longer leaks a stale
 *    error code out of the function when no flash-wa regulator is named
 */
static int
qpnp_chg_hwinit(struct qpnp_chg_chip *chip, u8 subtype,
				struct spmi_resource *spmi_resource)
{
	int rc = 0;
	u8 reg = 0;
	struct regulator_init_data *init_data;
	struct regulator_desc *rdesc;
	struct regulator_config cfg = { };

	switch (subtype) {
	case SMBB_CHGR_SUBTYPE:
	case SMBBP_CHGR_SUBTYPE:
	case SMBCL_CHGR_SUBTYPE:
		qpnp_chg_vbatweak_set(chip, chip->batt_weak_voltage_mv);

		rc = qpnp_chg_vinmin_set(chip, chip->min_voltage_mv);
		if (rc) {
			pr_debug("failed setting min_voltage rc=%d\n", rc);
			return rc;
		}
		rc = qpnp_chg_vddsafe_set(chip, chip->safe_voltage_mv);
		if (rc) {
			pr_debug("failed setting safe_voltage rc=%d\n", rc);
			return rc;
		}
		rc = qpnp_chg_vbatdet_set(chip,
				chip->max_voltage_mv - chip->resume_delta_mv);
		if (rc) {
			pr_debug("failed setting resume_voltage rc=%d\n", rc);
			return rc;
		}
		rc = qpnp_chg_ibatmax_set(chip, chip->max_bat_chg_current);
		if (rc) {
			pr_debug("failed setting ibatmax rc=%d\n", rc);
			return rc;
		}
		if (chip->term_current) {
			rc = qpnp_chg_ibatterm_set(chip, chip->term_current);
			if (rc) {
				pr_debug("failed setting ibatterm rc=%d\n",
								rc);
				return rc;
			}
		}
		rc = qpnp_chg_ibatsafe_set(chip, chip->safe_current);
		if (rc) {
			pr_debug("failed setting ibat_Safe rc=%d\n", rc);
			return rc;
		}
		rc = qpnp_chg_tchg_max_set(chip, chip->tchg_mins);
		if (rc) {
			pr_debug("failed setting tchg_mins rc=%d\n", rc);
			return rc;
		}

		/*
		 * Workaround register writes below are deliberately not
		 * treated as fatal.  NOTE(review): each write overwrites
		 * rc without a check - confirm failures here are benign.
		 */
		/* HACK: Disable wdog */
		rc = qpnp_chg_masked_write(chip, chip->chgr_base + 0x62,
			0xFF, 0xA0, 1);

		/* HACK: use analog EOC */
		rc = qpnp_chg_masked_write(chip, chip->chgr_base +
			CHGR_IBAT_TERM_CHGR,
			0xFF, 0x08, 1);

		/* HACK: trkl stuck workaround */
		rc = qpnp_chg_masked_write(chip,
			chip->chgr_base + SEC_ACCESS,
			0xFF,
			0xA5, 1);
		rc = qpnp_chg_masked_write(chip, chip->chgr_base +
			CHG_OVR0,
			0xFF, 0x00, 1);
		rc = qpnp_chg_masked_write(chip,
			chip->chgr_base + SEC_ACCESS,
			0xFF,
			0xA5, 1);
		rc = qpnp_chg_masked_write(chip, chip->chgr_base +
			CHG_TRICKLE_CLAMP,
			0xFF, 0x00, 1);

		/* Cache default temp threshold; fall back to the compile-time
		 * default if the register cannot be read. */
		rc = qpnp_chg_read(chip, &chip->chg_temp_thresh_default,
				chip->chgr_base + CHGR_CHG_TEMP_THRESH, 1);
		if (rc) {
			pr_debug("read CHG_TEMP_THRESH failed, rc = %d\n", rc);
			chip->chg_temp_thresh_default =
				CHG_TEMP_THRESH_DEFAULT;
			/* failure handled - don't leak the error code */
			rc = 0;
		}

		init_data = of_get_regulator_init_data(chip->dev,
						       spmi_resource->of_node);
		if (!init_data) {
			pr_err("unable to get regulator init data for flash_wa\n");
			return -ENOMEM;
		}

		if (init_data->constraints.name) {
			rdesc = &(chip->flash_wa_vreg.rdesc);
			rdesc->owner = THIS_MODULE;
			rdesc->type = REGULATOR_VOLTAGE;
			rdesc->ops = &qpnp_chg_flash_wa_reg_ops;
			rdesc->name = init_data->constraints.name;
			cfg.dev = chip->dev;
			cfg.init_data = init_data;
			cfg.driver_data = chip;
			cfg.of_node = spmi_resource->of_node;

			init_data->constraints.valid_ops_mask
				|= REGULATOR_CHANGE_STATUS;

			chip->flash_wa_vreg.rdev =
				regulator_register(rdesc, &cfg);
			if (IS_ERR(chip->flash_wa_vreg.rdev)) {
				rc = PTR_ERR(chip->flash_wa_vreg.rdev);
				chip->flash_wa_vreg.rdev = NULL;
				pr_err("Flash wa failed, rc=%d\n", rc);
				return rc;
			}
		}
		break;
	case SMBB_BUCK_SUBTYPE:
	case SMBBP_BUCK_SUBTYPE:
	case SMBCL_BUCK_SUBTYPE:
		rc = qpnp_chg_toggle_chg_done_logic(chip, 0);
		if (rc)
			return rc;

		rc = qpnp_chg_masked_write(chip,
			chip->buck_base + CHGR_BUCK_BCK_VBAT_REG_MODE,
			BUCK_VBAT_REG_NODE_SEL_BIT,
			BUCK_VBAT_REG_NODE_SEL_BIT, 1);
		if (rc) {
			pr_debug("failed to enable IR drop comp rc=%d\n", rc);
			return rc;
		}

		rc = qpnp_chg_read(chip, &chip->trim_center,
				chip->buck_base + BUCK_CTRL_TRIM1, 1);
		if (rc) {
			pr_debug("failed to read trim center rc=%d\n", rc);
			return rc;
		}
		/* trim value lives in the upper nibble */
		chip->trim_center >>= 4;
		pr_debug("trim center = %02x\n", chip->trim_center);
		break;
	case SMBB_BAT_IF_SUBTYPE:
	case SMBBP_BAT_IF_SUBTYPE:
	case SMBCL_BAT_IF_SUBTYPE:
		/* Select battery presence detection */
		switch (chip->bpd_detection) {
		case BPD_TYPE_BAT_THM:
			reg = BAT_THM_EN;
			break;
		case BPD_TYPE_BAT_ID:
			reg = BAT_ID_EN;
			break;
		case BPD_TYPE_BAT_THM_BAT_ID:
			reg = BAT_THM_EN | BAT_ID_EN;
			break;
		default:
			reg = BAT_THM_EN;
			break;
		}

		rc = qpnp_chg_masked_write(chip,
			chip->bat_if_base + BAT_IF_BPD_CTRL,
			BAT_IF_BPD_CTRL_SEL,
			reg, 1);
		if (rc) {
			pr_debug("failed to chose BPD rc=%d\n", rc);
			return rc;
		}

		/* Force on VREF_BAT_THM */
		rc = qpnp_chg_masked_write(chip,
			chip->bat_if_base + BAT_IF_VREF_BAT_THM_CTRL,
			VREF_BATT_THERM_FORCE_ON,
			VREF_BATT_THERM_FORCE_ON, 1);
		if (rc) {
			pr_debug("failed to force on VREF_BAT_THM rc=%d\n", rc);
			return rc;
		}

		init_data = of_get_regulator_init_data(chip->dev,
					       spmi_resource->of_node);
		/* was missing: NULL check before dereference below */
		if (!init_data) {
			pr_err("unable to get regulator init data for batfet\n");
			return -ENOMEM;
		}

		if (init_data->constraints.name) {
			rdesc = &(chip->batfet_vreg.rdesc);
			rdesc->owner = THIS_MODULE;
			rdesc->type = REGULATOR_VOLTAGE;
			rdesc->ops = &qpnp_chg_batfet_vreg_ops;
			rdesc->name = init_data->constraints.name;

			init_data->constraints.valid_ops_mask
					|= REGULATOR_CHANGE_STATUS;

			cfg.dev = chip->dev;
			cfg.init_data = init_data;
			cfg.driver_data = chip;
			cfg.of_node = spmi_resource->of_node;
			chip->batfet_vreg.rdev = regulator_register(rdesc,
								&cfg);
			if (IS_ERR(chip->batfet_vreg.rdev)) {
				rc = PTR_ERR(chip->batfet_vreg.rdev);
				chip->batfet_vreg.rdev = NULL;
				if (rc != -EPROBE_DEFER)
					pr_err("batfet reg failed, rc=%d\n",
							rc);
				return rc;
			}
		}
		break;
	case SMBB_USB_CHGPTH_SUBTYPE:
	case SMBBP_USB_CHGPTH_SUBTYPE:
	case SMBCL_USB_CHGPTH_SUBTYPE:
		if (qpnp_chg_is_usb_chg_plugged_in(chip)) {
			rc = qpnp_chg_masked_write(chip,
				chip->usb_chgpth_base + CHGR_USB_ENUM_T_STOP,
				ENUM_T_STOP_BIT,
				ENUM_T_STOP_BIT, 1);
			if (rc) {
				pr_err("failed to write enum stop rc=%d\n", rc);
				return -ENXIO;
			}
		}

		init_data = of_get_regulator_init_data(chip->dev,
						       spmi_resource->of_node);
		if (!init_data) {
			pr_err("unable to allocate memory\n");
			return -ENOMEM;
		}

		if (init_data->constraints.name) {
			if (of_get_property(chip->dev->of_node,
						"otg-parent-supply", NULL))
				init_data->supply_regulator = "otg-parent";

			rdesc = &(chip->otg_vreg.rdesc);
			rdesc->owner = THIS_MODULE;
			rdesc->type = REGULATOR_VOLTAGE;
			rdesc->ops = &qpnp_chg_otg_reg_ops;
			rdesc->name = init_data->constraints.name;
			cfg.dev = chip->dev;
			cfg.init_data = init_data;
			cfg.driver_data = chip;
			cfg.of_node = spmi_resource->of_node;

			init_data->constraints.valid_ops_mask
				|= REGULATOR_CHANGE_STATUS;

			chip->otg_vreg.rdev = regulator_register(rdesc, &cfg);
			if (IS_ERR(chip->otg_vreg.rdev)) {
				rc = PTR_ERR(chip->otg_vreg.rdev);
				chip->otg_vreg.rdev = NULL;
				if (rc != -EPROBE_DEFER)
					pr_err("OTG reg failed, rc=%d\n", rc);
				return rc;
			}
		}

		/*
		 * Debounce/enum setup writes; NOTE(review): rc from each
		 * write below is overwritten - confirm failures are benign.
		 */
		rc = qpnp_chg_masked_write(chip,
			chip->usb_chgpth_base + USB_OVP_CTL,
			USB_VALID_DEB_20MS,
			USB_VALID_DEB_20MS, 1);

		rc = qpnp_chg_masked_write(chip,
			chip->usb_chgpth_base + CHGR_USB_ENUM_T_STOP,
			ENUM_T_STOP_BIT,
			ENUM_T_STOP_BIT, 1);

		rc = qpnp_chg_masked_write(chip,
			chip->usb_chgpth_base + SEC_ACCESS,
			0xFF,
			0xA5, 1);

		rc = qpnp_chg_masked_write(chip,
			chip->usb_chgpth_base + USB_CHG_GONE_REV_BST,
			0xFF,
			0x80, 1);

		if ((subtype == SMBBP_USB_CHGPTH_SUBTYPE) ||
			(subtype == SMBCL_USB_CHGPTH_SUBTYPE)) {
			rc = qpnp_chg_masked_write(chip,
				chip->usb_chgpth_base + USB_OCP_THR,
				OCP_THR_MASK,
				OCP_THR_900_MA, 1);
			if (rc)
				pr_err("Failed to configure OCP rc = %d\n", rc);
		}
		break;
	case SMBB_DC_CHGPTH_SUBTYPE:
		/* nothing to initialize on the DC charge path */
		break;
	case SMBB_BOOST_SUBTYPE:
	case SMBBP_BOOST_SUBTYPE:
		init_data = of_get_regulator_init_data(chip->dev,
					       spmi_resource->of_node);
		if (!init_data) {
			pr_err("unable to allocate memory\n");
			return -ENOMEM;
		}

		if (init_data->constraints.name) {
			if (of_get_property(chip->dev->of_node,
						"boost-parent-supply", NULL))
				init_data->supply_regulator = "boost-parent";

			rdesc = &(chip->boost_vreg.rdesc);
			rdesc->owner = THIS_MODULE;
			rdesc->type = REGULATOR_VOLTAGE;
			rdesc->ops = &qpnp_chg_boost_reg_ops;
			rdesc->name = init_data->constraints.name;
			cfg.dev = chip->dev;
			cfg.init_data = init_data;
			cfg.driver_data = chip;
			cfg.of_node = spmi_resource->of_node;

			init_data->constraints.valid_ops_mask
				|= REGULATOR_CHANGE_STATUS
				| REGULATOR_CHANGE_VOLTAGE;

			chip->boost_vreg.rdev = regulator_register(rdesc, &cfg);
			if (IS_ERR(chip->boost_vreg.rdev)) {
				rc = PTR_ERR(chip->boost_vreg.rdev);
				chip->boost_vreg.rdev = NULL;
				if (rc != -EPROBE_DEFER)
					pr_err("boost reg failed, rc=%d\n", rc);
				return rc;
			}
		}
		break;
	case SMBB_MISC_SUBTYPE:
	case SMBBP_MISC_SUBTYPE:
	case SMBCL_MISC_SUBTYPE:
		/* The misc block identifies the charger family */
		if (subtype == SMBB_MISC_SUBTYPE)
			chip->type = SMBB;
		else if (subtype == SMBBP_MISC_SUBTYPE)
			chip->type = SMBBP;
		else if (subtype == SMBCL_MISC_SUBTYPE)
			chip->type = SMBCL;

		pr_debug("Setting BOOT_DONE\n");
		rc = qpnp_chg_masked_write(chip,
			chip->misc_base + CHGR_MISC_BOOT_DONE,
			CHGR_BOOT_DONE, CHGR_BOOT_DONE, 1);
		rc = qpnp_chg_read(chip, &reg,
				 chip->misc_base + MISC_REVISION2, 1);
		if (rc) {
			pr_err("failed to read revision register rc=%d\n", rc);
			return rc;
		}

		chip->revision = reg;
		break;
	default:
		/* tolerated: unknown subtypes are logged, not fatal */
		pr_err("Invalid peripheral subtype\n");
	}
	return rc;
}
/*
 * OF_PROP_READ() - read a u32 "qcom,<prop>" DT property into chip->prop.
 *
 * Short-circuits if a previous read already failed (retval != 0).  When
 * @optional is set, a missing property (-EINVAL) is not an error.
 *
 * Fix: the error print used the caller's local name "rc" instead of the
 * macro parameter "retval", so it only compiled (and printed the right
 * value) when the caller's status variable happened to be named rc.
 */
#define OF_PROP_READ(chip, prop, qpnp_dt_property, retval, optional)	\
do {									\
	if (retval)							\
		break;							\
									\
	retval = of_property_read_u32(chip->spmi->dev.of_node,		\
					"qcom," qpnp_dt_property,	\
					&chip->prop);			\
									\
	if ((retval == -EINVAL) && optional)				\
		retval = 0;						\
	else if (retval)						\
		pr_err("Error reading " #qpnp_dt_property		\
				" property rc = %d\n", retval);		\
} while (0)
static int
qpnp_charger_read_dt_props(struct qpnp_chg_chip *chip)
{
int rc = 0;
const char *bpd;
OF_PROP_READ(chip, max_voltage_mv, "vddmax-mv", rc, 0);
OF_PROP_READ(chip, min_voltage_mv, "vinmin-mv", rc, 0);
OF_PROP_READ(chip, safe_voltage_mv, "vddsafe-mv", rc, 0);
OF_PROP_READ(chip, resume_delta_mv, "vbatdet-delta-mv", rc, 0);
OF_PROP_READ(chip, safe_current, "ibatsafe-ma", rc, 0);
OF_PROP_READ(chip, max_bat_chg_current, "ibatmax-ma", rc, 0);
if (rc)
pr_err("failed to read required dt parameters %d\n", rc);
OF_PROP_READ(chip, term_current, "ibatterm-ma", rc, 1);
OF_PROP_READ(chip, maxinput_dc_ma, "maxinput-dc-ma", rc, 1);
OF_PROP_READ(chip, maxinput_usb_ma, "maxinput-usb-ma", rc, 1);
OF_PROP_READ(chip, warm_bat_decidegc, "warm-bat-decidegc", rc, 1);
OF_PROP_READ(chip, cool_bat_decidegc, "cool-bat-decidegc", rc, 1);
OF_PROP_READ(chip, tchg_mins, "tchg-mins", rc, 1);
OF_PROP_READ(chip, hot_batt_p, "batt-hot-percentage", rc, 1);
OF_PROP_READ(chip, cold_batt_p, "batt-cold-percentage", rc, 1);
OF_PROP_READ(chip, soc_resume_limit, "resume-soc", rc, 1);
OF_PROP_READ(chip, batt_weak_voltage_mv, "vbatweak-mv", rc, 1);
OF_PROP_READ(chip, vbatdet_max_err_mv, "vbatdet-maxerr-mv", rc, 1);
if (rc)
return rc;
rc = of_property_read_string(chip->spmi->dev.of_node,
"qcom,bpd-detection", &bpd);
if (rc) {
/* Select BAT_THM as default BPD scheme */
chip->bpd_detection = BPD_TYPE_BAT_THM;
rc = 0;
} else {
chip->bpd_detection = get_bpd(bpd);
if (chip->bpd_detection < 0) {
pr_err("failed to determine bpd schema %d\n", rc);
return rc;
}
}
if (!chip->vbatdet_max_err_mv)
chip->vbatdet_max_err_mv = VBATDET_MAX_ERR_MV;
/* Look up JEITA compliance parameters if cool and warm temp provided */
if (chip->cool_bat_decidegc || chip->warm_bat_decidegc) {
chip->adc_tm_dev = qpnp_get_adc_tm(chip->dev, "chg");
if (IS_ERR(chip->adc_tm_dev)) {
rc = PTR_ERR(chip->adc_tm_dev);
if (rc != -EPROBE_DEFER)
pr_err("adc-tm not ready, defer probe\n");
return rc;
}
OF_PROP_READ(chip, warm_bat_chg_ma, "ibatmax-warm-ma", rc, 1);
OF_PROP_READ(chip, cool_bat_chg_ma, "ibatmax-cool-ma", rc, 1);
OF_PROP_READ(chip, warm_bat_mv, "warm-bat-mv", rc, 1);
OF_PROP_READ(chip, cool_bat_mv, "cool-bat-mv", rc, 1);
if (rc)
return rc;
}
/* Get the use-external-rsense property */
chip->use_external_rsense = of_property_read_bool(
chip->spmi->dev.of_node,
"qcom,use-external-rsense");
/* Get the btc-disabled property */
chip->btc_disabled = of_property_read_bool(chip->spmi->dev.of_node,
"qcom,btc-disabled");
ext_ovp_present = of_property_read_bool(chip->spmi->dev.of_node,
"qcom,ext-ovp-present");
/* Check if external IOVP part is configured */
chip->ext_ovp_isns_gpio = of_get_named_gpio(chip->spmi->dev.of_node,
"qcom,ext-ovp-isns-enable-gpio", 0);
if (gpio_is_valid(chip->ext_ovp_isns_gpio)) {
ext_ovp_isns_present = true;
rc = of_property_read_u32(chip->spmi->dev.of_node,
"qcom,ext-ovp-isns-r-ohm", &ext_ovp_isns_r);
if (rc)
return rc;
}
/* Get the charging-disabled property */
chip->charging_disabled = of_property_read_bool(chip->spmi->dev.of_node,
"qcom,charging-disabled");
chip->ovp_monitor_enable = of_property_read_bool(
chip->spmi->dev.of_node,
"qcom,ovp-monitor-en");
/* Get the duty-cycle-100p property */
chip->duty_cycle_100p = of_property_read_bool(
chip->spmi->dev.of_node,
"qcom,duty-cycle-100p");
/* Get the fake-batt-values property */
chip->use_default_batt_values =
of_property_read_bool(chip->spmi->dev.of_node,
"qcom,use-default-batt-values");
/* Disable charging when faking battery values */
if (chip->use_default_batt_values)
chip->charging_disabled = true;
chip->ibat_calibration_enabled =
of_property_read_bool(chip->spmi->dev.of_node,
"qcom,ibat-calibration-enabled");
chip->power_stage_workaround_enable =
of_property_read_bool(chip->spmi->dev.of_node,
"qcom,power-stage-reduced");
chip->parallel_ovp_mode =
of_property_read_bool(chip->spmi->dev.of_node,
"qcom,parallel-ovp-mode");
of_get_property(chip->spmi->dev.of_node, "qcom,thermal-mitigation",
&(chip->thermal_levels));
if (chip->thermal_levels > sizeof(int)) {
chip->thermal_mitigation = devm_kzalloc(chip->dev,
chip->thermal_levels,
GFP_KERNEL);
if (chip->thermal_mitigation == NULL) {
pr_err("thermal mitigation kzalloc() failed.\n");
return -ENOMEM;
}
chip->thermal_levels /= sizeof(int);
rc = of_property_read_u32_array(chip->spmi->dev.of_node,
"qcom,thermal-mitigation",
chip->thermal_mitigation, chip->thermal_levels);
if (rc) {
pr_err("qcom,thermal-mitigation missing in dt\n");
return rc;
}
}
return rc;
}
/*
 * qpnp_charger_probe() - driver probe for the QPNP charger SPMI device.
 *
 * Sequence: allocate chip state, resolve the "usb" power supply (defers
 * if absent), init locks/works/alarms, read DT config, identify all
 * contained peripherals (first pass: VADC/IADC + battery profile for the
 * BAT_IF block; second pass: per-subtype hardware init), register the
 * battery and DC power supplies, apply workaround flags and limits,
 * request interrupts, and kick the initial status handlers.
 *
 * Returns 0 on success, -EPROBE_DEFER when a dependency is not ready,
 * or another negative errno.
 */
static int
qpnp_charger_probe(struct spmi_device *spmi)
{
	u8 subtype;
	struct qpnp_chg_chip *chip;
	struct resource *resource;
	struct spmi_resource *spmi_resource;
	int rc = 0;

	chip = devm_kzalloc(&spmi->dev,
			sizeof(struct qpnp_chg_chip), GFP_KERNEL);
	if (chip == NULL) {
		pr_err("kzalloc() failed.\n");
		return -ENOMEM;
	}

	/* -EINVAL marks "not set" for these cached values */
	chip->prev_usb_max_ma = -EINVAL;
	chip->fake_battery_soc = -EINVAL;
	chip->dev = &(spmi->dev);
	chip->spmi = spmi;

	/* USB supply is provided by the USB driver; defer until it exists */
	chip->usb_psy = power_supply_get_by_name("usb");
	if (!chip->usb_psy) {
		pr_err("usb supply not found deferring probe\n");
		rc = -EPROBE_DEFER;
		goto fail_chg_enable;
	}

	mutex_init(&chip->jeita_configure_lock);
	mutex_init(&chip->batfet_vreg_lock);
	spin_lock_init(&chip->usbin_health_monitor_lock);
	alarm_init(&chip->reduce_power_stage_alarm, ALARM_REALTIME,
			qpnp_chg_reduce_power_stage_callback);
	INIT_WORK(&chip->reduce_power_stage_work,
			qpnp_chg_reduce_power_stage_work);
	INIT_WORK(&chip->ocp_clear_work,
			qpnp_chg_ocp_clear_work);
	INIT_WORK(&chip->insertion_ocv_work,
			qpnp_chg_insertion_ocv_work);
	INIT_WORK(&chip->batfet_lcl_work,
			qpnp_chg_batfet_lcl_work);
	INIT_WORK(&chip->btc_hot_irq_debounce_work,
			qpnp_chg_btc_hot_irq_debounce_work);

	/* Get all device tree properties */
	rc = qpnp_charger_read_dt_props(chip);
	if (rc)
		/* NOTE(review): returns directly rather than via
		 * fail_chg_enable - confirm no cleanup is skipped here */
		return rc;

	if (ext_ovp_isns_present)
		chip->ext_ovp_ic_gpio_enabled = 0;

	/*
	 * Check if bat_if is set in DT and make sure VADC is present
	 * Also try loading the battery data profile if bat_if exists
	 */
	spmi_for_each_container_dev(spmi_resource, spmi) {
		if (!spmi_resource) {
			pr_err("qpnp_chg: spmi resource absent\n");
			rc = -ENXIO;
			goto fail_chg_enable;
		}

		resource = spmi_get_resource(spmi, spmi_resource,
						IORESOURCE_MEM, 0);
		if (!(resource && resource->start)) {
			pr_err("node %s IO resource absent!\n",
				spmi->dev.of_node->full_name);
			rc = -ENXIO;
			goto fail_chg_enable;
		}

		rc = qpnp_chg_read(chip, &subtype,
				resource->start + REG_OFFSET_PERP_SUBTYPE, 1);
		if (rc) {
			pr_err("Peripheral subtype read failed rc=%d\n", rc);
			goto fail_chg_enable;
		}

		if (subtype == SMBB_BAT_IF_SUBTYPE ||
			subtype == SMBBP_BAT_IF_SUBTYPE ||
			subtype == SMBCL_BAT_IF_SUBTYPE) {
			chip->vadc_dev = qpnp_get_vadc(chip->dev, "chg");
			if (IS_ERR(chip->vadc_dev)) {
				rc = PTR_ERR(chip->vadc_dev);
				if (rc != -EPROBE_DEFER)
					pr_err("vadc property missing\n");
				goto fail_chg_enable;
			}

			/* IADC exists only on SMBB/SMBBP battery interfaces */
			if (subtype == SMBB_BAT_IF_SUBTYPE ||
				subtype == SMBBP_BAT_IF_SUBTYPE) {
				chip->iadc_dev = qpnp_get_iadc(chip->dev,
						"chg");
				if (IS_ERR(chip->iadc_dev)) {
					rc = PTR_ERR(chip->iadc_dev);
					if (rc != -EPROBE_DEFER)
						pr_err("iadc property missing\n");
					goto fail_chg_enable;
				}
			}

			rc = qpnp_chg_load_battery_data(chip);
			if (rc)
				goto fail_chg_enable;
		}
	}

	/* Second pass: record base addresses and run per-subtype hw init */
	spmi_for_each_container_dev(spmi_resource, spmi) {
		if (!spmi_resource) {
			pr_err("qpnp_chg: spmi resource absent\n");
			rc = -ENXIO;
			goto fail_chg_enable;
		}

		resource = spmi_get_resource(spmi, spmi_resource,
						IORESOURCE_MEM, 0);
		if (!(resource && resource->start)) {
			pr_err("node %s IO resource absent!\n",
				spmi->dev.of_node->full_name);
			rc = -ENXIO;
			goto fail_chg_enable;
		}

		rc = qpnp_chg_read(chip, &subtype,
				resource->start + REG_OFFSET_PERP_SUBTYPE, 1);
		if (rc) {
			pr_err("Peripheral subtype read failed rc=%d\n", rc);
			goto fail_chg_enable;
		}

		switch (subtype) {
		case SMBB_CHGR_SUBTYPE:
		case SMBBP_CHGR_SUBTYPE:
		case SMBCL_CHGR_SUBTYPE:
			chip->chgr_base = resource->start;
			rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
			if (rc) {
				pr_err("Failed to init subtype 0x%x rc=%d\n",
						subtype, rc);
				goto fail_chg_enable;
			}
			break;
		case SMBB_BUCK_SUBTYPE:
		case SMBBP_BUCK_SUBTYPE:
		case SMBCL_BUCK_SUBTYPE:
			chip->buck_base = resource->start;
			rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
			if (rc) {
				pr_err("Failed to init subtype 0x%x rc=%d\n",
						subtype, rc);
				goto fail_chg_enable;
			}

			/* NOTE(review): rc from the two writes below is
			 * overwritten without a check - confirm benign */
			rc = qpnp_chg_masked_write(chip,
				chip->buck_base + SEC_ACCESS,
				0xFF,
				0xA5, 1);

			rc = qpnp_chg_masked_write(chip,
				chip->buck_base + BUCK_VCHG_OV,
				0xff,
				0x00, 1);

			if (chip->duty_cycle_100p) {
				rc = qpnp_buck_set_100_duty_cycle_enable(chip,
						1);
				if (rc) {
					pr_err("failed to set duty cycle %d\n",
						rc);
					goto fail_chg_enable;
				}
			}

			break;
		case SMBB_BAT_IF_SUBTYPE:
		case SMBBP_BAT_IF_SUBTYPE:
		case SMBCL_BAT_IF_SUBTYPE:
			chip->bat_if_base = resource->start;
			rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
			if (rc) {
				pr_err("Failed to init subtype 0x%x rc=%d\n",
						subtype, rc);
				goto fail_chg_enable;
			}
			break;
		case SMBB_USB_CHGPTH_SUBTYPE:
		case SMBBP_USB_CHGPTH_SUBTYPE:
		case SMBCL_USB_CHGPTH_SUBTYPE:
			chip->usb_chgpth_base = resource->start;
			rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
			if (rc) {
				if (rc != -EPROBE_DEFER)
					pr_err("Failed to init subtype 0x%x rc=%d\n",
						subtype, rc);
				goto fail_chg_enable;
			}
			break;
		case SMBB_DC_CHGPTH_SUBTYPE:
			chip->dc_chgpth_base = resource->start;
			rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
			if (rc) {
				pr_err("Failed to init subtype 0x%x rc=%d\n",
						subtype, rc);
				goto fail_chg_enable;
			}
			break;
		case SMBB_BOOST_SUBTYPE:
		case SMBBP_BOOST_SUBTYPE:
			chip->boost_base = resource->start;
			rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
			if (rc) {
				if (rc != -EPROBE_DEFER)
					pr_err("Failed to init subtype 0x%x rc=%d\n",
						subtype, rc);
				goto fail_chg_enable;
			}
			break;
		case SMBB_MISC_SUBTYPE:
		case SMBBP_MISC_SUBTYPE:
		case SMBCL_MISC_SUBTYPE:
			chip->misc_base = resource->start;
			rc = qpnp_chg_hwinit(chip, subtype, spmi_resource);
			if (rc) {
				pr_err("Failed to init subtype=0x%x rc=%d\n",
						subtype, rc);
				goto fail_chg_enable;
			}
			break;
		default:
			pr_err("Invalid peripheral subtype=0x%x\n", subtype);
			rc = -EINVAL;
			goto fail_chg_enable;
		}
	}
	dev_set_drvdata(&spmi->dev, chip);
	device_init_wakeup(&spmi->dev, 1);

	chip->insertion_ocv_uv = -EINVAL;
	chip->batt_present = qpnp_chg_is_batt_present(chip);
	/* Register the battery supply only if a BAT_IF block was found */
	if (chip->bat_if_base) {
		chip->batt_psy.name = "battery";
		chip->batt_psy.type = POWER_SUPPLY_TYPE_BATTERY;
		chip->batt_psy.properties = msm_batt_power_props;
		chip->batt_psy.num_properties =
			ARRAY_SIZE(msm_batt_power_props);
		chip->batt_psy.get_property = qpnp_batt_power_get_property;
		chip->batt_psy.set_property = qpnp_batt_power_set_property;
		chip->batt_psy.property_is_writeable =
				qpnp_batt_property_is_writeable;
		chip->batt_psy.external_power_changed =
				qpnp_batt_external_power_changed;
		chip->batt_psy.supplied_to = pm_batt_supplied_to;
		chip->batt_psy.num_supplicants =
				ARRAY_SIZE(pm_batt_supplied_to);

		rc = power_supply_register(chip->dev, &chip->batt_psy);
		if (rc < 0) {
			pr_err("batt failed to register rc = %d\n", rc);
			goto fail_chg_enable;
		}
		INIT_WORK(&chip->adc_measure_work,
			qpnp_bat_if_adc_measure_work);
		INIT_WORK(&chip->adc_disable_work,
			qpnp_bat_if_adc_disable_work);
	}

	INIT_DELAYED_WORK(&chip->eoc_work, qpnp_eoc_work);
	INIT_DELAYED_WORK(&chip->arb_stop_work, qpnp_arb_stop_work);
	INIT_DELAYED_WORK(&chip->usbin_health_check,
			qpnp_usbin_health_check_work);
	INIT_WORK(&chip->soc_check_work, qpnp_chg_soc_check_work);
	INIT_DELAYED_WORK(&chip->aicl_check_work, qpnp_aicl_check_work);

	/* Register the DC supply only if a DC charge path was found */
	if (chip->dc_chgpth_base) {
		chip->dc_psy.name = "qpnp-dc";
		chip->dc_psy.type = POWER_SUPPLY_TYPE_MAINS;
		chip->dc_psy.supplied_to = pm_power_supplied_to;
		chip->dc_psy.num_supplicants = ARRAY_SIZE(pm_power_supplied_to);
		chip->dc_psy.properties = pm_power_props_mains;
		chip->dc_psy.num_properties = ARRAY_SIZE(pm_power_props_mains);
		chip->dc_psy.get_property = qpnp_power_get_property_mains;
		chip->dc_psy.set_property = qpnp_dc_power_set_property;
		chip->dc_psy.property_is_writeable =
				qpnp_dc_property_is_writeable;

		rc = power_supply_register(chip->dev, &chip->dc_psy);
		if (rc < 0) {
			pr_err("power_supply_register dc failed rc=%d\n", rc);
			goto unregister_batt;
		}
	}

	/* Turn on appropriate workaround flags */
	rc = qpnp_chg_setup_flags(chip);
	if (rc < 0) {
		pr_err("failed to setup flags rc=%d\n", rc);
		goto unregister_dc_psy;
	}

	if (chip->maxinput_dc_ma && chip->dc_chgpth_base) {
		rc = qpnp_chg_idcmax_set(chip, chip->maxinput_dc_ma);
		if (rc) {
			pr_err("Error setting idcmax property %d\n", rc);
			goto unregister_dc_psy;
		}
	}

	/* Arm JEITA temperature monitoring if thresholds were configured */
	if ((chip->cool_bat_decidegc || chip->warm_bat_decidegc)
							&& chip->bat_if_base) {
		chip->adc_param.low_temp = chip->cool_bat_decidegc;
		chip->adc_param.high_temp = chip->warm_bat_decidegc;
		chip->adc_param.timer_interval = ADC_MEAS2_INTERVAL_1S;
		chip->adc_param.state_request = ADC_TM_HIGH_LOW_THR_ENABLE;
		chip->adc_param.btm_ctx = chip;
		chip->adc_param.threshold_notification =
						qpnp_chg_adc_notification;
		chip->adc_param.channel = LR_MUX1_BATT_THERM;

		if (get_prop_batt_present(chip)) {
			rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
							&chip->adc_param);
			if (rc) {
				pr_err("request ADC error %d\n", rc);
				goto unregister_dc_psy;
			}
		}
	}
	rc = qpnp_chg_bat_if_configure_btc(chip);
	if (rc) {
		pr_err("failed to configure btc %d\n", rc);
		goto unregister_dc_psy;
	}

	chip->usb_trim_default = qpnp_chg_iusb_trim_get(chip);
	qpnp_chg_charge_en(chip, !chip->charging_disabled);
	qpnp_chg_force_run_on_batt(chip, chip->charging_disabled);
	qpnp_chg_set_appropriate_vddmax(chip);

	if (chip->parallel_ovp_mode) {
		rc = override_dcin_ilimit(chip, 1);
		if (rc) {
			pr_err("Override DCIN LLIMIT %d\n", rc);
			goto unregister_dc_psy;
		}
	}

	rc = qpnp_chg_request_irqs(chip);
	if (rc) {
		pr_err("failed to request interrupts %d\n", rc);
		goto unregister_dc_psy;
	}

	/* Run the handlers once to sync initial charger state */
	qpnp_chg_usb_chg_gone_irq_handler(chip->chg_gone.irq, chip);
	qpnp_chg_usb_usbin_valid_irq_handler(chip->usbin_valid.irq, chip);
	qpnp_chg_dc_dcin_valid_irq_handler(chip->dcin_valid.irq, chip);

	power_supply_set_present(chip->usb_psy,
			qpnp_chg_is_usb_chg_plugged_in(chip));

	/* Set USB psy online to avoid userspace from shutting down if battery
	 * capacity is at zero and no chargers online. */
	if (qpnp_chg_is_usb_chg_plugged_in(chip))
		power_supply_set_online(chip->usb_psy, 1);

	schedule_delayed_work(&chip->aicl_check_work,
		msecs_to_jiffies(EOC_CHECK_PERIOD_MS));
	pr_info("success chg_dis = %d, bpd = %d, usb = %d, dc = %d b_health = %d batt_present = %d\n",
			chip->charging_disabled,
			chip->bpd_detection,
			qpnp_chg_is_usb_chg_plugged_in(chip),
			qpnp_chg_is_dc_chg_plugged_in(chip),
			get_prop_batt_present(chip),
			get_prop_batt_health(chip));
	return 0;

unregister_dc_psy:
	if (chip->dc_chgpth_base)
		power_supply_unregister(&chip->dc_psy);
unregister_batt:
	if (chip->bat_if_base)
		power_supply_unregister(&chip->batt_psy);
fail_chg_enable:
	/* rdev pointers are NULL unless registration succeeded */
	regulator_unregister(chip->otg_vreg.rdev);
	regulator_unregister(chip->boost_vreg.rdev);
	return rc;
}
/*
 * qpnp_charger_remove() - tear down the charger on device removal.
 *
 * Disables JEITA ADC monitoring (if it was armed), then cancels every
 * work item, unregisters the power supplies and regulators, and destroys
 * the mutexes.  The ordering mirrors the reverse of probe; keep it -
 * works may reference the power supplies they are cancelled around.
 * irqs requested with devm_* are released automatically.
 */
static int
qpnp_charger_remove(struct spmi_device *spmi)
{
	struct qpnp_chg_chip *chip = dev_get_drvdata(&spmi->dev);
	if ((chip->cool_bat_decidegc || chip->warm_bat_decidegc)
						&& chip->batt_present) {
		qpnp_adc_tm_disable_chan_meas(chip->adc_tm_dev,
							&chip->adc_param);
	}

	cancel_delayed_work_sync(&chip->aicl_check_work);
	/* NOTE(review): dc_psy/batt_psy are unregistered unconditionally
	 * even though probe registers them conditionally - confirm
	 * power_supply_unregister tolerates a never-registered psy */
	power_supply_unregister(&chip->dc_psy);
	cancel_work_sync(&chip->soc_check_work);
	cancel_delayed_work_sync(&chip->usbin_health_check);
	cancel_delayed_work_sync(&chip->arb_stop_work);
	cancel_delayed_work_sync(&chip->eoc_work);
	cancel_work_sync(&chip->adc_disable_work);
	cancel_work_sync(&chip->adc_measure_work);
	power_supply_unregister(&chip->batt_psy);
	cancel_work_sync(&chip->batfet_lcl_work);
	cancel_work_sync(&chip->insertion_ocv_work);
	cancel_work_sync(&chip->reduce_power_stage_work);
	alarm_cancel(&chip->reduce_power_stage_alarm);

	mutex_destroy(&chip->batfet_vreg_lock);
	mutex_destroy(&chip->jeita_configure_lock);

	regulator_unregister(chip->otg_vreg.rdev);
	regulator_unregister(chip->boost_vreg.rdev);

	return 0;
}
/*
 * PM resume: force the battery-thermistor reference (VREF_BAT_THM) back on
 * so temperature readings are valid again.  No-op when the BAT_IF block was
 * not discovered at probe time.
 */
static int qpnp_chg_resume(struct device *dev)
{
	struct qpnp_chg_chip *chip = dev_get_drvdata(dev);
	int rc;

	if (!chip->bat_if_base)
		return 0;

	rc = qpnp_chg_masked_write(chip,
			chip->bat_if_base + BAT_IF_VREF_BAT_THM_CTRL,
			VREF_BATT_THERM_FORCE_ON,
			VREF_BATT_THERM_FORCE_ON, 1);
	if (rc)
		pr_debug("failed to force on VREF_BAT_THM rc=%d\n", rc);

	return rc;
}
/*
 * PM suspend: hand VREF_BAT_THM control back to the hardware FSM so the
 * reference is only powered when the state machine needs it.  No-op when
 * the BAT_IF block was not discovered at probe time.
 */
static int qpnp_chg_suspend(struct device *dev)
{
	struct qpnp_chg_chip *chip = dev_get_drvdata(dev);
	int rc;

	if (!chip->bat_if_base)
		return 0;

	rc = qpnp_chg_masked_write(chip,
			chip->bat_if_base + BAT_IF_VREF_BAT_THM_CTRL,
			VREF_BATT_THERM_FORCE_ON,
			VREF_BAT_THM_ENABLED_FSM, 1);
	if (rc)
		pr_debug("failed to set FSM VREF_BAT_THM rc=%d\n", rc);

	return rc;
}
/*
 * Suspend/resume toggle VREF_BAT_THM between FSM-managed (suspend) and
 * force-on (resume); see qpnp_chg_suspend()/qpnp_chg_resume().
 */
static const struct dev_pm_ops qpnp_chg_pm_ops = {
	.resume = qpnp_chg_resume,
	.suspend = qpnp_chg_suspend,
};
/* SPMI driver glue; matched against DT via qpnp_charger_match_table. */
static struct spmi_driver qpnp_charger_driver = {
	.probe = qpnp_charger_probe,
	.remove = qpnp_charger_remove,
	.driver = {
		.name = QPNP_CHARGER_DEV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = qpnp_charger_match_table,
		.pm = &qpnp_chg_pm_ops,
	},
};
/**
 * qpnp_chg_init() - register spmi driver for qpnp-chg
 *
 * Module entry point; returns the result of spmi_driver_register().
 */
int __init
qpnp_chg_init(void)
{
	return spmi_driver_register(&qpnp_charger_driver);
}
module_init(qpnp_chg_init);
/* Module exit point: unregister the SPMI driver. */
static void __exit
qpnp_chg_exit(void)
{
	spmi_driver_unregister(&qpnp_charger_driver);
}
module_exit(qpnp_chg_exit);
MODULE_DESCRIPTION("QPNP charger driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" QPNP_CHARGER_DEV_NAME);
| gpl-2.0 |
GiulianoFranchetto/linux-at91 | drivers/clk/tegra/clk-divider.c | 734 | 4433 | /*
* Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include "clk.h"
#define pll_out_override(p) (BIT((p->shift - 6)))
#define div_mask(d) ((1 << (d->width)) - 1)
#define get_mul(d) (1 << d->frac_width)
#define get_max_div(d) div_mask(d)
#define PERIPH_CLK_UART_DIV_ENB BIT(24)
/*
 * get_div - compute the raw divider register value for a requested rate.
 *
 * Fixed-point scheme: "mul" (1 << frac_width) represents a divider of 1.0,
 * and the hardware divides by (raw + mul) / mul.  So the programmed value
 * is (parent_rate / rate) * mul - mul.  TEGRA_DIVIDER_INT restricts the
 * intermediate to integer steps; TEGRA_DIVIDER_ROUND_UP rounds the division
 * up so the achieved rate never exceeds the request.  The result is clamped
 * to [0, get_max_div()], and 0 is returned for a zero rate.
 */
static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
		   unsigned long parent_rate)
{
	s64 divider_ux1 = parent_rate;
	u8 flags = divider->flags;
	int mul;

	if (!rate)
		return 0;

	mul = get_mul(divider);

	/* scale into fixed point unless only integer dividers are allowed */
	if (!(flags & TEGRA_DIVIDER_INT))
		divider_ux1 *= mul;

	if (flags & TEGRA_DIVIDER_ROUND_UP)
		divider_ux1 += rate - 1;

	do_div(divider_ux1, rate);

	/* integer dividers scale after the division instead */
	if (flags & TEGRA_DIVIDER_INT)
		divider_ux1 *= mul;

	divider_ux1 -= mul;	/* hardware adds mul back implicitly */

	if (divider_ux1 < 0)
		return 0;

	if (divider_ux1 > get_max_div(divider))
		return get_max_div(divider);

	return divider_ux1;
}
/*
 * Read the divider field back from hardware and compute the output rate:
 * out = parent * mul / (raw + mul), with the division rounded up.
 */
static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
	u32 reg;
	int div, mul;
	u64 rate = parent_rate;

	reg = readl_relaxed(divider->reg) >> divider->shift;
	div = reg & div_mask(divider);

	mul = get_mul(divider);
	div += mul;		/* raw N means divide by (N + mul) / mul */

	rate *= mul;
	rate += div - 1;	/* round up */
	do_div(rate, div);

	return rate;
}
/*
 * Report the rate that the closest achievable divider setting would
 * actually produce, without touching hardware.
 */
static long clk_frac_div_round_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long *prate)
{
	struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
	int div, mul;
	unsigned long output_rate = *prate;

	if (!rate)
		return output_rate;

	div = get_div(divider, rate, output_rate);
	if (div < 0)	/* defensive: get_div() currently never goes negative */
		return *prate;

	mul = get_mul(divider);

	return DIV_ROUND_UP(output_rate * mul, div + mul);
}
/*
 * Program the divider field (plus the UART enable / fixed-override bits
 * when applicable) to achieve @rate.  The register read-modify-write is
 * serialized with divider->lock when one was supplied.
 */
static int clk_frac_div_set_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
	int div;
	unsigned long flags = 0;
	u32 val;

	div = get_div(divider, rate, parent_rate);
	if (div < 0)
		return div;

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);

	val = readl_relaxed(divider->reg);
	val &= ~(div_mask(divider) << divider->shift);
	val |= div << divider->shift;

	/* UART dividers only divide when the extra enable bit is set */
	if (divider->flags & TEGRA_DIVIDER_UART) {
		if (div)
			val |= PERIPH_CLK_UART_DIV_ENB;
		else
			val &= ~PERIPH_CLK_UART_DIV_ENB;
	}

	if (divider->flags & TEGRA_DIVIDER_FIXED)
		val |= pll_out_override(divider);

	writel_relaxed(val, divider->reg);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);

	return 0;
}
/* clk_ops implementation for Tegra fractional dividers. */
const struct clk_ops tegra_clk_frac_div_ops = {
	.recalc_rate = clk_frac_div_recalc_rate,
	.set_rate = clk_frac_div_set_rate,
	.round_rate = clk_frac_div_round_rate,
};
/**
 * tegra_clk_register_divider - register a Tegra fractional divider clock
 * @name: clock name
 * @parent_name: parent clock name, or NULL for a parentless clock
 * @reg: MMIO register containing the divider field
 * @flags: common clock framework flags
 * @clk_divider_flags: TEGRA_DIVIDER_* behaviour flags
 * @shift: bit offset of the divider field within @reg
 * @width: width of the divider field in bits
 * @frac_width: number of fractional bits of the divider
 * @lock: spinlock guarding @reg accesses, may be NULL
 *
 * Returns the registered struct clk, or an ERR_PTR() on failure; the
 * divider allocation is freed if clk_register() fails.
 */
struct clk *tegra_clk_register_divider(const char *name,
		const char *parent_name, void __iomem *reg,
		unsigned long flags, u8 clk_divider_flags, u8 shift, u8 width,
		u8 frac_width, spinlock_t *lock)
{
	struct tegra_clk_frac_div *divider;
	struct clk *clk;
	struct clk_init_data init;

	divider = kzalloc(sizeof(*divider), GFP_KERNEL);
	if (!divider) {
		pr_err("%s: could not allocate fractional divider clk\n",
		       __func__);
		return ERR_PTR(-ENOMEM);
	}

	init.name = name;
	init.ops = &tegra_clk_frac_div_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	divider->reg = reg;
	divider->shift = shift;
	divider->width = width;
	divider->frac_width = frac_width;
	divider->lock = lock;
	divider->flags = clk_divider_flags;

	/* Data in .init is copied by clk_register(), so stack variable OK */
	divider->hw.init = &init;

	/*
	 * Fix: this call had been corrupted ("&div" of "&divider" was
	 * mangled into a U+00F7 division sign) — pass &divider->hw.
	 */
	clk = clk_register(NULL, &divider->hw);
	if (IS_ERR(clk))
		kfree(divider);

	return clk;
}
| gpl-2.0 |
MinimumLaw/uccu-kernel | arch/x86/kernel/reboot_fixups_32.c | 990 | 2283 | /*
* This is a good place to put board specific reboot fixups.
*
* List of supported fixups:
* geode-gx1/cs5530a - Jaya Kumar <jayalk@intworks.biz>
* geode-gx/lx/cs5536 - Andres Salomon <dilinger@debian.org>
*
*/
#include <asm/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <asm/reboot_fixups.h>
#include <asm/msr.h>
#include <linux/cs5535.h>
/*
 * Trigger a system warm reset on the CS5530A: writing 1 to its reset
 * control register at config offset 0x44 restarts the machine.
 */
static void cs5530a_warm_reset(struct pci_dev *dev)
{
	pci_write_config_byte(dev, 0x44, 0x1);

	/* Reset should be immediate; spin briefly just in case. */
	udelay(50);
}
/* CS5536: a hard reset is triggered through an MSR, not PCI config space. */
static void cs5536_warm_reset(struct pci_dev *dev)
{
	/* writing 1 to the LSB of this MSR causes a hard reset */
	wrmsrl(MSR_DIVIL_SOFT_RESET, 1ULL);
	udelay(50); /* shouldn't get here but be safe and spin a while */
}
/*
 * RDC R-321x: arm the SoC watchdog (via direct PCI config-mechanism I/O
 * on ports 0xCF8/0xCFC) for an immediate CPU reset, then poke the
 * keyboard-controller reset line (port 0x92) as a fallback.
 */
static void rdc321x_reset(struct pci_dev *dev)
{
	unsigned i;
	/* Voluntary reset the watchdog timer */
	outl(0x80003840, 0xCF8);
	/* Generate a CPU reset on next tick */
	i = inl(0xCFC);
	/* Use the minimum timer resolution */
	i |= 0x1600;
	outl(i, 0xCFC);
	outb(1, 0x92);
}
/* Maps a PCI vendor/device ID pair to its board-specific reboot routine. */
struct device_fixup {
	unsigned int vendor;
	unsigned int device;
	void (*reboot_fixup)(struct pci_dev *);
};

/* Chipsets known to need a non-standard reboot sequence. */
static const struct device_fixup fixups_table[] = {
{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset },
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, cs5536_warm_reset },
{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE, cs5530a_warm_reset },
{ PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030, rdc321x_reset },
};
/*
* we see if any fixup is available for our current hardware. if there
* is a fixup, we call it and we expect to never return from it. if we
* do return, we keep looking and then eventually fall back to the
* standard mach_reboot on return.
*/
/*
 * Try each known reboot fixup whose PCI device is present.  A successful
 * fixup never returns; if one does, keep trying the rest and ultimately
 * fall back to the caller's standard reboot path.
 */
void mach_reboot_fixups(void)
{
	const struct device_fixup *cur;
	struct pci_dev *dev;
	int i;

	/* we can be called from sysrq-B code. In such a case it is
	 * prohibited to dig PCI */
	if (in_interrupt())
		return;

	for (i=0; i < ARRAY_SIZE(fixups_table); i++) {
		cur = &(fixups_table[i]);
		dev = pci_get_device(cur->vendor, cur->device, NULL);
		if (!dev)
			continue;

		cur->reboot_fixup(dev);
		/* fixup returned (reset did not happen): drop ref, continue */
		pci_dev_put(dev);
	}
}
| gpl-2.0 |
wengpingbo/linux | drivers/hwmon/smsc47b397.c | 1502 | 8959 | /*
* smsc47b397.c - Part of lm_sensors, Linux kernel modules
* for hardware monitoring
*
* Supports the SMSC LPC47B397-NC Super-I/O chip.
*
* Author/Maintainer: Mark M. Hoffman <mhoffman@lightlink.com>
* Copyright (C) 2004 Utilitek Systems, Inc.
*
* derived in part from smsc47m1.c:
* Copyright (C) 2002 Mark D. Studebaker <mdsxyz123@yahoo.com>
* Copyright (C) 2004 Jean Delvare <jdelvare@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/platform_device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/io.h>
static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
static struct platform_device *pdev;
#define DRVNAME "smsc47b397"
/* Super-I/0 registers and commands */
#define REG 0x2e /* The register to read/write */
#define VAL 0x2f /* The value to read/write */
/* Write @val to Super-I/O config register @reg via the index/data pair. */
static inline void superio_outb(int reg, int val)
{
	outb(reg, REG);
	outb(val, VAL);
}

/* Read a Super-I/O config register via the index/data pair. */
static inline int superio_inb(int reg)
{
	outb(reg, REG);
	return inb(VAL);
}

/* select superio logical device */
static inline void superio_select(int ld)
{
	superio_outb(0x07, ld);
}

/* Unlock the Super-I/O configuration space (0x55 key byte). */
static inline void superio_enter(void)
{
	outb(0x55, REG);
}

/* Re-lock the configuration space (0xAA key byte). */
static inline void superio_exit(void)
{
	outb(0xAA, REG);
}
#define SUPERIO_REG_DEVID 0x20
#define SUPERIO_REG_DEVREV 0x21
#define SUPERIO_REG_BASE_MSB 0x60
#define SUPERIO_REG_BASE_LSB 0x61
#define SUPERIO_REG_LD8 0x08
#define SMSC_EXTENT 0x02
/* 0 <= nr <= 3 */
static u8 smsc47b397_reg_temp[] = {0x25, 0x26, 0x27, 0x80};
#define SMSC47B397_REG_TEMP(nr) (smsc47b397_reg_temp[(nr)])
/* 0 <= nr <= 3 */
#define SMSC47B397_REG_FAN_LSB(nr) (0x28 + 2 * (nr))
#define SMSC47B397_REG_FAN_MSB(nr) (0x29 + 2 * (nr))
/* Per-device state with a one-second cache of the monitor registers. */
struct smsc47b397_data {
	unsigned short addr;		/* base of the index/data port pair */
	struct mutex lock;		/* serializes index/data accesses */
	struct mutex update_lock;	/* protects the cache below */
	unsigned long last_updated; /* in jiffies */
	int valid;			/* nonzero once the cache holds real data */

	/* register values */
	u16 fan[4];
	u8 temp[4];
};
/*
 * Read one hardware-monitor register through the chip's index/data port
 * pair, serialized against concurrent accesses with data->lock.
 */
static int smsc47b397_read_value(struct smsc47b397_data *data, u8 reg)
{
	int res;

	mutex_lock(&data->lock);
	outb(reg, data->addr);
	res = inb_p(data->addr + 1);
	mutex_unlock(&data->lock);
	return res;
}
/*
 * Refresh the cached temperature and fan readings if they are older than
 * one second (or never read), then return the per-device data.
 */
static struct smsc47b397_data *smsc47b397_update_device(struct device *dev)
{
	struct smsc47b397_data *data = dev_get_drvdata(dev);
	int i;

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
		dev_dbg(dev, "starting device update...\n");

		/* 4 temperature inputs, 4 fan inputs */
		for (i = 0; i < 4; i++) {
			data->temp[i] = smsc47b397_read_value(data,
					SMSC47B397_REG_TEMP(i));

			/* must read LSB first */
			data->fan[i] = smsc47b397_read_value(data,
					SMSC47B397_REG_FAN_LSB(i));
			data->fan[i] |= smsc47b397_read_value(data,
					SMSC47B397_REG_FAN_MSB(i)) << 8;
		}

		data->last_updated = jiffies;
		data->valid = 1;

		dev_dbg(dev, "... device update complete\n");
	}

	mutex_unlock(&data->update_lock);

	return data;
}
/*
 * TEMP: 0.001C/bit (-128C to +127C)
 * REG: 1C/bit, two's complement
 */
static int temp_from_reg(u8 reg)
{
	/* Reinterpret the raw byte as signed, then scale to millidegrees. */
	int degc = (s8)reg;

	return degc * 1000;
}
/* sysfs callback: print tempN_input in millidegrees Celsius. */
static ssize_t show_temp(struct device *dev, struct device_attribute
			 *devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct smsc47b397_data *data = smsc47b397_update_device(dev);

	return sprintf(buf, "%d\n", temp_from_reg(data->temp[attr->index]));
}

/* attr->index selects the channel (0-3) in data->temp[] */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
/*
 * FAN: 1 RPM/bit
 * REG: count of 90kHz pulses / revolution
 */
static int fan_from_reg(u16 reg)
{
	unsigned int pulses = reg;

	/* 0 and 0xffff indicate no valid tachometer reading */
	if (pulses == 0 || pulses == 0xffff)
		return 0;

	/* 90 kHz pulse clock times 60 s/min, divided by pulses per rev */
	return 90000 * 60 / pulses;
}
/* sysfs callback: print fanN_input in RPM. */
static ssize_t show_fan(struct device *dev, struct device_attribute
			*devattr, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct smsc47b397_data *data = smsc47b397_update_device(dev);

	return sprintf(buf, "%d\n", fan_from_reg(data->fan[attr->index]));
}

/* attr->index selects the channel (0-3) in data->fan[] */
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3);
/* All sensor attributes, exported to hwmon as one attribute group. */
static struct attribute *smsc47b397_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,
	&sensor_dev_attr_temp4_input.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan2_input.dev_attr.attr,
	&sensor_dev_attr_fan3_input.dev_attr.attr,
	&sensor_dev_attr_fan4_input.dev_attr.attr,

	NULL
};

ATTRIBUTE_GROUPS(smsc47b397);
/* Forward declaration: probe is defined after the driver struct. */
static int smsc47b397_probe(struct platform_device *pdev);

static struct platform_driver smsc47b397_driver = {
	.driver = {
		.name = DRVNAME,
	},
	.probe = smsc47b397_probe,
};
static int smsc47b397_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct smsc47b397_data *data;
struct device *hwmon_dev;
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!devm_request_region(dev, res->start, SMSC_EXTENT,
smsc47b397_driver.driver.name)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
(unsigned long)res->start,
(unsigned long)res->start + SMSC_EXTENT - 1);
return -EBUSY;
}
data = devm_kzalloc(dev, sizeof(struct smsc47b397_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->addr = res->start;
mutex_init(&data->lock);
mutex_init(&data->update_lock);
hwmon_dev = devm_hwmon_device_register_with_groups(dev, "smsc47b397",
data,
smsc47b397_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
/*
 * Create and register the platform device for the chip found at
 * @address, after checking the I/O region against ACPI resources.
 * Sets the file-scope "pdev" on success.
 */
static int __init smsc47b397_device_add(unsigned short address)
{
	struct resource res = {
		.start	= address,
		.end	= address + SMSC_EXTENT - 1,
		.name	= DRVNAME,
		.flags	= IORESOURCE_IO,
	};
	int err;

	err = acpi_check_resource_conflict(&res);
	if (err)
		goto exit;

	pdev = platform_device_alloc(DRVNAME, address);
	if (!pdev) {
		err = -ENOMEM;
		pr_err("Device allocation failed\n");
		goto exit;
	}

	err = platform_device_add_resources(pdev, &res, 1);
	if (err) {
		pr_err("Device resource addition failed (%d)\n", err);
		goto exit_device_put;
	}

	err = platform_device_add(pdev);
	if (err) {
		pr_err("Device addition failed (%d)\n", err);
		goto exit_device_put;
	}

	return 0;

exit_device_put:
	platform_device_put(pdev);
exit:
	return err;
}
/*
 * Probe the Super-I/O configuration space for a supported chip.
 * Returns the hardware-monitor base I/O address (>= 0) on success or
 * -ENODEV when no supported device ID is found.
 */
static int __init smsc47b397_find(void)
{
	u8 id, rev;
	char *name;
	unsigned short addr;

	superio_enter();
	/* force_id (module parameter) overrides the detected device ID */
	id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);

	switch (id) {
	case 0x81:
		name = "SCH5307-NS";
		break;
	case 0x6f:
		name = "LPC47B397-NC";
		break;
	case 0x85:
	case 0x8c:
		name = "SCH5317";
		break;
	default:
		superio_exit();
		return -ENODEV;
	}

	rev = superio_inb(SUPERIO_REG_DEVREV);

	/* base address lives in logical device 8 */
	superio_select(SUPERIO_REG_LD8);
	addr = (superio_inb(SUPERIO_REG_BASE_MSB) << 8)
		 |  superio_inb(SUPERIO_REG_BASE_LSB);

	pr_info("found SMSC %s (base address 0x%04x, revision %u)\n",
		name, addr, rev);

	superio_exit();
	return addr;
}
/*
 * Module init: detect the chip, register the platform driver, then
 * instantiate the platform device for the detected base address.
 */
static int __init smsc47b397_init(void)
{
	unsigned short address;
	int ret;

	ret = smsc47b397_find();
	if (ret < 0)
		return ret;
	address = ret;

	ret = platform_driver_register(&smsc47b397_driver);
	if (ret)
		goto exit;

	/* Sets global pdev as a side effect */
	ret = smsc47b397_device_add(address);
	if (ret)
		goto exit_driver;

	return 0;

exit_driver:
	platform_driver_unregister(&smsc47b397_driver);
exit:
	return ret;
}
/* Module exit: remove the platform device, then the driver. */
static void __exit smsc47b397_exit(void)
{
	platform_device_unregister(pdev);
	platform_driver_unregister(&smsc47b397_driver);
}
MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>");
MODULE_DESCRIPTION("SMSC LPC47B397 driver");
MODULE_LICENSE("GPL");
module_init(smsc47b397_init);
module_exit(smsc47b397_exit);
| gpl-2.0 |
cbolumar/android_kernel_samsung_a3ulte | drivers/usb/storage/uas.c | 2526 | 29750 | /*
* USB Attached SCSI
* Note that this is not the same as the USB Mass Storage driver
*
* Copyright Matthew Wilcox for Intel Corp, 2010
* Copyright Sarah Sharp for Intel Corp, 2010
*
* Distributed under the terms of the GNU GPL, version two.
*/
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/storage.h>
#include <linux/usb/uas.h>
#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
/*
 * The r00-r01c specs define this version of the SENSE IU data structure.
 * It's still in use by several different firmware releases.
 */
struct sense_iu_old {
	__u8 iu_id;
	__u8 rsvd1;
	__be16 tag;
	__be16 len;	/* sense length + 2, per uas_sense_old() */
	__u8 status;
	__u8 service_response;
	__u8 sense[SCSI_SENSE_BUFFERSIZE];
};
/*
 * Per-device state, stored in the Scsi_Host's hostdata.
 *
 * Fix: the task-management response buffer must be of type
 * "struct response_iu" (the RESPONSE information unit defined in
 * <linux/usb/uas.h>); the previous "struct response_ui" spelling named a
 * type that does not exist, so sizeof(devinfo->response) in
 * uas_stat_cmplt() could not compile.
 */
struct uas_dev_info {
	struct usb_interface *intf;
	struct usb_device *udev;
	struct usb_anchor cmd_urbs;
	struct usb_anchor sense_urbs;
	struct usb_anchor data_urbs;
	int qdepth, resetting;
	struct response_iu response;	/* last RESPONSE IU, for task mgmt */
	unsigned cmd_pipe, status_pipe, data_in_pipe, data_out_pipe;
	unsigned use_streams:1;
	unsigned uas_sense_old:1;
	struct scsi_cmnd *cmnd;		/* the one untagged command */
	spinlock_t lock;
};
/* Per-command state-machine bits, kept in uas_cmd_info.state. */
enum {
	SUBMIT_STATUS_URB	= (1 << 1),	/* status URB still to submit */
	ALLOC_DATA_IN_URB	= (1 << 2),
	SUBMIT_DATA_IN_URB	= (1 << 3),
	ALLOC_DATA_OUT_URB	= (1 << 4),
	SUBMIT_DATA_OUT_URB	= (1 << 5),
	ALLOC_CMD_URB		= (1 << 6),
	SUBMIT_CMD_URB		= (1 << 7),
	COMMAND_INFLIGHT	= (1 << 8),	/* cmd IU sent, no status yet */
	DATA_IN_URB_INFLIGHT	= (1 << 9),
	DATA_OUT_URB_INFLIGHT	= (1 << 10),
	COMMAND_COMPLETED	= (1 << 11),	/* scsi_done() has been called */
	COMMAND_ABORTED		= (1 << 12),
	UNLINK_DATA_URBS	= (1 << 13),	/* unlink in progress, don't free */
	IS_IN_WORK_LIST		= (1 << 14),	/* queued on uas_work_list */
};
/* Overrides scsi_pointer */
struct uas_cmd_info {
	unsigned int state;		/* bitmask of the flags above */
	unsigned int stream;		/* stream ID used for this command */
	struct urb *cmd_urb;
	struct urb *data_in_urb;
	struct urb *data_out_urb;
	struct list_head list;		/* entry on uas_work_list */
};
/* I hate forward declarations, but I actually have a loop */
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
				struct uas_dev_info *devinfo, gfp_t gfp);
static void uas_do_work(struct work_struct *work);
static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller);

/* Global work item and list of commands awaiting URB (re)submission. */
static DECLARE_WORK(uas_work, uas_do_work);
static DEFINE_SPINLOCK(uas_work_lock);
static LIST_HEAD(uas_work_list);
/* Asynchronously unlink a command's in-flight data URBs. */
static void uas_unlink_data_urbs(struct uas_dev_info *devinfo,
				 struct uas_cmd_info *cmdinfo)
{
	unsigned long flags;

	/*
	 * The UNLINK_DATA_URBS flag makes sure uas_try_complete
	 * (called by urb completion) doesn't release cmdinfo
	 * underneath us.
	 */
	spin_lock_irqsave(&devinfo->lock, flags);
	cmdinfo->state |= UNLINK_DATA_URBS;
	spin_unlock_irqrestore(&devinfo->lock, flags);

	if (cmdinfo->data_in_urb)
		usb_unlink_urb(cmdinfo->data_in_urb);
	if (cmdinfo->data_out_urb)
		usb_unlink_urb(cmdinfo->data_out_urb);

	spin_lock_irqsave(&devinfo->lock, flags);
	cmdinfo->state &= ~UNLINK_DATA_URBS;
	spin_unlock_irqrestore(&devinfo->lock, flags);
}
/*
 * Work handler: retry URB submission for every command queued on
 * uas_work_list.  Commands that still fail are relinked onto the list
 * and the work is rescheduled.
 */
static void uas_do_work(struct work_struct *work)
{
	struct uas_cmd_info *cmdinfo;
	struct uas_cmd_info *temp;
	struct list_head list;
	unsigned long flags;
	int err;

	/* Take a private snapshot of the pending list */
	spin_lock_irq(&uas_work_lock);
	list_replace_init(&uas_work_list, &list);
	spin_unlock_irq(&uas_work_lock);

	list_for_each_entry_safe(cmdinfo, temp, &list, list) {
		/* cmdinfo overlays cmnd->SCp, so recover the scsi_cmnd */
		struct scsi_pointer *scp = (void *)cmdinfo;
		struct scsi_cmnd *cmnd = container_of(scp,
							struct scsi_cmnd, SCp);
		struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
		spin_lock_irqsave(&devinfo->lock, flags);
		err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
		if (!err)
			cmdinfo->state &= ~IS_IN_WORK_LIST;
		spin_unlock_irqrestore(&devinfo->lock, flags);
		if (err) {
			/* still failing: requeue and try again later */
			list_del(&cmdinfo->list);
			spin_lock_irq(&uas_work_lock);
			list_add_tail(&cmdinfo->list, &uas_work_list);
			spin_unlock_irq(&uas_work_lock);
			schedule_work(&uas_work);
		}
	}
}
/*
 * Abort all commands on the work list that belong to @devinfo (they were
 * queued but their URBs were never submitted); commands belonging to
 * other UAS devices are relinked onto the global list untouched.
 */
static void uas_abort_work(struct uas_dev_info *devinfo)
{
	struct uas_cmd_info *cmdinfo;
	struct uas_cmd_info *temp;
	struct list_head list;
	unsigned long flags;

	spin_lock_irq(&uas_work_lock);
	list_replace_init(&uas_work_list, &list);
	spin_unlock_irq(&uas_work_lock);

	spin_lock_irqsave(&devinfo->lock, flags);
	list_for_each_entry_safe(cmdinfo, temp, &list, list) {
		struct scsi_pointer *scp = (void *)cmdinfo;
		struct scsi_cmnd *cmnd = container_of(scp,
							struct scsi_cmnd, SCp);
		struct uas_dev_info *di = (void *)cmnd->device->hostdata;

		if (di == devinfo) {
			cmdinfo->state |= COMMAND_ABORTED;
			cmdinfo->state &= ~IS_IN_WORK_LIST;
			if (devinfo->resetting) {
				/* uas_stat_cmplt() will not do that
				 * when a device reset is in
				 * progress */
				cmdinfo->state &= ~COMMAND_INFLIGHT;
			}
			uas_try_complete(cmnd, __func__);
		} else {
			/* not our uas device, relink into list */
			list_del(&cmdinfo->list);
			spin_lock_irq(&uas_work_lock);
			list_add_tail(&cmdinfo->list, &uas_work_list);
			spin_unlock_irq(&uas_work_lock);
		}
	}
	spin_unlock_irqrestore(&devinfo->lock, flags);
}
/*
 * Extract sense data and SCSI status from a current-format SENSE IU
 * (16-byte header).  If the length in the IU disagrees with the URB's
 * actual length, the smaller value is used and a warning is logged.
 */
static void uas_sense(struct urb *urb, struct scsi_cmnd *cmnd)
{
	struct sense_iu *sense_iu = urb->transfer_buffer;
	struct scsi_device *sdev = cmnd->device;

	if (urb->actual_length > 16) {
		unsigned len = be16_to_cpup(&sense_iu->len);
		if (len + 16 != urb->actual_length) {
			int newlen = min(len + 16, urb->actual_length) - 16;
			if (newlen < 0)
				newlen = 0;
			sdev_printk(KERN_INFO, sdev, "%s: urb length %d "
				"disagrees with IU sense data length %d, "
				"using %d bytes of sense data\n", __func__,
					urb->actual_length, len, newlen);
			len = newlen;
		}
		memcpy(cmnd->sense_buffer, sense_iu->sense, len);
	}

	cmnd->result = sense_iu->status;
}
/*
 * Same as uas_sense(), but for the legacy SENSE IU layout (8-byte
 * header, see struct sense_iu_old) still used by older firmware.
 */
static void uas_sense_old(struct urb *urb, struct scsi_cmnd *cmnd)
{
	struct sense_iu_old *sense_iu = urb->transfer_buffer;
	struct scsi_device *sdev = cmnd->device;

	if (urb->actual_length > 8) {
		unsigned len = be16_to_cpup(&sense_iu->len) - 2;
		if (len + 8 != urb->actual_length) {
			int newlen = min(len + 8, urb->actual_length) - 8;
			if (newlen < 0)
				newlen = 0;
			sdev_printk(KERN_INFO, sdev, "%s: urb length %d "
				"disagrees with IU sense data length %d, "
				"using %d bytes of sense data\n", __func__,
					urb->actual_length, len, newlen);
			len = newlen;
		}
		memcpy(cmnd->sense_buffer, sense_iu->sense, len);
	}

	cmnd->result = sense_iu->status;
}
/* Dump a command's state-machine bits for debugging. */
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *caller)
{
	struct uas_cmd_info *ci = (void *)&cmnd->SCp;

	scmd_printk(KERN_INFO, cmnd, "%s %p tag %d, inflight:"
		    "%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		    caller, cmnd, cmnd->request->tag,
		    (ci->state & SUBMIT_STATUS_URB)     ? " s-st"  : "",
		    (ci->state & ALLOC_DATA_IN_URB)     ? " a-in"  : "",
		    (ci->state & SUBMIT_DATA_IN_URB)    ? " s-in"  : "",
		    (ci->state & ALLOC_DATA_OUT_URB)    ? " a-out" : "",
		    (ci->state & SUBMIT_DATA_OUT_URB)   ? " s-out" : "",
		    (ci->state & ALLOC_CMD_URB)         ? " a-cmd" : "",
		    (ci->state & SUBMIT_CMD_URB)        ? " s-cmd" : "",
		    (ci->state & COMMAND_INFLIGHT)      ? " CMD"   : "",
		    (ci->state & DATA_IN_URB_INFLIGHT)  ? " IN"    : "",
		    (ci->state & DATA_OUT_URB_INFLIGHT) ? " OUT"   : "",
		    (ci->state & COMMAND_COMPLETED)     ? " done"  : "",
		    (ci->state & COMMAND_ABORTED)       ? " abort" : "",
		    (ci->state & UNLINK_DATA_URBS)      ? " unlink": "",
		    (ci->state & IS_IN_WORK_LIST)       ? " work"  : "");
}
/*
 * Complete @cmnd (call scsi_done) if none of its URBs remain in flight.
 * Must be called with devinfo->lock held (asserted below).
 * Returns -EBUSY while anything is still pending, 0 once completed.
 */
static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
{
	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
	struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;

	WARN_ON(!spin_is_locked(&devinfo->lock));
	if (cmdinfo->state & (COMMAND_INFLIGHT |
			      DATA_IN_URB_INFLIGHT |
			      DATA_OUT_URB_INFLIGHT |
			      UNLINK_DATA_URBS))
		return -EBUSY;
	BUG_ON(cmdinfo->state & COMMAND_COMPLETED);
	cmdinfo->state |= COMMAND_COMPLETED;
	usb_free_urb(cmdinfo->data_in_urb);
	usb_free_urb(cmdinfo->data_out_urb);
	if (cmdinfo->state & COMMAND_ABORTED) {
		scmd_printk(KERN_INFO, cmnd, "abort completed\n");
		cmnd->result = DID_ABORT << 16;
	}
	cmnd->scsi_done(cmnd);
	return 0;
}
/*
 * Kick off the data phase in @direction for @cmnd (called from the
 * status URB completion).  On submission failure, defer the command to
 * the uas_work workqueue for retry.
 */
static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
			  unsigned direction)
{
	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
	int err;

	cmdinfo->state |= direction | SUBMIT_STATUS_URB;
	err = uas_submit_urbs(cmnd, cmnd->device->hostdata, GFP_ATOMIC);
	if (err) {
		spin_lock(&uas_work_lock);
		list_add_tail(&cmdinfo->list, &uas_work_list);
		cmdinfo->state |= IS_IN_WORK_LIST;
		spin_unlock(&uas_work_lock);
		schedule_work(&uas_work);
	}
}
/*
 * Completion handler for status-pipe URBs.  Decodes the incoming IU:
 * a SENSE IU completes the command, READ/WRITE READY IUs start the
 * corresponding data phase, and a RESPONSE IU (no matching command)
 * is stashed in devinfo->response for the task-management code.
 */
static void uas_stat_cmplt(struct urb *urb)
{
	struct iu *iu = urb->transfer_buffer;
	struct Scsi_Host *shost = urb->context;
	struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
	struct scsi_cmnd *cmnd;
	struct uas_cmd_info *cmdinfo;
	unsigned long flags;
	u16 tag;

	if (urb->status) {
		dev_err(&urb->dev->dev, "URB BAD STATUS %d\n", urb->status);
		usb_free_urb(urb);
		return;
	}

	if (devinfo->resetting) {
		usb_free_urb(urb);
		return;
	}

	spin_lock_irqsave(&devinfo->lock, flags);
	/* wire tags are offset by 1; tag 0 is the one untagged command */
	tag = be16_to_cpup(&iu->tag) - 1;
	if (tag == 0)
		cmnd = devinfo->cmnd;
	else
		cmnd = scsi_host_find_tag(shost, tag - 1);

	if (!cmnd) {
		if (iu->iu_id == IU_ID_RESPONSE) {
			/* store results for uas_eh_task_mgmt() */
			memcpy(&devinfo->response, iu, sizeof(devinfo->response));
		}
		usb_free_urb(urb);
		spin_unlock_irqrestore(&devinfo->lock, flags);
		return;
	}

	cmdinfo = (void *)&cmnd->SCp;
	switch (iu->iu_id) {
	case IU_ID_STATUS:
		if (devinfo->cmnd == cmnd)
			devinfo->cmnd = NULL;

		/* a short IU means the device speaks the old sense format */
		if (urb->actual_length < 16)
			devinfo->uas_sense_old = 1;
		if (devinfo->uas_sense_old)
			uas_sense_old(urb, cmnd);
		else
			uas_sense(urb, cmnd);
		if (cmnd->result != 0) {
			/* cancel data transfers on error */
			spin_unlock_irqrestore(&devinfo->lock, flags);
			uas_unlink_data_urbs(devinfo, cmdinfo);
			spin_lock_irqsave(&devinfo->lock, flags);
		}
		cmdinfo->state &= ~COMMAND_INFLIGHT;
		uas_try_complete(cmnd, __func__);
		break;
	case IU_ID_READ_READY:
		uas_xfer_data(urb, cmnd, SUBMIT_DATA_IN_URB);
		break;
	case IU_ID_WRITE_READY:
		uas_xfer_data(urb, cmnd, SUBMIT_DATA_OUT_URB);
		break;
	default:
		scmd_printk(KERN_ERR, cmnd,
			"Bogus IU (%d) received on status pipe\n", iu->iu_id);
	}
	usb_free_urb(urb);
	spin_unlock_irqrestore(&devinfo->lock, flags);
}
/*
 * Completion handler for data URBs: record the transfer residue in the
 * matching scsi data buffer, clear the in-flight bit, and attempt to
 * complete the command.
 */
static void uas_data_cmplt(struct urb *urb)
{
	struct scsi_cmnd *cmnd = urb->context;
	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
	struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
	struct scsi_data_buffer *sdb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&devinfo->lock, flags);
	if (cmdinfo->data_in_urb == urb) {
		sdb = scsi_in(cmnd);
		cmdinfo->state &= ~DATA_IN_URB_INFLIGHT;
	} else if (cmdinfo->data_out_urb == urb) {
		sdb = scsi_out(cmnd);
		cmdinfo->state &= ~DATA_OUT_URB_INFLIGHT;
	}
	BUG_ON(sdb == NULL);
	if (urb->status) {
		/* error: no data transfered */
		sdb->resid = sdb->length;
	} else {
		sdb->resid = sdb->length - urb->actual_length;
	}
	uas_try_complete(cmnd, __func__);
	spin_unlock_irqrestore(&devinfo->lock, flags);
}
/*
 * Allocate a bulk data URB for @cmnd in direction @dir, wired to the
 * command's scatter-gather table.  Returns NULL on allocation failure.
 */
static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
				      unsigned int pipe, u16 stream_id,
				      struct scsi_cmnd *cmnd,
				      enum dma_data_direction dir)
{
	struct usb_device *udev = devinfo->udev;
	struct urb *urb = usb_alloc_urb(0, gfp);
	struct scsi_data_buffer *sdb = (dir == DMA_FROM_DEVICE)
		? scsi_in(cmnd) : scsi_out(cmnd);

	if (!urb)
		goto out;
	/* no bounce buffer: the transfer uses the command's sg list */
	usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length,
			  uas_data_cmplt, cmnd);
	if (devinfo->use_streams)
		urb->stream_id = stream_id;
	urb->num_sgs = udev->bus->sg_tablesize ? sdb->table.nents : 0;
	urb->sg = sdb->table.sgl;
 out:
	return urb;
}
/*
 * Allocate a status-pipe URB (with a freshly allocated SENSE IU buffer
 * that is freed automatically via URB_FREE_BUFFER) for @stream_id.
 * Returns NULL on allocation failure.
 */
static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp,
				       struct Scsi_Host *shost, u16 stream_id)
{
	struct usb_device *udev = devinfo->udev;
	struct urb *urb = usb_alloc_urb(0, gfp);
	struct sense_iu *iu;

	if (!urb)
		goto out;

	iu = kzalloc(sizeof(*iu), gfp);
	if (!iu)
		goto free;

	usb_fill_bulk_urb(urb, udev, devinfo->status_pipe, iu, sizeof(*iu),
						uas_stat_cmplt, shost);
	urb->stream_id = stream_id;
	urb->transfer_flags |= URB_FREE_BUFFER;
 out:
	return urb;
 free:
	usb_free_urb(urb);
	return NULL;
}
/*
 * Build a COMMAND IU URB for @cmnd.  CDBs longer than 16 bytes spill
 * into a variable-length tail (padded to a multiple of 4).  The IU's
 * tag mirrors the wire-tag convention used by uas_stat_cmplt():
 * block-layer tag + 2, or 1 for an untagged command.  The completion
 * callback is just usb_free_urb since no post-processing is needed.
 */
static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
					struct scsi_cmnd *cmnd, u16 stream_id)
{
	struct usb_device *udev = devinfo->udev;
	struct scsi_device *sdev = cmnd->device;
	struct urb *urb = usb_alloc_urb(0, gfp);
	struct command_iu *iu;
	int len;

	if (!urb)
		goto out;

	len = cmnd->cmd_len - 16;
	if (len < 0)
		len = 0;
	len = ALIGN(len, 4);
	iu = kzalloc(sizeof(*iu) + len, gfp);
	if (!iu)
		goto free;

	iu->iu_id = IU_ID_COMMAND;
	if (blk_rq_tagged(cmnd->request))
		iu->tag = cpu_to_be16(cmnd->request->tag + 2);
	else
		iu->tag = cpu_to_be16(1);
	iu->prio_attr = UAS_SIMPLE_TAG;
	iu->len = len;
	int_to_scsilun(sdev->lun, &iu->lun);
	memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);

	usb_fill_bulk_urb(urb, udev, devinfo->cmd_pipe, iu, sizeof(*iu) + len,
							usb_free_urb, NULL);
	urb->transfer_flags |= URB_FREE_BUFFER;
 out:
	return urb;
 free:
	usb_free_urb(urb);
	return NULL;
}
/*
 * Build and submit a TASK MANAGEMENT IU (e.g. TMF_ABORT_TASK) on the
 * command pipe.  @stream_id becomes the IU tag so the RESPONSE IU can
 * be matched in uas_stat_cmplt().  Returns 0 or a negative errno.
 */
static int uas_submit_task_urb(struct scsi_cmnd *cmnd, gfp_t gfp,
			       u8 function, u16 stream_id)
{
	struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
	struct usb_device *udev = devinfo->udev;
	struct urb *urb = usb_alloc_urb(0, gfp);
	struct task_mgmt_iu *iu;
	int err = -ENOMEM;

	if (!urb)
		goto err;

	iu = kzalloc(sizeof(*iu), gfp);
	if (!iu)
		goto err;

	iu->iu_id = IU_ID_TASK_MGMT;
	iu->tag = cpu_to_be16(stream_id);
	int_to_scsilun(cmnd->device->lun, &iu->lun);

	iu->function = function;
	switch (function) {
	case TMF_ABORT_TASK:
		/* task_tag follows the command-IU tag convention */
		if (blk_rq_tagged(cmnd->request))
			iu->task_tag = cpu_to_be16(cmnd->request->tag + 2);
		else
			iu->task_tag = cpu_to_be16(1);
		break;
	}

	usb_fill_bulk_urb(urb, udev, devinfo->cmd_pipe, iu, sizeof(*iu),
			  usb_free_urb, NULL);
	urb->transfer_flags |= URB_FREE_BUFFER;

	err = usb_submit_urb(urb, gfp);
	if (err)
		goto err;
	usb_anchor_urb(urb, &devinfo->cmd_urbs);

	return 0;

err:
	usb_free_urb(urb);
	return err;
}
/*
 * Why should I request the Status IU before sending the Command IU? Spec
 * says to, but also says the device may receive them in any order. Seems
 * daft to me.
 */

/*
 * Allocate and submit a status-pipe URB for @stream; on any failure
 * return SCSI_MLQUEUE_DEVICE_BUSY so the midlayer requeues the command.
 */
static int uas_submit_sense_urb(struct Scsi_Host *shost,
				gfp_t gfp, unsigned int stream)
{
	struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
	struct urb *urb;

	urb = uas_alloc_sense_urb(devinfo, gfp, shost, stream);
	if (!urb)
		return SCSI_MLQUEUE_DEVICE_BUSY;
	if (usb_submit_urb(urb, gfp)) {
		shost_printk(KERN_INFO, shost,
			     "sense urb submission failure\n");
		usb_free_urb(urb);
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}
	usb_anchor_urb(urb, &devinfo->sense_urbs);
	return 0;
}
/*
 * Perform every alloc/submit step still pending in cmdinfo->state for a
 * SCSI command: status (sense) urb, data-in urb, data-out urb, command
 * urb.  Each state flag is cleared only after its step succeeds, so on
 * a BUSY return the caller can call this again and it resumes exactly
 * where it left off.  Must be called with devinfo->lock held (asserted
 * below).
 */
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo, gfp_t gfp)
{
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
int err;
WARN_ON(!spin_is_locked(&devinfo->lock));
/* 1. Status/sense urb: must be queued before the command IU. */
if (cmdinfo->state & SUBMIT_STATUS_URB) {
err = uas_submit_sense_urb(cmnd->device->host, gfp,
cmdinfo->stream);
if (err) {
return err;
}
cmdinfo->state &= ~SUBMIT_STATUS_URB;
}
/* 2a. Allocate the data-in urb (read transfers). */
if (cmdinfo->state & ALLOC_DATA_IN_URB) {
cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, gfp,
devinfo->data_in_pipe, cmdinfo->stream,
cmnd, DMA_FROM_DEVICE);
if (!cmdinfo->data_in_urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~ALLOC_DATA_IN_URB;
}
/* 2b. Submit it and mark it in flight (anchored for later unlinking). */
if (cmdinfo->state & SUBMIT_DATA_IN_URB) {
if (usb_submit_urb(cmdinfo->data_in_urb, gfp)) {
scmd_printk(KERN_INFO, cmnd,
"data in urb submission failure\n");
return SCSI_MLQUEUE_DEVICE_BUSY;
}
cmdinfo->state &= ~SUBMIT_DATA_IN_URB;
cmdinfo->state |= DATA_IN_URB_INFLIGHT;
usb_anchor_urb(cmdinfo->data_in_urb, &devinfo->data_urbs);
}
/* 3a. Allocate the data-out urb (write transfers). */
if (cmdinfo->state & ALLOC_DATA_OUT_URB) {
cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, gfp,
devinfo->data_out_pipe, cmdinfo->stream,
cmnd, DMA_TO_DEVICE);
if (!cmdinfo->data_out_urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~ALLOC_DATA_OUT_URB;
}
/* 3b. Submit it and mark it in flight. */
if (cmdinfo->state & SUBMIT_DATA_OUT_URB) {
if (usb_submit_urb(cmdinfo->data_out_urb, gfp)) {
scmd_printk(KERN_INFO, cmnd,
"data out urb submission failure\n");
return SCSI_MLQUEUE_DEVICE_BUSY;
}
cmdinfo->state &= ~SUBMIT_DATA_OUT_URB;
cmdinfo->state |= DATA_OUT_URB_INFLIGHT;
usb_anchor_urb(cmdinfo->data_out_urb, &devinfo->data_urbs);
}
/* 4a. Allocate the command IU urb last, after data urbs are queued. */
if (cmdinfo->state & ALLOC_CMD_URB) {
cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, gfp, cmnd,
cmdinfo->stream);
if (!cmdinfo->cmd_urb)
return SCSI_MLQUEUE_DEVICE_BUSY;
cmdinfo->state &= ~ALLOC_CMD_URB;
}
/*
 * 4b. Submit the command urb.  An extra reference is held across
 * submit/anchor so the urb cannot vanish between the two calls; the
 * pointer is NULLed because the command urb frees itself on
 * completion and must not be touched again.
 */
if (cmdinfo->state & SUBMIT_CMD_URB) {
usb_get_urb(cmdinfo->cmd_urb);
if (usb_submit_urb(cmdinfo->cmd_urb, gfp)) {
scmd_printk(KERN_INFO, cmnd,
"cmd urb submission failure\n");
return SCSI_MLQUEUE_DEVICE_BUSY;
}
usb_anchor_urb(cmdinfo->cmd_urb, &devinfo->cmd_urbs);
usb_put_urb(cmdinfo->cmd_urb);
cmdinfo->cmd_urb = NULL;
cmdinfo->state &= ~SUBMIT_CMD_URB;
cmdinfo->state |= COMMAND_INFLIGHT;
}
return 0;
}
/*
 * queuecommand implementation (wrapped by DEF_SCSI_QCMD below).
 * Chooses a stream/tag for the command, records which urbs still need
 * to be allocated/submitted in cmdinfo->state, and kicks off
 * uas_submit_urbs().  If submission stalls partway, the command is
 * parked on uas_work_list and retried from the workqueue.
 */
static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
void (*done)(struct scsi_cmnd *))
{
struct scsi_device *sdev = cmnd->device;
struct uas_dev_info *devinfo = sdev->hostdata;
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
unsigned long flags;
int err;
/* uas_cmd_info lives inside the midlayer-provided scsi_pointer. */
BUILD_BUG_ON(sizeof(struct uas_cmd_info) > sizeof(struct scsi_pointer));
/* While a reset is in progress, fail commands immediately. */
if (devinfo->resetting) {
cmnd->result = DID_ERROR << 16;
cmnd->scsi_done(cmnd);
return 0;
}
spin_lock_irqsave(&devinfo->lock, flags);
/* Only one untagged command may be outstanding at a time. */
if (devinfo->cmnd) {
spin_unlock_irqrestore(&devinfo->lock, flags);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
/* Streams 0 and 1 are reserved; tagged commands use tag + 2. */
if (blk_rq_tagged(cmnd->request)) {
cmdinfo->stream = cmnd->request->tag + 2;
} else {
devinfo->cmnd = cmnd;
cmdinfo->stream = 1;
}
cmnd->scsi_done = done;
cmdinfo->state = SUBMIT_STATUS_URB |
ALLOC_CMD_URB | SUBMIT_CMD_URB;
switch (cmnd->sc_data_direction) {
case DMA_FROM_DEVICE:
cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB;
break;
case DMA_BIDIRECTIONAL:
cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB;
/* fall through: bidirectional also needs the data-out urbs */
case DMA_TO_DEVICE:
cmdinfo->state |= ALLOC_DATA_OUT_URB | SUBMIT_DATA_OUT_URB;
/* fall through */
case DMA_NONE:
break;
}
/* Without streams the device sends data unprompted: don't pre-submit. */
if (!devinfo->use_streams) {
cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB);
cmdinfo->stream = 0;
}
err = uas_submit_urbs(cmnd, devinfo, GFP_ATOMIC);
if (err) {
/* If we did nothing, give up now */
if (cmdinfo->state & SUBMIT_STATUS_URB) {
spin_unlock_irqrestore(&devinfo->lock, flags);
return SCSI_MLQUEUE_DEVICE_BUSY;
}
/* Partially submitted: let the workqueue finish the rest. */
spin_lock(&uas_work_lock);
list_add_tail(&cmdinfo->list, &uas_work_list);
cmdinfo->state |= IS_IN_WORK_LIST;
spin_unlock(&uas_work_lock);
schedule_work(&uas_work);
}
spin_unlock_irqrestore(&devinfo->lock, flags);
return 0;
}
/* Generate uas_queuecommand() with the standard host-lock wrapper. */
static DEF_SCSI_QCMD(uas_queuecommand)
/*
 * Issue a UAS task management function and wait up to 3 seconds for
 * its response IU.  Uses the highest tag (qdepth - 1) for the TMF
 * exchange.  Returns SUCCESS only if a response arrives, carries the
 * expected tag, and reports RC_TMF_COMPLETE; FAILED otherwise.
 */
static int uas_eh_task_mgmt(struct scsi_cmnd *cmnd,
const char *fname, u8 function)
{
struct Scsi_Host *shost = cmnd->device->host;
struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
u16 tag = devinfo->qdepth - 1;
unsigned long flags;
spin_lock_irqsave(&devinfo->lock, flags);
/* Clear the previous response so a stale one can't satisfy the check. */
memset(&devinfo->response, 0, sizeof(devinfo->response));
/* Queue the status urb first so the response has somewhere to land. */
if (uas_submit_sense_urb(shost, GFP_ATOMIC, tag)) {
shost_printk(KERN_INFO, shost,
"%s: %s: submit sense urb failed\n",
__func__, fname);
spin_unlock_irqrestore(&devinfo->lock, flags);
return FAILED;
}
if (uas_submit_task_urb(cmnd, GFP_ATOMIC, function, tag)) {
shost_printk(KERN_INFO, shost,
"%s: %s: submit task mgmt urb failed\n",
__func__, fname);
spin_unlock_irqrestore(&devinfo->lock, flags);
return FAILED;
}
spin_unlock_irqrestore(&devinfo->lock, flags);
/* Wait for the sense urb to complete; 0 means the 3s timeout expired. */
if (usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 3000) == 0) {
shost_printk(KERN_INFO, shost,
"%s: %s timed out\n", __func__, fname);
return FAILED;
}
if (be16_to_cpu(devinfo->response.tag) != tag) {
shost_printk(KERN_INFO, shost,
"%s: %s failed (wrong tag %d/%d)\n", __func__,
fname, be16_to_cpu(devinfo->response.tag), tag);
return FAILED;
}
if (devinfo->response.response_code != RC_TMF_COMPLETE) {
shost_printk(KERN_INFO, shost,
"%s: %s failed (rc 0x%x)\n", __func__,
fname, devinfo->response.response_code);
return FAILED;
}
return SUCCESS;
}
/*
 * SCSI error handler: abort one command.  If the command IU is already
 * on the wire, send an ABORT TASK TMF; otherwise just unlink its data
 * urbs and try to complete it locally.
 */
static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
{
struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
unsigned long flags;
int ret;
uas_log_cmd_state(cmnd, __func__);
spin_lock_irqsave(&devinfo->lock, flags);
cmdinfo->state |= COMMAND_ABORTED;
/* If it's still parked on the retry work list, pull it off. */
if (cmdinfo->state & IS_IN_WORK_LIST) {
spin_lock(&uas_work_lock);
list_del(&cmdinfo->list);
cmdinfo->state &= ~IS_IN_WORK_LIST;
spin_unlock(&uas_work_lock);
}
if (cmdinfo->state & COMMAND_INFLIGHT) {
/* Device has the command: ask it to abort via a TMF. */
spin_unlock_irqrestore(&devinfo->lock, flags);
ret = uas_eh_task_mgmt(cmnd, "ABORT TASK", TMF_ABORT_TASK);
} else {
/* Never submitted: unlink data urbs (drops the lock to do so)
 * and complete the command ourselves. */
spin_unlock_irqrestore(&devinfo->lock, flags);
uas_unlink_data_urbs(devinfo, cmdinfo);
spin_lock_irqsave(&devinfo->lock, flags);
uas_try_complete(cmnd, __func__);
spin_unlock_irqrestore(&devinfo->lock, flags);
ret = SUCCESS;
}
return ret;
}
/* SCSI error handler: reset the LUN via a LOGICAL UNIT RESET TMF. */
static int uas_eh_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdev = cmnd->device;

	sdev_printk(KERN_INFO, sdev, "%s\n", __func__);
	return uas_eh_task_mgmt(cmnd, "LOGICAL UNIT RESET",
				TMF_LOGICAL_UNIT_RESET);
}
/*
 * SCSI error handler: kill every outstanding urb, then reset the whole
 * USB device.  devinfo->resetting suppresses new commands meanwhile.
 */
static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdev = cmnd->device;
	struct uas_dev_info *devinfo = sdev->hostdata;
	int err;

	devinfo->resetting = 1;
	uas_abort_work(devinfo);
	usb_kill_anchored_urbs(&devinfo->cmd_urbs);
	usb_kill_anchored_urbs(&devinfo->sense_urbs);
	usb_kill_anchored_urbs(&devinfo->data_urbs);
	err = usb_reset_device(devinfo->udev);
	devinfo->resetting = 0;

	if (err) {
		shost_printk(KERN_INFO, sdev->host, "%s FAILED\n", __func__);
		return FAILED;
	}
	shost_printk(KERN_INFO, sdev->host, "%s success\n", __func__);
	return SUCCESS;
}
static int uas_slave_alloc(struct scsi_device *sdev)
{
sdev->hostdata = (void *)sdev->host->hostdata[0];
return 0;
}
/*
 * Enable ordered tagged queueing.  Depth is qdepth - 3, matching the
 * shared tag map sized in uas_probe() (tags 0/1 and the TMF tag are
 * not available for regular commands).
 */
static int uas_slave_configure(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo = sdev->hostdata;
scsi_set_tag_type(sdev, MSG_ORDERED_TAG);
scsi_activate_tcq(sdev, devinfo->qdepth - 3);
return 0;
}
/* SCSI host template binding the UAS entry points to the midlayer. */
static struct scsi_host_template uas_host_template = {
.module = THIS_MODULE,
.name = "uas",
.queuecommand = uas_queuecommand,
.slave_alloc = uas_slave_alloc,
.slave_configure = uas_slave_configure,
.eh_abort_handler = uas_eh_abort_handler,
.eh_device_reset_handler = uas_eh_device_reset_handler,
.eh_bus_reset_handler = uas_eh_bus_reset_handler,
.can_queue = 65536, /* Is there a limit on the _host_ ? */
.this_id = -1,
.sg_tablesize = SG_NONE,
.cmd_per_lun = 1, /* until we override it */
.skip_settle_delay = 1,
.ordered_tag = 1,
};
static struct usb_device_id uas_usb_ids[] = {
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) },
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_UAS) },
/* 0xaa is a prototype device I happen to have access to */
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, 0xaa) },
{ }
};
MODULE_DEVICE_TABLE(usb, uas_usb_ids);
/* Non-zero if this altsetting is a mass-storage/SCSI/UAS interface. */
static int uas_is_interface(struct usb_host_interface *intf)
{
	if (intf->desc.bInterfaceClass != USB_CLASS_MASS_STORAGE)
		return 0;
	if (intf->desc.bInterfaceSubClass != USB_SC_SCSI)
		return 0;
	return intf->desc.bInterfaceProtocol == USB_PR_UAS;
}
/*
 * Warn that the host controller driver lacks scatter-gather support,
 * which UAS requires.  Always returns -ENODEV so callers can tail-call
 * it from the probe path.
 */
static int uas_isnt_supported(struct usb_device *udev)
{
	struct usb_hcd *hcd = bus_to_hcd(udev->bus);

	/*
	 * Note the trailing space before "alternative": the old strings
	 * concatenated to "...try analternative...".
	 */
	dev_warn(&udev->dev, "The driver for the USB controller %s does not "
		"support scatter-gather which is\n",
		hcd->driver->description);
	dev_warn(&udev->dev, "required by the UAS driver. Please try an "
		"alternative USB controller if you wish to use UAS.\n");
	return -ENODEV;
}
/*
 * Look for a UAS altsetting on @intf and select it.  Returns the
 * usb_set_interface() result, or -ENODEV if none exists or the host
 * controller cannot do scatter-gather.
 */
static int uas_switch_interface(struct usb_device *udev,
				struct usb_interface *intf)
{
	int sg_supported = udev->bus->sg_tablesize != 0;
	int i;

	for (i = 0; i < intf->num_altsetting; i++) {
		struct usb_host_interface *alt = &intf->altsetting[i];

		if (!uas_is_interface(alt))
			continue;
		if (!sg_supported)
			return uas_isnt_supported(udev);
		return usb_set_interface(udev,
					 alt->desc.bInterfaceNumber,
					 alt->desc.bAlternateSetting);
	}
	return -ENODEV;
}
/*
 * Locate the four UAS pipes (command, status, data-in, data-out) by
 * scanning each endpoint's extra descriptors for a PIPE USAGE
 * descriptor, then allocate bulk streams on the three status/data
 * endpoints.  Falls back to fixed endpoint numbers for old firmware
 * with no pipe-usage descriptors.
 *
 * NOTE(review): if eps[0] is found but any of eps[1..3] is missing,
 * the else-branch dereferences a NULL endpoint -- presumably such
 * malformed devices were never seen; worth hardening.
 */
static void uas_configure_endpoints(struct uas_dev_info *devinfo)
{
struct usb_host_endpoint *eps[4] = { };
struct usb_interface *intf = devinfo->intf;
struct usb_device *udev = devinfo->udev;
struct usb_host_endpoint *endpoint = intf->cur_altsetting->endpoint;
unsigned i, n_endpoints = intf->cur_altsetting->desc.bNumEndpoints;
devinfo->uas_sense_old = 0;
devinfo->cmnd = NULL;
/* Walk each endpoint's extra descriptors (extra[0]=len, extra[1]=type). */
for (i = 0; i < n_endpoints; i++) {
unsigned char *extra = endpoint[i].extra;
int len = endpoint[i].extralen;
while (len > 1) {
if (extra[1] == USB_DT_PIPE_USAGE) {
unsigned pipe_id = extra[2];
/* Pipe IDs 1-4 map to eps[0..3]; others are ignored. */
if (pipe_id > 0 && pipe_id < 5)
eps[pipe_id - 1] = &endpoint[i];
break;
}
len -= extra[0];
extra += extra[0];
}
}
/*
 * Assume that if we didn't find a control pipe descriptor, we're
 * using a device with old firmware that happens to be set up like
 * this.
 */
if (!eps[0]) {
devinfo->cmd_pipe = usb_sndbulkpipe(udev, 1);
devinfo->status_pipe = usb_rcvbulkpipe(udev, 1);
devinfo->data_in_pipe = usb_rcvbulkpipe(udev, 2);
devinfo->data_out_pipe = usb_sndbulkpipe(udev, 2);
eps[1] = usb_pipe_endpoint(udev, devinfo->status_pipe);
eps[2] = usb_pipe_endpoint(udev, devinfo->data_in_pipe);
eps[3] = usb_pipe_endpoint(udev, devinfo->data_out_pipe);
} else {
devinfo->cmd_pipe = usb_sndbulkpipe(udev,
eps[0]->desc.bEndpointAddress);
devinfo->status_pipe = usb_rcvbulkpipe(udev,
eps[1]->desc.bEndpointAddress);
devinfo->data_in_pipe = usb_rcvbulkpipe(udev,
eps[2]->desc.bEndpointAddress);
devinfo->data_out_pipe = usb_sndbulkpipe(udev,
eps[3]->desc.bEndpointAddress);
}
/* Try for 256 streams; without stream support fall back to plain bulk. */
devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1, 3, 256,
GFP_KERNEL);
if (devinfo->qdepth < 0) {
devinfo->qdepth = 256;
devinfo->use_streams = 0;
} else {
devinfo->use_streams = 1;
}
}
/* Release the bulk streams on the status/data-in/data-out endpoints. */
static void uas_free_streams(struct uas_dev_info *devinfo)
{
	struct usb_device *udev = devinfo->udev;
	struct usb_host_endpoint *eps[3] = {
		usb_pipe_endpoint(udev, devinfo->status_pipe),
		usb_pipe_endpoint(udev, devinfo->data_in_pipe),
		usb_pipe_endpoint(udev, devinfo->data_out_pipe),
	};

	usb_free_streams(devinfo->intf, eps, 3, GFP_KERNEL);
}
/*
* XXX: What I'd like to do here is register a SCSI host for each USB host in
* the system. Follow usb-storage's design of registering a SCSI host for
* each USB device for the moment. Can implement this by walking up the
* USB hierarchy until we find a USB host.
*/
static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
int result;
struct Scsi_Host *shost;
struct uas_dev_info *devinfo;
struct usb_device *udev = interface_to_usbdev(intf);
if (uas_switch_interface(udev, intf))
return -ENODEV;
devinfo = kmalloc(sizeof(struct uas_dev_info), GFP_KERNEL);
if (!devinfo)
return -ENOMEM;
result = -ENOMEM;
shost = scsi_host_alloc(&uas_host_template, sizeof(void *));
if (!shost)
goto free;
shost->max_cmd_len = 16 + 252;
shost->max_id = 1;
shost->max_lun = 256;
shost->max_channel = 0;
shost->sg_tablesize = udev->bus->sg_tablesize;
devinfo->intf = intf;
devinfo->udev = udev;
devinfo->resetting = 0;
init_usb_anchor(&devinfo->cmd_urbs);
init_usb_anchor(&devinfo->sense_urbs);
init_usb_anchor(&devinfo->data_urbs);
spin_lock_init(&devinfo->lock);
uas_configure_endpoints(devinfo);
result = scsi_init_shared_tag_map(shost, devinfo->qdepth - 3);
if (result)
goto free;
result = scsi_add_host(shost, &intf->dev);
if (result)
goto deconfig_eps;
shost->hostdata[0] = (unsigned long)devinfo;
scsi_scan_host(shost);
usb_set_intfdata(intf, shost);
return result;
deconfig_eps:
uas_free_streams(devinfo);
free:
kfree(devinfo);
if (shost)
scsi_host_put(shost);
return result;
}
/* Called before a USB reset of our interface; 0 = allow the reset. */
static int uas_pre_reset(struct usb_interface *intf)
{
/* XXX: Need to return 1 if it's not our device in error handling */
return 0;
}
/* Called after a USB reset completes; 0 = device is usable again. */
static int uas_post_reset(struct usb_interface *intf)
{
/* XXX: Need to return 1 if it's not our device in error handling */
return 0;
}
/*
 * Interface removal: stop all traffic, unregister the SCSI host, free
 * the streams and the per-device state.  resetting=1 makes
 * uas_queuecommand_lck() fail new commands while we tear down.
 */
static void uas_disconnect(struct usb_interface *intf)
{
struct Scsi_Host *shost = usb_get_intfdata(intf);
struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
devinfo->resetting = 1;
uas_abort_work(devinfo);
usb_kill_anchored_urbs(&devinfo->cmd_urbs);
usb_kill_anchored_urbs(&devinfo->sense_urbs);
usb_kill_anchored_urbs(&devinfo->data_urbs);
scsi_remove_host(shost);
uas_free_streams(devinfo);
kfree(devinfo);
}
/*
* XXX: Should this plug into libusual so we can auto-upgrade devices from
* Bulk-Only to UAS?
*/
/* USB driver glue: probe/disconnect/reset hooks and the id table. */
static struct usb_driver uas_driver = {
.name = "uas",
.probe = uas_probe,
.disconnect = uas_disconnect,
.pre_reset = uas_pre_reset,
.post_reset = uas_post_reset,
.id_table = uas_usb_ids,
};
module_usb_driver(uas_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Matthew Wilcox and Sarah Sharp");
| gpl-2.0 |
stevegaron/android-kernel-tuna | drivers/hwmon/ultra45_env.c | 2782 | 8635 | /* ultra45_env.c: Driver for Ultra45 PIC16F747 environmental monitor.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#define DRV_MODULE_VERSION "0.1"
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Ultra45 environmental monitor driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* PIC device registers */
#define REG_CMD 0x00UL
#define REG_CMD_RESET 0x80
#define REG_CMD_ESTAR 0x01
#define REG_STAT 0x01UL
#define REG_STAT_FWVER 0xf0
#define REG_STAT_TGOOD 0x08
#define REG_STAT_STALE 0x04
#define REG_STAT_BUSY 0x02
#define REG_STAT_FAULT 0x01
#define REG_DATA 0x40UL
#define REG_ADDR 0x41UL
#define REG_SIZE 0x42UL
/* Registers accessed indirectly via REG_DATA/REG_ADDR */
#define IREG_FAN0 0x00
#define IREG_FAN1 0x01
#define IREG_FAN2 0x02
#define IREG_FAN3 0x03
#define IREG_FAN4 0x04
#define IREG_FAN5 0x05
#define IREG_LCL_TEMP 0x06
#define IREG_RMT1_TEMP 0x07
#define IREG_RMT2_TEMP 0x08
#define IREG_RMT3_TEMP 0x09
#define IREG_LM95221_TEMP 0x0a
#define IREG_FIRE_TEMP 0x0b
#define IREG_LSI1064_TEMP 0x0c
#define IREG_FRONT_TEMP 0x0d
#define IREG_FAN_STAT 0x0e
#define IREG_VCORE0 0x0f
#define IREG_VCORE1 0x10
#define IREG_VMEM0 0x11
#define IREG_VMEM1 0x12
#define IREG_PSU_TEMP 0x13
/* Per-device state: MMIO window to the PIC16F747 plus a lock that keeps
 * each REG_ADDR/REG_DATA indirect access pair atomic. */
struct env {
void __iomem *regs; /* mapped PIC register window */
spinlock_t lock; /* serializes indirect register accesses */
struct device *hwmon_dev; /* registered hwmon class device */
};
/* Read indirect register @ireg: latch the address, then fetch the data,
 * under the lock so the two-step sequence cannot interleave. */
static u8 env_read(struct env *p, u8 ireg)
{
	u8 val;

	spin_lock(&p->lock);
	writeb(ireg, p->regs + REG_ADDR);
	val = readb(p->regs + REG_DATA);
	spin_unlock(&p->lock);

	return val;
}
/* Write @val to indirect register @ireg (address latch + data write). */
static void env_write(struct env *p, u8 ireg, u8 val)
{
	spin_lock(&p->lock);

	writeb(ireg, p->regs + REG_ADDR);
	writeb(val, p->regs + REG_DATA);

	spin_unlock(&p->lock);
}
/* There seems to be a adr7462 providing these values, thus a lot
* of these calculations are borrowed from the adt7470 driver.
*/
#define FAN_PERIOD_TO_RPM(x) ((90000 * 60) / (x))
#define FAN_RPM_TO_PERIOD FAN_PERIOD_TO_RPM
#define FAN_PERIOD_INVALID (0xff << 8)
#define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID)
/* sysfs read: fan tach period (high byte from the PIC) converted to RPM;
 * reports 0 when the period reading is invalid. */
static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct env *p = dev_get_drvdata(dev);
	int fan_nr = to_sensor_dev_attr(attr)->index;
	int period = (int) env_read(p, IREG_FAN0 + fan_nr) << 8;
	int rpm = FAN_DATA_VALID(period) ? FAN_PERIOD_TO_RPM(period) : 0;

	return sprintf(buf, "%d\n", rpm);
}
/*
 * sysfs write: set a fan's target speed.  The PIC stores the high byte
 * of the fan period, so convert RPM -> period first.
 *
 * Uses kstrtoint() instead of simple_strtol() so malformed input is
 * rejected instead of silently parsed as 0/garbage, and rejects
 * rpm <= 0 (a non-positive RPM has no meaningful period).
 */
static ssize_t set_fan_speed(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
	int fan_nr = to_sensor_dev_attr(attr)->index;
	struct env *p = dev_get_drvdata(dev);
	int rpm, err;

	err = kstrtoint(buf, 10, &rpm);
	if (err)
		return err;
	if (rpm <= 0)
		return -EINVAL;

	env_write(p, IREG_FAN0 + fan_nr, FAN_RPM_TO_PERIOD(rpm) >> 8);
	return count;
}
/* sysfs read: 1 if the fault bit for this fan is set in IREG_FAN_STAT. */
static ssize_t show_fan_fault(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct env *p = dev_get_drvdata(dev);
	int fan_nr = to_sensor_dev_attr(attr)->index;
	int faulted = (env_read(p, IREG_FAN_STAT) >> fan_nr) & 1;

	return sprintf(buf, "%d\n", faulted);
}
/* Emit paired speed (rw) and fault (ro) attributes for fan <index>. */
#define fan(index) \
static SENSOR_DEVICE_ATTR(fan##index##_speed, S_IRUGO | S_IWUSR, \
show_fan_speed, set_fan_speed, index); \
static SENSOR_DEVICE_ATTR(fan##index##_fault, S_IRUGO, \
show_fan_fault, NULL, index)
fan(0);
fan(1);
fan(2);
fan(3);
fan(4);
/* The PSU fan only reports a fault bit (bit 6), no speed register. */
static SENSOR_DEVICE_ATTR(psu_fan_fault, S_IRUGO, show_fan_fault, NULL, 6);
/* sysfs read: temperature register value; the hardware reading is
 * biased by 64 (presumably degrees C, per the adt7462 heritage noted
 * above -- TODO confirm units). */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct env *p = dev_get_drvdata(dev);
	int temp_nr = to_sensor_dev_attr(attr)->index;
	s8 raw = env_read(p, IREG_LCL_TEMP + temp_nr);

	return sprintf(buf, "%d\n", ((int) raw) - 64);
}
/* Temperature attributes; index is the offset from IREG_LCL_TEMP. */
static SENSOR_DEVICE_ATTR(adt7462_local_temp, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(cpu0_temp, S_IRUGO, show_temp, NULL, 1);
static SENSOR_DEVICE_ATTR(cpu1_temp, S_IRUGO, show_temp, NULL, 2);
static SENSOR_DEVICE_ATTR(motherboard_temp, S_IRUGO, show_temp, NULL, 3);
static SENSOR_DEVICE_ATTR(lm95221_local_temp, S_IRUGO, show_temp, NULL, 4);
static SENSOR_DEVICE_ATTR(fire_temp, S_IRUGO, show_temp, NULL, 5);
static SENSOR_DEVICE_ATTR(lsi1064_local_temp, S_IRUGO, show_temp, NULL, 6);
static SENSOR_DEVICE_ATTR(front_panel_temp, S_IRUGO, show_temp, NULL, 7);
static SENSOR_DEVICE_ATTR(psu_temp, S_IRUGO, show_temp, NULL, 13);
/* sysfs read: a single bit of the direct (non-indirect) status register. */
static ssize_t show_stat_bit(struct device *dev, struct device_attribute *attr, char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct env *p = dev_get_drvdata(dev);
u8 val;
val = readb(p->regs + REG_STAT);
return sprintf(buf, "%d\n", (val & (1 << index)) ? 1 : 0);
}
/* Status bits: fault(0), busy(1), stale(2), tgood(3) -- see REG_STAT_*. */
static SENSOR_DEVICE_ATTR(fan_failure, S_IRUGO, show_stat_bit, NULL, 0);
static SENSOR_DEVICE_ATTR(env_bus_busy, S_IRUGO, show_stat_bit, NULL, 1);
static SENSOR_DEVICE_ATTR(env_data_stale, S_IRUGO, show_stat_bit, NULL, 2);
static SENSOR_DEVICE_ATTR(tpm_self_test_passed, S_IRUGO, show_stat_bit, NULL, 3);
/* sysfs read: PIC firmware version, the top nibble of REG_STAT. */
static ssize_t show_fwver(struct device *dev, struct device_attribute *attr, char *buf)
{
struct env *p = dev_get_drvdata(dev);
u8 val;
val = readb(p->regs + REG_STAT);
return sprintf(buf, "%d\n", val >> 4);
}
static SENSOR_DEVICE_ATTR(firmware_version, S_IRUGO, show_fwver, NULL, 0);
/* sysfs read: fixed hwmon device name. */
static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "ultra45\n");
}
static SENSOR_DEVICE_ATTR(name, S_IRUGO, show_name, NULL, 0);
/* All sysfs attributes, registered as one group in env_probe(). */
static struct attribute *env_attributes[] = {
&sensor_dev_attr_fan0_speed.dev_attr.attr,
&sensor_dev_attr_fan0_fault.dev_attr.attr,
&sensor_dev_attr_fan1_speed.dev_attr.attr,
&sensor_dev_attr_fan1_fault.dev_attr.attr,
&sensor_dev_attr_fan2_speed.dev_attr.attr,
&sensor_dev_attr_fan2_fault.dev_attr.attr,
&sensor_dev_attr_fan3_speed.dev_attr.attr,
&sensor_dev_attr_fan3_fault.dev_attr.attr,
&sensor_dev_attr_fan4_speed.dev_attr.attr,
&sensor_dev_attr_fan4_fault.dev_attr.attr,
&sensor_dev_attr_psu_fan_fault.dev_attr.attr,
&sensor_dev_attr_adt7462_local_temp.dev_attr.attr,
&sensor_dev_attr_cpu0_temp.dev_attr.attr,
&sensor_dev_attr_cpu1_temp.dev_attr.attr,
&sensor_dev_attr_motherboard_temp.dev_attr.attr,
&sensor_dev_attr_lm95221_local_temp.dev_attr.attr,
&sensor_dev_attr_fire_temp.dev_attr.attr,
&sensor_dev_attr_lsi1064_local_temp.dev_attr.attr,
&sensor_dev_attr_front_panel_temp.dev_attr.attr,
&sensor_dev_attr_psu_temp.dev_attr.attr,
&sensor_dev_attr_fan_failure.dev_attr.attr,
&sensor_dev_attr_env_bus_busy.dev_attr.attr,
&sensor_dev_attr_env_data_stale.dev_attr.attr,
&sensor_dev_attr_tpm_self_test_passed.dev_attr.attr,
&sensor_dev_attr_firmware_version.dev_attr.attr,
&sensor_dev_attr_name.dev_attr.attr,
NULL,
};
static const struct attribute_group env_group = {
.attrs = env_attributes,
};
/*
 * Probe: map the PIC register window, create the sysfs group, register
 * the hwmon class device.  Uses the classic goto-cleanup ladder so
 * each failure releases exactly what was acquired before it.
 */
static int __devinit env_probe(struct platform_device *op)
{
struct env *p = kzalloc(sizeof(*p), GFP_KERNEL);
int err = -ENOMEM;
if (!p)
goto out;
spin_lock_init(&p->lock);
p->regs = of_ioremap(&op->resource[0], 0, REG_SIZE, "pic16f747");
if (!p->regs)
goto out_free;
err = sysfs_create_group(&op->dev.kobj, &env_group);
if (err)
goto out_iounmap;
p->hwmon_dev = hwmon_device_register(&op->dev);
if (IS_ERR(p->hwmon_dev)) {
err = PTR_ERR(p->hwmon_dev);
goto out_sysfs_remove_group;
}
platform_set_drvdata(op, p);
err = 0;
out:
return err;
/* Unwind in reverse acquisition order. */
out_sysfs_remove_group:
sysfs_remove_group(&op->dev.kobj, &env_group);
out_iounmap:
of_iounmap(&op->resource[0], p->regs, REG_SIZE);
out_free:
kfree(p);
goto out;
}
/* Tear down in reverse probe order; tolerates missing drvdata. */
static int __devexit env_remove(struct platform_device *op)
{
	struct env *p = platform_get_drvdata(op);

	if (!p)
		return 0;

	sysfs_remove_group(&op->dev.kobj, &env_group);
	hwmon_device_unregister(p->hwmon_dev);
	of_iounmap(&op->resource[0], p->regs, REG_SIZE);
	kfree(p);
	return 0;
}
/* Device-tree match table for the PIC16F747 environmental monitor. */
static const struct of_device_id env_match[] = {
{
.name = "env-monitor",
.compatible = "SUNW,ebus-pic16f747-env",
},
{},
};
MODULE_DEVICE_TABLE(of, env_match);
/* Platform driver glue for the OF-matched environmental monitor. */
static struct platform_driver env_driver = {
.driver = {
.name = "ultra45_env",
.owner = THIS_MODULE,
.of_match_table = env_match,
},
.probe = env_probe,
.remove = __devexit_p(env_remove),
};
/* Module init/exit: plain platform driver registration boilerplate. */
static int __init env_init(void)
{
return platform_driver_register(&env_driver);
}
static void __exit env_exit(void)
{
platform_driver_unregister(&env_driver);
}
module_init(env_init);
module_exit(env_exit);
| gpl-2.0 |
TheYorickable/tf300t_jb_kernel | fs/proc/vmcore.c | 2782 | 17522 | /*
* fs/proc/vmcore.c Interface for accessing the crash
* dump from the system's previous life.
* Heavily borrowed from fs/proc/kcore.c
* Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
* Copyright (C) IBM Corporation, 2004. All rights reserved
*
*/
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/io.h>
/* List representing chunks of contiguous memory areas and their offsets in
* vmcore file.
*/
static LIST_HEAD(vmcore_list);
/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
/* Total size of vmcore file. */
static u64 vmcore_size;
static struct proc_dir_entry *proc_vmcore = NULL;
/*
* Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
* The called function has to take care of module refcounting.
*/
static int (*oldmem_pfn_is_ram)(unsigned long pfn);
int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
if (oldmem_pfn_is_ram)
return -EBUSY;
oldmem_pfn_is_ram = fn;
return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);
/*
 * Remove the pfn classification hook.  The wmb() orders the NULL store
 * before the caller proceeds (readers in pfn_is_ram() fetch the
 * pointer once before calling through it).
 */
void unregister_oldmem_pfn_is_ram(void)
{
oldmem_pfn_is_ram = NULL;
wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
/*
 * Returns > 0 if @pfn is backed by RAM, 0 if not, < 0 on error.
 * Without a registered hook every pfn is assumed to be RAM.  The hook
 * pointer is sampled once so a concurrent unregister can't bite us
 * between the check and the call.  Hypervisors register a hook because
 * reading a ballooned page (which holds no data) causes them high load.
 */
static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long) = oldmem_pfn_is_ram;

	return fn ? fn(pfn) : 1;
}
/*
 * Copy @count bytes of old-kernel memory starting at physical offset
 * *ppos into @buf, page by page.  @userbuf selects copy_to_user vs
 * plain copy inside copy_oldmem_page().  Non-RAM pages (per
 * pfn_is_ram()) read back as zeros so sparse dumps stay readable.
 * Advances *ppos; returns bytes read or a negative errno.
 */
static ssize_t read_from_oldmem(char *buf, size_t count,
u64 *ppos, int userbuf)
{
unsigned long pfn, offset;
size_t nr_bytes;
ssize_t read = 0, tmp;
if (!count)
return 0;
offset = (unsigned long)(*ppos % PAGE_SIZE);
pfn = (unsigned long)(*ppos / PAGE_SIZE);
do {
/* Clamp this chunk to the remainder of the current page. */
if (count > (PAGE_SIZE - offset))
nr_bytes = PAGE_SIZE - offset;
else
nr_bytes = count;
/* If pfn is not ram, return zeros for sparse dump files */
if (pfn_is_ram(pfn) == 0)
memset(buf, 0, nr_bytes);
else {
tmp = copy_oldmem_page(pfn, buf, nr_bytes,
offset, userbuf);
if (tmp < 0)
return tmp;
}
*ppos += nr_bytes;
count -= nr_bytes;
buf += nr_bytes;
read += nr_bytes;
++pfn;
offset = 0;
} while (count);
return read;
}
/*
 * Translate a vmcore file @offset into the old-memory physical address
 * it corresponds to.  On success *m_ptr points at the covering chunk
 * and the physical address is returned; if no chunk covers the offset,
 * *m_ptr is set to NULL and 0 is returned.
 */
static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
struct vmcore **m_ptr)
{
	struct vmcore *m;

	list_for_each_entry(m, vc_list, list) {
		u64 end = m->offset + m->size - 1;

		if (offset < m->offset || offset > end)
			continue;
		*m_ptr = m;
		return m->paddr + (offset - m->offset);
	}
	*m_ptr = NULL;
	return 0;
}
/* Read from the ELF header and then the crash dump. On error, negative value is
 * returned otherwise number of bytes read are returned.
 *
 * Layout of the virtual file: the in-kernel elfcorebuf (merged ELF
 * headers) comes first, followed by the memory chunks on vmcore_list
 * in order; map_offset_to_paddr() converts file offsets past the
 * header into old-memory physical addresses.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
size_t buflen, loff_t *fpos)
{
ssize_t acc = 0, tmp;
size_t tsz;
u64 start, nr_bytes;
struct vmcore *curr_m = NULL;
if (buflen == 0 || *fpos >= vmcore_size)
return 0;
/* trim buflen to not go beyond EOF */
if (buflen > vmcore_size - *fpos)
buflen = vmcore_size - *fpos;
/* Read ELF core header */
if (*fpos < elfcorebuf_sz) {
tsz = elfcorebuf_sz - *fpos;
if (buflen < tsz)
tsz = buflen;
if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
return -EFAULT;
buflen -= tsz;
*fpos += tsz;
buffer += tsz;
acc += tsz;
/* leave now if filled buffer already */
if (buflen == 0)
return acc;
}
/* Position within the memory chunks for the remaining bytes. */
start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
if (!curr_m)
return -EINVAL;
/* Each iteration copies at most up to the next page boundary. */
if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
tsz = buflen;
/* Calculate left bytes in current memory segment. */
nr_bytes = (curr_m->size - (start - curr_m->paddr));
if (tsz > nr_bytes)
tsz = nr_bytes;
while (buflen) {
tmp = read_from_oldmem(buffer, tsz, &start, 1);
if (tmp < 0)
return tmp;
buflen -= tsz;
*fpos += tsz;
buffer += tsz;
acc += tsz;
/* Crossed into the next chunk: advance, or stop at list end. */
if (start >= (curr_m->paddr + curr_m->size)) {
if (curr_m->list.next == &vmcore_list)
return acc; /*EOF*/
curr_m = list_entry(curr_m->list.next,
struct vmcore, list);
start = curr_m->paddr;
}
if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
tsz = buflen;
/* Calculate left bytes in current memory segment. */
nr_bytes = (curr_m->size - (start - curr_m->paddr));
if (tsz > nr_bytes)
tsz = nr_bytes;
}
return acc;
}
/* /proc/vmcore file operations: read-only view of the synthesized dump. */
static const struct file_operations proc_vmcore_operations = {
.read = read_vmcore,
.llseek = default_llseek,
};
/* Allocate a zeroed vmcore chunk descriptor (boot-time init only). */
static struct vmcore* __init get_new_element(void)
{
return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}
/*
 * Total vmcore size for a 64-bit ELF core: ELF header + program header
 * table + the in-memory size of every segment.
 */
static u64 __init get_vmcore_size_elf64(char *elfptr)
{
	Elf64_Ehdr *ehdr_ptr = (Elf64_Ehdr *)elfptr;
	Elf64_Phdr *phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	u64 size = sizeof(Elf64_Ehdr) +
		   (u64)ehdr_ptr->e_phnum * sizeof(Elf64_Phdr);
	int i;

	for (i = 0; i < ehdr_ptr->e_phnum; i++)
		size += phdr_ptr[i].p_memsz;
	return size;
}
/*
 * Total vmcore size for a 32-bit ELF core: ELF header + program header
 * table + the in-memory size of every segment.
 */
static u64 __init get_vmcore_size_elf32(char *elfptr)
{
	Elf32_Ehdr *ehdr_ptr = (Elf32_Ehdr *)elfptr;
	Elf32_Phdr *phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	u64 size = sizeof(Elf32_Ehdr) +
		   (u64)ehdr_ptr->e_phnum * sizeof(Elf32_Phdr);
	int i;

	for (i = 0; i < ehdr_ptr->e_phnum; i++)
		size += phdr_ptr[i].p_memsz;
	return size;
}
/* Merges all the PT_NOTE headers into one.
 *
 * Reads every PT_NOTE segment from old memory to measure its real
 * (used) size, queues each as a vmcore chunk, then rewrites the header
 * buffer in place: one merged PT_NOTE phdr replaces them all and the
 * remaining phdrs are shifted down.  *elfsz shrinks accordingly.
 */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
struct list_head *vc_list)
{
int i, nr_ptnote=0, rc=0;
char *tmp;
Elf64_Ehdr *ehdr_ptr;
Elf64_Phdr phdr, *phdr_ptr;
Elf64_Nhdr *nhdr_ptr;
u64 phdr_sz = 0, note_off;
ehdr_ptr = (Elf64_Ehdr *)elfptr;
phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr));
for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
int j;
void *notes_section;
struct vmcore *new;
u64 offset, max_sz, sz, real_sz = 0;
if (phdr_ptr->p_type != PT_NOTE)
continue;
nr_ptnote++;
max_sz = phdr_ptr->p_memsz;
offset = phdr_ptr->p_offset;
/* Pull the whole note segment in so the note headers can be walked. */
notes_section = kmalloc(max_sz, GFP_KERNEL);
if (!notes_section)
return -ENOMEM;
rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
if (rc < 0) {
kfree(notes_section);
return rc;
}
nhdr_ptr = notes_section;
/* Sum the 4-byte-aligned sizes of the notes actually present. */
for (j = 0; j < max_sz; j += sz) {
if (nhdr_ptr->n_namesz == 0)
break;
sz = sizeof(Elf64_Nhdr) +
((nhdr_ptr->n_namesz + 3) & ~3) +
((nhdr_ptr->n_descsz + 3) & ~3);
real_sz += sz;
nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
}
/* Add this contiguous chunk of notes section to vmcore list.*/
new = get_new_element();
if (!new) {
kfree(notes_section);
return -ENOMEM;
}
/* In kdump-generated headers p_offset carries the physical address. */
new->paddr = phdr_ptr->p_offset;
new->size = real_sz;
list_add_tail(&new->list, vc_list);
phdr_sz += real_sz;
kfree(notes_section);
}
/* Prepare merged PT_NOTE program header. */
phdr.p_type = PT_NOTE;
phdr.p_flags = 0;
note_off = sizeof(Elf64_Ehdr) +
(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
phdr.p_offset = note_off;
phdr.p_vaddr = phdr.p_paddr = 0;
phdr.p_filesz = phdr.p_memsz = phdr_sz;
phdr.p_align = 0;
/* Add merged PT_NOTE program header*/
tmp = elfptr + sizeof(Elf64_Ehdr);
memcpy(tmp, &phdr, sizeof(phdr));
tmp += sizeof(phdr);
/* Remove unwanted PT_NOTE program headers. */
i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
*elfsz = *elfsz - i;
memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
/* Modify e_phnum to reflect merged headers. */
ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
return 0;
}
/* Merges all the PT_NOTE headers into one.
 * 32-bit twin of merge_note_headers_elf64(); same algorithm with
 * Elf32_* types.
 */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
struct list_head *vc_list)
{
int i, nr_ptnote=0, rc=0;
char *tmp;
Elf32_Ehdr *ehdr_ptr;
Elf32_Phdr phdr, *phdr_ptr;
Elf32_Nhdr *nhdr_ptr;
u64 phdr_sz = 0, note_off;
ehdr_ptr = (Elf32_Ehdr *)elfptr;
phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr));
for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
int j;
void *notes_section;
struct vmcore *new;
u64 offset, max_sz, sz, real_sz = 0;
if (phdr_ptr->p_type != PT_NOTE)
continue;
nr_ptnote++;
max_sz = phdr_ptr->p_memsz;
offset = phdr_ptr->p_offset;
/* Pull the whole note segment in so the note headers can be walked. */
notes_section = kmalloc(max_sz, GFP_KERNEL);
if (!notes_section)
return -ENOMEM;
rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
if (rc < 0) {
kfree(notes_section);
return rc;
}
nhdr_ptr = notes_section;
/* Sum the 4-byte-aligned sizes of the notes actually present. */
for (j = 0; j < max_sz; j += sz) {
if (nhdr_ptr->n_namesz == 0)
break;
sz = sizeof(Elf32_Nhdr) +
((nhdr_ptr->n_namesz + 3) & ~3) +
((nhdr_ptr->n_descsz + 3) & ~3);
real_sz += sz;
nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
}
/* Add this contiguous chunk of notes section to vmcore list.*/
new = get_new_element();
if (!new) {
kfree(notes_section);
return -ENOMEM;
}
/* In kdump-generated headers p_offset carries the physical address. */
new->paddr = phdr_ptr->p_offset;
new->size = real_sz;
list_add_tail(&new->list, vc_list);
phdr_sz += real_sz;
kfree(notes_section);
}
/* Prepare merged PT_NOTE program header. */
phdr.p_type = PT_NOTE;
phdr.p_flags = 0;
note_off = sizeof(Elf32_Ehdr) +
(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
phdr.p_offset = note_off;
phdr.p_vaddr = phdr.p_paddr = 0;
phdr.p_filesz = phdr.p_memsz = phdr_sz;
phdr.p_align = 0;
/* Add merged PT_NOTE program header*/
tmp = elfptr + sizeof(Elf32_Ehdr);
memcpy(tmp, &phdr, sizeof(phdr));
tmp += sizeof(phdr);
/* Remove unwanted PT_NOTE program headers. */
i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
*elfsz = *elfsz - i;
memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
/* Modify e_phnum to reflect merged headers. */
ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
return 0;
}
/* Add memory chunks represented by program headers to vmcore list. Also update
* the new offset fields of exported program headers. */
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
	Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	struct vmcore *chunk;
	loff_t off;
	int i;

	/* The first program header is the (merged) PT_NOTE header, so the
	 * memory chunks begin after the ELF header, all program headers,
	 * and the note data itself.
	 */
	off = sizeof(Elf64_Ehdr) +
		ehdr->e_phnum * sizeof(Elf64_Phdr) +
		phdr->p_memsz;

	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (phdr->p_type != PT_LOAD)
			continue;

		/* Record this contiguous chunk of old memory on vc_list.
		 * NOTE(review): p_offset is read as the chunk's physical
		 * address here - matches the kexec-generated ELF layout of
		 * this kernel era; verify against the header producer.
		 */
		chunk = get_new_element();
		if (!chunk)
			return -ENOMEM;
		chunk->paddr = phdr->p_offset;
		chunk->size = phdr->p_memsz;
		list_add_tail(&chunk->list, vc_list);

		/* Rewrite p_offset to the chunk's position in /proc/vmcore. */
		phdr->p_offset = off;
		off += phdr->p_memsz;
	}
	return 0;
}
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. (ELF32 variant.) */
static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
	Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	struct vmcore *chunk;
	loff_t off;
	int i;

	/* The first program header is the (merged) PT_NOTE header, so the
	 * memory chunks begin after the ELF header, all program headers,
	 * and the note data itself.
	 */
	off = sizeof(Elf32_Ehdr) +
		ehdr->e_phnum * sizeof(Elf32_Phdr) +
		phdr->p_memsz;

	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (phdr->p_type != PT_LOAD)
			continue;

		/* Record this contiguous chunk of old memory on vc_list.
		 * NOTE(review): p_offset is read as the chunk's physical
		 * address here - matches the kexec-generated ELF layout of
		 * this kernel era; verify against the header producer.
		 */
		chunk = get_new_element();
		if (!chunk)
			return -ENOMEM;
		chunk->paddr = phdr->p_offset;
		chunk->size = phdr->p_memsz;
		list_add_tail(&chunk->list, vc_list);

		/* Rewrite p_offset to the chunk's position in /proc/vmcore. */
		phdr->p_offset = off;
		off += phdr->p_memsz;
	}
	return 0;
}
/* Sets offset fields of vmcore elements. */
/* Sets offset fields of vmcore elements (ELF64). */
static void __init set_vmcore_list_offsets_elf64(char *elfptr,
						struct list_head *vc_list)
{
	Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
	struct vmcore *chunk;
	/* Chunk data starts right after the ELF header and all program
	 * headers. */
	loff_t off = sizeof(Elf64_Ehdr) +
			ehdr->e_phnum * sizeof(Elf64_Phdr);

	/* Lay the chunks out back to back, in list order. */
	list_for_each_entry(chunk, vc_list, list) {
		chunk->offset = off;
		off += chunk->size;
	}
}
/* Sets offset fields of vmcore elements. */
/* Sets offset fields of vmcore elements (ELF32). */
static void __init set_vmcore_list_offsets_elf32(char *elfptr,
						struct list_head *vc_list)
{
	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
	struct vmcore *chunk;
	/* Chunk data starts right after the ELF header and all program
	 * headers. */
	loff_t off = sizeof(Elf32_Ehdr) +
			ehdr->e_phnum * sizeof(Elf32_Phdr);

	/* Lay the chunks out back to back, in list order. */
	list_for_each_entry(chunk, vc_list, list) {
		chunk->offset = off;
		off += chunk->size;
	}
}
/*
 * Read and validate the ELF64 core headers exported by the crashed
 * kernel at elfcorehdr_addr, then build the vmcore view of the dump:
 * copy the ELF header plus all program headers into 'elfcorebuf',
 * merge the PT_NOTE headers into one, add the PT_LOAD chunks to
 * 'vmcore_list', and fix up the exported file offsets.
 *
 * Returns 0 on success or a negative errno.  On any failure after
 * allocation, 'elfcorebuf' is freed and reset.
 */
static int __init parse_crash_elf64_headers(void)
{
	int rc;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic Verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		/* Note the leading space on " sane": these two literals
		 * concatenate into a single message. */
		printk(KERN_WARNING "Warning: Core image elf header is not"
			" sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc)
		goto fail;

	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
							&vmcore_list);
	if (rc)
		goto fail;

	set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
	return 0;

fail:
	kfree(elfcorebuf);
	elfcorebuf = NULL;	/* don't leave a dangling global pointer */
	return rc;
}
/*
 * ELF32 counterpart of parse_crash_elf64_headers(): validate the core
 * headers at elfcorehdr_addr, copy them into 'elfcorebuf', merge the
 * PT_NOTE headers, record PT_LOAD chunks on 'vmcore_list' and fix up
 * the exported offsets.
 *
 * Returns 0 on success or a negative errno.  On any failure after
 * allocation, 'elfcorebuf' is freed and reset.
 */
static int __init parse_crash_elf32_headers(void)
{
	int rc;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic Verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		/* Note the leading space on " sane": these two literals
		 * concatenate into a single message. */
		printk(KERN_WARNING "Warning: Core image elf header is not"
			" sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc)
		goto fail;

	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
							&vmcore_list);
	if (rc)
		goto fail;

	set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
	return 0;

fail:
	kfree(elfcorebuf);
	elfcorebuf = NULL;	/* don't leave a dangling global pointer */
	return rc;
}
/*
 * Sniff the ELF identification bytes of the crash-dump header and
 * dispatch to the 32- or 64-bit parser, then compute the total size
 * of the exported /proc/vmcore file.  Returns 0 or a negative errno.
 */
static int __init parse_crash_elf_headers(void)
{
	unsigned char ident[EI_NIDENT];
	u64 addr = elfcorehdr_addr;
	int rc;

	rc = read_from_oldmem(ident, EI_NIDENT, &addr, 0);
	if (rc < 0)
		return rc;
	if (memcmp(ident, ELFMAG, SELFMAG) != 0) {
		printk(KERN_WARNING "Warning: Core image elf header"
			" not found\n");
		return -EINVAL;
	}

	switch (ident[EI_CLASS]) {
	case ELFCLASS64:
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf64(elfcorebuf);
		break;
	case ELFCLASS32:
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf32(elfcorebuf);
		break;
	default:
		printk(KERN_WARNING "Warning: Core image elf header is not"
			" sane\n");
		return -EINVAL;
	}
	return 0;
}
/* Init function for vmcore module. */
/* Init function for vmcore module: parse the crash headers (if any)
 * and expose the dump as /proc/vmcore. */
static int __init vmcore_init(void)
{
	int rc;

	/* Nothing to do unless elfcorehdr= was passed and looks usable. */
	if (!is_vmcore_usable())
		return 0;

	rc = parse_crash_elf_headers();
	if (rc) {
		printk(KERN_WARNING "Kdump: vmcore not initialized\n");
		return rc;
	}

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}

module_init(vmcore_init)
| gpl-2.0 |
lnfamous/Kernel_Htc_Pico_CyanogenMod9 | drivers/scsi/in2000.c | 3038 | 73554 | /*
* in2000.c - Linux device driver for the
* Always IN2000 ISA SCSI card.
*
* Copyright (c) 1996 John Shifflett, GeoLog Consulting
* john@geolog.com
* jshiffle@netcom.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* For the avoidance of doubt the "preferred form" of this code is one which
* is in an open non patent encumbered format. Where cryptographic key signing
* forms part of the process of creating an executable the information
* including keys needed to generate an equivalently functional executable
* are deemed to be part of the source code.
*
* Drew Eckhardt's excellent 'Generic NCR5380' sources provided
* much of the inspiration and some of the code for this driver.
* The Linux IN2000 driver distributed in the Linux kernels through
* version 1.2.13 was an extremely valuable reference on the arcane
* (and still mysterious) workings of the IN2000's fifo. It also
* is where I lifted in2000_biosparam(), the gist of the card
* detection scheme, and other bits of code. Many thanks to the
* talented and courageous people who wrote, contributed to, and
* maintained that driver (including Brad McLean, Shaun Savage,
* Bill Earnest, Larry Doolittle, Roger Sunshine, John Luckey,
* Matt Postiff, Peter Lu, zerucha@shell.portal.com, and Eric
* Youngdale). I should also mention the driver written by
* Hamish Macdonald for the (GASP!) Amiga A2091 card, included
* in the Linux-m68k distribution; it gave me a good initial
* understanding of the proper way to run a WD33c93 chip, and I
* ended up stealing lots of code from it.
*
* _This_ driver is (I feel) an improvement over the old one in
* several respects:
* - All problems relating to the data size of a SCSI request are
* gone (as far as I know). The old driver couldn't handle
* swapping to partitions because that involved 4k blocks, nor
* could it deal with the st.c tape driver unmodified, because
* that usually involved 4k - 32k blocks. The old driver never
* quite got away from a morbid dependence on 2k block sizes -
* which of course is the size of the card's fifo.
*
* - Target Disconnection/Reconnection is now supported. Any
* system with more than one device active on the SCSI bus
* will benefit from this. The driver defaults to what I'm
* calling 'adaptive disconnect' - meaning that each command
* is evaluated individually as to whether or not it should
* be run with the option to disconnect/reselect (if the
* device chooses), or as a "SCSI-bus-hog".
*
* - Synchronous data transfers are now supported. Because there
* are a few devices (and many improperly terminated systems)
* that choke when doing sync, the default is sync DISABLED
* for all devices. This faster protocol can (and should!)
* be enabled on selected devices via the command-line.
*
* - Runtime operating parameters can now be specified through
* either the LILO or the 'insmod' command line. For LILO do:
* "in2000=blah,blah,blah"
* and with insmod go like:
* "insmod /usr/src/linux/modules/in2000.o setup_strings=blah,blah"
* The defaults should be good for most people. See the comment
* for 'setup_strings' below for more details.
*
* - The old driver relied exclusively on what the Western Digital
* docs call "Combination Level 2 Commands", which are a great
* idea in that the CPU is relieved of a lot of interrupt
* overhead. However, by accepting a certain (user-settable)
* amount of additional interrupts, this driver achieves
* better control over the SCSI bus, and data transfers are
* almost as fast while being much easier to define, track,
* and debug.
*
* - You can force detection of a card whose BIOS has been disabled.
*
* - Multiple IN2000 cards might almost be supported. I've tried to
* keep it in mind, but have no way to test...
*
*
* TODO:
* tagged queuing. multiple cards.
*
*
* NOTE:
* When using this or any other SCSI driver as a module, you'll
* find that with the stock kernel, at most _two_ SCSI hard
* drives will be linked into the device list (ie, usable).
* If your IN2000 card has more than 2 disks on its bus, you
* might want to change the define of 'SD_EXTRA_DEVS' in the
* 'hosts.h' file from 2 to whatever is appropriate. It took
* me a while to track down this surprisingly obscure and
* undocumented little "feature".
*
*
* People with bug reports, wish-lists, complaints, comments,
* or improvements are asked to pah-leeez email me (John Shifflett)
* at john@geolog.com or jshiffle@netcom.com! I'm anxious to get
* this thing into as good a shape as possible, and I'm positive
* there are lots of lurking bugs and "Stupid Places".
*
* Updated for Linux 2.5 by Alan Cox <alan@lxorguk.ukuu.org.uk>
* - Using new_eh handler
* - Hopefully got all the locking right again
* See "FIXME" notes for items that could do with more work
*/
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/ioport.h>
#include <linux/stat.h>
#include <asm/io.h>
#include <asm/system.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#define IN2000_VERSION "1.33-2.5"
#define IN2000_DATE "2002/11/03"
#include "in2000.h"
/*
* 'setup_strings' is a single string used to pass operating parameters and
* settings from the kernel/module command-line to the driver. 'setup_args[]'
* is an array of strings that define the compile-time default values for
* these settings. If Linux boots with a LILO or insmod command-line, those
* settings are combined with 'setup_args[]'. Note that LILO command-lines
* are prefixed with "in2000=" while insmod uses a "setup_strings=" prefix.
* The driver recognizes the following keywords (lower case required) and
* arguments:
*
* - ioport:addr -Where addr is IO address of a (usually ROM-less) card.
* - noreset -No optional args. Prevents SCSI bus reset at boot time.
* - nosync:x -x is a bitmask where the 1st 7 bits correspond with
* the 7 possible SCSI devices (bit 0 for device #0, etc).
* Set a bit to PREVENT sync negotiation on that device.
* The driver default is sync DISABLED on all devices.
* - period:ns -ns is the minimum # of nanoseconds in a SCSI data transfer
* period. Default is 500; acceptable values are 250 - 1000.
* - disconnect:x -x = 0 to never allow disconnects, 2 to always allow them.
* x = 1 does 'adaptive' disconnects, which is the default
* and generally the best choice.
* - debug:x -If 'DEBUGGING_ON' is defined, x is a bitmask that causes
* various types of debug output to printed - see the DB_xxx
* defines in in2000.h
* - proc:x -If 'PROC_INTERFACE' is defined, x is a bitmask that
* determines how the /proc interface works and what it
* does - see the PR_xxx defines in in2000.h
*
* Syntax Notes:
* - Numeric arguments can be decimal or the '0x' form of hex notation. There
* _must_ be a colon between a keyword and its numeric argument, with no
* spaces.
* - Keywords are separated by commas, no spaces, in the standard kernel
* command-line manner.
* - A keyword in the 'nth' comma-separated command-line member will overwrite
* the 'nth' element of setup_args[]. A blank command-line member (in
* other words, a comma with no preceding keyword) will _not_ overwrite
* the corresponding setup_args[] element.
*
* A few LILO examples (for insmod, use 'setup_strings' instead of 'in2000'):
* - in2000=ioport:0x220,noreset
* - in2000=period:250,disconnect:2,nosync:0x03
* - in2000=debug:0x1e
* - in2000=proc:3
*/
/* Compile-time defaults for the per-position option keywords described
 * above.  Normally, no defaults are specified... */
static char *setup_args[] = { "", "", "", "", "", "", "", "", "" };
/* Combined option string; filled in by 'insmod' via the
 * "setup_strings=" module parameter (or "in2000=" from LILO). */
static char *setup_strings;
module_param(setup_strings, charp, 0);
/* Read one WD33c93 register: latch the register number into the chip's
 * address port, then fetch the value from the data port.  The two-step
 * order is required by the hardware. */
static inline uchar read_3393(struct IN2000_hostdata *hostdata, uchar reg_num)
{
	write1_io(reg_num, IO_WD_ADDR);
	return read1_io(IO_WD_DATA);
}
/* Read the WD33c93 auxiliary status register (directly addressable,
 * no address-port latch needed). */
#define READ_AUX_STAT() read1_io(IO_WD_ASR)
/* Write one WD33c93 register: latch the register number into the
 * address port, then write the value through the data port. */
static inline void write_3393(struct IN2000_hostdata *hostdata, uchar reg_num, uchar value)
{
	write1_io(reg_num, IO_WD_ADDR);
	write1_io(value, IO_WD_DATA);
}
/* Issue a command to the WD33c93 by writing its COMMAND register.
 * (The commented-out wait-for-ASR_CIP poll below was apparently found
 * unnecessary and left for reference.) */
static inline void write_3393_cmd(struct IN2000_hostdata *hostdata, uchar cmd)
{
/*    while (READ_AUX_STAT() & ASR_CIP)
        printk("|");*/
	write1_io(WD_COMMAND, IO_WD_ADDR);
	write1_io(cmd, IO_WD_DATA);
}
/* Pull a single byte off the SCSI bus in polled mode.
 *
 * Issues a TRANSFER_INFO command with the single-byte modifier bit
 * (0x80) set, then spins on the auxiliary status register until the
 * chip raises its interrupt bit, grabbing the byte when the
 * data-buffer-ready bit appears.  Returns 0 if no byte was presented
 * before the interrupt.  Note: the chip interrupt is left uncleared. */
static uchar read_1_byte(struct IN2000_hostdata *hostdata)
{
	uchar asr, x = 0;
	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
	write_3393_cmd(hostdata, WD_CMD_TRANS_INFO | 0x80);
	do {
		asr = READ_AUX_STAT();
		if (asr & ASR_DBR)
			x = read_3393(hostdata, WD_DATA);
	} while (!(asr & ASR_INT));
	return x;
}
/* Load the WD33c93's 24-bit transfer counter, MSB first.  Latching the
 * MSB register address once is enough: the chip's address pointer
 * auto-increments across the three count registers. */
static void write_3393_count(struct IN2000_hostdata *hostdata, unsigned long value)
{
	write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR);
	write1_io((value >> 16), IO_WD_DATA);
	write1_io((value >> 8), IO_WD_DATA);
	write1_io(value, IO_WD_DATA);
}
/* Read back the WD33c93's 24-bit transfer counter, MSB first, using
 * the chip's auto-incrementing register address pointer. */
static unsigned long read_3393_count(struct IN2000_hostdata *hostdata)
{
	unsigned long value;

	write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR);
	value = read1_io(IO_WD_DATA) << 16;
	value |= read1_io(IO_WD_DATA) << 8;
	value |= read1_io(IO_WD_DATA);
	return value;
}
/* The 33c93 needs to be told which direction a command transfers its
* data; we use this function to figure it out. Returns true if there
* will be a DATA_OUT phase with this command, false otherwise.
* (Thanks to Joerg Dorchain for the research and suggestion.)
*/
/* The 33c93 needs to be told which direction a command transfers its
 * data; we use this function to figure it out. Returns true if there
 * will be a DATA_OUT phase with this command, false otherwise.
 * (Thanks to Joerg Dorchain for the research and suggestion.)
 */
static int is_dir_out(Scsi_Cmnd * cmd)
{
	/* Every SCSI opcode known to move data host->target. */
	static const unsigned char out_ops[] = {
		WRITE_6, WRITE_10, WRITE_12, WRITE_LONG, WRITE_SAME,
		WRITE_BUFFER, WRITE_VERIFY, WRITE_VERIFY_12,
		COMPARE, COPY, COPY_VERIFY,
		SEARCH_EQUAL, SEARCH_HIGH, SEARCH_LOW,
		SEARCH_EQUAL_12, SEARCH_HIGH_12, SEARCH_LOW_12,
		FORMAT_UNIT, REASSIGN_BLOCKS, RESERVE,
		MODE_SELECT, MODE_SELECT_10, LOG_SELECT,
		SEND_DIAGNOSTIC, CHANGE_DEFINITION, UPDATE_BLOCK,
		SET_WINDOW, MEDIUM_SCAN, SEND_VOLUME_TAG, 0xea,
	};
	size_t n;

	for (n = 0; n < sizeof(out_ops) / sizeof(out_ops[0]); n++) {
		if (cmd->cmnd[0] == out_ops[n])
			return 1;
	}
	return 0;
}
/* Table mapping synchronous-transfer periods (in nanoseconds) to the
 * WD33c93 SYNCHRONOUS_TRANSFER register encoding.  Scanned by
 * round_period(); the {0, 0} entry terminates the list. */
static struct sx_period sx_table[] = {
	{1, 0x20},
	{252, 0x20},
	{376, 0x30},
	{500, 0x40},
	{624, 0x50},
	{752, 0x60},
	{876, 0x70},
	{1000, 0x00},
	{0, 0}
};
/* Find the sx_table[] index whose period bracket contains 'period'
 * (sx_table[x-1].period_ns < period <= sx_table[x].period_ns).
 * Falls back to index 7 (the slowest, 1000ns entry) if no bracket
 * matches. */
static int round_period(unsigned int period)
{
	int x = 1;

	while (sx_table[x].period_ns) {
		if (period > sx_table[x - 1].period_ns &&
		    period <= sx_table[x].period_ns)
			return x;
		x++;
	}
	return 7;
}
/* Build a SYNCHRONOUS_TRANSFER register value from SDTR parameters:
 * 'period' is the SDTR transfer-period code (units of 4ns), 'offset'
 * the requested REQ/ACK offset, clamped to OPTIMUM_SX_OFF. */
static uchar calc_sync_xfer(unsigned int period, unsigned int offset)
{
	unsigned int off = (offset < OPTIMUM_SX_OFF) ? offset : OPTIMUM_SX_OFF;

	/* convert the SDTR code to nanoseconds before table lookup */
	return sx_table[round_period(period * 4)].reg_value | off;
}
static void in2000_execute(struct Scsi_Host *instance);
/*
 * Accept a SCSI command for execution (lock-held half of
 * in2000_queuecommand).  Initializes the per-command scratch state,
 * links the command onto 'input_Q' - REQUEST_SENSE at the head so
 * pending sense data isn't overwritten, everything else at the tail -
 * then kicks in2000_execute() to start it if the bus is idle.
 * Always returns 0 (the command is always accepted).
 */
static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
{
	struct Scsi_Host *instance;
	struct IN2000_hostdata *hostdata;
	Scsi_Cmnd *tmp;

	instance = cmd->device->host;
	hostdata = (struct IN2000_hostdata *) instance->hostdata;

	DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x(", cmd->cmnd[0]))

	    /* Set up a few fields in the Scsi_Cmnd structure for our own use:
	     *  - host_scribble is the pointer to the next cmd in the input queue
	     *  - scsi_done points to the routine we call when a cmd is finished
	     *  - result is what you'd expect
	     */
	    cmd->host_scribble = NULL;
	cmd->scsi_done = done;
	cmd->result = 0;

	/* We use the Scsi_Pointer structure that's included with each command
	 * as a scratchpad (as it's intended to be used!). The handy thing about
	 * the SCp.xxx fields is that they're always associated with a given
	 * cmd, and are preserved across disconnect-reselect. This means we
	 * can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages
	 * if we keep all the critical pointers and counters in SCp:
	 *  - SCp.ptr is the pointer into the RAM buffer
	 *  - SCp.this_residual is the size of that buffer
	 *  - SCp.buffer points to the current scatter-gather buffer
	 *  - SCp.buffers_residual tells us how many S.G. buffers there are
	 *  - SCp.have_data_in helps keep track of >2048 byte transfers
	 *  - SCp.sent_command is not used
	 *  - SCp.phase records this command's SRCID_ER bit setting
	 */

	if (scsi_bufflen(cmd)) {
		cmd->SCp.buffer = scsi_sglist(cmd);
		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
		cmd->SCp.this_residual = cmd->SCp.buffer->length;
	} else {
		/* No data phase expected - clear all transfer bookkeeping. */
		cmd->SCp.buffer = NULL;
		cmd->SCp.buffers_residual = 0;
		cmd->SCp.ptr = NULL;
		cmd->SCp.this_residual = 0;
	}
	cmd->SCp.have_data_in = 0;

	/* We don't set SCp.phase here - that's done in in2000_execute() */

	/* WD docs state that at the conclusion of a "LEVEL2" command, the
	 * status byte can be retrieved from the LUN register. Apparently,
	 * this is the case only for *uninterrupted* LEVEL2 commands! If
	 * there are any unexpected phases entered, even if they are 100%
	 * legal (different devices may choose to do things differently),
	 * the LEVEL2 command sequence is exited. This often occurs prior
	 * to receiving the status byte, in which case the driver does a
	 * status phase interrupt and gets the status byte on its own.
	 * While such a command can then be "resumed" (ie restarted to
	 * finish up as a LEVEL2 command), the LUN register will NOT be
	 * a valid status byte at the command's conclusion, and we must
	 * use the byte obtained during the earlier interrupt. Here, we
	 * preset SCp.Status to an illegal value (0xff) so that when
	 * this command finally completes, we can tell where the actual
	 * status byte is stored.
	 */

	cmd->SCp.Status = ILLEGAL_STATUS_BYTE;

	/* We need to disable interrupts before messing with the input
	 * queue and calling in2000_execute().
	 */

	/*
	 * Add the cmd to the end of 'input_Q'. Note that REQUEST_SENSE
	 * commands are added to the head of the queue so that the desired
	 * sense data is not lost before REQUEST_SENSE executes.
	 */

	if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) {
		cmd->host_scribble = (uchar *) hostdata->input_Q;
		hostdata->input_Q = cmd;
	} else {		/* find the end of the queue */
		for (tmp = (Scsi_Cmnd *) hostdata->input_Q; tmp->host_scribble; tmp = (Scsi_Cmnd *) tmp->host_scribble);
		tmp->host_scribble = (uchar *) cmd;
	}

	/* We know that there's at least one command in 'input_Q' now.
	 * Go see if any of them are runnable!
	 */

	in2000_execute(cmd->device->host);

	DB(DB_QUEUE_COMMAND, printk(")Q "))
	    return 0;
}
static DEF_SCSI_QCMD(in2000_queuecommand)
/*
* This routine attempts to start a scsi command. If the host_card is
* already connected, we give up immediately. Otherwise, look through
* the input_Q, using the first command we find that's intended
* for a currently non-busy target/lun.
* Note that this function is always called with interrupts already
* disabled (either from in2000_queuecommand() or in2000_intr()).
*/
/*
 * Try to start one command from 'input_Q' on the SCSI bus.  Returns
 * immediately if a selection is already in progress or a command is
 * connected.  Otherwise picks the first queued command whose
 * target/lun is idle, decides whether it may disconnect (SCp.phase),
 * and launches either a Select-With-ATN (when sync parameters still
 * need negotiating or level2 is off) or a full Select-With-ATN-Xfer.
 * Must be called with interrupts already disabled.
 */
static void in2000_execute(struct Scsi_Host *instance)
{
	struct IN2000_hostdata *hostdata;
	Scsi_Cmnd *cmd, *prev;
	int i;
	unsigned short *sp;
	unsigned short f;
	unsigned short flushbuf[16];

	hostdata = (struct IN2000_hostdata *) instance->hostdata;

	DB(DB_EXECUTE, printk("EX("))
	    if (hostdata->selecting || hostdata->connected) {
		/* Bus already occupied - we'll be called again later. */
		DB(DB_EXECUTE, printk(")EX-0 "))
		    return;
	}

	/*
	 * Search through the input_Q for a command destined
	 * for an idle target/lun.
	 */

	cmd = (Scsi_Cmnd *) hostdata->input_Q;
	prev = NULL;
	while (cmd) {
		if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)))
			break;
		prev = cmd;
		cmd = (Scsi_Cmnd *) cmd->host_scribble;
	}

	/* quit if queue empty or all possible targets are busy */

	if (!cmd) {
		DB(DB_EXECUTE, printk(")EX-1 "))
		    return;
	}

	/*  remove command from queue */

	if (prev)
		prev->host_scribble = cmd->host_scribble;
	else
		hostdata->input_Q = (Scsi_Cmnd *) cmd->host_scribble;

#ifdef PROC_STATISTICS
	hostdata->cmd_cnt[cmd->device->id]++;
#endif

	/*
	 * Start the selection process
	 */

	/* DSTID_DPD selects data-in parity checking for non-write commands. */
	if (is_dir_out(cmd))
		write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id);
	else
		write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);

	/* Now we need to figure out whether or not this command is a good
	 * candidate for disconnect/reselect. We guess to the best of our
	 * ability, based on a set of hierarchical rules. When several
	 * devices are operating simultaneously, disconnects are usually
	 * an advantage. In a single device system, or if only 1 device
	 * is being accessed, transfers usually go faster if disconnects
	 * are not allowed:
	 *
	 * + Commands should NEVER disconnect if hostdata->disconnect =
	 *   DIS_NEVER (this holds for tape drives also), and ALWAYS
	 *   disconnect if hostdata->disconnect = DIS_ALWAYS.
	 * + Tape drive commands should always be allowed to disconnect.
	 * + Disconnect should be allowed if disconnected_Q isn't empty.
	 * + Commands should NOT disconnect if input_Q is empty.
	 * + Disconnect should be allowed if there are commands in input_Q
	 *   for a different target/lun. In this case, the other commands
	 *   should be made disconnect-able, if not already.
	 *
	 * I know, I know - this code would flunk me out of any
	 * "C Programming 101" class ever offered. But it's easy
	 * to change around and experiment with for now.
	 */

	cmd->SCp.phase = 0;	/* assume no disconnect */
	if (hostdata->disconnect == DIS_NEVER)
		goto no;
	if (hostdata->disconnect == DIS_ALWAYS)
		goto yes;
	if (cmd->device->type == 1)	/* tape drive? */
		goto yes;
	if (hostdata->disconnected_Q)	/* other commands disconnected? */
		goto yes;
	if (!(hostdata->input_Q))	/* input_Q empty? */
		goto no;
	for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble) {
		if ((prev->device->id != cmd->device->id) || (prev->device->lun != cmd->device->lun)) {
			/* Queued work for another target/lun: make every
			 * queued command disconnect-able as well. */
			for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble)
				prev->SCp.phase = 1;
			goto yes;
		}
	}
	goto no;

      yes:
	cmd->SCp.phase = 1;

#ifdef PROC_STATISTICS
	hostdata->disc_allowed_cnt[cmd->device->id]++;
#endif

      no:
	write_3393(hostdata, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0));

	write_3393(hostdata, WD_TARGET_LUN, cmd->device->lun);
	write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]);
	hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);

	if ((hostdata->level2 <= L2_NONE) || (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) {

		/*
		 * Do a 'Select-With-ATN' command. This will end with
		 * one of the following interrupts:
		 *    CSR_RESEL_AM:  failure - can try again later.
		 *    CSR_TIMEOUT:   failure - give up.
		 *    CSR_SELECT:    success - proceed.
		 */

		hostdata->selecting = cmd;

		/* Every target has its own synchronous transfer setting, kept in
		 * the sync_xfer array, and a corresponding status byte in sync_stat[].
		 * Each target's sync_stat[] entry is initialized to SS_UNSET, and its
		 * sync_xfer[] entry is initialized to the default/safe value. SS_UNSET
		 * means that the parameters are undetermined as yet, and that we
		 * need to send an SDTR message to this device after selection is
		 * complete. We set SS_FIRST to tell the interrupt routine to do so,
		 * unless we don't want to even _try_ synchronous transfers: In this
		 * case we set SS_SET to make the defaults final.
		 */
		if (hostdata->sync_stat[cmd->device->id] == SS_UNSET) {
			if (hostdata->sync_off & (1 << cmd->device->id))
				hostdata->sync_stat[cmd->device->id] = SS_SET;
			else
				hostdata->sync_stat[cmd->device->id] = SS_FIRST;
		}
		hostdata->state = S_SELECTING;
		write_3393_count(hostdata, 0);	/* this guarantees a DATA_PHASE interrupt */
		write_3393_cmd(hostdata, WD_CMD_SEL_ATN);
	}

	else {

		/*
		 * Do a 'Select-With-ATN-Xfer' command. This will end with
		 * one of the following interrupts:
		 *    CSR_RESEL_AM:  failure - can try again later.
		 *    CSR_TIMEOUT:   failure - give up.
		 *    anything else: success - proceed.
		 */

		hostdata->connected = cmd;
		write_3393(hostdata, WD_COMMAND_PHASE, 0);

		/* copy command_descriptor_block into WD chip
		 * (take advantage of auto-incrementing)
		 */

		write1_io(WD_CDB_1, IO_WD_ADDR);
		for (i = 0; i < cmd->cmd_len; i++)
			write1_io(cmd->cmnd[i], IO_WD_DATA);

		/* The wd33c93 only knows about Group 0, 1, and 5 commands when
		 * it's doing a 'select-and-transfer'. To be safe, we write the
		 * size of the CDB into the OWN_ID register for every case. This
		 * way there won't be problems with vendor-unique, audio, etc.
		 */

		write_3393(hostdata, WD_OWN_ID, cmd->cmd_len);

		/* When doing a non-disconnect command, we can save ourselves a DATA
		 * phase interrupt later by setting everything up now. With writes we
		 * need to pre-fill the fifo; if there's room for the 32 flush bytes,
		 * put them in there too - that'll avoid a fifo interrupt. Reads are
		 * somewhat simpler.
		 * KLUDGE NOTE: It seems that you can't completely fill the fifo here:
		 * This results in the IO_FIFO_COUNT register rolling over to zero,
		 * and apparently the gate array logic sees this as empty, not full,
		 * so the 3393 chip is never signalled to start reading from the
		 * fifo. Or maybe it's seen as a permanent fifo interrupt condition.
		 * Regardless, we fix this by temporarily pretending that the fifo
		 * is 16 bytes smaller. (I see now that the old driver has a comment
		 * about "don't fill completely" in an analogous place - must be the
		 * same deal.) This results in CDROM, swap partitions, and tape drives
		 * needing an extra interrupt per write command - I think we can live
		 * with that!
		 */

		if (!(cmd->SCp.phase)) {
			write_3393_count(hostdata, cmd->SCp.this_residual);
			write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
			write1_io(0, IO_FIFO_WRITE);	/* clear fifo counter, write mode */

			if (is_dir_out(cmd)) {
				hostdata->fifo = FI_FIFO_WRITING;
				if ((i = cmd->SCp.this_residual) > (IN2000_FIFO_SIZE - 16))
					i = IN2000_FIFO_SIZE - 16;
				cmd->SCp.have_data_in = i;	/* this much data in fifo */
				i >>= 1;	/* Gulp. Assuming modulo 2. */
				sp = (unsigned short *) cmd->SCp.ptr;
				f = hostdata->io_base + IO_FIFO;

#ifdef FAST_WRITE_IO

				FAST_WRITE2_IO();
#else
				while (i--)
					write2_io(*sp++, IO_FIFO);

#endif

				/* Is there room for the flush bytes? */

				if (cmd->SCp.have_data_in <= ((IN2000_FIFO_SIZE - 16) - 32)) {
					/* flushbuf is deliberately uninitialized:
					 * only its byte count matters, not its
					 * contents. */
					sp = flushbuf;
					i = 16;

#ifdef FAST_WRITE_IO

					FAST_WRITE2_IO();
#else
					while (i--)
						write2_io(0, IO_FIFO);

#endif
				}
			}

			else {
				write1_io(0, IO_FIFO_READ);	/* put fifo in read mode */
				hostdata->fifo = FI_FIFO_READING;
				cmd->SCp.have_data_in = 0;	/* nothing transferred yet */
			}

		} else {
			write_3393_count(hostdata, 0);	/* this guarantees a DATA_PHASE interrupt */
		}
		hostdata->state = S_RUNNING_LEVEL2;
		write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
	}

	/*
	 * Since the SCSI bus can handle only 1 connection at a time,
	 * we get out of here now. If the selection fails, or when
	 * the command disconnects, we'll come back to this routine
	 * to search the input_Q again...
	 */

	DB(DB_EXECUTE, printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))
}
/*
 * Transfer 'cnt' bytes between 'buf' and the WD33c93 data register in
 * polled PIO mode, in the direction given by 'data_in_dir' (non-zero =
 * bus-to-memory).  Spins on the auxiliary status register, moving one
 * byte each time the data-buffer-ready bit appears, until the chip
 * raises its interrupt bit.
 */
static void transfer_pio(uchar * buf, int cnt, int data_in_dir, struct IN2000_hostdata *hostdata)
{
	uchar asr;

	DB(DB_TRANSFER, printk("(%p,%d,%s)", buf, cnt, data_in_dir ? "in" : "out"))

	    write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
	write_3393_count(hostdata, cnt);
	write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
	if (data_in_dir) {
		do {
			asr = READ_AUX_STAT();
			if (asr & ASR_DBR)
				*buf++ = read_3393(hostdata, WD_DATA);
		} while (!(asr & ASR_INT));
	} else {
		do {
			asr = READ_AUX_STAT();
			if (asr & ASR_DBR)
				write_3393(hostdata, WD_DATA, *buf++);
		} while (!(asr & ASR_INT));
	}

	/* Note: we are returning with the interrupt UN-cleared.
	 * Since (presumably) an entire I/O operation has
	 * completed, the bus phase is probably different, and
	 * the interrupt routine will discover this when it
	 * responds to the uncleared int.
	 */

}
/*
 * Set up and start a data transfer for 'cmd' through the card's 2k
 * fifo, advancing to the next scatter-gather segment first if the
 * current one is exhausted.  For reads, just arm the fifo and chip
 * and return - the data arrives via later interrupts.  For writes,
 * pre-fill the fifo with as much data as it will hold; remaining
 * bytes are pushed from the interrupt routine.
 */
static void transfer_bytes(Scsi_Cmnd * cmd, int data_in_dir)
{
	struct IN2000_hostdata *hostdata;
	unsigned short *sp;
	unsigned short f;
	int i;

	hostdata = (struct IN2000_hostdata *) cmd->device->host->hostdata;

	/* Normally, you'd expect 'this_residual' to be non-zero here.
	 * In a series of scatter-gather transfers, however, this
	 * routine will usually be called with 'this_residual' equal
	 * to 0 and 'buffers_residual' non-zero. This means that a
	 * previous transfer completed, clearing 'this_residual', and
	 * now we need to setup the next scatter-gather buffer as the
	 * source or destination for THIS transfer.
	 */
	if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
		++cmd->SCp.buffer;
		--cmd->SCp.buffers_residual;
		cmd->SCp.this_residual = cmd->SCp.buffer->length;
		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
	}

	/* Set up hardware registers */

	write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]);
	write_3393_count(hostdata, cmd->SCp.this_residual);
	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
	write1_io(0, IO_FIFO_WRITE);	/* zero counter, assume write */

	/* Reading is easy. Just issue the command and return - we'll
	 * get an interrupt later when we have actual data to worry about.
	 */

	if (data_in_dir) {
		write1_io(0, IO_FIFO_READ);
		if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
			/* 0x45 = resume the select-and-transfer sequence at
			 * the data phase */
			write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
			write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
			hostdata->state = S_RUNNING_LEVEL2;
		} else
			write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
		hostdata->fifo = FI_FIFO_READING;
		cmd->SCp.have_data_in = 0;
		return;
	}

	/* Writing is more involved - we'll start the WD chip and write as
	 * much data to the fifo as we can right now. Later interrupts will
	 * write any bytes that don't make it at this stage.
	 */

	if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
		write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
		write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
		hostdata->state = S_RUNNING_LEVEL2;
	} else
		write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);

	hostdata->fifo = FI_FIFO_WRITING;
	sp = (unsigned short *) cmd->SCp.ptr;

	if ((i = cmd->SCp.this_residual) > IN2000_FIFO_SIZE)
		i = IN2000_FIFO_SIZE;
	cmd->SCp.have_data_in = i;
	i >>= 1;		/* Gulp. We assume this_residual is modulo 2 */
	f = hostdata->io_base + IO_FIFO;

#ifdef FAST_WRITE_IO

	FAST_WRITE2_IO();
#else
	while (i--)
		write2_io(*sp++, IO_FIFO);

#endif

}
/* We need to use spin_lock_irqsave() & spin_unlock_irqrestore() in this
* function in order to work in an SMP environment. (I'd be surprised
* if the driver is ever used by anyone on a real multi-CPU motherboard,
* but it _does_ need to be able to compile and run in an SMP kernel.)
*/
/*
 * in2000_intr() - interrupt service routine for the IN2000 card.
 *
 * The card has two interrupt sources OR'ed onto one IRQ line: the WD33c93
 * SCSI chip and the 2k data fifo.  We first check the chip's AUX status;
 * if it shows no chip interrupt, the fifo must be the source and we only
 * move data.  Otherwise we read (and thereby clear) the chip's status
 * register and dispatch on it in one large switch.
 *
 * @irqnum: IRQ number (unused - dev_id carries everything we need)
 * @dev_id: the struct Scsi_Host registered at request_irq() time
 *
 * Returns IRQ_HANDLED unconditionally.  Takes instance->host_lock itself.
 */
static irqreturn_t in2000_intr(int irqnum, void *dev_id)
{
	struct Scsi_Host *instance = dev_id;
	struct IN2000_hostdata *hostdata;
	Scsi_Cmnd *patch, *cmd;
	uchar asr, sr, phs, id, lun, *ucp, msg;
	int i, j;
	unsigned long length;
	unsigned short *sp;
	unsigned short f;
	unsigned long flags;

	hostdata = (struct IN2000_hostdata *) instance->hostdata;

	/* Get the spin_lock and disable further ints, for SMP */
	spin_lock_irqsave(instance->host_lock, flags);

#ifdef PROC_STATISTICS
	hostdata->int_cnt++;
#endif

	/* The IN2000 card has 2 interrupt sources OR'ed onto its IRQ line - the
	 * WD3393 chip and the 2k fifo (which is actually a dual-port RAM combined
	 * with a big logic array, so it's a little different than what you might
	 * expect). As far as I know, there's no reason that BOTH can't be active
	 * at the same time, but there's a problem: while we can read the 3393
	 * to tell if _it_ wants an interrupt, I don't know of a way to ask the
	 * fifo the same question. The best we can do is check the 3393 and if
	 * it _isn't_ the source of the interrupt, then we can be pretty sure
	 * that the fifo is the culprit.
	 *  UPDATE: I have it on good authority (Bill Earnest) that bit 0 of the
	 *          IO_FIFO_COUNT register mirrors the fifo interrupt state. I
	 *          assume that bit clear means interrupt active. As it turns
	 *          out, the driver really doesn't need to check for this after
	 *          all, so my remarks above about a 'problem' can safely be
	 *          ignored. The way the logic is set up, there's no advantage
	 *          (that I can see) to worrying about it.
	 *
	 * It seems that the fifo interrupt signal is negated when we extract
	 * bytes during read or write bytes during write.
	 *  - fifo will interrupt when data is moving from it to the 3393, and
	 *    there are 31 (or less?) bytes left to go. This is sort of short-
	 *    sighted: what if you don't WANT to do more? In any case, our
	 *    response is to push more into the fifo - either actual data or
	 *    dummy bytes if need be. Note that we apparently have to write at
	 *    least 32 additional bytes to the fifo after an interrupt in order
	 *    to get it to release the ones it was holding on to - writing fewer
	 *    than 32 will result in another fifo int.
	 *  UPDATE: Again, info from Bill Earnest makes this more understandable:
	 *          32 bytes = two counts of the fifo counter register. He tells
	 *          me that the fifo interrupt is a non-latching signal derived
	 *          from a straightforward boolean interpretation of the 7
	 *          highest bits of the fifo counter and the fifo-read/fifo-write
	 *          state. Who'd a thought?
	 */

	write1_io(0, IO_LED_ON);
	asr = READ_AUX_STAT();
	if (!(asr & ASR_INT)) {	/* no WD33c93 interrupt? */

		/* Ok. This is definitely a FIFO-only interrupt.
		 *
		 * If FI_FIFO_READING is set, there are up to 2048 bytes waiting to be read,
		 * maybe more to come from the SCSI bus. Read as many as we can out of the
		 * fifo and into memory at the location of SCp.ptr[SCp.have_data_in], and
		 * update have_data_in afterwards.
		 *
		 * If we have FI_FIFO_WRITING, the FIFO has almost run out of bytes to move
		 * into the WD3393 chip (I think the interrupt happens when there are 31
		 * bytes left, but it may be fewer...). The 3393 is still waiting, so we
		 * shove some more into the fifo, which gets things moving again. If the
		 * original SCSI command specified more than 2048 bytes, there may still
		 * be some of that data left: fine - use it (from SCp.ptr[SCp.have_data_in]).
		 * Don't forget to update have_data_in. If we've already written out the
		 * entire buffer, feed 32 dummy bytes to the fifo - they're needed to
		 * push out the remaining real data.
		 *    (Big thanks to Bill Earnest for getting me out of the mud in here.)
		 */

		cmd = (Scsi_Cmnd *) hostdata->connected;	/* assume we're connected */
		CHECK_NULL(cmd, "fifo_int")

		if (hostdata->fifo == FI_FIFO_READING) {

			DB(DB_FIFO, printk("{R:%02x} ", read1_io(IO_FIFO_COUNT)))

			/* Destination is where we left off in the command's buffer. */
			sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
			i = read1_io(IO_FIFO_COUNT) & 0xfe;
			i <<= 2;	/* # of words waiting in the fifo */
			f = hostdata->io_base + IO_FIFO;

#ifdef FAST_READ_IO

			FAST_READ2_IO();
#else
			while (i--)
				*sp++ = read2_io(IO_FIFO);

#endif

			/* Recompute the byte count from how far 'sp' advanced. */
			i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
			i <<= 1;
			cmd->SCp.have_data_in += i;
		}

		else if (hostdata->fifo == FI_FIFO_WRITING) {

			DB(DB_FIFO, printk("{W:%02x} ", read1_io(IO_FIFO_COUNT)))

			/* If all bytes have been written to the fifo, flush out the stragglers.
			 * Note that while writing 16 dummy words seems arbitrary, we don't
			 * have another choice that I can see. What we really want is to read
			 * the 3393 transfer count register (that would tell us how many bytes
			 * needed flushing), but the TRANSFER_INFO command hasn't completed
			 * yet (not enough bytes!) and that register won't be accessible. So,
			 * we use 16 words - a number obtained through trial and error.
			 *  UPDATE: Bill says this is exactly what Always does, so there.
			 *          More thanks due him for help in this section.
			 */

			if (cmd->SCp.this_residual == cmd->SCp.have_data_in) {
				i = 16;
				while (i--)	/* write 32 dummy bytes */
					write2_io(0, IO_FIFO);
			}

			/* If there are still bytes left in the SCSI buffer, write as many as we
			 * can out to the fifo.
			 */

			else {
				sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
				i = cmd->SCp.this_residual - cmd->SCp.have_data_in;	/* bytes yet to go */
				j = read1_io(IO_FIFO_COUNT) & 0xfe;
				j <<= 2;	/* how many words the fifo has room for */
				/* Don't push more words than we have bytes left to send. */
				if ((j << 1) > i)
					j = (i >> 1);
				while (j--)
					write2_io(*sp++, IO_FIFO);

				i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
				i <<= 1;
				cmd->SCp.have_data_in += i;
			}
		}

		else {
			/* Neither reading nor writing - nothing sensible to do. */
			printk("*** Spurious FIFO interrupt ***");
		}

		write1_io(0, IO_LED_OFF);

		/* release the SMP spin_lock and restore irq state */
		spin_unlock_irqrestore(instance->host_lock, flags);
		return IRQ_HANDLED;
	}

	/* This interrupt was triggered by the WD33c93 chip. The fifo interrupt
	 * may also be asserted, but we don't bother to check it: we get more
	 * detailed info from FIFO_READING and FIFO_WRITING (see below).
	 */

	cmd = (Scsi_Cmnd *) hostdata->connected;	/* assume we're connected */
	sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear the interrupt */
	phs = read_3393(hostdata, WD_COMMAND_PHASE);

	/* A few status codes are legal without a connected command (reselect,
	 * timeout, select-complete); anything else with no command is noise. */
	if (!cmd && (sr != CSR_RESEL_AM && sr != CSR_TIMEOUT && sr != CSR_SELECT)) {
		printk("\nNR:wd-intr-1\n");
		write1_io(0, IO_LED_OFF);

		/* release the SMP spin_lock and restore irq state */
		spin_unlock_irqrestore(instance->host_lock, flags);
		return IRQ_HANDLED;
	}

	DB(DB_INTR, printk("{%02x:%02x-", asr, sr))

	/* After starting a FIFO-based transfer, the next _WD3393_ interrupt is
	 * guaranteed to be in response to the completion of the transfer.
	 * If we were reading, there's probably data in the fifo that needs
	 * to be copied into RAM - do that here. Also, we have to update
	 * 'this_residual' and 'ptr' based on the contents of the
	 * TRANSFER_COUNT register, in case the device decided to do an
	 * intermediate disconnect (a device may do this if it has to
	 * do a seek, or just to be nice and let other devices have
	 * some bus time during long transfers).
	 * After doing whatever is necessary with the fifo, we go on and
	 * service the WD3393 interrupt normally.
	 */

	if (hostdata->fifo == FI_FIFO_READING) {

		/* buffer index = start-of-buffer + #-of-bytes-already-read */

		sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);

		/* bytes remaining in fifo = (total-wanted - #-not-got) - #-already-read */

		i = (cmd->SCp.this_residual - read_3393_count(hostdata)) - cmd->SCp.have_data_in;
		i >>= 1;	/* Gulp. We assume this will always be modulo 2 */
		f = hostdata->io_base + IO_FIFO;

#ifdef FAST_READ_IO

		FAST_READ2_IO();
#else
		while (i--)
			*sp++ = read2_io(IO_FIFO);

#endif

		hostdata->fifo = FI_FIFO_UNUSED;
		/* Account for a possible intermediate disconnect: whatever the
		 * chip says is still un-transferred becomes the new residual. */
		length = cmd->SCp.this_residual;
		cmd->SCp.this_residual = read_3393_count(hostdata);
		cmd->SCp.ptr += (length - cmd->SCp.this_residual);

		DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))

	}

	else if (hostdata->fifo == FI_FIFO_WRITING) {
		hostdata->fifo = FI_FIFO_UNUSED;
		length = cmd->SCp.this_residual;
		cmd->SCp.this_residual = read_3393_count(hostdata);
		cmd->SCp.ptr += (length - cmd->SCp.this_residual);

		DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))

	}

	/* Respond to the specific WD3393 interrupt - there are quite a few! */

	switch (sr) {

	case CSR_TIMEOUT:
		DB(DB_INTR, printk("TIMEOUT"))

		    if (hostdata->state == S_RUNNING_LEVEL2)
			hostdata->connected = NULL;
		else {
			cmd = (Scsi_Cmnd *) hostdata->selecting;	/* get a valid cmd */
			CHECK_NULL(cmd, "csr_timeout")
			    hostdata->selecting = NULL;
		}

		cmd->result = DID_NO_CONNECT << 16;
		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
		hostdata->state = S_UNCONNECTED;
		cmd->scsi_done(cmd);

		/* We are not connected to a target - check to see if there
		 * are commands waiting to be executed.
		 */

		in2000_execute(instance);
		break;

		/* Note: this interrupt should not occur in a LEVEL2 command */

	case CSR_SELECT:
		DB(DB_INTR, printk("SELECT"))
		    hostdata->connected = cmd = (Scsi_Cmnd *) hostdata->selecting;
		CHECK_NULL(cmd, "csr_select")
		    hostdata->selecting = NULL;

		/* construct an IDENTIFY message with correct disconnect bit */

		hostdata->outgoing_msg[0] = (0x80 | 0x00 | cmd->device->lun);
		if (cmd->SCp.phase)
			hostdata->outgoing_msg[0] |= 0x40;

		if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) {
#ifdef SYNC_DEBUG
			printk(" sending SDTR ");
#endif

			hostdata->sync_stat[cmd->device->id] = SS_WAITING;

			/* tack on a 2nd message to ask about synchronous transfers */

			hostdata->outgoing_msg[1] = EXTENDED_MESSAGE;
			hostdata->outgoing_msg[2] = 3;
			hostdata->outgoing_msg[3] = EXTENDED_SDTR;
			hostdata->outgoing_msg[4] = OPTIMUM_SX_PER / 4;
			hostdata->outgoing_msg[5] = OPTIMUM_SX_OFF;
			hostdata->outgoing_len = 6;
		} else
			hostdata->outgoing_len = 1;

		hostdata->state = S_CONNECTED;
		break;

	case CSR_XFER_DONE | PHS_DATA_IN:
	case CSR_UNEXP | PHS_DATA_IN:
	case CSR_SRV_REQ | PHS_DATA_IN:
		DB(DB_INTR, printk("IN-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
		    transfer_bytes(cmd, DATA_IN_DIR);
		if (hostdata->state != S_RUNNING_LEVEL2)
			hostdata->state = S_CONNECTED;
		break;

	case CSR_XFER_DONE | PHS_DATA_OUT:
	case CSR_UNEXP | PHS_DATA_OUT:
	case CSR_SRV_REQ | PHS_DATA_OUT:
		DB(DB_INTR, printk("OUT-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
		    transfer_bytes(cmd, DATA_OUT_DIR);
		if (hostdata->state != S_RUNNING_LEVEL2)
			hostdata->state = S_CONNECTED;
		break;

		/* Note: this interrupt should not occur in a LEVEL2 command */

	case CSR_XFER_DONE | PHS_COMMAND:
	case CSR_UNEXP | PHS_COMMAND:
	case CSR_SRV_REQ | PHS_COMMAND:
		DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0]))
		    transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata);
		hostdata->state = S_CONNECTED;
		break;

	case CSR_XFER_DONE | PHS_STATUS:
	case CSR_UNEXP | PHS_STATUS:
	case CSR_SRV_REQ | PHS_STATUS:
		DB(DB_INTR, printk("STATUS="))
		    cmd->SCp.Status = read_1_byte(hostdata);
		DB(DB_INTR, printk("%02x", cmd->SCp.Status))
		    if (hostdata->level2 >= L2_BASIC) {
			sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */
			hostdata->state = S_RUNNING_LEVEL2;
			write_3393(hostdata, WD_COMMAND_PHASE, 0x50);
			write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
		} else {
			hostdata->state = S_CONNECTED;
		}
		break;

	case CSR_XFER_DONE | PHS_MESS_IN:
	case CSR_UNEXP | PHS_MESS_IN:
	case CSR_SRV_REQ | PHS_MESS_IN:
		DB(DB_INTR, printk("MSG_IN="))

		    msg = read_1_byte(hostdata);
		sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */

		/* Accumulate extended-message bytes in incoming_msg[]; any
		 * other (single-byte) message resets the accumulation index. */
		hostdata->incoming_msg[hostdata->incoming_ptr] = msg;
		if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE)
			msg = EXTENDED_MESSAGE;
		else
			hostdata->incoming_ptr = 0;

		cmd->SCp.Message = msg;
		switch (msg) {

		case COMMAND_COMPLETE:
			DB(DB_INTR, printk("CCMP"))
			    write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_PRE_CMP_DISC;
			break;

		case SAVE_POINTERS:
			DB(DB_INTR, printk("SDP"))
			    write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_CONNECTED;
			break;

		case RESTORE_POINTERS:
			DB(DB_INTR, printk("RDP"))
			    if (hostdata->level2 >= L2_BASIC) {
				write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
				write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
				hostdata->state = S_RUNNING_LEVEL2;
			} else {
				write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
				hostdata->state = S_CONNECTED;
			}
			break;

		case DISCONNECT:
			DB(DB_INTR, printk("DIS"))
			    cmd->device->disconnect = 1;
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_PRE_TMP_DISC;
			break;

		case MESSAGE_REJECT:
			DB(DB_INTR, printk("REJ"))
#ifdef SYNC_DEBUG
			    printk("-REJ-");
#endif
			/* Target rejected our SDTR: fall back to whatever
			 * sync_xfer already holds and stop negotiating. */
			if (hostdata->sync_stat[cmd->device->id] == SS_WAITING)
				hostdata->sync_stat[cmd->device->id] = SS_SET;
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_CONNECTED;
			break;

		case EXTENDED_MESSAGE:
			DB(DB_INTR, printk("EXT"))

			    ucp = hostdata->incoming_msg;

#ifdef SYNC_DEBUG
			printk("%02x", ucp[hostdata->incoming_ptr]);
#endif
			/* Is this the last byte of the extended message?
			 * (ucp[1] is the extended message's length byte) */

			if ((hostdata->incoming_ptr >= 2) && (hostdata->incoming_ptr == (ucp[1] + 1))) {

				switch (ucp[2]) {	/* what's the EXTENDED code? */
				case EXTENDED_SDTR:
					id = calc_sync_xfer(ucp[3], ucp[4]);
					if (hostdata->sync_stat[cmd->device->id] != SS_WAITING) {

						/* A device has sent an unsolicited SDTR message; rather than go
						 * through the effort of decoding it and then figuring out what
						 * our reply should be, we're just gonna say that we have a
						 * synchronous fifo depth of 0. This will result in asynchronous
						 * transfers - not ideal but so much easier.
						 * Actually, this is OK because it assures us that if we don't
						 * specifically ask for sync transfers, we won't do any.
						 */

						write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
						hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
						hostdata->outgoing_msg[1] = 3;
						hostdata->outgoing_msg[2] = EXTENDED_SDTR;
						hostdata->outgoing_msg[3] = hostdata->default_sx_per / 4;
						hostdata->outgoing_msg[4] = 0;
						hostdata->outgoing_len = 5;
						hostdata->sync_xfer[cmd->device->id] = calc_sync_xfer(hostdata->default_sx_per / 4, 0);
					} else {
						hostdata->sync_xfer[cmd->device->id] = id;
					}
#ifdef SYNC_DEBUG
					printk("sync_xfer=%02x", hostdata->sync_xfer[cmd->device->id]);
#endif
					hostdata->sync_stat[cmd->device->id] = SS_SET;
					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
					hostdata->state = S_CONNECTED;
					break;
				case EXTENDED_WDTR:
					write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
					printk("sending WDTR ");
					hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
					hostdata->outgoing_msg[1] = 2;
					hostdata->outgoing_msg[2] = EXTENDED_WDTR;
					hostdata->outgoing_msg[3] = 0;	/* 8 bit transfer width */
					hostdata->outgoing_len = 4;
					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
					hostdata->state = S_CONNECTED;
					break;
				default:
					write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
					printk("Rejecting Unknown Extended Message(%02x). ", ucp[2]);
					hostdata->outgoing_msg[0] = MESSAGE_REJECT;
					hostdata->outgoing_len = 1;
					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
					hostdata->state = S_CONNECTED;
					break;
				}
				hostdata->incoming_ptr = 0;
			}

			/* We need to read more MESS_IN bytes for the extended message */

			else {
				hostdata->incoming_ptr++;
				write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
				hostdata->state = S_CONNECTED;
			}
			break;

		default:
			printk("Rejecting Unknown Message(%02x) ", msg);
			write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
			hostdata->outgoing_msg[0] = MESSAGE_REJECT;
			hostdata->outgoing_len = 1;
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_CONNECTED;
		}
		break;

		/* Note: this interrupt will occur only after a LEVEL2 command */

	case CSR_SEL_XFER_DONE:

		/* Make sure that reselection is enabled at this point - it may
		 * have been turned off for the command that just completed.
		 */

		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
		if (phs == 0x60) {
			DB(DB_INTR, printk("SX-DONE"))
			    cmd->SCp.Message = COMMAND_COMPLETE;
			lun = read_3393(hostdata, WD_TARGET_LUN);
			DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
			    hostdata->connected = NULL;
			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
			hostdata->state = S_UNCONNECTED;
			/* TARGET_LUN doubles as the status byte after a
			 * SEL_ATN_XFER - use it if Status wasn't captured. */
			if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
				cmd->SCp.Status = lun;
			if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
				cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
			else
				cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
			cmd->scsi_done(cmd);

			/* We are no longer connected to a target - check to see if
			 * there are commands waiting to be executed.
			 */

			in2000_execute(instance);
		} else {
			printk("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs);
		}
		break;

		/* Note: this interrupt will occur only after a LEVEL2 command */

	case CSR_SDP:
		DB(DB_INTR, printk("SDP"))
		    hostdata->state = S_RUNNING_LEVEL2;
		write_3393(hostdata, WD_COMMAND_PHASE, 0x41);
		write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
		break;

	case CSR_XFER_DONE | PHS_MESS_OUT:
	case CSR_UNEXP | PHS_MESS_OUT:
	case CSR_SRV_REQ | PHS_MESS_OUT:
		DB(DB_INTR, printk("MSG_OUT="))

		/* To get here, we've probably requested MESSAGE_OUT and have
		 * already put the correct bytes in outgoing_msg[] and filled
		 * in outgoing_len. We simply send them out to the SCSI bus.
		 * Sometimes we get MESSAGE_OUT phase when we're not expecting
		 * it - like when our SDTR message is rejected by a target. Some
		 * targets send the REJECT before receiving all of the extended
		 * message, and then seem to go back to MESSAGE_OUT for a byte
		 * or two. Not sure why, or if I'm doing something wrong to
		 * cause this to happen. Regardless, it seems that sending
		 * NOP messages in these situations results in no harm and
		 * makes everyone happy.
		 */

		    if (hostdata->outgoing_len == 0) {
			hostdata->outgoing_len = 1;
			hostdata->outgoing_msg[0] = NOP;
		}
		transfer_pio(hostdata->outgoing_msg, hostdata->outgoing_len, DATA_OUT_DIR, hostdata);
		DB(DB_INTR, printk("%02x", hostdata->outgoing_msg[0]))
		    hostdata->outgoing_len = 0;
		hostdata->state = S_CONNECTED;
		break;

	case CSR_UNEXP_DISC:

		/* I think I've seen this after a request-sense that was in response
		 * to an error condition, but not sure. We certainly need to do
		 * something when we get this interrupt - the question is 'what?'.
		 * Let's think positively, and assume some command has finished
		 * in a legal manner (like a command that provokes a request-sense),
		 * so we treat it as a normal command-complete-disconnect.
		 */

		/* Make sure that reselection is enabled at this point - it may
		 * have been turned off for the command that just completed.
		 */

		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
		if (cmd == NULL) {
			printk(" - Already disconnected! ");
			hostdata->state = S_UNCONNECTED;

			/* release the SMP spin_lock and restore irq state */
			spin_unlock_irqrestore(instance->host_lock, flags);
			return IRQ_HANDLED;
		}
		DB(DB_INTR, printk("UNEXP_DISC"))
		    hostdata->connected = NULL;
		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
		hostdata->state = S_UNCONNECTED;
		if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
			cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
		else
			cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
		cmd->scsi_done(cmd);

		/* We are no longer connected to a target - check to see if
		 * there are commands waiting to be executed.
		 */

		in2000_execute(instance);
		break;

	case CSR_DISC:

		/* Make sure that reselection is enabled at this point - it may
		 * have been turned off for the command that just completed.
		 */

		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
		DB(DB_INTR, printk("DISC"))
		    if (cmd == NULL) {
			printk(" - Already disconnected! ");
			hostdata->state = S_UNCONNECTED;
		}
		switch (hostdata->state) {
		case S_PRE_CMP_DISC:
			hostdata->connected = NULL;
			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
			hostdata->state = S_UNCONNECTED;
			DB(DB_INTR, printk(":%d", cmd->SCp.Status))
			    if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
				cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
			else
				cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
			cmd->scsi_done(cmd);
			break;
		case S_PRE_TMP_DISC:
		case S_RUNNING_LEVEL2:
			/* Target disconnected mid-command: park the command on
			 * disconnected_Q until it reselects us. */
			cmd->host_scribble = (uchar *) hostdata->disconnected_Q;
			hostdata->disconnected_Q = cmd;
			hostdata->connected = NULL;
			hostdata->state = S_UNCONNECTED;

#ifdef PROC_STATISTICS
			hostdata->disc_done_cnt[cmd->device->id]++;
#endif

			break;
		default:
			printk("*** Unexpected DISCONNECT interrupt! ***");
			hostdata->state = S_UNCONNECTED;
		}

		/* We are no longer connected to a target - check to see if
		 * there are commands waiting to be executed.
		 */

		in2000_execute(instance);
		break;

	case CSR_RESEL_AM:
		DB(DB_INTR, printk("RESEL"))

		    /* First we have to make sure this reselection didn't */
		    /* happen during Arbitration/Selection of some other device. */
		    /* If yes, put losing command back on top of input_Q. */
		    if (hostdata->level2 <= L2_NONE) {

			if (hostdata->selecting) {
				cmd = (Scsi_Cmnd *) hostdata->selecting;
				hostdata->selecting = NULL;
				hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
				cmd->host_scribble = (uchar *) hostdata->input_Q;
				hostdata->input_Q = cmd;
			}
		}

		else {

			if (cmd) {
				if (phs == 0x00) {
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
					cmd->host_scribble = (uchar *) hostdata->input_Q;
					hostdata->input_Q = cmd;
				} else {
					printk("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---", asr, sr, phs);
					/* Deliberate hang: this state was considered
					 * unrecoverable by the original author. */
					while (1)
						printk("\r");
				}
			}
		}

		/* OK - find out which device reselected us. */

		id = read_3393(hostdata, WD_SOURCE_ID);
		id &= SRCID_MASK;

		/* and extract the lun from the ID message. (Note that we don't
		 * bother to check for a valid message here - I guess this is
		 * not the right way to go, but....)
		 */

		lun = read_3393(hostdata, WD_DATA);
		if (hostdata->level2 < L2_RESELECT)
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
		lun &= 7;

		/* Now we look for the command that's reconnecting. */

		cmd = (Scsi_Cmnd *) hostdata->disconnected_Q;
		patch = NULL;
		while (cmd) {
			if (id == cmd->device->id && lun == cmd->device->lun)
				break;
			patch = cmd;
			cmd = (Scsi_Cmnd *) cmd->host_scribble;
		}

		/* Hmm. Couldn't find a valid command.... What to do? */

		if (!cmd) {
			printk("---TROUBLE: target %d.%d not in disconnect queue---", id, lun);
			break;
		}

		/* Ok, found the command - now start it up again. */

		if (patch)
			patch->host_scribble = cmd->host_scribble;
		else
			hostdata->disconnected_Q = (Scsi_Cmnd *) cmd->host_scribble;
		hostdata->connected = cmd;

		/* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]'
		 * because these things are preserved over a disconnect.
		 * But we DO need to fix the DPD bit so it's correct for this command.
		 */

		if (is_dir_out(cmd))
			write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id);
		else
			write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);
		if (hostdata->level2 >= L2_RESELECT) {
			write_3393_count(hostdata, 0);	/* we want a DATA_PHASE interrupt */
			write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
			write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
			hostdata->state = S_RUNNING_LEVEL2;
		} else
			hostdata->state = S_CONNECTED;

		break;

	default:
		printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--", asr, sr, phs);
	}

	write1_io(0, IO_LED_OFF);

	DB(DB_INTR, printk("} "))

	/* release the SMP spin_lock and restore irq state */
	    spin_unlock_irqrestore(instance->host_lock, flags);
	return IRQ_HANDLED;
}
/* reset_hardware() 'type' argument: reset just the WD33c93, or the whole
 * card plus the SCSI bus. */
#define RESET_CARD         0
#define RESET_CARD_AND_BUS 1
/* Set in reset_hardware()'s return value when the chip is a WD33c93B
 * (detected by a working QUEUE_TAG register). */
#define B_FLAG 0x80

/*
 * reset_hardware() - (re)initialize the IN2000 card and the WD33c93 chip.
 *
 * @instance: host to reset
 * @type:     RESET_CARD or RESET_CARD_AND_BUS (also pulses the card reset)
 *
 * Returns the WD33c93 status byte read after the chip-level RESET command,
 * with B_FLAG or'ed in if the chip turned out to be a 'B' revision.
 *
 * Caller must hold instance lock!
 */
static int reset_hardware(struct Scsi_Host *instance, int type)
{
	struct IN2000_hostdata *hostdata;
	int qt, x;

	hostdata = (struct IN2000_hostdata *) instance->hostdata;

	write1_io(0, IO_LED_ON);
	if (type == RESET_CARD_AND_BUS) {
		write1_io(0, IO_CARD_RESET);
		x = read1_io(IO_HARDWARE);	/* dummy read settles the reset */
	}
	x = read_3393(hostdata, WD_SCSI_STATUS);	/* clear any WD intrpt */
	write_3393(hostdata, WD_OWN_ID, instance->this_id | OWNID_EAF | OWNID_RAF | OWNID_FS_8);
	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
	write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, calc_sync_xfer(hostdata->default_sx_per / 4, DEFAULT_SX_OFF));

	write1_io(0, IO_FIFO_WRITE);	/* clear fifo counter */
	write1_io(0, IO_FIFO_READ);	/* start fifo out in read mode */
	write_3393(hostdata, WD_COMMAND, WD_CMD_RESET);
	/* FIXME: timeout ?? */
	while (!(READ_AUX_STAT() & ASR_INT))
		cpu_relax();	/* wait for RESET to complete */

	x = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */

	/* Probe for the 'B' chip revision: only a WD33c93B latches a value
	 * written to the QUEUE_TAG register. */
	write_3393(hostdata, WD_QUEUE_TAG, 0xa5);	/* any random number */
	qt = read_3393(hostdata, WD_QUEUE_TAG);
	if (qt == 0xa5) {
		x |= B_FLAG;
		write_3393(hostdata, WD_QUEUE_TAG, 0);
	}
	write_3393(hostdata, WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE);
	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
	write1_io(0, IO_LED_OFF);
	return x;
}
/*
 * in2000_bus_reset() - SCSI error-handling entry: reset card and bus.
 *
 * Resets the hardware, then drops every piece of per-host software state
 * (queues, per-target sync settings, fifo bookkeeping) back to its
 * power-on defaults.  Always reports SUCCESS to the midlayer.
 */
static int in2000_bus_reset(Scsi_Cmnd * cmd)
{
	struct Scsi_Host *instance = cmd->device->host;
	struct IN2000_hostdata *hostdata = (struct IN2000_hostdata *) instance->hostdata;
	unsigned long flags;
	int tgt;

	printk(KERN_WARNING "scsi%d: Reset. ", instance->host_no);

	spin_lock_irqsave(instance->host_lock, flags);

	/* do scsi-reset here */
	reset_hardware(instance, RESET_CARD_AND_BUS);

	/* Forget everything we knew about each of the 8 possible targets. */
	for (tgt = 0; tgt < 8; tgt++) {
		hostdata->busy[tgt] = 0;
		hostdata->sync_xfer[tgt] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
		hostdata->sync_stat[tgt] = SS_UNSET;	/* using default sync values */
	}

	/* Empty all command queues and return to the idle state. */
	hostdata->input_Q = NULL;
	hostdata->selecting = NULL;
	hostdata->connected = NULL;
	hostdata->disconnected_Q = NULL;
	hostdata->state = S_UNCONNECTED;
	hostdata->fifo = FI_FIFO_UNUSED;
	hostdata->incoming_ptr = 0;
	hostdata->outgoing_len = 0;

	cmd->result = DID_RESET << 16;

	spin_unlock_irqrestore(instance->host_lock, flags);
	return SUCCESS;
}
/*
 * __in2000_abort() - try to abort a command; caller holds the host lock.
 *
 * Four cases:
 *   1. still queued on input_Q  -> unlink, complete with DID_ABORT, SUCCESS
 *   2. currently connected      -> issue chip ABORT + DISCONNECT, SUCCESS
 *   3. on disconnected_Q        -> can't abort safely, FAILED
 *   4. not found anywhere       -> probably already completed, SUCCESS
 */
static int __in2000_abort(Scsi_Cmnd * cmd)
{
	struct Scsi_Host *instance;
	struct IN2000_hostdata *hostdata;
	Scsi_Cmnd *tmp, *prev;
	uchar sr, asr;
	unsigned long timeout;

	instance = cmd->device->host;
	hostdata = (struct IN2000_hostdata *) instance->hostdata;

	printk(KERN_DEBUG "scsi%d: Abort-", instance->host_no);
	printk("(asr=%02x,count=%ld,resid=%d,buf_resid=%d,have_data=%d,FC=%02x)- ", READ_AUX_STAT(), read_3393_count(hostdata), cmd->SCp.this_residual, cmd->SCp.buffers_residual, cmd->SCp.have_data_in, read1_io(IO_FIFO_COUNT));

	/*
	 * Case 1 : If the command hasn't been issued yet, we simply remove it
	 *     from the input_Q.
	 */

	tmp = (Scsi_Cmnd *) hostdata->input_Q;
	prev = NULL;
	while (tmp) {
		if (tmp == cmd) {
			/* Unlink the command from the singly-linked queue.
			 * BUGFIX: the original code only updated the
			 * predecessor's link; when the aborted command was at
			 * the head of input_Q the list head was left pointing
			 * at a completed command, which in2000_execute() could
			 * then re-issue. */
			if (prev)
				prev->host_scribble = cmd->host_scribble;
			else
				hostdata->input_Q = (Scsi_Cmnd *) cmd->host_scribble;
			cmd->host_scribble = NULL;
			cmd->result = DID_ABORT << 16;
			printk(KERN_WARNING "scsi%d: Abort - removing command from input_Q. ", instance->host_no);
			cmd->scsi_done(cmd);
			return SUCCESS;
		}
		prev = tmp;
		tmp = (Scsi_Cmnd *) tmp->host_scribble;
	}

	/*
	 * Case 2 : If the command is connected, we're going to fail the abort
	 *     and let the high level SCSI driver retry at a later time or
	 *     issue a reset.
	 *
	 *     Timeouts, and therefore aborted commands, will be highly unlikely
	 *     and handling them cleanly in this situation would make the common
	 *     case of noresets less efficient, and would pollute our code. So,
	 *     we fail.
	 */

	if (hostdata->connected == cmd) {

		printk(KERN_WARNING "scsi%d: Aborting connected command - ", instance->host_no);
		printk("sending wd33c93 ABORT command - ");
		write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
		write_3393_cmd(hostdata, WD_CMD_ABORT);

		/* Now we have to attempt to flush out the FIFO...
		 * Drain any bytes the chip still offers until it interrupts
		 * (ABORT complete) or we give up. */
		printk("flushing fifo - ");
		timeout = 1000000;
		do {
			asr = READ_AUX_STAT();
			if (asr & ASR_DBR)
				read_3393(hostdata, WD_DATA);
		} while (!(asr & ASR_INT) && timeout-- > 0);
		sr = read_3393(hostdata, WD_SCSI_STATUS);
		printk("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ", asr, sr, read_3393_count(hostdata), timeout);

		/*
		 * Abort command processed.
		 * Still connected.
		 * We must disconnect.
		 */

		printk("sending wd33c93 DISCONNECT command - ");
		write_3393_cmd(hostdata, WD_CMD_DISCONNECT);

		/* Wait (bounded) for the DISCONNECT command to leave the
		 * 'command in progress' state. */
		timeout = 1000000;
		asr = READ_AUX_STAT();
		while ((asr & ASR_CIP) && timeout-- > 0)
			asr = READ_AUX_STAT();
		sr = read_3393(hostdata, WD_SCSI_STATUS);
		printk("asr=%02x, sr=%02x.", asr, sr);

		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
		hostdata->connected = NULL;
		hostdata->state = S_UNCONNECTED;
		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		in2000_execute(instance);

		return SUCCESS;
	}

	/*
	 * Case 3: If the command is currently disconnected from the bus,
	 * we're not going to expend much effort here: Let's just return
	 * an ABORT_SNOOZE and hope for the best...
	 */

	for (tmp = (Scsi_Cmnd *) hostdata->disconnected_Q; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)
		if (cmd == tmp) {
			printk(KERN_DEBUG "scsi%d: unable to abort disconnected command.\n", instance->host_no);
			return FAILED;
		}

	/*
	 * Case 4 : If we reached this point, the command was not found in any of
	 *     the queues.
	 *
	 * We probably reached this point because of an unlikely race condition
	 * between the command completing successfully and the abortion code,
	 * so we won't panic, but we will notify the user in case something really
	 * broke.
	 */

	in2000_execute(instance);

	printk("scsi%d: warning : SCSI command probably completed successfully" " before abortion. ", instance->host_no);
	return SUCCESS;
}
/*
 * in2000_abort() - midlayer abort entry point.
 *
 * Thin locking wrapper: grabs the host lock and delegates the real work
 * to __in2000_abort().  Returns SUCCESS or FAILED as reported by it.
 */
static int in2000_abort(Scsi_Cmnd * cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int result;

	spin_lock_irq(host->host_lock);
	result = __in2000_abort(cmd);
	spin_unlock_irq(host->host_lock);

	return result;
}
#define MAX_IN2000_HOSTS 3
#define MAX_SETUP_ARGS ARRAY_SIZE(setup_args)
#define SETUP_BUFFER_SIZE 200
/* Private copy of the kernel command-line options; setup_args[] entries
 * point into this buffer after in2000_setup() has tokenized it. */
static char setup_buffer[SETUP_BUFFER_SIZE];
/* Per-argument 'consumed' flags, maintained by check_setup_args(). */
static char setup_used[MAX_SETUP_ARGS];
/* Nonzero once in2000_setup() has parsed the options. */
static int done_setup = 0;
/*
 * in2000_setup() - parse the "in2000=..." kernel command-line options.
 *
 * Copies @str into the module-private setup_buffer, splits it on commas,
 * and records a pointer to each non-empty token in setup_args[].  An empty
 * token (",,") still consumes a slot but leaves its setup_args[] entry
 * untouched, matching the historical behavior.  @ints is unused.
 */
static void __init in2000_setup(char *str, int *ints)
{
	char *tok, *comma;
	int idx;

	strlcpy(setup_buffer, str, SETUP_BUFFER_SIZE);

	tok = setup_buffer;
	idx = 0;
	while (*tok && idx < MAX_SETUP_ARGS) {
		comma = strchr(tok, ',');
		if (!comma) {
			/* Final token: record it and stop scanning. */
			setup_args[idx] = tok;
			break;
		}
		*comma = '\0';
		if (tok != comma)	/* skip empty tokens */
			setup_args[idx] = tok;
		tok = comma + 1;
		idx++;
	}

	/* No argument has been consumed by check_setup_args() yet. */
	for (idx = 0; idx < MAX_SETUP_ARGS; idx++)
		setup_used[idx] = 0;
	done_setup = 1;
}
/* check_setup_args() returns (index + 1) if key found, 0 if not
 */
/*
 * check_setup_args() - look up a command-line option by keyword prefix.
 *
 * Finds the first not-yet-consumed setup_args[] entry starting with @key
 * and marks it used.  If the keyword is followed by ":<digits>", the parsed
 * number is stored in *val; otherwise *val is set to -1.  @buf is accepted
 * for historical reasons but unused.
 *
 * Returns the matching index plus one (so 0 can mean "not found").
 */
static int __init check_setup_args(char *key, int *val, char *buf)
{
	int idx;
	char *p;

	for (idx = 0; idx < MAX_SETUP_ARGS; idx++) {
		if (!setup_used[idx] && !strncmp(setup_args[idx], key, strlen(key)))
			break;
	}
	if (idx == MAX_SETUP_ARGS)
		return 0;

	setup_used[idx] = 1;
	p = setup_args[idx] + strlen(key);
	*val = -1;
	if (*p == ':') {
		p++;
		if (*p >= '0' && *p <= '9')
			*val = simple_strtoul(p, NULL, 0);
	}
	return idx + 1;
}
/* The "correct" (ie portable) way to access memory-mapped hardware
* such as the IN2000 EPROM and dip switch is through the use of
* special macros declared in 'asm/io.h'. We use readb() and readl()
* when reading from the card's BIOS area in in2000_detect().
*/
/* Candidate BIOS EPROM base addresses probed by in2000_detect();
 * the zero entry terminates the list. */
static u32 bios_tab[] in2000__INITDATA = {
	0xc8000,
	0xd0000,
	0xd8000,
	0
};

/* IO port bases, indexed by the two address bits (SW_ADDR0|SW_ADDR1)
 * of the card's dip-switch image. */
static unsigned short base_tab[] in2000__INITDATA = {
	0x220,
	0x200,
	0x110,
	0x100,
};

/* IRQ numbers, indexed by the interrupt-select bits of the dip switch. */
static int int_tab[] in2000__INITDATA = {
	15,
	14,
	11,
	10
};
/*
 * probe_bios() - look for an IN2000 BIOS EPROM at physical address @addr.
 *
 * Maps 0x34 bytes of the candidate region and checks the two known ID
 * signatures ("NOVA" at +0x10 or "Alwa" at +0x30, little-endian).  On a
 * match, *switches receives the inverted dip-switch image shadowed at
 * +0x20.  *s1 always receives the raw word read from +0x10.
 *
 * Returns 1 if a BIOS was found, 0 otherwise (including ioremap failure).
 */
static int probe_bios(u32 addr, u32 *s1, uchar *switches)
{
	void __iomem *eprom = ioremap(addr, 0x34);
	int found = 0;

	if (!eprom)
		return 0;

	*s1 = readl(eprom + 0x10);
	if (*s1 == 0x41564f4e || readl(eprom + 0x30) == 0x61776c41) {
		/* Read the switch image that's mapped into EPROM space */
		*switches = ~readb(eprom + 0x20);
		found = 1;
	}

	iounmap(eprom);
	return found;
}
/* in2000_detect: probe for IN2000 host adapters, register a Scsi_Host for
 * each one found, and fully initialise the card (FIFO, IRQ, IO region,
 * per-target state, chip identification).  Returns the number of hosts
 * successfully brought up.  Called once by the SCSI mid-layer via the
 * host template's .detect hook. */
static int __init in2000_detect(struct scsi_host_template * tpnt)
{
struct Scsi_Host *instance;
struct IN2000_hostdata *hostdata;
int detect_count;
int bios;
int x;
unsigned short base;
uchar switches;
uchar hrev;
unsigned long flags;
int val;
char buf[32];
/* Thanks to help from Bill Earnest, probing for IN2000 cards is a
* pretty straightforward and fool-proof operation. There are 3
* possible locations for the IN2000 EPROM in memory space - if we
* find a BIOS signature, we can read the dip switch settings from
* the byte at BIOS+32 (shadowed in by logic on the card). From 2
* of the switch bits we get the card's address in IO space. There's
* an image of the dip switch there, also, so we have a way to back-
* check that this really is an IN2000 card. Very nifty. Use the
* 'ioport:xx' command-line parameter if your BIOS EPROM is absent
* or disabled.
*/
if (!done_setup && setup_strings)
in2000_setup(setup_strings, NULL);
detect_count = 0;
for (bios = 0; bios_tab[bios]; bios++) {
u32 s1 = 0;
/* An explicit 'ioport:' argument bypasses the BIOS probe entirely;
 * setting bios = 2 makes this the last loop iteration. */
if (check_setup_args("ioport", &val, buf)) {
base = val;
switches = ~inb(base + IO_SWITCHES) & 0xff;
printk("Forcing IN2000 detection at IOport 0x%x ", base);
bios = 2;
}
/*
* There have been a couple of BIOS versions with different layouts
* for the obvious ID strings. We look for the 2 most common ones and
* hope that they cover all the cases...
*/
else if (probe_bios(bios_tab[bios], &s1, &switches)) {
printk("Found IN2000 BIOS at 0x%x ", (unsigned int) bios_tab[bios]);
/* Find out where the IO space is */
x = switches & (SW_ADDR0 | SW_ADDR1);
base = base_tab[x];
/* Check for the IN2000 signature in IO space. */
x = ~inb(base + IO_SWITCHES) & 0xff;
if (x != switches) {
printk("Bad IO signature: %02x vs %02x.\n", x, switches);
continue;
}
} else
continue;
/* OK. We have a base address for the IO ports - run a few safety checks */
if (!(switches & SW_BIT7)) {	/* I _think_ all cards do this */
printk("There is no IN-2000 SCSI card at IOport 0x%03x!\n", base);
continue;
}
/* Let's assume any hardware version will work, although the driver
* has only been tested on 0x21, 0x22, 0x25, 0x26, and 0x27. We'll
* print out the rev number for reference later, but accept them all.
*/
hrev = inb(base + IO_HARDWARE);
/* Bit 2 tells us if interrupts are disabled */
if (switches & SW_DISINT) {
printk("The IN-2000 SCSI card at IOport 0x%03x ", base);
printk("is not configured for interrupt operation!\n");
printk("This driver requires an interrupt: cancelling detection.\n");
continue;
}
/* Ok. We accept that there's an IN2000 at ioaddr 'base'. Now
* initialize it.
*/
tpnt->proc_name = "in2000";
instance = scsi_register(tpnt, sizeof(struct IN2000_hostdata));
if (instance == NULL)
continue;
detect_count++;
hostdata = (struct IN2000_hostdata *) instance->hostdata;
instance->io_port = hostdata->io_base = base;
hostdata->dip_switch = switches;
hostdata->hrev = hrev;
write1_io(0, IO_FIFO_WRITE);	/* clear fifo counter */
write1_io(0, IO_FIFO_READ);	/* start fifo out in read mode */
write1_io(0, IO_INTR_MASK);	/* allow all ints */
/* IRQ line is encoded in two dip-switch bits; see int_tab[]. */
x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
if (request_irq(x, in2000_intr, IRQF_DISABLED, "in2000", instance)) {
printk("in2000_detect: Unable to allocate IRQ.\n");
detect_count--;
continue;
}
instance->irq = x;
instance->n_io_port = 13;
request_region(base, 13, "in2000");	/* lock in this IO space for our use */
/* Reset per-target bookkeeping (8 possible SCSI IDs). */
for (x = 0; x < 8; x++) {
hostdata->busy[x] = 0;
hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
hostdata->sync_stat[x] = SS_UNSET;	/* using default sync values */
#ifdef PROC_STATISTICS
hostdata->cmd_cnt[x] = 0;
hostdata->disc_allowed_cnt[x] = 0;
hostdata->disc_done_cnt[x] = 0;
#endif
}
hostdata->input_Q = NULL;
hostdata->selecting = NULL;
hostdata->connected = NULL;
hostdata->disconnected_Q = NULL;
hostdata->state = S_UNCONNECTED;
hostdata->fifo = FI_FIFO_UNUSED;
hostdata->level2 = L2_BASIC;
hostdata->disconnect = DIS_ADAPTIVE;
hostdata->args = DEBUG_DEFAULTS;
hostdata->incoming_ptr = 0;
hostdata->outgoing_len = 0;
hostdata->default_sx_per = DEFAULT_SX_PER;
/* Older BIOS's had a 'sync on/off' switch - use its setting */
if (s1 == 0x41564f4e && (switches & SW_SYNC_DOS5))
hostdata->sync_off = 0x00;	/* sync defaults to on */
else
hostdata->sync_off = 0xff;	/* sync defaults to off */
#ifdef PROC_INTERFACE
hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS | PR_CONNECTED | PR_INPUTQ | PR_DISCQ | PR_STOP;
#ifdef PROC_STATISTICS
hostdata->int_cnt = 0;
#endif
#endif
/* Apply any command-line overrides on top of the defaults above. */
if (check_setup_args("nosync", &val, buf))
hostdata->sync_off = val;
if (check_setup_args("period", &val, buf))
hostdata->default_sx_per = sx_table[round_period((unsigned int) val)].period_ns;
if (check_setup_args("disconnect", &val, buf)) {
if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS))
hostdata->disconnect = val;
else
hostdata->disconnect = DIS_ADAPTIVE;
}
if (check_setup_args("noreset", &val, buf))
hostdata->args ^= A_NO_SCSI_RESET;
if (check_setup_args("level2", &val, buf))
hostdata->level2 = val;
if (check_setup_args("debug", &val, buf))
hostdata->args = (val & DB_MASK);
#ifdef PROC_INTERFACE
if (check_setup_args("proc", &val, buf))
hostdata->proc = val;
#endif
/* FIXME: not strictly needed I think but the called code expects
to be locked */
spin_lock_irqsave(instance->host_lock, flags);
x = reset_hardware(instance, (hostdata->args & A_NO_SCSI_RESET) ? RESET_CARD : RESET_CARD_AND_BUS);
spin_unlock_irqrestore(instance->host_lock, flags);
hostdata->microcode = read_3393(hostdata, WD_CDB_1);
/* reset_hardware() reports the WD33C93 chip variant in its result. */
if (x & 0x01) {
if (x & B_FLAG)
hostdata->chip = C_WD33C93B;
else
hostdata->chip = C_WD33C93A;
} else
hostdata->chip = C_WD33C93;
printk("dip_switch=%02x irq=%d ioport=%02x floppy=%s sync/DOS5=%s ", (switches & 0x7f), instance->irq, hostdata->io_base, (switches & SW_FLOPPY) ? "Yes" : "No", (switches & SW_SYNC_DOS5) ? "Yes" : "No");
printk("hardware_ver=%02x chip=%s microcode=%02x\n", hrev, (hostdata->chip == C_WD33C93) ? "WD33c93" : (hostdata->chip == C_WD33C93A) ? "WD33c93A" : (hostdata->chip == C_WD33C93B) ? "WD33c93B" : "unknown", hostdata->microcode);
#ifdef DEBUGGING_ON
printk("setup_args = ");
for (x = 0; x < MAX_SETUP_ARGS; x++)
printk("%s,", setup_args[x]);
printk("\n");
#endif
if (hostdata->sync_off == 0xff)
printk("Sync-transfer DISABLED on all devices: ENABLE from command-line\n");
printk("IN2000 driver version %s - %s\n", IN2000_VERSION, IN2000_DATE);
}
return detect_count;
}
/* in2000_release: mid-layer .release hook.  Undo in2000_detect() for one
 * host: free the IRQ and release the IO region.  Always returns 0. */
static int in2000_release(struct Scsi_Host *shost)
{
if (shost->irq)
free_irq(shost->irq, shost);
if (shost->io_port && shost->n_io_port)
release_region(shost->io_port, shost->n_io_port);
return 0;
}
/* NOTE: I lifted this function straight out of the old driver,
* and have not tested it. Presumably it does what it's
* supposed to do...
*/
/* in2000_biosparam: report a BIOS-compatible heads/sectors/cylinders
 * geometry for 'capacity' (in 512-byte sectors) via iinfo[0..2].
 * Starts from the classic 64/32 translation and escalates the geometry
 * until the cylinder count fits in 1024, approximating the DOS ASPI
 * manager's large-drive handling.  Drives very near the boundaries
 * (i.e. near 2.0 Gb and 4.0 Gb) may not be handled correctly.
 * Always returns 0. */
static int in2000_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *iinfo)
{
	/* Escalation steps: {heads, sectors-per-track} */
	static const int geometries[][2] = {
		{64, 63}, {128, 63}, {255, 63}
	};
	int size = capacity;
	int step;

	iinfo[0] = 64;
	iinfo[1] = 32;
	iinfo[2] = size >> 11;
	for (step = 0; step < 3; step++) {
		if (iinfo[2] <= 1024)
			break;
		iinfo[0] = geometries[step][0];
		iinfo[1] = geometries[step][1];
		iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
	}
	return 0;
}
/* in2000_proc_info: /proc read/write handler for the host.
 * When 'in' is true, parse ONE keyword ("debug:", "disconnect:",
 * "period:", "resync:", "proc:", "level2:") from the written buffer and
 * update the corresponding hostdata field.  Otherwise format the status
 * sections enabled in hd->proc into 'buf' and return its length.
 * NOTE(review): the function-static 'stop' flag makes read pagination
 * global across all hosts, and output is built with unbounded strcat
 * into the caller's buffer - kept as-is, documented only. */
static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off, int len, int in)
{
#ifdef PROC_INTERFACE
char *bp;
char tbuf[128];
unsigned long flags;
struct IN2000_hostdata *hd;
Scsi_Cmnd *cmd;
int x, i;
static int stop = 0;
hd = (struct IN2000_hostdata *) instance->hostdata;
/* If 'in' is TRUE we need to _read_ the proc file. We accept the following
* keywords (same format as command-line, but only ONE per read):
* debug
* disconnect
* period
* resync
* proc
*/
if (in) {
buf[len] = '\0';
bp = buf;
if (!strncmp(bp, "debug:", 6)) {
bp += 6;
hd->args = simple_strtoul(bp, NULL, 0) & DB_MASK;
} else if (!strncmp(bp, "disconnect:", 11)) {
bp += 11;
x = simple_strtoul(bp, NULL, 0);
if (x < DIS_NEVER || x > DIS_ALWAYS)
x = DIS_ADAPTIVE;
hd->disconnect = x;
} else if (!strncmp(bp, "period:", 7)) {
bp += 7;
x = simple_strtoul(bp, NULL, 0);
hd->default_sx_per = sx_table[round_period((unsigned int) x)].period_ns;
} else if (!strncmp(bp, "resync:", 7)) {
bp += 7;
x = simple_strtoul(bp, NULL, 0);
/* 'resync' takes a bitmask of targets whose sync state to reset. */
for (i = 0; i < 7; i++)
if (x & (1 << i))
hd->sync_stat[i] = SS_UNSET;
} else if (!strncmp(bp, "proc:", 5)) {
bp += 5;
hd->proc = simple_strtoul(bp, NULL, 0);
} else if (!strncmp(bp, "level2:", 7)) {
bp += 7;
hd->level2 = simple_strtoul(bp, NULL, 0);
}
return len;
}
/* Read path: hold the host lock while walking the command queues. */
spin_lock_irqsave(instance->host_lock, flags);
bp = buf;
*bp = '\0';
if (hd->proc & PR_VERSION) {
sprintf(tbuf, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE);
strcat(bp, tbuf);
}
if (hd->proc & PR_INFO) {
sprintf(tbuf, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No");
strcat(bp, tbuf);
strcat(bp, "\nsync_xfer[] = ");
for (x = 0; x < 7; x++) {
sprintf(tbuf, "\t%02x", hd->sync_xfer[x]);
strcat(bp, tbuf);
}
strcat(bp, "\nsync_stat[] = ");
for (x = 0; x < 7; x++) {
sprintf(tbuf, "\t%02x", hd->sync_stat[x]);
strcat(bp, tbuf);
}
}
#ifdef PROC_STATISTICS
if (hd->proc & PR_STATISTICS) {
strcat(bp, "\ncommands issued: ");
for (x = 0; x < 7; x++) {
sprintf(tbuf, "\t%ld", hd->cmd_cnt[x]);
strcat(bp, tbuf);
}
strcat(bp, "\ndisconnects allowed:");
for (x = 0; x < 7; x++) {
sprintf(tbuf, "\t%ld", hd->disc_allowed_cnt[x]);
strcat(bp, tbuf);
}
strcat(bp, "\ndisconnects done: ");
for (x = 0; x < 7; x++) {
sprintf(tbuf, "\t%ld", hd->disc_done_cnt[x]);
strcat(bp, tbuf);
}
sprintf(tbuf, "\ninterrupts: \t%ld", hd->int_cnt);
strcat(bp, tbuf);
}
#endif
if (hd->proc & PR_CONNECTED) {
strcat(bp, "\nconnected: ");
if (hd->connected) {
cmd = (Scsi_Cmnd *) hd->connected;
sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
strcat(bp, tbuf);
}
}
if (hd->proc & PR_INPUTQ) {
strcat(bp, "\ninput_Q: ");
cmd = (Scsi_Cmnd *) hd->input_Q;
/* Commands are chained through host_scribble. */
while (cmd) {
sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
strcat(bp, tbuf);
cmd = (Scsi_Cmnd *) cmd->host_scribble;
}
}
if (hd->proc & PR_DISCQ) {
strcat(bp, "\ndisconnected_Q:");
cmd = (Scsi_Cmnd *) hd->disconnected_Q;
while (cmd) {
sprintf(tbuf, " %d:%d(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
strcat(bp, tbuf);
cmd = (Scsi_Cmnd *) cmd->host_scribble;
}
}
if (hd->proc & PR_TEST) {
;			/* insert your own custom function here */
}
strcat(bp, "\n");
spin_unlock_irqrestore(instance->host_lock, flags);
*start = buf;
if (stop) {
stop = 0;
return 0;		/* return 0 to signal end-of-file */
}
if (off > 0x40000)	/* ALWAYS stop after 256k bytes have been read */
stop = 1;
if (hd->proc & PR_STOP)	/* stop every other time */
stop = 1;
return strlen(bp);
#else				/* PROC_INTERFACE */
return 0;
#endif				/* PROC_INTERFACE */
}
MODULE_LICENSE("GPL");
/* SCSI mid-layer template describing the IN2000 driver's entry points
 * and queueing limits; consumed by scsi_module.c below. */
static struct scsi_host_template driver_template = {
.proc_name = "in2000",
.proc_info = in2000_proc_info,
.name = "Always IN2000",
.detect = in2000_detect,
.release = in2000_release,
.queuecommand = in2000_queuecommand,
.eh_abort_handler = in2000_abort,
.eh_bus_reset_handler = in2000_bus_reset,
.bios_param = in2000_biosparam,
.can_queue = IN2000_CAN_Q,
.this_id = IN2000_HOST_ID,
.sg_tablesize = IN2000_SG,
.cmd_per_lun = IN2000_CPL,
.use_clustering = DISABLE_CLUSTERING,
};
#include "scsi_module.c"
| gpl-2.0 |
evitareul/android_kernel_htc_evitareul | fs/jffs2/build.c | 3294 | 11123 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include "nodelist.h"
static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *,
struct jffs2_inode_cache *, struct jffs2_full_dirent **);
/* Return the head of the first non-empty inocache hash chain at or after
 * bucket *i, leaving *i pointing at that bucket; NULL (with *i at
 * inocache_hashsize) when the table is exhausted. */
static inline struct jffs2_inode_cache *
first_inode_chain(int *i, struct jffs2_sb_info *c)
{
	while (*i < c->inocache_hashsize) {
		struct jffs2_inode_cache *head = c->inocache_list[*i];

		if (head)
			return head;
		(*i)++;
	}
	return NULL;
}
/* Advance the (bucket *i, entry ic) cursor to the next inocache: follow
 * the current hash chain if it continues, otherwise step to the head of
 * the next non-empty bucket. */
static inline struct jffs2_inode_cache *
next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
{
	if (!ic->next) {
		(*i)++;
		return first_inode_chain(i, c);
	}
	return ic->next;
}
/* Iterate over every inocache in c's hash table; 'i' and 'ic' are the
 * caller-supplied bucket index and entry cursor. */
#define for_each_inode(i, c, ic) \
for (i = 0, ic = first_inode_chain(&i, (c)); \
ic; \
ic = next_inode(&i, ic, (c)))
/* Pass 1 of the fs build: for a directory inode 'ic', walk its scanned
 * dirents and account each child's link.  For regular files pino_nlink
 * is a link count and is incremented; for directories pino_nlink stores
 * the parent's inode number instead (a directory has exactly one
 * parent), so a second claim indicates a bogus hard-linked directory. */
static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
struct jffs2_inode_cache *ic)
{
struct jffs2_full_dirent *fd;
dbg_fsbuild("building directory inode #%u\n", ic->ino);
/* For each child, increase nlink */
for(fd = ic->scan_dents; fd; fd = fd->next) {
struct jffs2_inode_cache *child_ic;
/* fd->ino == 0 is a deletion dirent; nothing to account. */
if (!fd->ino)
continue;
/* we can get high latency here with huge directories */
child_ic = jffs2_get_ino_cache(c, fd->ino);
if (!child_ic) {
dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
fd->name, fd->ino, ic->ino);
jffs2_mark_node_obsolete(c, fd->raw);
continue;
}
if (fd->type == DT_DIR) {
if (child_ic->pino_nlink) {
JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
fd->name, fd->ino, ic->ino);
/* TODO: What do we do about it? */
} else {
child_ic->pino_nlink = ic->ino;
}
} else
child_ic->pino_nlink++;
dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
/* Can't free scan_dents so far. We might need them in pass 2 */
}
}
/* Scan plan:
- Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
- Scan directory tree from top down, setting nlink in inocaches
- Scan inocaches for inodes with nlink==0
*/
/* Build the in-core filesystem state after mount:
 *  - scan the medium, populating the inocache table (jffs2_scan_medium)
 *  - pass 1: walk directory dirents, setting nlink/parent links
 *  - pass 2/2a: remove inodes with no remaining links, iteratively
 *    following the dead_fds work list to avoid recursion
 *  - free the temporary scan_dents lists and init the xattr subsystem.
 * Returns 0 on success or the error from jffs2_scan_medium; on error
 * any remaining scan_dents and xattr state are released. */
static int jffs2_build_filesystem(struct jffs2_sb_info *c)
{
int ret;
int i;
struct jffs2_inode_cache *ic;
struct jffs2_full_dirent *fd;
struct jffs2_full_dirent *dead_fds = NULL;
dbg_fsbuild("build FS data structures\n");
/* First, scan the medium and build all the inode caches with
lists of physical nodes */
c->flags |= JFFS2_SB_FLAG_SCANNING;
ret = jffs2_scan_medium(c);
c->flags &= ~JFFS2_SB_FLAG_SCANNING;
if (ret)
goto exit;
dbg_fsbuild("scanned flash completely\n");
jffs2_dbg_dump_block_lists_nolock(c);
dbg_fsbuild("pass 1 starting\n");
c->flags |= JFFS2_SB_FLAG_BUILDING;
/* Now scan the directory tree, increasing nlink according to every dirent found. */
for_each_inode(i, c, ic) {
if (ic->scan_dents) {
jffs2_build_inode_pass1(c, ic);
cond_resched();
}
}
dbg_fsbuild("pass 1 complete\n");
/* Next, scan for inodes with nlink == 0 and remove them. If
they were directories, then decrement the nlink of their
children too, and repeat the scan. As that's going to be
a fairly uncommon occurrence, it's not so evil to do it this
way. Recursion bad. */
dbg_fsbuild("pass 2 starting\n");
for_each_inode(i, c, ic) {
if (ic->pino_nlink)
continue;
jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
cond_resched();
}
/* Pass 2a: removing a directory may have orphaned its children; they
were queued on dead_fds rather than handled recursively. */
dbg_fsbuild("pass 2a starting\n");
while (dead_fds) {
fd = dead_fds;
dead_fds = fd->next;
ic = jffs2_get_ino_cache(c, fd->ino);
if (ic)
jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
jffs2_free_full_dirent(fd);
}
dbg_fsbuild("pass 2a complete\n");
dbg_fsbuild("freeing temporary data structures\n");
/* Finally, we can scan again and free the dirent structs */
for_each_inode(i, c, ic) {
while(ic->scan_dents) {
fd = ic->scan_dents;
ic->scan_dents = fd->next;
jffs2_free_full_dirent(fd);
}
ic->scan_dents = NULL;
cond_resched();
}
jffs2_build_xattr_subsystem(c);
c->flags &= ~JFFS2_SB_FLAG_BUILDING;
dbg_fsbuild("FS build complete\n");
/* Rotate the lists by some number to ensure wear levelling */
jffs2_rotate_lists(c);
ret = 0;
exit:
if (ret) {
for_each_inode(i, c, ic) {
while(ic->scan_dents) {
fd = ic->scan_dents;
ic->scan_dents = fd->next;
jffs2_free_full_dirent(fd);
}
}
jffs2_clear_xattr_subsystem(c);
}
return ret;
}
/* Remove an inode that has no remaining links: obsolete every physical
 * node belonging to it, then - if it was a directory with scanned
 * dirents - drop each child's link.  Children whose link count reaches
 * zero are queued on *dead_fds for the caller (pass 2a) to process
 * iteratively, avoiding recursion.  The inocache itself is left for the
 * erase code to free once all nodes are gone. */
static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
struct jffs2_inode_cache *ic,
struct jffs2_full_dirent **dead_fds)
{
struct jffs2_raw_node_ref *raw;
struct jffs2_full_dirent *fd;
dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino);
/* The nodes list is circular, terminated by a pointer back to ic. */
raw = ic->nodes;
while (raw != (void *)ic) {
struct jffs2_raw_node_ref *next = raw->next_in_ino;
dbg_fsbuild("obsoleting node at 0x%08x\n", ref_offset(raw));
jffs2_mark_node_obsolete(c, raw);
raw = next;
}
if (ic->scan_dents) {
int whinged = 0;
dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino);
while(ic->scan_dents) {
struct jffs2_inode_cache *child_ic;
fd = ic->scan_dents;
ic->scan_dents = fd->next;
if (!fd->ino) {
/* It's a deletion dirent. Ignore it */
dbg_fsbuild("child \"%s\" is a deletion dirent, skipping...\n", fd->name);
jffs2_free_full_dirent(fd);
continue;
}
if (!whinged)
whinged = 1;
dbg_fsbuild("removing child \"%s\", ino #%u\n", fd->name, fd->ino);
child_ic = jffs2_get_ino_cache(c, fd->ino);
if (!child_ic) {
dbg_fsbuild("cannot remove child \"%s\", ino #%u, because it doesn't exist\n",
fd->name, fd->ino);
jffs2_free_full_dirent(fd);
continue;
}
/* Reduce nlink of the child. If it's now zero, stick it on the
dead_fds list to be cleaned up later. Else just free the fd */
/* For directories pino_nlink holds the parent ino (see pass 1),
so losing the one parent zeroes it outright. */
if (fd->type == DT_DIR)
child_ic->pino_nlink = 0;
else
child_ic->pino_nlink--;
if (!child_ic->pino_nlink) {
dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
fd->ino, fd->name);
fd->next = *dead_fds;
*dead_fds = fd;
} else {
dbg_fsbuild("inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
fd->ino, fd->name, child_ic->pino_nlink);
jffs2_free_full_dirent(fd);
}
}
}
/*
We don't delete the inocache from the hash list and free it yet.
The erase code will do that, when all the nodes are completely gone.
*/
}
/* Compute the reserved-block thresholds that govern when writes are
 * allowed, when the GC thread runs, and when GC may merge nodes or eat
 * bad blocks.  Pure derivation from flash geometry; results are logged
 * via dbg_fsbuild(). */
static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
{
uint32_t size;
/* Deletion should almost _always_ be allowed. We're fairly
buggered once we stop allowing people to delete stuff
because there's not enough free space... */
c->resv_blocks_deletion = 2;
/* Be conservative about how much space we need before we allow writes.
On top of that which is required for deletia, require an extra 2%
of the medium to be available, for overhead caused by nodes being
split across blocks, etc. */
size = c->flash_size / 50;	/* 2% of flash size */
size += c->nr_blocks * 100;	/* And 100 bytes per eraseblock */
size += c->sector_size - 1;	/* ... and round up */
c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);
/* When do we let the GC thread run in the background */
c->resv_blocks_gctrigger = c->resv_blocks_write + 1;
/* When do we allow garbage collection to merge nodes to make
long-term progress at the expense of short-term space exhaustion? */
c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;
/* When do we allow garbage collection to eat from bad blocks rather
than actually making progress? */
c->resv_blocks_gcbad = 0;//c->resv_blocks_deletion + 2;
/* What number of 'very dirty' eraseblocks do we allow before we
trigger the GC thread even if we don't _need_ the space. When we
can't mark nodes obsolete on the medium, the old dirty nodes cause
performance problems because we have to inspect and discard them. */
c->vdirty_blocks_gctrigger = c->resv_blocks_gctrigger;
if (jffs2_can_mark_obsolete(c))
c->vdirty_blocks_gctrigger *= 10;
/* If there's less than this amount of dirty space, don't bother
trying to GC to make more space. It'll be a fruitless task */
c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);
dbg_fsbuild("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks);
dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n",
c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024);
dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n",
c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024);
dbg_fsbuild("Blocks required to quiesce GC thread: %d (%d KiB)\n",
c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024);
dbg_fsbuild("Blocks required to allow GC merges: %d (%d KiB)\n",
c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024);
dbg_fsbuild("Blocks required to GC bad blocks: %d (%d KiB)\n",
c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024);
dbg_fsbuild("Amount of dirty space required to GC: %d bytes\n",
c->nospc_dirty_size);
dbg_fsbuild("Very dirty blocks before GC triggered: %d\n",
c->vdirty_blocks_gctrigger);
}
/* jffs2_do_mount_fs - allocate per-eraseblock state and build the fs.
 * @c: JFFS2 superblock info
 *
 * Allocates the eraseblock array (vmalloc'd when the flash is large
 * enough to need it), initialises the block lists, runs the medium
 * scan / directory build, and computes the GC trigger levels.
 *
 * Returns 0 on success or a negative errno.  On any failure everything
 * allocated here is released again.
 *
 * Fix: on the jffs2_build_filesystem() failure path the summary
 * collection state allocated by jffs2_sum_init() was leaked; release
 * it with jffs2_sum_exit() before bailing out (matches the later
 * mainline fix for this function).
 */
int jffs2_do_mount_fs(struct jffs2_sb_info *c)
{
int ret;
int i;
int size;
c->free_size = c->flash_size;
c->nr_blocks = c->flash_size / c->sector_size;
size = sizeof(struct jffs2_eraseblock) * c->nr_blocks;
#ifndef __ECOS
if (jffs2_blocks_use_vmalloc(c))
c->blocks = vzalloc(size);
else
#endif
c->blocks = kzalloc(size, GFP_KERNEL);
if (!c->blocks)
return -ENOMEM;
/* Every block starts out fully free and on no list. */
for (i=0; i<c->nr_blocks; i++) {
INIT_LIST_HEAD(&c->blocks[i].list);
c->blocks[i].offset = i * c->sector_size;
c->blocks[i].free_size = c->sector_size;
}
INIT_LIST_HEAD(&c->clean_list);
INIT_LIST_HEAD(&c->very_dirty_list);
INIT_LIST_HEAD(&c->dirty_list);
INIT_LIST_HEAD(&c->erasable_list);
INIT_LIST_HEAD(&c->erasing_list);
INIT_LIST_HEAD(&c->erase_checking_list);
INIT_LIST_HEAD(&c->erase_pending_list);
INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
INIT_LIST_HEAD(&c->erase_complete_list);
INIT_LIST_HEAD(&c->free_list);
INIT_LIST_HEAD(&c->bad_list);
INIT_LIST_HEAD(&c->bad_used_list);
c->highest_ino = 1;
c->summary = NULL;
ret = jffs2_sum_init(c);
if (ret)
goto out_free;
if (jffs2_build_filesystem(c)) {
dbg_fsbuild("build_fs failed\n");
jffs2_free_ino_caches(c);
jffs2_free_raw_node_refs(c);
/* Release the summary state allocated by jffs2_sum_init()
above; previously leaked on this error path. */
jffs2_sum_exit(c);
ret = -EIO;
goto out_free;
}
jffs2_calc_trigger_levels(c);
return 0;
out_free:
#ifndef __ECOS
if (jffs2_blocks_use_vmalloc(c))
vfree(c->blocks);
else
#endif
kfree(c->blocks);
return ret;
}
| gpl-2.0 |
titusece/linux_imx | arch/arm/mach-omap2/omap_hwmod_common_data.c | 4574 | 1979 | /*
* omap_hwmod common data structures
*
* Copyright (C) 2010 Texas Instruments, Inc.
* Thara Gopinath <thara@ti.com>
* Benoît Cousson
*
* Copyright (C) 2010 Nokia Corporation
* Paul Walmsley
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This data/structures are to be used while defining OMAP on-chip module
* data and their integration with other OMAP modules and Linux.
*/
#include "omap_hwmod.h"
#include "omap_hwmod_common_data.h"
/**
* struct omap_hwmod_sysc_type1 - TYPE1 sysconfig scheme.
*
* To be used by hwmod structure to specify the sysconfig offsets
* if the device ip is compliant with the original PRCM protocol
* defined for OMAP2420.
*/
/* Bitfield shifts within the SYSCONFIG register for TYPE1 (OMAP2420-era) IPs. */
struct omap_hwmod_sysc_fields omap_hwmod_sysc_type1 = {
.midle_shift = SYSC_TYPE1_MIDLEMODE_SHIFT,	/* master-standby mode */
.clkact_shift = SYSC_TYPE1_CLOCKACTIVITY_SHIFT,	/* clock activity */
.sidle_shift = SYSC_TYPE1_SIDLEMODE_SHIFT,	/* slave-idle mode */
.enwkup_shift = SYSC_TYPE1_ENAWAKEUP_SHIFT,	/* wakeup enable */
.srst_shift = SYSC_TYPE1_SOFTRESET_SHIFT,	/* soft reset */
.autoidle_shift = SYSC_TYPE1_AUTOIDLE_SHIFT,	/* autoidle */
};
/**
* struct omap_hwmod_sysc_type2 - TYPE2 sysconfig scheme.
*
* To be used by hwmod structure to specify the sysconfig offsets if the
* device ip is compliant with the new PRCM protocol defined for new
* OMAP4 IPs.
*/
/* Bitfield shifts within the SYSCONFIG register for TYPE2 (OMAP4-era) IPs;
 * note: no clock-activity, wakeup-enable or autoidle fields in this scheme. */
struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2 = {
.midle_shift = SYSC_TYPE2_MIDLEMODE_SHIFT,	/* master-standby mode */
.sidle_shift = SYSC_TYPE2_SIDLEMODE_SHIFT,	/* slave-idle mode */
.srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT,	/* soft reset */
.dmadisable_shift = SYSC_TYPE2_DMADISABLE_SHIFT,	/* DMA disable */
};
/**
* struct omap_hwmod_sysc_type3 - TYPE3 sysconfig scheme.
* Used by some IPs on AM33xx
*/
/* TYPE3 scheme (some AM33xx IPs): only idle-mode fields are present. */
struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3 = {
.midle_shift = SYSC_TYPE3_MIDLEMODE_SHIFT,	/* master-standby mode */
.sidle_shift = SYSC_TYPE3_SIDLEMODE_SHIFT,	/* slave-idle mode */
};
/* Shared DISPC attributes for OMAP2/3: two overlay managers, and no
 * FRAMEDONETV interrupt on these SoCs. */
struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr = {
.manager_count = 2,
.has_framedonetv_irq = 0
};
| gpl-2.0 |
limbo127/KVMGT-kernel | sound/last.c | 4830 | 1272 | /*
* Advanced Linux Sound Architecture
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <sound/core.h>
/*
 * Print the list of registered ALSA cards to the kernel log.  Runs as a
 * late initcall so every card driver has had a chance to register first.
 */
static int __init alsa_sound_last_init(void)
{
	int card, registered = 0;

	printk(KERN_INFO "ALSA device list:\n");
	for (card = 0; card < SNDRV_CARDS; card++) {
		if (snd_cards[card] == NULL)
			continue;
		printk(KERN_INFO " #%i: %s\n", card, snd_cards[card]->longname);
		registered++;
	}
	if (registered == 0)
		printk(KERN_INFO " No soundcards found.\n");
	return 0;
}
late_initcall_sync(alsa_sound_last_init);
| gpl-2.0 |
ftCommunity/ft-TXT | board/knobloch/TXT/board-support/ti-linux/sound/last.c | 4830 | 1272 | /*
* Advanced Linux Sound Architecture
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <sound/core.h>
/*
 * Print the list of registered ALSA cards to the kernel log.  Runs as a
 * late initcall so every card driver has had a chance to register first.
 */
static int __init alsa_sound_last_init(void)
{
	int card, registered = 0;

	printk(KERN_INFO "ALSA device list:\n");
	for (card = 0; card < SNDRV_CARDS; card++) {
		if (snd_cards[card] == NULL)
			continue;
		printk(KERN_INFO " #%i: %s\n", card, snd_cards[card]->longname);
		registered++;
	}
	if (registered == 0)
		printk(KERN_INFO " No soundcards found.\n");
	return 0;
}
late_initcall_sync(alsa_sound_last_init);
| gpl-2.0 |
AOKP/kernel_asus_flo | sound/oss/pas2_card.c | 5086 | 9558 | /*
* sound/oss/pas2_card.c
*
* Detection routine for the Pro Audio Spectrum cards.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include "sound_config.h"
#include "pas2.h"
#include "sb.h"
/* Encoding of the native PAS DMA channel (index = channel number). */
static unsigned char dma_bits[] = {
4, 1, 2, 3, 0, 5, 6, 7
};
/* Encoding of the native PAS IRQ line (index = IRQ number; 0 = invalid). */
static unsigned char irq_bits[] = {
0, 0, 1, 2, 3, 4, 5, 6, 0, 1, 7, 8, 9, 0, 10, 11
};
/* Register encodings for the Sound Blaster emulation's IRQ and DMA
 * (index = IRQ/DMA number; 0 = invalid combination). */
static unsigned char sb_irq_bits[] = {
0x00, 0x00, 0x08, 0x10, 0x00, 0x18, 0x00, 0x20,
0x00, 0x08, 0x28, 0x30, 0x38, 0, 0
};
static unsigned char sb_dma_bits[] = {
0x00, 0x40, 0x80, 0xC0, 0, 0, 0, 0
};
/*
* The Address Translation code is used to convert I/O register addresses to
* be relative to the given base -register
*/
/* Offset added to every register address by pas_read()/pas_write(), so a
 * card configured at a non-default base still uses the 0x388-relative
 * register numbers throughout the driver. */
int pas_translate_code = 0;
/* Currently enabled interrupt sources (mirror of register 0x0B8B). */
static int pas_intr_mask;
static int pas_irq;		/* IRQ line assigned to the card */
static int pas_sb_base;		/* IO base of the SB emulation, 0 if off */
DEFINE_SPINLOCK(pas_lock);
/* Compile-time configuration knobs, overridable as module parameters. */
#ifndef CONFIG_PAS_JOYSTICK
static bool joystick;
#else
static bool joystick = 1;
#endif
#ifdef SYMPHONY_PAS
static bool symphony = 1;
#else
static bool symphony;
#endif
#ifdef BROKEN_BUS_CLOCK
static bool broken_bus_clock = 1;
#else
static bool broken_bus_clock;
#endif
static struct address_info cfg;	/* native PAS io/irq/dma configuration */
static struct address_info cfg2;	/* SB-emulation configuration */
/* Detected board model; index into pas_model_names[] (0 = none). */
char pas_model = 0;
static char *pas_model_names[] = {
"",
"Pro AudioSpectrum+",
"CDPC",
"Pro AudioSpectrum 16",
"Pro AudioSpectrum 16D"
};
/*
* pas_read() and pas_write() are equivalents of inb and outb
* These routines perform the I/O address translation required
* to support other than the default base address
*/
extern void mix_write(unsigned char data, int ioaddr);
/* Read a PAS register: inb() with the base-address translation applied. */
unsigned char pas_read(int ioaddr)
{
return inb(ioaddr + pas_translate_code);
}
/* Write a PAS register: outb() with the base-address translation applied. */
void pas_write(unsigned char data, int ioaddr)
{
outb((data), ioaddr + pas_translate_code);
}
/******************* Begin of the Interrupt Handler ********************/
/* Interrupt handler: read and acknowledge the PAS interrupt status
 * register (0x0B89), then dispatch PCM (bit 3) and MIDI (bit 4)
 * events to their handlers. */
static irqreturn_t pasintr(int irq, void *dev_id)
{
	int status = pas_read(0x0B89);

	pas_write(status, 0x0B89);	/* Clear interrupt */

	if (status & 0x08) {
		pas_pcm_interrupt(status, 1);
		status &= ~0x08;
	}
	if (status & 0x10) {
		pas_midi_interrupt();
		status &= ~0x10;
	}
	return IRQ_HANDLED;
}
/* Enable the interrupt sources in 'mask' by OR-ing them into the shadow
 * mask and writing it to the interrupt-mask register.  Always returns 0. */
int pas_set_intr(int mask)
{
	if (mask) {
		pas_intr_mask |= mask;
		pas_write(pas_intr_mask, 0x0B8B);
	}
	return 0;
}
/* Disable the interrupt sources in 'mask' by clearing them from the shadow
 * mask and writing it to the interrupt-mask register.  Always returns 0. */
int pas_remove_intr(int mask)
{
	if (mask) {
		pas_intr_mask &= ~mask;
		pas_write(pas_intr_mask, 0x0B8B);
	}
	return 0;
}
/******************* End of the Interrupt handler **********************/
/******************* Begin of the Initialization Code ******************/
/* Program the detected PAS card from hw_config: base register setup,
 * IRQ and DMA selection/allocation, optional Symphony-chipset and
 * broken-bus-clock workarounds, and Sound Blaster emulation (from the
 * module-level cfg2).  Returns 1 on success, 0 if any resource was
 * invalid or could not be allocated (a warning is printed). */
static int __init config_pas_hw(struct address_info *hw_config)
{
char ok = 1;
unsigned int_ptrs;	/* scsi/sound interrupt pointers */
pas_irq = hw_config->irq;
pas_write(0x00, 0x0B8B);
pas_write(0x36, 0x138B);
pas_write(0x36, 0x1388);
pas_write(0, 0x1388);
pas_write(0x74, 0x138B);
pas_write(0x74, 0x1389);
pas_write(0, 0x1389);
pas_write(0x80 | 0x40 | 0x20 | 1, 0x0B8A);
pas_write(0x80 | 0x20 | 0x10 | 0x08 | 0x01, 0xF8A);
pas_write(0x01 | 0x02 | 0x04 | 0x10 /*
* |
* 0x80
*/ , 0xB88);
pas_write(0x80 | (joystick ? 0x40 : 0), 0xF388);
/* Validate and program the IRQ line. */
if (pas_irq < 0 || pas_irq > 15) {
printk(KERN_ERR "PAS16: Invalid IRQ %d", pas_irq);
hw_config->irq=-1;
ok = 0;
} else {
int_ptrs = pas_read(0xF38A);
int_ptrs = (int_ptrs & 0xf0) | irq_bits[pas_irq];
pas_write(int_ptrs, 0xF38A);
if (!irq_bits[pas_irq]) {
printk(KERN_ERR "PAS16: Invalid IRQ %d", pas_irq);
hw_config->irq=-1;
ok = 0;
} else {
if (request_irq(pas_irq, pasintr, 0, "PAS16",hw_config) < 0) {
printk(KERN_ERR "PAS16: Cannot allocate IRQ %d\n",pas_irq);
hw_config->irq=-1;
ok = 0;
}
}
}
/* Validate, program and allocate the DMA channel. */
if (hw_config->dma < 0 || hw_config->dma > 7) {
printk(KERN_ERR "PAS16: Invalid DMA selection %d", hw_config->dma);
hw_config->dma=-1;
ok = 0;
} else {
pas_write(dma_bits[hw_config->dma], 0xF389);
if (!dma_bits[hw_config->dma]) {
printk(KERN_ERR "PAS16: Invalid DMA selection %d", hw_config->dma);
hw_config->dma=-1;
ok = 0;
} else {
if (sound_alloc_dma(hw_config->dma, "PAS16")) {
printk(KERN_ERR "pas2_card.c: Can't allocate DMA channel\n");
hw_config->dma=-1;
ok = 0;
}
}
}
/*
* This fixes the timing problems of the PAS due to the Symphony chipset
* as per Media Vision. Only define this if your PAS doesn't work correctly.
*/
if(symphony) {
outb((0x05), 0xa8);
outb((0x60), 0xa9);
}
if(broken_bus_clock)
pas_write(0x01 | 0x10 | 0x20 | 0x04, 0x8388);
else
/*
* pas_write(0x01, 0x8388);
*/
pas_write(0x01 | 0x10 | 0x20, 0x8388);
pas_write(0x18, 0x838A);	/* ??? */
pas_write(0x20 | 0x01, 0x0B8A);	/* Mute off, filter = 17.897 kHz */
pas_write(8, 0xBF8A);
mix_write(0x80 | 5, 0x078B);
mix_write(5, 0x078B);
/* Configure the Sound Blaster emulation from the cfg2 settings. */
{
struct address_info *sb_config;
sb_config = &cfg2;
if (sb_config->io_base) {
unsigned char irq_dma;
/*
* Turn on Sound Blaster compatibility
* bit 1 = SB emulation
* bit 0 = MPU401 emulation (CDPC only :-( )
*/
pas_write(0x02, 0xF788);
/*
* "Emulation address"
*/
pas_write((sb_config->io_base >> 4) & 0x0f, 0xF789);
pas_sb_base = sb_config->io_base;
if (!sb_dma_bits[sb_config->dma])
printk(KERN_ERR "PAS16 Warning: Invalid SB DMA %d\n\n", sb_config->dma);
if (!sb_irq_bits[sb_config->irq])
printk(KERN_ERR "PAS16 Warning: Invalid SB IRQ %d\n\n", sb_config->irq);
irq_dma = sb_dma_bits[sb_config->dma] |
sb_irq_bits[sb_config->irq];
pas_write(irq_dma, 0xFB8A);
} else
pas_write(0x00, 0xF788);
}
if (!ok)
printk(KERN_WARNING "PAS16: Driver not enabled\n");
return ok;
}
/* Probe for a PAS card at hw_config->io_base.  Activates the board via
 * the 0x9A01 "wake-up" port, then distinguishes a PAS16-series board by
 * the read-only revision bits in register 0x0B8B.  Returns the model
 * byte (non-zero) on success, 0 if no PAS16 is present.  Also sets the
 * pas_translate_code and pas_model globals as side effects. */
static int __init detect_pas_hw(struct address_info *hw_config)
{
unsigned char board_id, foo;
/*
* WARNING: Setting an option like W:1 or so that disables warm boot reset
* of the card will screw up this detect code something fierce. Adding code
* to handle this means possibly interfering with other cards on the bus if
* you have something on base port 0x388. SO be forewarned.
*/
outb((0xBC), 0x9A01);	/* Activate first board */
outb((hw_config->io_base >> 2), 0x9A01);	/* Set base address */
pas_translate_code = hw_config->io_base - 0x388;
pas_write(1, 0xBF88);	/* Select one wait states */
board_id = pas_read(0x0B8B);
/* 0xff means the bus floated: nothing there at all. */
if (board_id == 0xff)
return 0;
/*
* We probably have a PAS-series board, now check for a PAS16-series board
* by trying to change the board revision bits. PAS16-series hardware won't
* let you do this - the bits are read-only.
*/
foo = board_id ^ 0xe0;
pas_write(foo, 0x0B8B);
foo = pas_read(0x0B8B);
pas_write(board_id, 0x0B8B);
if (board_id != foo)
return 0;
pas_model = pas_read(0xFF88);
return pas_model;
}
/*
 * Attach a detected PAS16 card: print its model string, configure the
 * hardware, and register the PCM, MIDI and mixer devices.
 */
static void __init attach_pas_card(struct address_info *hw_config)
{
	pas_irq = hw_config->irq;
	if (detect_pas_hw(hw_config))
	{
		/* NOTE(review): re-reads the model register even though
		 * detect_pas_hw() already cached it in pas_model. */
		if ((pas_model = pas_read(0xFF88)))
		{
			char temp[100];
			sprintf(temp,
			    "%s rev %d", pas_model_names[(int) pas_model],
				pas_read(0x2789));	/* 0x2789 = board revision */
			conf_printf(temp, hw_config);
		}
		if (config_pas_hw(hw_config))
		{
			pas_pcm_init(hw_config);
			pas_midi_init();
			pas_init_mixer();
		}
	}
}
/* Probe entry point: non-zero when PAS16 hardware is present. */
static inline int __init probe_pas(struct address_info *hw_config)
{
	return detect_pas_hw(hw_config);
}
/*
 * Release everything the driver acquired: DMA channel, IRQ line, and the
 * registered mixer/MIDI/audio devices.  A value of -1 (or a non-positive
 * dma/irq) means "never allocated" and is skipped.
 */
static void __exit unload_pas(struct address_info *hw_config)
{
	extern int pas_audiodev;
	extern int pas2_mididev;
	if (hw_config->dma>0)
		sound_free_dma(hw_config->dma);
	if (hw_config->irq>0)
		free_irq(hw_config->irq, hw_config);
	if(pas_audiodev!=-1)
		sound_unload_mixerdev(audio_devs[pas_audiodev]->mixer_dev);
	if(pas2_mididev!=-1)
		sound_unload_mididev(pas2_mididev);
	if(pas_audiodev!=-1)
		sound_unload_audiodev(pas_audiodev);
}
/* Module/boot parameters; -1 (or 0 for sb_io) means "not configured". */
static int __initdata io = -1;	/* PAS16 base I/O port */
static int __initdata irq = -1;
static int __initdata dma = -1;
static int __initdata dma16 = -1;	/* Set this for modules that need it */
static int __initdata sb_io = 0;	/* Sound Blaster emulation base port */
static int __initdata sb_irq = -1;
static int __initdata sb_dma = -1;
static int __initdata sb_dma16 = -1;
module_param(io, int, 0);
module_param(irq, int, 0);
module_param(dma, int, 0);
module_param(dma16, int, 0);
module_param(sb_io, int, 0);
module_param(sb_irq, int, 0);
module_param(sb_dma, int, 0);
module_param(sb_dma16, int, 0);
module_param(joystick, bool, 0);
module_param(symphony, bool, 0);
module_param(broken_bus_clock, bool, 0);
MODULE_LICENSE("GPL");
/*
 * Module init: copy the module parameters into the two configuration
 * records (card + Sound Blaster emulation), validate the mandatory
 * ones, then probe for and attach the card.
 */
static int __init init_pas2(void)
{
	printk(KERN_INFO "Pro Audio Spectrum driver Copyright (C) by Hannu Savolainen 1993-1996\n");

	cfg.io_base	= io;
	cfg.irq		= irq;
	cfg.dma		= dma;
	cfg.dma2	= dma16;

	cfg2.io_base	= sb_io;
	cfg2.irq	= sb_irq;
	cfg2.dma	= sb_dma;
	cfg2.dma2	= sb_dma16;

	/* io, irq and dma must all have been supplied by the user */
	if (cfg.io_base == -1 || cfg.irq == -1 || cfg.dma == -1) {
		printk(KERN_INFO "I/O, IRQ, DMA and type are mandatory\n");
		return -EINVAL;
	}

	if (!probe_pas(&cfg))
		return -ENODEV;

	attach_pas_card(&cfg);
	return 0;
}
/* Module exit: free every resource taken by the primary card config. */
static void __exit cleanup_pas2(void)
{
	unload_pas(&cfg);
}
module_init(init_pas2);
module_exit(cleanup_pas2);
#ifndef MODULE
/*
 * Boot command line parser:
 *   pas2=io,irq,dma,dma2,sb_io,sb_irq,sb_dma,sb_dma2
 * get_options() stores the count of parsed integers in ints[0] and only
 * fills that many slots; guard each assignment so options the user did
 * not supply keep their built-in defaults instead of being read from
 * uninitialized stack slots.
 */
static int __init setup_pas2(char *str)
{
	/* io, irq, dma, dma2, sb_io, sb_irq, sb_dma, sb_dma2 */
	int ints[9];
	int n;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	n = ints[0];	/* number of integers actually present */

	if (n >= 1)
		io = ints[1];
	if (n >= 2)
		irq = ints[2];
	if (n >= 3)
		dma = ints[3];
	if (n >= 4)
		dma16 = ints[4];
	if (n >= 5)
		sb_io = ints[5];
	if (n >= 6)
		sb_irq = ints[6];
	if (n >= 7)
		sb_dma = ints[7];
	if (n >= 8)
		sb_dma16 = ints[8];

	return 1;
}

__setup("pas2=", setup_pas2);
#endif
/*
* RouterBoard 500 specific prom routines
*
* Copyright (C) 2003, Peter Sadik <peter.sadik@idt.com>
* Copyright (C) 2005-2006, P.Christeas <p_christ@hol.gr>
* Copyright (C) 2007, Gabor Juhos <juhosg@openwrt.org>
* Felix Fietkau <nbd@openwrt.org>
* Florian Fainelli <florian@openwrt.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the
* Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/blkdev.h>
#include <asm/bootinfo.h>
#include <asm/mach-rc32434/ddr.h>
#include <asm/mach-rc32434/prom.h>
unsigned int idt_cpu_freq = 132000000;
EXPORT_SYMBOL(idt_cpu_freq);
/*
 * Physical window of the RC32434 DDR controller registers.
 * NOTE(review): .end is start + sizeof(struct ddr_ram); resource ends are
 * conventionally inclusive (start + size - 1) — confirm this is intended,
 * since prom_init() uses end - start as the ioremap length.
 */
static struct resource ddr_reg[] = {
	{
		.name = "ddr-reg",
		.start = DDR0_PHYS_ADDR,
		.end = DDR0_PHYS_ADDR + sizeof(struct ddr_ram),
		.flags = IORESOURCE_MEM,
	}
};
/* Required arch hook; this platform keeps no reclaimable PROM memory. */
void __init prom_free_prom_memory(void)
{
	/* No prom memory to free */
}
/* True (non-zero) when @arg begins with the literal prefix @tag. */
static inline int match_tag(char *arg, const char *tag)
{
	size_t taglen = strlen(tag);

	return strncmp(arg, tag, taglen) == 0;
}
/*
 * Extract the unsigned decimal value that follows the @tag prefix in
 * @arg, e.g. tag2ul("HZ=132000000", "HZ=") -> 132000000.
 */
static inline unsigned long tag2ul(char *arg, const char *tag)
{
	char *num = arg + strlen(tag);

	/* NULL, not 0, for the unused char ** endptr argument */
	return simple_strtoul(num, NULL, 10);
}
/*
 * Build the kernel command line from the arguments the bootloader passed
 * in fw_arg0..fw_arg2, merging in anything already in arcs_cmdline.
 * Also consumes the CPU frequency (FREQ_TAG) into idt_cpu_freq and sets
 * mips_machtype from the board tag (BOARD_TAG).
 */
void __init prom_setup_cmdline(void)
{
	static char cmd_line[COMMAND_LINE_SIZE] __initdata;
	char *cp, *board;
	int prom_argc;
	char **prom_argv, **prom_envp;
	int i;
	prom_argc = fw_arg0;
	prom_argv = (char **) fw_arg1;
	prom_envp = (char **) fw_arg2;
	cp = cmd_line;
	/* Note: it is common that parameters start
	 * at argv[1] and not argv[0],
	 * however, our elf loader starts at [0] */
	for (i = 0; i < prom_argc; i++) {
		if (match_tag(prom_argv[i], FREQ_TAG)) {
			/* consumed here; not copied into the command line */
			idt_cpu_freq = tag2ul(prom_argv[i], FREQ_TAG);
			continue;
		}
#ifdef IGNORE_CMDLINE_MEM
		/* parses out the "mem=xx" arg */
		if (match_tag(prom_argv[i], MEM_TAG))
			continue;
#endif
		if (i > 0)
			*(cp++) = ' ';
		if (match_tag(prom_argv[i], BOARD_TAG)) {
			board = prom_argv[i] + strlen(BOARD_TAG);
			if (match_tag(board, BOARD_RB532A))
				mips_machtype = MACH_MIKROTIK_RB532A;
			else
				mips_machtype = MACH_MIKROTIK_RB532;
		}
		strcpy(cp, prom_argv[i]);
		cp += strlen(prom_argv[i]);
	}
	/*
	 * NOTE(review): this unconditional space overwrites the NUL left by
	 * the last strcpy(); if arcs_cmdline below is empty, termination
	 * relies on cmd_line being a zero-initialized static array.  There
	 * is also no bounds checking against COMMAND_LINE_SIZE — confirm
	 * the bootloader cannot overflow it.
	 */
	*(cp++) = ' ';
	i = strlen(arcs_cmdline);
	if (i > 0) {
		*(cp++) = ' ';
		strcpy(cp, arcs_cmdline);
		cp += strlen(arcs_cmdline);
	}
	cmd_line[COMMAND_LINE_SIZE - 1] = '\0';
	strcpy(arcs_cmdline, cmd_line);
}
/*
 * Early PROM init: map the DDR controller registers, derive the memory
 * size, parse the bootloader command line and register usable RAM.
 */
void __init prom_init(void)
{
	struct ddr_ram __iomem *ddr;
	phys_t memsize;
	phys_t ddrbase;
	ddr = ioremap_nocache(ddr_reg[0].start,
			ddr_reg[0].end - ddr_reg[0].start);
	if (!ddr) {
		printk(KERN_ERR "Unable to remap DDR register\n");
		return;
	}
	/*
	 * NOTE(review): these take the *addresses* of the ddrbase/ddrmask
	 * registers rather than reading their contents (no readl()) — verify
	 * against the RC32434 DDR documentation that this is intended.
	 */
	ddrbase = (phys_t)&ddr->ddrbase;
	memsize = (phys_t)&ddr->ddrmask;
	memsize = 0 - memsize;	/* two's-complement negate: mask -> size */
	prom_setup_cmdline();
	/* give all RAM to boot allocator,
	 * except for the first 0x400 and the last 0x200 bytes */
	add_memory_region(ddrbase + 0x400, memsize - 0x600, BOOT_MEM_RAM);
}
/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "#%d: " fmt, __LINE__
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spmi.h>
#include <linux/of.h>
#include <linux/interrupt.h>
#include <linux/of_spmi.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/syscore_ops.h>
#include <linux/irqchip/qpnp-int.h>
#include "spmi-dbgfs.h"
#define SPMI_PMIC_ARB_NAME "spmi_pmic_arb"
/* PMIC Arbiter configuration registers */
#define PMIC_ARB_VERSION 0x0000
#define PMIC_ARB_INT_EN 0x0004
enum {
PMIC_ARB_GENI_CTRL,
PMIC_ARB_GENI_STATUS,
PMIC_ARB_PROTOCOL_IRQ_STATUS,
};
u32 pmic_arb_regs_v1[] = {
[PMIC_ARB_GENI_CTRL] = 0x0024,
[PMIC_ARB_GENI_STATUS] = 0x0028,
[PMIC_ARB_PROTOCOL_IRQ_STATUS] = (0x700 + 0x820),
};
u32 pmic_arb_regs_v2[] = {
[PMIC_ARB_GENI_CTRL] = 0x0028,
[PMIC_ARB_GENI_STATUS] = 0x002C,
[PMIC_ARB_PROTOCOL_IRQ_STATUS] = (0x700 + 0x900),
};
/* Offset per chnnel-register type */
#define PMIC_ARB_CMD (0x00)
#define PMIC_ARB_CONFIG (0x04)
#define PMIC_ARB_STATUS (0x08)
#define PMIC_ARB_WDATA0 (0x10)
#define PMIC_ARB_WDATA1 (0x14)
#define PMIC_ARB_RDATA0 (0x18)
#define PMIC_ARB_RDATA1 (0x1C)
/* PMIC Arbiter configuration registers values */
#define PMIC_ARB_V2_MIN (0x20010000)
#define PMIC_ARB_CORE_REGISTERS_OBS (0x800000)
/* Mapping Table */
#define SPMI_MAPPING_TABLE_REG(N) (0x0B00 + (4 * (N)))
#define SPMI_MAPPING_BIT_INDEX(X) (((X) >> 18) & 0xF)
#define SPMI_MAPPING_BIT_IS_0_FLAG(X) (((X) >> 17) & 0x1)
#define SPMI_MAPPING_BIT_IS_0_RESULT(X) (((X) >> 9) & 0xFF)
#define SPMI_MAPPING_BIT_IS_1_FLAG(X) (((X) >> 8) & 0x1)
#define SPMI_MAPPING_BIT_IS_1_RESULT(X) (((X) >> 0) & 0xFF)
#define SPMI_MAPPING_TABLE_LEN 255
#define SPMI_MAPPING_TABLE_TREE_DEPTH 16 /* Maximum of 16-bits */
/* Ownership Table */
#define SPMI_OWNERSHIP_TABLE_REG(N) (0x0700 + (4 * (N)))
#define SPMI_OWNERSHIP_PERIPH2OWNER(X) ((X) & 0x7)
/* PPID, SID, PID */
#define PMIC_ARB_PERIPH_ID(spmi_addr) (((spmi_addr) >> 8) & 0xFF)
#define PMIC_ARB_ADDR_IN_PERIPH(spmi_addr) ((spmi_addr) & 0xFF)
#define PMIC_ARB_REG_CHNL(chnl_num) (0x800 + 0x4 * (chnl_num))
#define PMIC_ARB_TO_PPID(sid, pid) ((pid & 0xFF) | ((sid & 0xF) << 8))
/* Channel Status fields */
enum pmic_arb_chnl_status {
PMIC_ARB_STATUS_DONE = (1 << 0),
PMIC_ARB_STATUS_FAILURE = (1 << 1),
PMIC_ARB_STATUS_DENIED = (1 << 2),
PMIC_ARB_STATUS_DROPPED = (1 << 3),
};
/* Command register fields */
#define PMIC_ARB_CMD_MAX_BYTE_COUNT 8
/* Command Opcodes */
enum pmic_arb_cmd_op_code {
PMIC_ARB_OP_EXT_WRITEL = 0,
PMIC_ARB_OP_EXT_READL = 1,
PMIC_ARB_OP_EXT_WRITE = 2,
PMIC_ARB_OP_RESET = 3,
PMIC_ARB_OP_SLEEP = 4,
PMIC_ARB_OP_SHUTDOWN = 5,
PMIC_ARB_OP_WAKEUP = 6,
PMIC_ARB_OP_AUTHENTICATE = 7,
PMIC_ARB_OP_MSTR_READ = 8,
PMIC_ARB_OP_MSTR_WRITE = 9,
PMIC_ARB_OP_EXT_READ = 13,
PMIC_ARB_OP_WRITE = 14,
PMIC_ARB_OP_READ = 15,
PMIC_ARB_OP_ZERO_WRITE = 16,
};
/* Maximum number of support PMIC peripherals */
#define PMIC_ARB_MAX_PERIPHS 256
#define PMIC_ARB_PERIPHS_CHNL_DEFAULT 128
#define PMIC_ARB_PERIPHS_INTR_DEFAULT 256
#define PMIC_ARB_PERIPH_ID_VALID (1 << 15)
#define PMIC_ARB_TIMEOUT_US 100
#define PMIC_ARB_MAX_TRANS_BYTES (8)
#define PMIC_ARB_APID_MASK 0xFF
#define PMIC_ARB_PPID_MASK 0xFFF
/* interrupt enable bit */
#define SPMI_PIC_ACC_ENABLE_BIT BIT(0)
/* lookup channel num, given sid+pid. each sid points to 8bits of pids */
#define PMIC_ARB_CHNL(pmic_arb, sid, pid) \
((pmic_arb)->ppid_2_chnl_tbl[(((sid) << 8) | (pid))])
/*
* spmi_pmic_arb_dbg: information used for debugging
*
* @base_phy physical address of the core register space
* @rdbase_phy physical address of the observer register space
* @wrbase_phy physical address of the channels register space
* @intr_phy physical address of the interrupt register space
*/
struct spmi_pmic_arb_dbg {
phys_addr_t base_phy;
phys_addr_t rdbase_phy;
phys_addr_t wrbase_phy;
phys_addr_t intr_phy;
};
struct spmi_pmic_arb_dev;
/*
* spmi_pmic_arb_ver: version dependent callbacks.
*
* @chnl_ofst ocalc ffset per channel. Note that v1 channel is one per EE, and
* v2 channels are one per PMIC peripheral.
* @fmt_cmd format formats a GENI/SPMI command.
* @owner_acc_status calc offset to PMIC_ARB_SPMI_PIC_OWNERm_ACC_STATUSn on v1,
* and SPMI_PIC_OWNERm_ACC_STATUSn on v2.
* @acc_enable calc offset to PMIC_ARB_SPMI_PIC_ACC_ENABLEn on v1,
* and SPMI_PIC_ACC_ENABLEn on v2.
* @irq_status calc offset to PMIC_ARB_SPMI_PIC_IRQ_STATUSn on v1,
* and SPMI_PIC_IRQ_STATUSn on v2.
* @irq_clear calc offset to PMIC_ARB_SPMI_PIC_IRQ_CLEARn on v,
* and SPMI_PIC_IRQ_CLEARn on v2.
*/
struct spmi_pmic_arb_ver {
int (*non_data_cmd)(struct spmi_pmic_arb_dev *dev, u8 opc, u8 sid);
/* following functions are about phripheral rd/wr */
phys_addr_t (*chnl_ofst)(struct spmi_pmic_arb_dev *dev, u8 sid,
u16 addr);
u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
/* following functions calc offsets to peripheral PIC registers */
phys_addr_t (*owner_acc_status)(u8 m, u8 n);
phys_addr_t (*acc_enable)(u8 n);
phys_addr_t (*irq_status)(u8 n);
phys_addr_t (*irq_clear)(u8 n);
u32 *regs;
};
/*
* @base base address of the PMIC Arbiter core registers.
* @rdbase, @wrbase base address of the PMIC Arbiter read core registers.
* For HW-v1 these are equal to base.
* For HW-v2, the value is the same in eeraly probing, in order to read
* PMIC_ARB_CORE registers, then chnls, and obsrvr are set to
* PMIC_ARB_CORE_REGISTERS and PMIC_ARB_CORE_REGISTERS_OBS respectivly.
* @intr base address of the SPMI interrupt control registers
* @ppid_2_chnl_tbl lookup table f(SID, Periph-ID) -> channle num
* @fmt_cmd formats a command to be set into PMIC_ARBq_CHNLn_CMD
* @chnl_ofst calculates offset of the base of a channel reg space
* @ee execution environment id
* @irq_acc0_init_val initial value of the interrupt accumulator at probe time.
* Use for an HW workaround. On handling interrupts, the first accumulator
* register will be compared against this value, and bits which are set at
* boot will be ignored.
*/
struct spmi_pmic_arb_dev {
struct spmi_controller controller;
struct device *dev;
struct device *slave;
void __iomem *base;
void __iomem *rdbase;
void __iomem *wrbase;
void __iomem *intr;
void __iomem *cnfg;
struct spmi_pmic_arb_dbg dbg;
int pic_irq;
bool allow_wakeup;
spinlock_t lock;
u8 ee;
u8 channel;
u16 max_peripherals;
u16 min_intr_apid;
u16 max_intr_apid;
u16 max_periph_intrs;
u16 periph_id_map[PMIC_ARB_MAX_PERIPHS];
u32 mapping_table[SPMI_MAPPING_TABLE_LEN];
const struct spmi_pmic_arb_ver *ver;
u8 *ppid_2_chnl_tbl;
u32 prev_prtcl_irq_stat;
u32 irq_acc0_init_val;
};
static struct spmi_pmic_arb_dev *the_pmic_arb;
/*
 * HW v1 keeps one command channel per EE, so sid/addr are ignored.
 * Channel register banks are 0x80 bytes apart starting at 0x800.
 */
static phys_addr_t pmic_arb_chnl_ofst_v1(struct spmi_pmic_arb_dev *dev,
					 u8 sid, u16 addr)
{
	phys_addr_t bank = (phys_addr_t)dev->channel * 0x80;

	return 0x800 + bank;
}
/*
 * HW v2 keeps one command channel per PMIC peripheral; look the channel
 * up from the (sid, peripheral-id) table and compute its register offset.
 */
static phys_addr_t pmic_arb_chnl_ofst_v2(struct spmi_pmic_arb_dev *dev,
					 u8 sid, u16 addr)
{
	/*
	 * Use u8, not plain char: the table holds values up to 255 and plain
	 * char may be signed, which would make 0x8000 * chnl negative for
	 * channel numbers >= 128 and yield a bogus register offset.
	 */
	u8 chnl = PMIC_ARB_CHNL(dev, sid, PMIC_ARB_PERIPH_ID(addr));

	return 0x1000 * (dev->ee) + 0x8000 * (chnl);
}
/* Pack a v1 SPMI command word: opcode, slave id, full address, byte count. */
static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
{
	u32 cmd = (u32)opc << 27;

	cmd |= (u32)(sid & 0xF) << 20;
	cmd |= (u32)addr << 4;
	cmd |= bc & 0x7;
	return cmd;
}
/* Pack a v2 SPMI command word: opcode, in-peripheral address, byte count. */
static u32 pmic_arb_fmt_cmd_v2(u8 opc, u8 sid, u16 addr, u8 bc)
{
	u32 cmd = (u32)opc << 27;

	cmd |= (u32)PMIC_ARB_ADDR_IN_PERIPH(addr) << 4;
	cmd |= bc & 0x7;
	return cmd;
}
/*
 * Version-specific offsets of the PIC (peripheral interrupt control)
 * registers.  "m" indexes the owner/EE, "n" the APID.
 */
static phys_addr_t pmic_arb_owner_acc_status_v1(u8 m, u8 n)
{
	return (phys_addr_t)m * 0x20 + (phys_addr_t)n * 0x4;
}
static phys_addr_t pmic_arb_owner_acc_status_v2(u8 m, u8 n)
{
	return 0x100000 + (phys_addr_t)m * 0x1000 + (phys_addr_t)n * 0x4;
}
static phys_addr_t pmic_arb_acc_enable_v1(u8 n)
{
	return 0x200 + (phys_addr_t)n * 0x4;
}
static phys_addr_t pmic_arb_acc_enable_v2(u8 n)
{
	return (phys_addr_t)n * 0x1000;
}
static phys_addr_t pmic_arb_irq_status_v1(u8 n)
{
	return 0x600 + (phys_addr_t)n * 0x4;
}
static phys_addr_t pmic_arb_irq_status_v2(u8 n)
{
	return 0x4 + (phys_addr_t)n * 0x1000;
}
static phys_addr_t pmic_arb_irq_clear_v1(u8 n)
{
	return 0xA00 + (phys_addr_t)n * 0x4;
}
static phys_addr_t pmic_arb_irq_clear_v2(u8 n)
{
	return 0x8 + (phys_addr_t)n * 0x1000;
}
/* Trace one register access: name, physical/virtual address and value. */
static void dbg_io(struct spmi_pmic_arb_dev *dev, const char *name,
		   void *virt, phys_addr_t phys, u32 offset, u32 val)
{
	dev_dbg(dev->dev,
		"%-10s phy-base:0x%lx phy:0x%lx virt:0x%p ofst:0x%03x val:0x%x\n",
		name, (ulong) phys, (ulong) (phys + offset), (virt + offset),
		offset, val);
}
/* Read a 32-bit arbiter register from the observer space and trace it. */
static u32 pmic_arb_read(struct spmi_pmic_arb_dev *dev, u32 offset)
{
	u32 value;

	value = readl_relaxed(dev->rdbase + offset);
	dbg_io(dev, "spmi-rx", dev->rdbase, dev->dbg.rdbase_phy, offset, value);
	return value;
}
/* Write a 32-bit arbiter register in the channel space and trace it. */
static void pmic_arb_write(struct spmi_pmic_arb_dev *dev, u32 offset, u32 val)
{
	void __iomem *reg = dev->wrbase + offset;

	writel_relaxed(val, reg);
	dbg_io(dev, "spmi-tx", dev->wrbase, dev->dbg.wrbase_phy, offset, val);
}
/* Issue a read command: the CMD register lives in the observer space. */
static void pmic_arb_set_rd_cmd(struct spmi_pmic_arb_dev *dev, u32 offset,
				u32 val)
{
	void __iomem *reg = dev->rdbase + offset;

	dbg_io(dev, "set-rd-cmd", dev->rdbase, dev->dbg.rdbase_phy, offset,
	       val);
	writel_relaxed(val, reg);
}
/* Trace one PIC register access, annotated with sid/pid/apid and a tag. */
static void dbg_pic_io(struct spmi_pmic_arb_dev *dev, const char *name,
		   void *virt, phys_addr_t phys, u32 offset, u32 val,
		   u8 sid, u16 pid, u8 apid, const char *desc)
{
	dev_dbg(dev->dev,
	   "%-10s phy-base:0x%lx phy:0x%lx virt:0x%p ofst:0x%03x val:0x%x sid:%d pid:0x%x apid:0x%x %s\n",
	   name, (ulong) phys, (ulong) (phys + offset), (virt + offset), offset,
	   val, sid, pid, apid, desc ? desc : "");
}
/* Write the SPMI_PIC_ACC_ENABLEn register for @apid, with tracing. */
static void spmi_pic_acc_en_wr(struct spmi_pmic_arb_dev *dev, u32 val,
				u8 sid, u16 pid, u8 apid, const char *desc)
{
	phys_addr_t offset = dev->ver->acc_enable(apid);

	dbg_pic_io(dev, "acc-en-wr", dev->intr, dev->dbg.intr_phy, offset,
		   val, sid, pid, apid, desc);
	writel_relaxed(val, dev->intr + offset);
}
/* Read the SPMI_PIC_ACC_ENABLEn register for @apid, with tracing. */
static u32 spmi_pic_acc_en_rd(struct spmi_pmic_arb_dev *dev,
				u8 sid, u16 pid, u8 apid, const char *desc)
{
	phys_addr_t offset = dev->ver->acc_enable(apid);
	u32 value;

	value = readl_relaxed(dev->intr + offset);
	dbg_pic_io(dev, "acc-en-rd", dev->intr, dev->dbg.intr_phy, offset,
		   value, sid, pid, apid, desc);
	return value;
}
/*
 * Snapshot PROTOCOL_IRQ_STATUS before a transaction so error dumps can
 * show a before/after pair (see pmic_arb_dbg_err_dump()).
 */
static void pmic_arb_save_stat_before_txn(struct spmi_pmic_arb_dev *dev)
{
	dev->prev_prtcl_irq_stat =
		readl_relaxed(dev->cnfg +
			dev->ver->regs[PMIC_ARB_PROTOCOL_IRQ_STATUS]);
}
/*
 * Busy-poll the channel STATUS register until the DONE bit is set or
 * about PMIC_ARB_TIMEOUT_US microseconds pass.  Once DONE, failure flags
 * map to errnos: DENIED -> -EPERM, FAILURE -> -EIO, DROPPED -> -EBUSY;
 * clean completion returns 0.  Caller must hold the controller lock.
 */
static int pmic_arb_wait_for_done(struct spmi_pmic_arb_dev *dev,
				void __iomem *base, u8 sid, u16 addr)
{
	u32 status = 0;
	u32 timeout = PMIC_ARB_TIMEOUT_US;
	u32 offset = dev->ver->chnl_ofst(dev, sid, addr) + PMIC_ARB_STATUS;
	static const char * const diag_msg_fmt =
		"wait_for_done: %s status:0x%x sid:%d addr:0x%x\n";
	while (timeout--) {
		status = readl_relaxed(base + offset);
		if (status & PMIC_ARB_STATUS_DONE) {
			if (status & PMIC_ARB_STATUS_DENIED) {
				dev_err(dev->dev, diag_msg_fmt,
					"transaction denied by SPMI master "
					"(peripheral not owned by apps)",
					status, sid, addr);
				return -EPERM;
			}
			if (status & PMIC_ARB_STATUS_FAILURE) {
				dev_err(dev->dev, diag_msg_fmt,
					"failed (possible parity-error due to noisy"
					"bus or access to nonexistent peripheral)",
					status, sid, addr);
				return -EIO;
			}
			if (status & PMIC_ARB_STATUS_DROPPED) {
				dev_err(dev->dev, diag_msg_fmt,
					"transaction dropped pmic-arb busy",
					status, sid, addr);
				return -EBUSY;
			}
			return 0;
		};
		udelay(1);	/* poll at ~1us granularity */
	}
	dev_err(dev->dev, diag_msg_fmt, "timeout", status, sid, addr);
	return -ETIMEDOUT;
}
/**
 * pa_read_data: read one pmic-arb data register and copy its low
 * (bc & 3) + 1 bytes into @buf.
 * @bc byte count minus one, range: 0..3
 * @reg register's address
 * @buf output parameter, must hold at least bc+1 bytes
 */
static void pa_read_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
{
	u32 word = pmic_arb_read(dev, reg);
	u8 nbytes = (bc & 3) + 1;

	memcpy(buf, &word, nbytes);
}
/**
 * pa_write_data: copy (bc & 3) + 1 bytes from @buf into one pmic-arb
 * data register.
 * @bc byte-count minus one, range: 0..3
 * @reg register's address
 * @buf buffer to write, length must be bc+1
 */
static void
pa_write_data(struct spmi_pmic_arb_dev *dev, u8 *buf, u32 reg, u8 bc)
{
	u32 word = 0;
	u8 nbytes = (bc & 3) + 1;

	memcpy(&word, buf, nbytes);
	pmic_arb_write(dev, reg, word);
}
/*
 * Dump diagnostic state after a failed transaction: the failing command
 * (with data when @buf is non-NULL), plus the PROTOCOL_IRQ_STATUS
 * before/after pair and the GENI status/control registers.
 */
static void pmic_arb_dbg_err_dump(struct spmi_pmic_arb_dev *pmic_arb, int ret,
		const char *msg, u8 opc, u8 sid, u16 addr, u8 bc, u8 *buf)
{
	u32 irq_stat  = readl_relaxed(pmic_arb->cnfg +
			pmic_arb->ver->regs[PMIC_ARB_PROTOCOL_IRQ_STATUS]);
	u32 geni_stat = readl_relaxed(pmic_arb->cnfg +
			pmic_arb->ver->regs[PMIC_ARB_GENI_STATUS]);
	u32 geni_ctrl = readl_relaxed(pmic_arb->cnfg +
			pmic_arb->ver->regs[PMIC_ARB_GENI_CTRL]);
	bc += 1; /* actual byte count */
	if (buf)
		dev_err(pmic_arb->dev,
		"error:%d on data %s opcode:0x%x sid:%d addr:0x%x bc:%d buf:%*phC\n",
			ret, msg, opc, sid, addr, bc, bc, buf);
	else
		dev_err(pmic_arb->dev,
			"error:%d on non-data-cmd opcode:0x%x sid:%d\n",
			ret, opc, sid);
	dev_err(pmic_arb->dev,
	"PROTOCOL_IRQ_STATUS before:0x%x after:0x%x GENI_STATUS:0x%x GENI_CTRL:0x%x\n",
		irq_stat, pmic_arb->prev_prtcl_irq_stat, geni_stat, geni_ctrl);
}
/*
 * Issue a non-data SPMI command (reset/sleep/shutdown/wakeup) on HW v1:
 * translate the generic SPMI opcode into the arbiter opcode range, then
 * write the CMD register and wait for completion under the lock.
 */
static int
pmic_arb_non_data_cmd_v1(struct spmi_pmic_arb_dev *pmic_arb, u8 opc, u8 sid)
{
	unsigned long flags;
	u32 cmd;
	int rc;
	/* sid and addr are don't-care for pmic_arb_chnl_ofst_v1() HW-v1 */
	phys_addr_t chnl_ofst = pmic_arb_chnl_ofst_v1(pmic_arb, 0, 0);
	/* map SPMI_CMD_* to the corresponding PMIC_ARB_OP_* opcode */
	opc -= SPMI_CMD_RESET - PMIC_ARB_OP_RESET;
	cmd = (opc << 27) | ((sid & 0xf) << 20);
	spin_lock_irqsave(&pmic_arb->lock, flags);
	pmic_arb_save_stat_before_txn(pmic_arb);
	pmic_arb_write(pmic_arb, chnl_ofst + PMIC_ARB_CMD, cmd);
	/* sid and addr are don't-care for pmic_arb_wait_for_done() HW-v1 */
	rc = pmic_arb_wait_for_done(pmic_arb, pmic_arb->wrbase, 0, 0);
	spin_unlock_irqrestore(&pmic_arb->lock, flags);
	if (rc)
		pmic_arb_dbg_err_dump(pmic_arb, rc, "cmd", opc, sid, 0, 0, 0);
	return rc;
}
/*
 * Non-data commands (reset/sleep/shutdown/wakeup) are currently
 * unsupported by HW v2.
 */
static int
pmic_arb_non_data_cmd_v2(struct spmi_pmic_arb_dev *pmic_arb, u8 opc, u8 sid)
{
	return -EOPNOTSUPP;
}
/* Controller callback: dispatch a non-data SPMI command to the HW layer. */
static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
{
	struct spmi_pmic_arb_dev *pmic_arb = spmi_get_ctrldata(ctrl);

	pr_debug("op:0x%x sid:%d\n", opc, sid);

	/* Only the non-data command opcodes are valid here */
	if (opc >= SPMI_CMD_RESET && opc <= SPMI_CMD_WAKEUP)
		return pmic_arb->ver->non_data_cmd(pmic_arb, opc, sid);

	return -EINVAL;
}
/* HW version 1 callback table (one command channel per EE). */
static const struct spmi_pmic_arb_ver spmi_pmic_arb_v1 = {
	.non_data_cmd		= pmic_arb_non_data_cmd_v1,
	.chnl_ofst		= pmic_arb_chnl_ofst_v1,
	.fmt_cmd		= pmic_arb_fmt_cmd_v1,
	.owner_acc_status	= pmic_arb_owner_acc_status_v1,
	.acc_enable		= pmic_arb_acc_enable_v1,
	.irq_status		= pmic_arb_irq_status_v1,
	.irq_clear		= pmic_arb_irq_clear_v1,
	.regs			= pmic_arb_regs_v1,
};
/* HW version 2 callback table (one command channel per PMIC peripheral). */
static const struct spmi_pmic_arb_ver spmi_pmic_arb_v2 = {
	.non_data_cmd		= pmic_arb_non_data_cmd_v2,
	.chnl_ofst		= pmic_arb_chnl_ofst_v2,
	.fmt_cmd		= pmic_arb_fmt_cmd_v2,
	.owner_acc_status	= pmic_arb_owner_acc_status_v2,
	.acc_enable		= pmic_arb_acc_enable_v2,
	.irq_status		= pmic_arb_irq_status_v2,
	.irq_clear		= pmic_arb_irq_clear_v2,
	.regs			= pmic_arb_regs_v2,
};
/*
 * Controller read callback: issue an SPMI register-read of bc+1 bytes
 * (max PMIC_ARB_MAX_TRANS_BYTES).  Maps the generic SPMI opcode onto a
 * pmic-arb opcode, kicks the command under the controller spinlock,
 * waits for DONE, then copies the result out of the RDATA0/RDATA1 FIFO
 * registers into @buf.  Returns 0 or a negative errno.
 */
static int pmic_arb_read_cmd(struct spmi_controller *ctrl,
				u8 opc, u8 sid, u16 addr, u8 bc, u8 *buf)
{
	struct spmi_pmic_arb_dev *pmic_arb = spmi_get_ctrldata(ctrl);
	unsigned long flags;
	u32 cmd;
	int rc;
	phys_addr_t chnl_ofst = pmic_arb->ver->chnl_ofst(pmic_arb, sid, addr);
	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
		dev_err(pmic_arb->dev
		, "pmic-arb supports 1..%d bytes per trans, but:%d requested"
		, PMIC_ARB_MAX_TRANS_BYTES, bc+1);
		return  -EINVAL;
	}
	dev_dbg(pmic_arb->dev, "client-rd op:0x%x sid:%d addr:0x%x bc:%d\n",
		opc, sid, addr, bc + 1);
	/* Check the opcode: map generic SPMI read opcodes to arb opcodes */
	if (opc >= 0x60 && opc <= 0x7F)
		opc = PMIC_ARB_OP_READ;
	else if (opc >= 0x20 && opc <= 0x2F)
		opc = PMIC_ARB_OP_EXT_READ;
	else if (opc >= 0x38 && opc <= 0x3F)
		opc = PMIC_ARB_OP_EXT_READL;
	else
		return -EINVAL;
	cmd = pmic_arb->ver->fmt_cmd(opc, sid, addr, bc);
	spin_lock_irqsave(&pmic_arb->lock, flags);
	pmic_arb_save_stat_before_txn(pmic_arb);
	pmic_arb_set_rd_cmd(pmic_arb, chnl_ofst + PMIC_ARB_CMD, cmd);
	rc = pmic_arb_wait_for_done(pmic_arb, pmic_arb->rdbase, sid, addr);
	if (rc)
		goto done;
	/* Read from FIFO, note 'bc' is actually number of bytes minus 1 */
	pa_read_data(pmic_arb, buf, chnl_ofst + PMIC_ARB_RDATA0,
							min_t(u8, bc, 3));
	if (bc > 3)
		pa_read_data(pmic_arb, buf + 4,
				chnl_ofst + PMIC_ARB_RDATA1, bc - 4);
done:
	spin_unlock_irqrestore(&pmic_arb->lock, flags);
	if (rc)
		pmic_arb_dbg_err_dump(pmic_arb, rc, "read", opc, sid, addr, bc,
									buf);
	return rc;
}
/*
 * Controller write callback: issue an SPMI register-write of bc+1 bytes
 * (max PMIC_ARB_MAX_TRANS_BYTES).  Data is staged in the WDATA0/WDATA1
 * FIFO registers before the CMD register starts the transaction, all
 * under the controller spinlock.  Returns 0 or a negative errno.
 */
static int pmic_arb_write_cmd(struct spmi_controller *ctrl,
				u8 opc, u8 sid, u16 addr, u8 bc, u8 *buf)
{
	struct spmi_pmic_arb_dev *pmic_arb = spmi_get_ctrldata(ctrl);
	unsigned long flags;
	u32 cmd;
	int rc;
	phys_addr_t chnl_ofst = pmic_arb->ver->chnl_ofst(pmic_arb, sid, addr);
	if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
		dev_err(pmic_arb->dev
		, "pmic-arb supports 1..%d bytes per trans, but:%d requested"
		, PMIC_ARB_MAX_TRANS_BYTES, bc+1);
		return  -EINVAL;
	}
	dev_dbg(pmic_arb->dev, "client-wr op:0x%x sid:%d addr:0x%x bc:%d\n",
		opc, sid, addr, bc + 1);
	/* Check the opcode: map generic SPMI write opcodes to arb opcodes */
	if (opc >= 0x40 && opc <= 0x5F)
		opc = PMIC_ARB_OP_WRITE;
	else if (opc >= 0x00 && opc <= 0x0F)
		opc = PMIC_ARB_OP_EXT_WRITE;
	else if (opc >= 0x30 && opc <= 0x37)
		opc = PMIC_ARB_OP_EXT_WRITEL;
	else if (opc >= 0x80 && opc <= 0xFF)
		opc = PMIC_ARB_OP_ZERO_WRITE;
	else
		return -EINVAL;
	cmd = pmic_arb->ver->fmt_cmd(opc, sid, addr, bc);
	/* Write data to FIFOs */
	spin_lock_irqsave(&pmic_arb->lock, flags);
	pmic_arb_save_stat_before_txn(pmic_arb);
	pa_write_data(pmic_arb, buf, chnl_ofst + PMIC_ARB_WDATA0
							, min_t(u8, bc, 3));
	if (bc > 3)
		pa_write_data(pmic_arb, buf + 4,
				chnl_ofst + PMIC_ARB_WDATA1, bc - 4);
	/* Start the transaction */
	pmic_arb_write(pmic_arb, chnl_ofst + PMIC_ARB_CMD, cmd);
	rc = pmic_arb_wait_for_done(pmic_arb, pmic_arb->wrbase, sid, addr);
	spin_unlock_irqrestore(&pmic_arb->lock, flags);
	if (rc)
		pmic_arb_dbg_err_dump(pmic_arb, rc, "write", opc, sid, addr, bc,
									buf);
	return rc;
}
/* Translate an APID to its PPID (low 12 bits of the map entry). */
static u16 get_peripheral_id(struct spmi_pmic_arb_dev *pmic_arb, u8 apid)
{
	u16 entry = pmic_arb->periph_id_map[apid];

	return entry & PMIC_ARB_PPID_MASK;
}
/* Non-zero when @apid has a registered PPID mapping (valid flag set). */
static int is_apid_valid(struct spmi_pmic_arb_dev *pmic_arb, u8 apid)
{
	u16 entry = pmic_arb->periph_id_map[apid];

	return entry & PMIC_ARB_PERIPH_ID_VALID;
}
/*
 * Walk the hardware PPID->APID mapping table, a binary tree encoded in
 * 32-bit words: each node names a PPID bit to test, and for each bit
 * value either points to another node (flag set) or yields the APID
 * (flag clear).  Returns PMIC_ARB_MAX_PERIPHS when no leaf is reached
 * within the 16-bit depth limit.
 */
static u32 search_mapping_table(struct spmi_pmic_arb_dev *pmic_arb, u16 ppid)
{
	u32 *mapping_table = pmic_arb->mapping_table;
	u32 apid = PMIC_ARB_MAX_PERIPHS;
	int index = 0;
	u32 data;
	int i;
	for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
		data = mapping_table[index];
		if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) {
			/* tested bit is 1: descend or take the leaf */
			if (SPMI_MAPPING_BIT_IS_1_FLAG(data)) {
				index = SPMI_MAPPING_BIT_IS_1_RESULT(data);
			} else {
				apid = SPMI_MAPPING_BIT_IS_1_RESULT(data);
				break;
			}
		} else {
			/* tested bit is 0: descend or take the leaf */
			if (SPMI_MAPPING_BIT_IS_0_FLAG(data)) {
				index = SPMI_MAPPING_BIT_IS_0_RESULT(data);
			} else {
				apid = SPMI_MAPPING_BIT_IS_0_RESULT(data);
				break;
			}
		}
	}
	return apid;
}
/*
 * Log a rejected IRQ request with a stack trace, then print the whole
 * valid APID => PPID mapping table to help locate the bad caller.
 */
static void dbg_dump_bad_irq_request(struct spmi_pmic_arb_dev *pmic_arb,
					u8 apid, u16 ppid, const char *msg)
{
	u8 i;

	dev_err(pmic_arb->dev, "bad request: %s APID:0x%02x PPID:0x%03x\n",
							msg, apid, ppid);

	/* dump the stack to trace the caller */
	dump_stack();

	dev_info(pmic_arb->dev, "APID => PPID mapping table:\n");
	for (i = pmic_arb->min_intr_apid; i <= pmic_arb->max_intr_apid; ++i) {
		if (!is_apid_valid(pmic_arb, i))
			continue;
		dev_info(pmic_arb->dev, "0x%02x => 0x%03x\n", i,
					get_peripheral_id(pmic_arb, i));
	}
}
/*
 * PPID to APID: resolve @ppid through the hardware mapping table and,
 * when found, verify this EE owns the peripheral and record the mapping
 * in periph_id_map (also widening the min/max interrupt APID window).
 * Returns the APID, or PMIC_ARB_MAX_PERIPHS on any failure.
 */
static uint32_t map_peripheral_id(struct spmi_pmic_arb_dev *pmic_arb, u16 ppid)
{
	u32 apid = search_mapping_table(pmic_arb, ppid);
	u32 old_ppid;
	u32 owner;
	/* If the apid was found, add it to the lookup table */
	if (apid < PMIC_ARB_MAX_PERIPHS) {
		old_ppid = get_peripheral_id(pmic_arb, apid);
		owner = SPMI_OWNERSHIP_PERIPH2OWNER(
				readl_relaxed(pmic_arb->cnfg +
					SPMI_OWNERSHIP_TABLE_REG(apid)));
		/* Check ownership */
		if (owner != pmic_arb->ee) {
			dev_err(pmic_arb->dev, "PPID 0x%x incorrect owner %d\n",
				ppid, owner);
			return PMIC_ARB_MAX_PERIPHS;
		}
		/* Check if already mapped */
		if (pmic_arb->periph_id_map[apid] & PMIC_ARB_PERIPH_ID_VALID) {
			if (ppid != old_ppid) {
				/* one APID must not serve two PPIDs */
				dbg_dump_bad_irq_request(pmic_arb, apid, ppid,
						"map irq: apid already mapped");
				return PMIC_ARB_MAX_PERIPHS;
			}
			return apid;
		}
		pmic_arb->periph_id_map[apid] = ppid | PMIC_ARB_PERIPH_ID_VALID;
		/* widen the APID window scanned by the interrupt handler */
		if ((apid < pmic_arb->max_periph_intrs)
		    && (apid > pmic_arb->max_intr_apid))
			pmic_arb->max_intr_apid = apid;
		if (apid < pmic_arb->min_intr_apid)
			pmic_arb->min_intr_apid = apid;
		return apid;
	}
	dev_err(pmic_arb->dev, "Unknown ppid 0x%x\n", ppid);
	return PMIC_ARB_MAX_PERIPHS;
}
/*
 * pmic_arb_pic_enable: Enable interrupt at the PMIC Arbiter PIC
 *
 * This function is a callback of request_irq(a PMIC interrupt #).
 * @data carries the APID in its low byte; the APID must have been
 * registered via map_peripheral_id() first.  Returns 0 or -EINVAL.
 */
static int pmic_arb_pic_enable(struct spmi_controller *ctrl,
				struct qpnp_irq_spec *spec, uint32_t data)
{
	struct spmi_pmic_arb_dev *pmic_arb = spmi_get_ctrldata(ctrl);
	u8 apid = data & PMIC_ARB_APID_MASK;
	unsigned long flags;
	u32 status;
	dev_dbg(pmic_arb->dev, "PIC enable, apid:0x%x, sid:0x%x, pid:0x%x\n",
				apid, spec->slave, spec->per);
	if ((apid < pmic_arb->min_intr_apid) ||
	    (apid > pmic_arb->max_intr_apid) ||
	    (!is_apid_valid(pmic_arb, apid))) {
		dbg_dump_bad_irq_request(pmic_arb, apid,
				PMIC_ARB_TO_PPID(spec->slave, spec->per),
				"enable irq: invalid apid");
		return -EINVAL;
	}
	spin_lock_irqsave(&pmic_arb->lock, flags);
	/* read-modify-write: only set the enable bit if not already set */
	status = spmi_pic_acc_en_rd(pmic_arb, spec->slave, spec->per, apid,
								"pic-en");
	if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
		status = status | SPMI_PIC_ACC_ENABLE_BIT;
		spmi_pic_acc_en_wr(pmic_arb, status, spec->slave, spec->per,
								apid, "pic-en");
		/* Interrupt needs to be enabled before returning to caller */
		wmb();
	}
	spin_unlock_irqrestore(&pmic_arb->lock, flags);
	return 0;
}
/*
 * pmic_arb_pic_disable: Disable interrupt at the PMIC Arbiter PIC
 *
 * This function is a callback of free_irq(a PMIC interrupt #).
 * Mirror image of pmic_arb_pic_enable(): clears the accumulator enable
 * bit for the APID carried in the low byte of @data.
 */
static int pmic_arb_pic_disable(struct spmi_controller *ctrl,
				struct qpnp_irq_spec *spec, uint32_t data)
{
	struct spmi_pmic_arb_dev *pmic_arb = spmi_get_ctrldata(ctrl);
	u8 apid = data & PMIC_ARB_APID_MASK;
	unsigned long flags;
	u32 status;
	dev_dbg(pmic_arb->dev, "PIC disable, apid:0x%x, sid:0x%x, pid:0x%x\n",
				apid, spec->slave, spec->per);
	if ((apid < pmic_arb->min_intr_apid) ||
	    (apid > pmic_arb->max_intr_apid) ||
	    (!is_apid_valid(pmic_arb, apid))) {
		dbg_dump_bad_irq_request(pmic_arb, apid,
				PMIC_ARB_TO_PPID(spec->slave, spec->per),
				"disable irq: invalid apid");
		return -EINVAL;
	}
	spin_lock_irqsave(&pmic_arb->lock, flags);
	status = spmi_pic_acc_en_rd(pmic_arb, spec->slave, spec->per, apid,
								"pic-en");
	if (status & SPMI_PIC_ACC_ENABLE_BIT) {
		/* clear the enable bit and write */
		status = status & ~SPMI_PIC_ACC_ENABLE_BIT;
		spmi_pic_acc_en_wr(pmic_arb, status, spec->slave, spec->per,
								apid, "pic-en");
		/* Interrupt needs to be disabled before returning to caller */
		wmb();
	}
	spin_unlock_irqrestore(&pmic_arb->lock, flags);
	return 0;
}
/*
 * Handle (or, when @show is true, just display) all pending interrupt
 * bits of one peripheral identified by @apid: read the per-peripheral
 * IRQ status, ack it (unless showing), and dispatch each set bit to the
 * qpnpint layer.
 */
static irqreturn_t
periph_interrupt(struct spmi_pmic_arb_dev *pmic_arb, u8 apid, bool show)
{
	u16 ppid = get_peripheral_id(pmic_arb, apid);
	void __iomem *intr = pmic_arb->intr;
	u8 sid = (ppid >> 8) & 0x0F;
	u8 pid = ppid & 0xFF;
	u32 status;
	int i;
	if (!is_apid_valid(pmic_arb, apid)) {
		/* NOTE(review): deliberately falls through after logging —
		 * the early-out below was left commented out; confirm. */
		dev_err(pmic_arb->dev,
		"periph_interrupt(apid:0x%x sid:0x%x pid:0x%x) unknown peripheral\n",
			apid, sid, pid);
		/* return IRQ_NONE; */
	}
	status = spmi_pic_acc_en_rd(pmic_arb, sid, pid, apid, "isr");
	if (!(status & SPMI_PIC_ACC_ENABLE_BIT)) {
		/*
		 * All interrupts from this peripheral are disabled
		 * don't bother calling the qpnpint handler
		 */
		return IRQ_HANDLED;
	}
	/* Read the peripheral specific interrupt bits */
	status = readl_relaxed(intr + pmic_arb->ver->irq_status(apid));
	if (!show) {
		/* Clear the peripheral interrupts */
		writel_relaxed(status, intr + pmic_arb->ver->irq_clear(apid));
		/* Irq needs to be cleared/acknowledged before exiting ISR */
		mb();
	}
	dev_dbg(pmic_arb->dev,
		"interrupt, apid:0x%x, sid:0x%x, pid:0x%x, intr:0x%x\n",
						apid, sid, pid, status);
	/* Send interrupt notification: one qpnp_irq_spec per set bit */
	for (i = 0; status && i < 8; ++i, status >>= 1) {
		if (status & 0x1) {
			struct qpnp_irq_spec irq_spec = {
				.slave = sid,
				.per = pid,
				.irq = i,
			};
			if (show)
				qpnpint_show_irq(&pmic_arb->controller,
								&irq_spec);
			else
				qpnpint_handle_irq(&pmic_arb->controller,
								&irq_spec);
		}
	}
	return IRQ_HANDLED;
}
/*
 * Peripheral interrupt handler: scan this EE's accumulated-status words
 * (32 APIDs per word) over the registered APID window and service every
 * APID whose bit is set.  Bits that were already set at probe time
 * (irq_acc0_init_val, a HW workaround) are masked out of word 0.
 */
static irqreturn_t
__pmic_arb_periph_irq(int irq, void *dev_id, bool show)
{
	struct spmi_pmic_arb_dev *pmic_arb = dev_id;
	u8 ee = pmic_arb->ee;
	u32 ret = IRQ_NONE;
	u32 status;
	/* word indices covering [min_intr_apid, max_intr_apid] */
	int first = pmic_arb->min_intr_apid >> 5;
	int last = pmic_arb->max_intr_apid >> 5;
	int i, j;
	dev_dbg(pmic_arb->dev, "Peripheral interrupt detected\n");
	/* Check the accumulated interrupt status */
	for (i = first; i <= last; ++i) {
		status = readl_relaxed(pmic_arb->intr +
				pmic_arb->ver->owner_acc_status(ee, i));
		if ((i == 0) && (status & pmic_arb->irq_acc0_init_val)) {
			dev_dbg(pmic_arb->dev, "Ignoring IRQ acc[0] mask:0x%x\n",
				status & pmic_arb->irq_acc0_init_val);
			status &= ~pmic_arb->irq_acc0_init_val;
		}
		for (j = 0; status && j < 32; ++j, status >>= 1) {
			if (status & 0x1) {
				u8 id = (i * 32) + j;
				ret |= periph_interrupt(pmic_arb, id, show);
			}
		}
	}
	return ret;
}
/* Normal ISR entry point: handle (and ack) pending peripheral interrupts. */
static irqreturn_t pmic_arb_periph_irq(int irq, void *dev_id)
{
	return __pmic_arb_periph_irq(irq, dev_id, false);
}
/*
 * Syscore resume hook: if requested, re-scan the accumulators in
 * report-only mode so the IRQ that woke the system gets logged.
 * Uses the_pmic_arb, the single global instance set at probe time.
 */
static void spmi_pmic_arb_resume(void)
{
	if (qpnpint_show_resume_irq())
		__pmic_arb_periph_irq(the_pmic_arb->pic_irq,
				the_pmic_arb, true);
}
static struct syscore_ops spmi_pmic_arb_syscore_ops = {
	.resume = spmi_pmic_arb_resume,
};
/*
 * Callback to register an APID for specific slave/peripheral.
 * Packs slave-id (4 bits) and peripheral-id (8 bits) into a PPID and
 * stores the resolved APID into *@data. Always returns 0.
 */
static int pmic_arb_intr_priv_data(struct spmi_controller *ctrl,
				struct qpnp_irq_spec *spec, uint32_t *data)
{
	struct spmi_pmic_arb_dev *pmic_arb = spmi_get_ctrldata(ctrl);
	u16 ppid = ((spec->slave & 0x0F) << 8) | (spec->per & 0xFF);
	*data = map_peripheral_id(pmic_arb, ppid);
	return 0;
}
/*
 * Debugfs "mapping" file: dump APID -> PPID translation and the current
 * interrupt-accumulator enable state for every valid APID in the
 * observed window.
 */
static int pmic_arb_mapping_data_show(struct seq_file *file, void *unused)
{
	struct spmi_pmic_arb_dev *pmic_arb = file->private;
	int first = pmic_arb->min_intr_apid;
	int last = pmic_arb->max_intr_apid;
	int i;
	for (i = first; i <= last; ++i) {
		if (!is_apid_valid(pmic_arb, i))
			continue;
		seq_printf(file, "APID 0x%.2x = PPID 0x%.3x. Enabled:%d\n",
			i, get_peripheral_id(pmic_arb, i),
			readl_relaxed(pmic_arb->intr +
					pmic_arb->ver->acc_enable(i)));
	}
	return 0;
}
/* seq_file boilerplate for the debugfs "mapping" entry */
static int pmic_arb_mapping_data_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmic_arb_mapping_data_show, inode->i_private);
}
static const struct file_operations pmic_arb_dfs_fops = {
	.open		= pmic_arb_mapping_data_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
/* Mask interrupts that are stuck (already latched) at boot time. */
static void pmic_arb_handle_stuck_irqs(struct spmi_pmic_arb_dev *pmic_arb)
{
	int apid;
	/* only the first 32-bit accumulator has been seen corrupted at boot */
	pmic_arb->irq_acc0_init_val = readl_relaxed(pmic_arb->intr +
		       pmic_arb->ver->owner_acc_status(pmic_arb->ee, 0));
	if (!pmic_arb->irq_acc0_init_val)
		return;
	dev_err(pmic_arb->dev, "non-zero irq-accumulator[0]:0x%x\n",
						pmic_arb->irq_acc0_init_val);
	/* keep bits owned by our EE; mask only foreign stuck interrupts */
	for (apid = 0; apid < 32 ; ++apid) {
		u32 mask = BIT(apid);
		if (pmic_arb->irq_acc0_init_val & mask) {
			u32 owner = SPMI_OWNERSHIP_PERIPH2OWNER(
					readl_relaxed(pmic_arb->cnfg +
					SPMI_OWNERSHIP_TABLE_REG(apid)));
			/* don't mask interrupts that we own */
			if (owner == pmic_arb->ee)
				pmic_arb->irq_acc0_init_val &= ~mask;
		}
	}
}
/*
 * Read a mandatory-or-defaulted u32 DT property; logs an error when the
 * property is absent and returns the of_property_read_u32() status.
 */
static int
spmi_pmic_arb_get_property(struct platform_device *pdev, char *pname, u32 *prop)
{
	int ret = of_property_read_u32(pdev->dev.of_node, pname, prop);
	if (ret)
		dev_err(&pdev->dev, "missing property: %s\n", pname);
	else
		pr_debug("%s = 0x%x\n", pname, *prop);
	return ret;
}
/* Interrupt mask/unmask callbacks handed to the qpnpint layer */
static struct qpnp_local_int spmi_pmic_arb_intr_cb = {
	.mask		= pmic_arb_pic_disable,
	.unmask		= pmic_arb_pic_enable,
	.register_priv_data	= pmic_arb_intr_priv_data,
};
/*
 * pmic_arb_chnl_tbl_create() - build the PPID -> channel lookup table
 * (PMIC arbiter v2 only). Returns 0 or -ENOMEM.
 */
static int pmic_arb_chnl_tbl_create(struct spmi_pmic_arb_dev *pmic_arb)
{
	u16 chnl;
	/* size: 12bit entries = 4bit SID + 8bit periph ID */
	u32 tbl_sz = (1 << 12);
	pmic_arb->ppid_2_chnl_tbl = devm_kzalloc(pmic_arb->dev, tbl_sz,
								GFP_KERNEL);
	if (!pmic_arb->ppid_2_chnl_tbl) {
		dev_err(pmic_arb->dev,
			"cannot allocate pmic_arb channel table\n");
		return -ENOMEM;
	}
	/*
	 * The PMIC_ARB_REG_CHNL registers are a table mapping channel number
	 * to SID + PID (PPID). We create an invert of that table here for
	 * optimization of mapping SID+PID to channel number.
	 */
	for (chnl = 0; chnl < pmic_arb->max_peripherals; ++chnl) {
		u32 regval = readl_relaxed(pmic_arb->base +
						PMIC_ARB_REG_CHNL(chnl));
		u8 sid = (regval >> 16) & 0xF;
		u8 pid = (regval >> 8) & 0xFF;
		/* an all-zero entry means the channel is unassigned */
		if (!regval)
			continue;
		PMIC_ARB_CHNL(pmic_arb, sid, pid) = chnl;
	}
	return 0;
}
/*
 * pmic_arb_devm_ioremap: get a named platform resource and ioremap it
 *
 * @res_name name of resource
 * @virt output parameter, set to the resource's mapped virtual address
 * @phys output parameter, if not null, set to the resource's physical
 *    address. If null, no-op.
 *
 * Returns 0, -ENODEV (resource missing) or -ENOMEM (ioremap failed).
 */
static int pmic_arb_devm_ioremap(struct platform_device *pdev,
		const char *res_name, void __iomem **virt, phys_addr_t *phys)
{
	struct resource *mem_res =
		platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
	if (!mem_res) {
		dev_err(&pdev->dev, "error missing config of %s reg-space\n",
								res_name);
		return -ENODEV;
	}
	*virt = devm_ioremap(&pdev->dev, mem_res->start,
						resource_size(mem_res));
	dev_dbg(&pdev->dev,
		"%s ioremap(phy:0x%lx vir:0x%p len:0x%lx)\n", res_name,
		(ulong) mem_res->start, *virt, (ulong) resource_size(mem_res));
	if (!(*virt)) {
		dev_err(&pdev->dev,
			"error %s ioremap(phy:0x%lx len:0x%lx) failed\n",
			res_name, (ulong) mem_res->start,
			(ulong) resource_size(mem_res));
		return -ENOMEM;
	}
	if (phys)
		*phys = mem_res->start;
	return 0;
}
/*
 * Detect the arbiter HW version and finish version-specific setup:
 * v1 reuses the core register space for reads/writes; v2 needs the
 * PPID->channel table plus separate "obsrvr" and "chnls" regions.
 *
 * NOTE(review): the version banner is printed with dev_err() although it
 * is informational -- dev_info() would seem more appropriate; confirm
 * whether the loud level is intentional before changing.
 */
static int pmic_arb_version_specific_init(struct spmi_pmic_arb_dev *pmic_arb,
					struct platform_device *pdev)
{
	int ret;
	u32 version;
	version = readl_relaxed(pmic_arb->base + PMIC_ARB_VERSION);
	if (version < PMIC_ARB_V2_MIN) {
		dev_err(&pdev->dev, "PMIC Arb Version-1 0x%x\n", version);
		pmic_arb->rdbase = pmic_arb->base;
		pmic_arb->wrbase = pmic_arb->base;
		pmic_arb->dbg.rdbase_phy = pmic_arb->dbg.base_phy;
		pmic_arb->dbg.wrbase_phy = pmic_arb->dbg.base_phy;
		pmic_arb->ver = &spmi_pmic_arb_v1;
	} else {
		dev_err(&pdev->dev, "PMIC Arb Version-2 0x%x\n", version);
		ret = pmic_arb_chnl_tbl_create(pmic_arb);
		if (ret)
			return ret;
		ret = pmic_arb_devm_ioremap(pdev, "obsrvr", &pmic_arb->rdbase,
						&pmic_arb->dbg.rdbase_phy);
		if (ret)
			return ret;
		ret = pmic_arb_devm_ioremap(pdev, "chnls", &pmic_arb->wrbase,
						&pmic_arb->dbg.wrbase_phy);
		if (ret)
			return ret;
		pmic_arb->ver = &spmi_pmic_arb_v2;
	}
	return 0;
}
/*
 * spmi_pmic_arb_probe() - bring up one SPMI PMIC arbiter instance.
 *
 * Maps the core/intr/cnfg register spaces, performs version-specific
 * init, reads DT configuration, requests the summary IRQ, registers the
 * SPMI controller plus qpnpint callbacks, and exposes the debugfs
 * mapping file. Returns 0 or a negative errno.
 */
static int spmi_pmic_arb_probe(struct platform_device *pdev)
{
	struct spmi_pmic_arb_dev *pmic_arb;
	u32 cell_index;
	u32 prop;
	int ret = 0;
	int i;

	pr_debug("SPMI PMIC Arbiter\n");
	pmic_arb = devm_kzalloc(&pdev->dev,
				sizeof(struct spmi_pmic_arb_dev), GFP_KERNEL);
	if (!pmic_arb) {
		dev_err(&pdev->dev, "can not allocate pmic_arb data\n");
		return -ENOMEM;
	}
	pmic_arb->dev = &pdev->dev;

	ret = pmic_arb_devm_ioremap(pdev, "core", &pmic_arb->base,
						&pmic_arb->dbg.base_phy);
	if (ret)
		return ret;

	/* optional property: fall back to the default peripheral count */
	ret = spmi_pmic_arb_get_property(pdev, "qcom,pmic-arb-max-peripherals",
					&prop);
	if (ret)
		prop = PMIC_ARB_PERIPHS_CHNL_DEFAULT;
	pmic_arb->max_peripherals = prop;

	ret = pmic_arb_version_specific_init(pmic_arb, pdev);
	if (ret)
		return ret;

	ret = pmic_arb_devm_ioremap(pdev, "intr", &pmic_arb->intr,
						&pmic_arb->dbg.intr_phy);
	if (ret)
		return ret;

	ret = pmic_arb_devm_ioremap(pdev, "cnfg", &pmic_arb->cnfg, NULL);
	if (ret)
		return ret;

	/* cache the HW interrupt mapping table for APID resolution */
	for (i = 0; i < ARRAY_SIZE(pmic_arb->mapping_table); ++i)
		pmic_arb->mapping_table[i] = readl_relaxed(
			pmic_arb->cnfg + SPMI_MAPPING_TABLE_REG(i));

	pmic_arb->pic_irq = platform_get_irq(pdev, 0);
	/*
	 * platform_get_irq() reports failure with a negative errno (and 0
	 * is not a usable IRQ either); the previous "!pic_irq" test let
	 * negative error values slip through to devm_request_irq().
	 */
	if (pmic_arb->pic_irq <= 0) {
		dev_err(&pdev->dev, "missing IRQ resource\n");
		return -ENODEV;
	}

	/* Get properties from the device tree */
	ret = spmi_pmic_arb_get_property(pdev, "cell-index", &cell_index);
	if (ret)
		return -ENODEV;

	ret = spmi_pmic_arb_get_property(pdev, "qcom,pmic-arb-ee", &prop);
	if (ret)
		return -ENODEV;
	pmic_arb->ee = (u8)prop;

	ret = spmi_pmic_arb_get_property(pdev, "qcom,pmic-arb-channel",
					&prop);
	if (ret)
		return -ENODEV;
	pmic_arb->channel = (u8)prop;

	pmic_arb->allow_wakeup = !of_property_read_bool(pdev->dev.of_node,
					"qcom,not-wakeup");
	if (pmic_arb->allow_wakeup) {
		ret = irq_set_irq_wake(pmic_arb->pic_irq, 1);
		if (unlikely(ret)) {
			pr_err("Unable to set wakeup irq, err=%d\n", ret);
			return -ENODEV;
		}
	}

	ret = spmi_pmic_arb_get_property(pdev,
				"qcom,pmic-arb-max-periph-interrupts", &prop);
	if (ret)
		prop = PMIC_ARB_PERIPHS_INTR_DEFAULT;
	pmic_arb->max_periph_intrs = prop;

	/* start with an empty [min, max] APID observation window */
	pmic_arb->max_intr_apid = 0;
	pmic_arb->min_intr_apid = PMIC_ARB_MAX_PERIPHS - 1;

	platform_set_drvdata(pdev, pmic_arb);
	spmi_set_ctrldata(&pmic_arb->controller, pmic_arb);

	spin_lock_init(&pmic_arb->lock);

	pmic_arb->controller.nr = cell_index;
	pmic_arb->controller.dev.parent = pdev->dev.parent;
	pmic_arb->controller.dev.of_node = of_node_get(pdev->dev.of_node);
	pmic_arb_handle_stuck_irqs(pmic_arb);

	/* Callbacks */
	pmic_arb->controller.cmd = pmic_arb_cmd;
	pmic_arb->controller.read_cmd = pmic_arb_read_cmd;
	pmic_arb->controller.write_cmd = pmic_arb_write_cmd;

	ret = devm_request_irq(&pdev->dev, pmic_arb->pic_irq,
		pmic_arb_periph_irq, IRQF_TRIGGER_HIGH, pdev->name, pmic_arb);
	if (ret) {
		dev_err(&pdev->dev, "request IRQ failed\n");
		return ret;
	}

	ret = spmi_add_controller(&pmic_arb->controller);
	if (ret)
		goto err_add_controller;

	/* Register the interrupt enable/disable functions */
	ret = qpnpint_register_controller(pmic_arb->controller.dev.of_node,
					  &pmic_arb->controller,
					  &spmi_pmic_arb_intr_cb);
	if (ret) {
		dev_err(&pdev->dev, "Unable to register controller %d\n",
								cell_index);
		goto err_reg_controller;
	}

	/* Register device(s) from the device tree */
	of_spmi_register_devices(&pmic_arb->controller);

	/* Add debugfs file for mapping data */
	if (spmi_dfs_create_file(&pmic_arb->controller, "mapping",
					pmic_arb, &pmic_arb_dfs_fops) == NULL)
		dev_err(&pdev->dev, "error creating 'mapping' debugfs file\n");

	the_pmic_arb = pmic_arb;
	register_syscore_ops(&spmi_pmic_arb_syscore_ops);
	return 0;

err_reg_controller:
	spmi_del_controller(&pmic_arb->controller);
err_add_controller:
	platform_set_drvdata(pdev, NULL);
	if (pmic_arb->allow_wakeup)
		irq_set_irq_wake(pmic_arb->pic_irq, 0);
	return ret;
}
/*
 * Tear down a probed arbiter: unregister the qpnpint controller, drop
 * the wakeup-irq reference, and remove the SPMI controller. Returns the
 * qpnpint_unregister_controller() status (teardown continues on error).
 */
static int spmi_pmic_arb_remove(struct platform_device *pdev)
{
	struct spmi_pmic_arb_dev *pmic_arb = platform_get_drvdata(pdev);
	int ret;
	ret = qpnpint_unregister_controller(pmic_arb->controller.dev.of_node);
	if (ret)
		dev_err(&pdev->dev, "Unable to unregister controller %d\n",
						pmic_arb->controller.nr);
	if (pmic_arb->allow_wakeup)
		irq_set_irq_wake(pmic_arb->pic_irq, 0);
	platform_set_drvdata(pdev, NULL);
	spmi_del_controller(&pmic_arb->controller);
	return ret;
}
/* Device-tree match table for the arbiter */
static struct of_device_id spmi_pmic_arb_match_table[] = {
	{	.compatible = "qcom,spmi-pmic-arb",
	},
	{}
};
static struct platform_driver spmi_pmic_arb_driver = {
	.probe		= spmi_pmic_arb_probe,
	.remove		= spmi_pmic_arb_remove,
	.driver		= {
		.name	= SPMI_PMIC_ARB_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = spmi_pmic_arb_match_table,
	},
};
/* registered at postcore so the bus is up before dependent drivers probe */
static int __init spmi_pmic_arb_init(void)
{
	return platform_driver_register(&spmi_pmic_arb_driver);
}
postcore_initcall(spmi_pmic_arb_init);
static void __exit spmi_pmic_arb_exit(void)
{
	platform_driver_unregister(&spmi_pmic_arb_driver);
}
module_exit(spmi_pmic_arb_exit);
MODULE_LICENSE("GPL v2");
MODULE_VERSION("1.0");
MODULE_ALIAS("platform:spmi_pmic_arb");
| gpl-2.0 |
shardul-seth/PAC-man_semc-kernel-msm7x30 | arch/arm/plat-s3c/pm.c | 479 | 8918 | /* linux/arch/arm/plat-s3c/pm.c
*
* Copyright 2008 Openmoko, Inc.
* Copyright 2004,2006,2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
* S3C common power management (suspend to ram) support.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <mach/hardware.h>
#include <mach/map.h>
#include <plat/regs-serial.h>
#include <mach/regs-clock.h>
#include <mach/regs-irq.h>
#include <asm/irq.h>
#include <plat/pm.h>
#include <plat/pm-core.h>
/* for external use */
unsigned long s3c_pm_flags;
/* Debug code:
*
* This code supports debug output to the low level UARTs for use on
* resume before the console layer is available.
*/
#ifdef CONFIG_S3C2410_PM_DEBUG
extern void printascii(const char *);
/*
 * s3c_pm_dbg() - printf-style debug output through the low-level UART,
 * usable before the console layer is restored after resume.
 */
void s3c_pm_dbg(const char *fmt, ...)
{
	va_list va;
	char buff[256];

	va_start(va, fmt);
	/* vsnprintf bounds the write to the on-stack buffer; the previous
	 * vsprintf() could overflow buff[] for long messages */
	vsnprintf(buff, sizeof(buff), fmt, va);
	va_end(va);

	printascii(buff);
}
/* Re-enable the UART clocks so debug output works right after resume. */
static inline void s3c_pm_debug_init(void)
{
	/* restart uart clocks so we can use them to output */
	s3c_pm_debug_init_uart();
}
#else
/* debug disabled: compile the init away */
#define s3c_pm_debug_init() do { } while(0)
#endif /* CONFIG_S3C2410_PM_DEBUG */
/* Save the UART configurations if we are configured for debug. */
/* non-zero when the SoC has the S3C2443-style UDIVSLOT divisor register */
unsigned char pm_uart_udivslot;
#ifdef CONFIG_S3C2410_PM_DEBUG
struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];
/* Capture one UART's line/control/fifo/modem/baud registers into @save. */
static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
{
	void __iomem *regs = S3C_VA_UARTx(uart);
	save->ulcon = __raw_readl(regs + S3C2410_ULCON);
	save->ucon = __raw_readl(regs + S3C2410_UCON);
	save->ufcon = __raw_readl(regs + S3C2410_UFCON);
	save->umcon = __raw_readl(regs + S3C2410_UMCON);
	save->ubrdiv = __raw_readl(regs + S3C2410_UBRDIV);
	if (pm_uart_udivslot)
		save->udivslot = __raw_readl(regs + S3C2443_DIVSLOT);
	S3C_PMDBG("UART[%d]: ULCON=%04x, UCON=%04x, UFCON=%04x, UBRDIV=%04x\n",
		  uart, save->ulcon, save->ucon, save->ufcon, save->ubrdiv);
}
static void s3c_pm_save_uarts(void)
{
struct pm_uart_save *save = uart_save;
unsigned int uart;
for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++)
s3c_pm_save_uart(uart, save);
}
/* Write one UART's saved register values back to the hardware. */
static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save)
{
	void __iomem *regs = S3C_VA_UARTx(uart);
	/* give the machine code a chance to fix up clocks first */
	s3c_pm_arch_update_uart(regs, save);
	__raw_writel(save->ulcon, regs + S3C2410_ULCON);
	__raw_writel(save->ucon, regs + S3C2410_UCON);
	__raw_writel(save->ufcon, regs + S3C2410_UFCON);
	__raw_writel(save->umcon, regs + S3C2410_UMCON);
	__raw_writel(save->ubrdiv, regs + S3C2410_UBRDIV);
	if (pm_uart_udivslot)
		__raw_writel(save->udivslot, regs + S3C2443_DIVSLOT);
}
static void s3c_pm_restore_uarts(void)
{
struct pm_uart_save *save = uart_save;
unsigned int uart;
for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++)
s3c_pm_restore_uart(uart, save);
}
#else
/* no PM debug: UART state is left to the serial driver */
static void s3c_pm_save_uarts(void) { }
static void s3c_pm_restore_uarts(void) { }
#endif
/* The IRQ ext-int code goes here, it is too small to currently bother
* with its own file. */
unsigned long s3c_irqwake_intmask = 0xffffffffL;
unsigned long s3c_irqwake_eintmask = 0xffffffffL;
/*
 * s3c_irqext_wake() - enable/disable an external interrupt as a wake
 * source. Returns -ENOENT if the EINT cannot act as a wakeup source,
 * otherwise updates s3c_irqwake_eintmask (bit set == masked) and
 * returns 0.
 */
int s3c_irqext_wake(unsigned int irqno, unsigned int state)
{
	unsigned long bit = 1L << IRQ_EINT_BIT(irqno);

	if (!(s3c_irqwake_eintallow & bit))
		return -ENOENT;

	printk(KERN_INFO "wake %s for irq %d\n",
	       state ? "enabled" : "disabled", irqno);

	if (state)
		s3c_irqwake_eintmask &= ~bit;
	else
		s3c_irqwake_eintmask |= bit;

	return 0;
}
/* helper functions to save and restore register state */
/**
 * s3c_pm_do_save() - save a set of registers for restoration on resume.
 * @ptr: Pointer to an array of registers.
 * @count: Size of the ptr array.
 *
 * Walk the register list, capturing each current value so it can be
 * written back when we wake up.
 */
void s3c_pm_do_save(struct sleep_save *ptr, int count)
{
	while (count-- > 0) {
		ptr->val = __raw_readl(ptr->reg);
		S3C_PMDBG("saved %p value %08lx\n", ptr->reg, ptr->val);
		ptr++;
	}
}
/**
 * s3c_pm_do_restore() - restore register values from the save list.
 * @ptr: Pointer to an array of registers.
 * @count: Size of the ptr array.
 *
 * Write back the values captured by s3c_pm_do_save().
 *
 * Note, we do not use S3C_PMDBG() in here, as the system may not have
 * restored the UARTs state yet; plain printk goes to the log buffer.
 */
void s3c_pm_do_restore(struct sleep_save *ptr, int count)
{
	while (count-- > 0) {
		printk(KERN_DEBUG "restore %p (restore %08lx, was %08x)\n",
		       ptr->reg, ptr->val, __raw_readl(ptr->reg));
		__raw_writel(ptr->val, ptr->reg);
		ptr++;
	}
}
/**
 * s3c_pm_do_restore_core() - early restore register values from save list.
 *
 * Like s3c_pm_do_restore(), but with no logging or other side effects,
 * for use before core hardware (UARTs, memory) has been brought back.
 *
 * WARNING: Do not put any debug in here that may effect memory or use
 * peripherals, as things may be changing!
 */
void s3c_pm_do_restore_core(struct sleep_save *ptr, int count)
{
	while (count-- > 0) {
		__raw_writel(ptr->val, ptr->reg);
		ptr++;
	}
}
/*
 * s3c_pm_show_resume_irqs() - report which IRQs were asserted at resume
 * (i.e. which sources woke us), skipping any bit set in @mask.
 */
static void s3c_pm_show_resume_irqs(int start, unsigned long which,
				    unsigned long mask)
{
	int bit;

	which &= ~mask;

	for (bit = 0; bit <= 31; bit++)
		if (which & (1L << bit))
			S3C_PMDBG("IRQ %d asserted at resume\n", start + bit);
}
/* CPU-specific suspend hooks, filled in by the per-SoC PM code */
void (*pm_cpu_prep)(void);
void (*pm_cpu_sleep)(void);
/* true when at least one bit of @allow is cleared in @mask (a source is unmasked) */
#define any_allowed(mask, allow) (((mask) & (allow)) != (allow))
/* s3c_pm_enter
 *
 * central control for sleep/resume process
 *
 * Statement order below is load-bearing: state must be saved before the
 * wake sources are configured, caches flushed before s3c_cpu_save(),
 * and restore must run core -> uarts -> gpios before any debug output.
 */
static int s3c_pm_enter(suspend_state_t state)
{
	static unsigned long regs_save[16];
	/* ensure the debug is initialised (if enabled) */
	s3c_pm_debug_init();
	S3C_PMDBG("%s(%d)\n", __func__, state);
	if (pm_cpu_prep == NULL || pm_cpu_sleep == NULL) {
		printk(KERN_ERR "%s: error: no cpu sleep function\n", __func__);
		return -EINVAL;
	}
	/* check if we have anything to wake-up with... bad things seem
	 * to happen if you suspend with no wakeup (system will often
	 * require a full power-cycle)
	*/
	if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
	    !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
		printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
		printk(KERN_ERR "%s: Aborting sleep\n", __func__);
		return -EINVAL;
	}
	/* store the physical address of the register recovery block */
	s3c_sleep_save_phys = virt_to_phys(regs_save);
	S3C_PMDBG("s3c_sleep_save_phys=0x%08lx\n", s3c_sleep_save_phys);
	/* save all necessary core registers not covered by the drivers */
	s3c_pm_save_gpios();
	s3c_pm_save_uarts();
	s3c_pm_save_core();
	/* set the irq configuration for wake */
	s3c_pm_configure_extint();
	S3C_PMDBG("sleep: irq wakeup masks: %08lx,%08lx\n",
	    s3c_irqwake_intmask, s3c_irqwake_eintmask);
	s3c_pm_arch_prepare_irqs();
	/* call cpu specific preparation */
	pm_cpu_prep();
	/* flush cache back to ram */
	flush_cache_all();
	s3c_pm_check_store();
	/* send the cpu to sleep... */
	s3c_pm_arch_stop_clocks();
	/* s3c_cpu_save will also act as our return point from when
	 * we resume as it saves its own register state and restores it
	 * during the resume. */
	s3c_cpu_save(regs_save);
	/* restore the cpu state using the kernel's cpu init code. */
	cpu_init();
	/* restore the system state */
	s3c_pm_restore_core();
	s3c_pm_restore_uarts();
	s3c_pm_restore_gpios();
	s3c_pm_debug_init();
	/* check what irq (if any) restored the system */
	s3c_pm_arch_show_resume_irqs();
	S3C_PMDBG("%s: post sleep, preparing to return\n", __func__);
	/* LEDs should now be 1110 */
	s3c_pm_debug_smdkled(1 << 1, 0);
	s3c_pm_check_restore();
	/* ok, let's return from sleep */
	S3C_PMDBG("S3C PM Resume (post-restore)\n");
	return 0;
}
/* callback from assembly code: flush all caches before the final sleep */
void s3c_pm_cb_flushcache(void)
{
	flush_cache_all();
}
/* suspend_ops.prepare: set up the optional memory-check area */
static int s3c_pm_prepare(void)
{
	/* prepare check area if configured */
	s3c_pm_check_prepare();
	return 0;
}
/* suspend_ops.finish: release the memory-check area */
static void s3c_pm_finish(void)
{
	s3c_pm_check_cleanup();
}
/* suspend-to-RAM operations handed to the suspend core */
static struct platform_suspend_ops s3c_pm_ops = {
	.enter		= s3c_pm_enter,
	.prepare	= s3c_pm_prepare,
	.finish		= s3c_pm_finish,
	.valid		= suspend_valid_only_mem,
};
/* s3c_pm_init
 *
 * Attach the power management functions. This should be called
 * from the board specific initialisation if the board supports
 * it.
 *
 * Always returns 0.
 */
int __init s3c_pm_init(void)
{
	printk("S3C Power Management, Copyright 2004 Simtec Electronics\n");
	suspend_set_ops(&s3c_pm_ops);
	return 0;
}
| gpl-2.0 |
cleech/linux | sound/pci/oxygen/oxygen_pcm.c | 479 | 22477 | // SPDX-License-Identifier: GPL-2.0-only
/*
* C-Media CMI8788 driver - PCM code
*
* Copyright (c) Clemens Ladisch <clemens@ladisch.de>
*/
#include <linux/pci.h>
#include <sound/control.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "oxygen.h"
/* most DMA channels have a 16-bit counter for 32-bit words */
#define BUFFER_BYTES_MAX ((1 << 16) * 4)
/* the multichannel DMA channel has a 24-bit counter */
#define BUFFER_BYTES_MAX_MULTICH ((1 << 24) * 4)
#define FIFO_BYTES 256
#define FIFO_BYTES_MULTICH 1024
#define PERIOD_BYTES_MIN 64
#define DEFAULT_BUFFER_BYTES (BUFFER_BYTES_MAX / 2)
#define DEFAULT_BUFFER_BYTES_MULTICH (1024 * 1024)
/* hardware capabilities of the stereo DMA channels (A/B/C, SPDIF) */
static const struct snd_pcm_hardware oxygen_stereo_hardware = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_SYNC_START |
		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
	.formats = SNDRV_PCM_FMTBIT_S16_LE |
		   SNDRV_PCM_FMTBIT_S32_LE,
	.rates = SNDRV_PCM_RATE_32000 |
		 SNDRV_PCM_RATE_44100 |
		 SNDRV_PCM_RATE_48000 |
		 SNDRV_PCM_RATE_64000 |
		 SNDRV_PCM_RATE_88200 |
		 SNDRV_PCM_RATE_96000 |
		 SNDRV_PCM_RATE_176400 |
		 SNDRV_PCM_RATE_192000,
	.rate_min = 32000,
	.rate_max = 192000,
	.channels_min = 2,
	.channels_max = 2,
	.buffer_bytes_max = BUFFER_BYTES_MAX,
	.period_bytes_min = PERIOD_BYTES_MIN,
	.period_bytes_max = BUFFER_BYTES_MAX,
	.periods_min = 1,
	.periods_max = BUFFER_BYTES_MAX / PERIOD_BYTES_MIN,
	.fifo_size = FIFO_BYTES,
};
/* the multichannel playback channel: up to 8 channels, 24-bit counter */
static const struct snd_pcm_hardware oxygen_multichannel_hardware = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_SYNC_START |
		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
	.formats = SNDRV_PCM_FMTBIT_S16_LE |
		   SNDRV_PCM_FMTBIT_S32_LE,
	.rates = SNDRV_PCM_RATE_32000 |
		 SNDRV_PCM_RATE_44100 |
		 SNDRV_PCM_RATE_48000 |
		 SNDRV_PCM_RATE_64000 |
		 SNDRV_PCM_RATE_88200 |
		 SNDRV_PCM_RATE_96000 |
		 SNDRV_PCM_RATE_176400 |
		 SNDRV_PCM_RATE_192000,
	.rate_min = 32000,
	.rate_max = 192000,
	.channels_min = 2,
	.channels_max = 8,
	.buffer_bytes_max = BUFFER_BYTES_MAX_MULTICH,
	.period_bytes_min = PERIOD_BYTES_MIN,
	.period_bytes_max = BUFFER_BYTES_MAX_MULTICH,
	.periods_min = 1,
	.periods_max = BUFFER_BYTES_MAX_MULTICH / PERIOD_BYTES_MIN,
	.fifo_size = FIFO_BYTES_MULTICH,
};
/* the AC'97 channel: fixed 48 kHz, 16-bit stereo */
static const struct snd_pcm_hardware oxygen_ac97_hardware = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_SYNC_START |
		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
	.formats = SNDRV_PCM_FMTBIT_S16_LE,
	.rates = SNDRV_PCM_RATE_48000,
	.rate_min = 48000,
	.rate_max = 48000,
	.channels_min = 2,
	.channels_max = 2,
	.buffer_bytes_max = BUFFER_BYTES_MAX,
	.period_bytes_min = PERIOD_BYTES_MIN,
	.period_bytes_max = BUFFER_BYTES_MAX,
	.periods_min = 1,
	.periods_max = BUFFER_BYTES_MAX / PERIOD_BYTES_MIN,
	.fifo_size = FIFO_BYTES,
};
/* per-DMA-channel hardware descriptor lookup */
static const struct snd_pcm_hardware *const oxygen_hardware[PCM_COUNT] = {
	[PCM_A] = &oxygen_stereo_hardware,
	[PCM_B] = &oxygen_stereo_hardware,
	[PCM_C] = &oxygen_stereo_hardware,
	[PCM_SPDIF] = &oxygen_stereo_hardware,
	[PCM_MULTICH] = &oxygen_multichannel_hardware,
	[PCM_AC97] = &oxygen_ac97_hardware,
};
/* Retrieve the DMA channel number stashed in runtime->private_data by
 * oxygen_open(). */
static inline unsigned int
oxygen_substream_channel(struct snd_pcm_substream *substream)
{
	return (unsigned int)(uintptr_t)substream->runtime->private_data;
}
/*
 * oxygen_open() - common PCM open path for all DMA channels.
 *
 * Picks the hardware descriptor for @channel (with PCM_B redirected to
 * the AC'97 descriptor when a second AC'97 codec provides capture 2),
 * applies per-channel restrictions and model-specific filters, installs
 * period/buffer alignment constraints, and records the substream.
 * For the SPDIF channel it also activates the SPDIF PCM control.
 */
static int oxygen_open(struct snd_pcm_substream *substream,
		       unsigned int channel)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;
	/* remember our channel for oxygen_substream_channel() */
	runtime->private_data = (void *)(uintptr_t)channel;
	if (channel == PCM_B && chip->has_ac97_1 &&
	    (chip->model.device_config & CAPTURE_2_FROM_AC97_1))
		runtime->hw = oxygen_ac97_hardware;
	else
		runtime->hw = *oxygen_hardware[channel];
	switch (channel) {
	case PCM_C:
		/* SPDIF input cannot lock to 32/64 kHz streams */
		if (chip->model.device_config & CAPTURE_1_FROM_SPDIF) {
			runtime->hw.rates &= ~(SNDRV_PCM_RATE_32000 |
					       SNDRV_PCM_RATE_64000);
			runtime->hw.rate_min = 44100;
		}
		fallthrough;
	case PCM_A:
	case PCM_B:
		runtime->hw.fifo_size = 0;
		break;
	case PCM_MULTICH:
		runtime->hw.channels_max = chip->model.dac_channels_pcm;
		break;
	}
	if (chip->model.pcm_hardware_filter)
		chip->model.pcm_hardware_filter(channel, &runtime->hw);
	/* DMA counters work in 32-byte units */
	err = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32);
	if (err < 0)
		return err;
	err = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32);
	if (err < 0)
		return err;
	if (runtime->hw.formats & SNDRV_PCM_FMTBIT_S32_LE) {
		/* only 24 of the 32 bits carry sample data */
		err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
		if (err < 0)
			return err;
	}
	if (runtime->hw.channels_max > 2) {
		/* multichannel only supports even channel counts */
		err = snd_pcm_hw_constraint_step(runtime, 0,
						 SNDRV_PCM_HW_PARAM_CHANNELS,
						 2);
		if (err < 0)
			return err;
	}
	snd_pcm_set_sync(substream);
	chip->streams[channel] = substream;
	mutex_lock(&chip->mutex);
	chip->pcm_active |= 1 << channel;
	if (channel == PCM_SPDIF) {
		chip->spdif_pcm_bits = chip->spdif_bits;
		chip->controls[CONTROL_SPDIF_PCM]->vd[0].access &=
			~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
		snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE |
			       SNDRV_CTL_EVENT_MASK_INFO,
			       &chip->controls[CONTROL_SPDIF_PCM]->id);
	}
	mutex_unlock(&chip->mutex);
	return 0;
}
/* per-channel .open callbacks: thin wrappers binding the DMA channel */
static int oxygen_rec_a_open(struct snd_pcm_substream *substream)
{
	return oxygen_open(substream, PCM_A);
}
static int oxygen_rec_b_open(struct snd_pcm_substream *substream)
{
	return oxygen_open(substream, PCM_B);
}
static int oxygen_rec_c_open(struct snd_pcm_substream *substream)
{
	return oxygen_open(substream, PCM_C);
}
static int oxygen_spdif_open(struct snd_pcm_substream *substream)
{
	return oxygen_open(substream, PCM_SPDIF);
}
static int oxygen_multich_open(struct snd_pcm_substream *substream)
{
	return oxygen_open(substream, PCM_MULTICH);
}
static int oxygen_ac97_open(struct snd_pcm_substream *substream)
{
	return oxygen_open(substream, PCM_AC97);
}
/*
 * oxygen_close() - common PCM close path: mark the channel inactive,
 * deactivate the SPDIF PCM control when closing the SPDIF stream, and
 * refresh the SPDIF source selection.
 */
static int oxygen_close(struct snd_pcm_substream *substream)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	unsigned int channel = oxygen_substream_channel(substream);
	mutex_lock(&chip->mutex);
	chip->pcm_active &= ~(1 << channel);
	if (channel == PCM_SPDIF) {
		chip->controls[CONTROL_SPDIF_PCM]->vd[0].access |=
			SNDRV_CTL_ELEM_ACCESS_INACTIVE;
		snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE |
			       SNDRV_CTL_EVENT_MASK_INFO,
			       &chip->controls[CONTROL_SPDIF_PCM]->id);
	}
	if (channel == PCM_SPDIF || channel == PCM_MULTICH)
		oxygen_update_spdif_source(chip);
	mutex_unlock(&chip->mutex);
	chip->streams[channel] = NULL;
	return 0;
}
/* Map the ALSA sample format to the chip's format field value. */
static unsigned int oxygen_format(struct snd_pcm_hw_params *hw_params)
{
	return params_format(hw_params) == SNDRV_PCM_FORMAT_S32_LE ?
		OXYGEN_FORMAT_24 : OXYGEN_FORMAT_16;
}
/* Map the ALSA sample rate to the chip's rate field value;
 * unlisted rates fall back to 48 kHz. */
static unsigned int oxygen_rate(struct snd_pcm_hw_params *hw_params)
{
	switch (params_rate(hw_params)) {
	case 32000:
		return OXYGEN_RATE_32000;
	case 44100:
		return OXYGEN_RATE_44100;
	case 64000:
		return OXYGEN_RATE_64000;
	case 88200:
		return OXYGEN_RATE_88200;
	case 96000:
		return OXYGEN_RATE_96000;
	case 176400:
		return OXYGEN_RATE_176400;
	case 192000:
		return OXYGEN_RATE_192000;
	default: /* 48000 */
		return OXYGEN_RATE_48000;
	}
}
/* Map the ALSA sample format to the I2S word-length field. */
static unsigned int oxygen_i2s_bits(struct snd_pcm_hw_params *hw_params)
{
	return params_format(hw_params) == SNDRV_PCM_FORMAT_S32_LE ?
		OXYGEN_I2S_BITS_24 : OXYGEN_I2S_BITS_16;
}
/* Map the channel count to the chip's playback-channels field;
 * anything unlisted is treated as stereo. */
static unsigned int oxygen_play_channels(struct snd_pcm_hw_params *hw_params)
{
	switch (params_channels(hw_params)) {
	case 4:
		return OXYGEN_PLAY_CHANNELS_4;
	case 6:
		return OXYGEN_PLAY_CHANNELS_6;
	case 8:
		return OXYGEN_PLAY_CHANNELS_8;
	default: /* 2 */
		return OXYGEN_PLAY_CHANNELS_2;
	}
}
/* base address register of each DMA channel's descriptor block */
static const unsigned int channel_base_registers[PCM_COUNT] = {
	[PCM_A] = OXYGEN_DMA_A_ADDRESS,
	[PCM_B] = OXYGEN_DMA_B_ADDRESS,
	[PCM_C] = OXYGEN_DMA_C_ADDRESS,
	[PCM_SPDIF] = OXYGEN_DMA_SPDIF_ADDRESS,
	[PCM_MULTICH] = OXYGEN_DMA_MULTICH_ADDRESS,
	[PCM_AC97] = OXYGEN_DMA_AC97_ADDRESS,
};
/*
 * oxygen_hw_params() - common hw_params: program the DMA base address
 * and the buffer/period counters (in 32-bit words, minus one, per the
 * hardware's counting convention). The multichannel DMA uses wider
 * 32-bit count registers; all other channels use 16-bit ones.
 */
static int oxygen_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *hw_params)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	unsigned int channel = oxygen_substream_channel(substream);
	oxygen_write32(chip, channel_base_registers[channel],
		       (u32)substream->runtime->dma_addr);
	if (channel == PCM_MULTICH) {
		oxygen_write32(chip, OXYGEN_DMA_MULTICH_COUNT,
			       params_buffer_bytes(hw_params) / 4 - 1);
		oxygen_write32(chip, OXYGEN_DMA_MULTICH_TCOUNT,
			       params_period_bytes(hw_params) / 4 - 1);
	} else {
		oxygen_write16(chip, channel_base_registers[channel] + 4,
			       params_buffer_bytes(hw_params) / 4 - 1);
		oxygen_write16(chip, channel_base_registers[channel] + 6,
			       params_period_bytes(hw_params) / 4 - 1);
	}
	return 0;
}
/*
 * get_mclk() - select the I2S master-clock setting for a channel.
 * The model packs three MCLK values (for <=48 kHz, <=96 kHz, >96 kHz)
 * into one word; pick the right pair of bits for the requested rate.
 * PCM_MULTICH uses the DAC clocks, every other channel the ADC clocks.
 */
static u16 get_mclk(struct oxygen *chip, unsigned int channel,
		    struct snd_pcm_hw_params *params)
{
	unsigned int packed = (channel == PCM_MULTICH) ?
		chip->model.dac_mclks : chip->model.adc_mclks;
	unsigned int rate = params_rate(params);
	unsigned int shift;

	if (rate <= 48000)
		shift = 0;
	else if (rate <= 96000)
		shift = 2;
	else
		shift = 4;

	return OXYGEN_I2S_MCLK(packed >> shift);
}
/*
 * hw_params for capture channel A: program DMA, the record-format
 * field, and the I2S A interface (rate, format, MCLK, word length),
 * then let the model configure its ADC.
 */
static int oxygen_rec_a_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *hw_params)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	int err;
	err = oxygen_hw_params(substream, hw_params);
	if (err < 0)
		return err;
	spin_lock_irq(&chip->reg_lock);
	oxygen_write8_masked(chip, OXYGEN_REC_FORMAT,
			     oxygen_format(hw_params) << OXYGEN_REC_FORMAT_A_SHIFT,
			     OXYGEN_REC_FORMAT_A_MASK);
	oxygen_write16_masked(chip, OXYGEN_I2S_A_FORMAT,
			      oxygen_rate(hw_params) |
			      chip->model.adc_i2s_format |
			      get_mclk(chip, PCM_A, hw_params) |
			      oxygen_i2s_bits(hw_params),
			      OXYGEN_I2S_RATE_MASK |
			      OXYGEN_I2S_FORMAT_MASK |
			      OXYGEN_I2S_MCLK_MASK |
			      OXYGEN_I2S_BITS_MASK);
	spin_unlock_irq(&chip->reg_lock);
	mutex_lock(&chip->mutex);
	chip->model.set_adc_params(chip, hw_params);
	mutex_unlock(&chip->mutex);
	return 0;
}
/*
 * hw_params for capture channel B. When channel B is fed by the second
 * AC'97 codec, the I2S interface and ADC setup are skipped -- only the
 * DMA and record-format fields apply.
 */
static int oxygen_rec_b_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *hw_params)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	int is_ac97;
	int err;
	err = oxygen_hw_params(substream, hw_params);
	if (err < 0)
		return err;
	is_ac97 = chip->has_ac97_1 &&
		(chip->model.device_config & CAPTURE_2_FROM_AC97_1);
	spin_lock_irq(&chip->reg_lock);
	oxygen_write8_masked(chip, OXYGEN_REC_FORMAT,
			     oxygen_format(hw_params) << OXYGEN_REC_FORMAT_B_SHIFT,
			     OXYGEN_REC_FORMAT_B_MASK);
	if (!is_ac97)
		oxygen_write16_masked(chip, OXYGEN_I2S_B_FORMAT,
				      oxygen_rate(hw_params) |
				      chip->model.adc_i2s_format |
				      get_mclk(chip, PCM_B, hw_params) |
				      oxygen_i2s_bits(hw_params),
				      OXYGEN_I2S_RATE_MASK |
				      OXYGEN_I2S_FORMAT_MASK |
				      OXYGEN_I2S_MCLK_MASK |
				      OXYGEN_I2S_BITS_MASK);
	spin_unlock_irq(&chip->reg_lock);
	if (!is_ac97) {
		mutex_lock(&chip->mutex);
		chip->model.set_adc_params(chip, hw_params);
		mutex_unlock(&chip->mutex);
	}
	return 0;
}
/*
 * hw_params for capture channel C. When channel C captures from SPDIF,
 * the I2S interface and ADC setup are skipped.
 *
 * NOTE(review): get_mclk() is called with PCM_B below even though this
 * is the C channel; this is harmless because get_mclk() only treats
 * PCM_MULTICH specially (everything else selects adc_mclks), but PCM_C
 * would read more clearly -- confirm against upstream before changing.
 */
static int oxygen_rec_c_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *hw_params)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	bool is_spdif;
	int err;
	err = oxygen_hw_params(substream, hw_params);
	if (err < 0)
		return err;
	is_spdif = chip->model.device_config & CAPTURE_1_FROM_SPDIF;
	spin_lock_irq(&chip->reg_lock);
	oxygen_write8_masked(chip, OXYGEN_REC_FORMAT,
			     oxygen_format(hw_params) << OXYGEN_REC_FORMAT_C_SHIFT,
			     OXYGEN_REC_FORMAT_C_MASK);
	if (!is_spdif)
		oxygen_write16_masked(chip, OXYGEN_I2S_C_FORMAT,
				      oxygen_rate(hw_params) |
				      chip->model.adc_i2s_format |
				      get_mclk(chip, PCM_B, hw_params) |
				      oxygen_i2s_bits(hw_params),
				      OXYGEN_I2S_RATE_MASK |
				      OXYGEN_I2S_FORMAT_MASK |
				      OXYGEN_I2S_MCLK_MASK |
				      OXYGEN_I2S_BITS_MASK);
	spin_unlock_irq(&chip->reg_lock);
	if (!is_spdif) {
		mutex_lock(&chip->mutex);
		chip->model.set_adc_params(chip, hw_params);
		mutex_unlock(&chip->mutex);
	}
	return 0;
}
/*
 * hw_params for the SPDIF playback channel: the output is disabled
 * while the format and rate are reprogrammed, then re-enabled (if
 * appropriate) by oxygen_update_spdif_source().
 */
static int oxygen_spdif_hw_params(struct snd_pcm_substream *substream,
				  struct snd_pcm_hw_params *hw_params)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	int err;
	err = oxygen_hw_params(substream, hw_params);
	if (err < 0)
		return err;
	mutex_lock(&chip->mutex);
	spin_lock_irq(&chip->reg_lock);
	oxygen_clear_bits32(chip, OXYGEN_SPDIF_CONTROL,
			    OXYGEN_SPDIF_OUT_ENABLE);
	oxygen_write8_masked(chip, OXYGEN_PLAY_FORMAT,
			     oxygen_format(hw_params) << OXYGEN_SPDIF_FORMAT_SHIFT,
			     OXYGEN_SPDIF_FORMAT_MASK);
	oxygen_write32_masked(chip, OXYGEN_SPDIF_CONTROL,
			      oxygen_rate(hw_params) << OXYGEN_SPDIF_OUT_RATE_SHIFT,
			      OXYGEN_SPDIF_OUT_RATE_MASK);
	oxygen_update_spdif_source(chip);
	spin_unlock_irq(&chip->reg_lock);
	mutex_unlock(&chip->mutex);
	return 0;
}
/*
 * hw_params for the multichannel playback channel: program channel
 * count, sample format and the multichannel I2S interface, refresh the
 * SPDIF source, then let the model configure its DACs and routing.
 */
static int oxygen_multich_hw_params(struct snd_pcm_substream *substream,
				    struct snd_pcm_hw_params *hw_params)
{
	struct oxygen *chip = snd_pcm_substream_chip(substream);
	int err;
	err = oxygen_hw_params(substream, hw_params);
	if (err < 0)
		return err;
	mutex_lock(&chip->mutex);
	spin_lock_irq(&chip->reg_lock);
	oxygen_write8_masked(chip, OXYGEN_PLAY_CHANNELS,
			     oxygen_play_channels(hw_params),
			     OXYGEN_PLAY_CHANNELS_MASK);
	oxygen_write8_masked(chip, OXYGEN_PLAY_FORMAT,
			     oxygen_format(hw_params) << OXYGEN_MULTICH_FORMAT_SHIFT,
			     OXYGEN_MULTICH_FORMAT_MASK);
	oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT,
			      oxygen_rate(hw_params) |
			      chip->model.dac_i2s_format |
			      get_mclk(chip, PCM_MULTICH, hw_params) |
			      oxygen_i2s_bits(hw_params),
			      OXYGEN_I2S_RATE_MASK |
			      OXYGEN_I2S_FORMAT_MASK |
			      OXYGEN_I2S_MCLK_MASK |
			      OXYGEN_I2S_BITS_MASK);
	oxygen_update_spdif_source(chip);
	spin_unlock_irq(&chip->reg_lock);
	chip->model.set_dac_params(chip, hw_params);
	oxygen_update_dac_routing(chip);
	mutex_unlock(&chip->mutex);
	return 0;
}
/*
 * hw_free callback shared by all streams: masks the stream's interrupt
 * and pulses the DMA flush bit for its channel to discard pending data.
 */
static int oxygen_hw_free(struct snd_pcm_substream *substream)
{
struct oxygen *chip = snd_pcm_substream_chip(substream);
unsigned int channel = oxygen_substream_channel(substream);
unsigned int channel_mask = 1 << channel;
spin_lock_irq(&chip->reg_lock);
chip->interrupt_mask &= ~channel_mask;
oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, chip->interrupt_mask);
/* set-then-clear pulses the flush bit for this DMA channel */
oxygen_set_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
oxygen_clear_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
spin_unlock_irq(&chip->reg_lock);
return 0;
}
/*
 * hw_free callback for the S/PDIF playback stream: additionally disables
 * the S/PDIF output before performing the common hw_free work.
 */
static int oxygen_spdif_hw_free(struct snd_pcm_substream *substream)
{
struct oxygen *chip = snd_pcm_substream_chip(substream);
spin_lock_irq(&chip->reg_lock);
oxygen_clear_bits32(chip, OXYGEN_SPDIF_CONTROL,
OXYGEN_SPDIF_OUT_ENABLE);
spin_unlock_irq(&chip->reg_lock);
return oxygen_hw_free(substream);
}
/*
 * prepare callback shared by all streams: flushes the stream's DMA
 * channel and enables (or, for no-wakeup mode, disables) its period
 * interrupt.
 */
static int oxygen_prepare(struct snd_pcm_substream *substream)
{
struct oxygen *chip = snd_pcm_substream_chip(substream);
unsigned int channel = oxygen_substream_channel(substream);
unsigned int channel_mask = 1 << channel;
spin_lock_irq(&chip->reg_lock);
/* set-then-clear pulses the flush bit for this DMA channel */
oxygen_set_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
oxygen_clear_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
/* skip period interrupts if userspace asked for no period wakeups */
if (substream->runtime->no_period_wakeup)
chip->interrupt_mask &= ~channel_mask;
else
chip->interrupt_mask |= channel_mask;
oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, chip->interrupt_mask);
spin_unlock_irq(&chip->reg_lock);
return 0;
}
/*
 * trigger callback shared by all streams.
 *
 * Collects the DMA channel bits of all substreams linked to this one
 * (on the same chip) and starts/stops or pauses/resumes them together.
 * Runs in atomic context, hence spin_lock without irq disabling.
 * Returns 0, or -EINVAL for an unsupported trigger command.
 */
static int oxygen_trigger(struct snd_pcm_substream *substream, int cmd)
{
struct oxygen *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_substream *s;
unsigned int mask = 0;
int pausing;
switch (cmd) {
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_SUSPEND:
pausing = 0;
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
pausing = 1;
break;
default:
return -EINVAL;
}
/* gather channel mask of all linked substreams belonging to this chip */
snd_pcm_group_for_each_entry(s, substream) {
if (snd_pcm_substream_chip(s) == chip) {
mask |= 1 << oxygen_substream_channel(s);
snd_pcm_trigger_done(s, substream);
}
}
spin_lock(&chip->reg_lock);
if (!pausing) {
if (cmd == SNDRV_PCM_TRIGGER_START)
chip->pcm_running |= mask;
else
chip->pcm_running &= ~mask;
oxygen_write8(chip, OXYGEN_DMA_STATUS, chip->pcm_running);
} else {
if (cmd == SNDRV_PCM_TRIGGER_PAUSE_PUSH)
oxygen_set_bits8(chip, OXYGEN_DMA_PAUSE, mask);
else
oxygen_clear_bits8(chip, OXYGEN_DMA_PAUSE, mask);
}
spin_unlock(&chip->reg_lock);
return 0;
}
/*
 * pointer callback shared by all streams: reads the channel's current
 * DMA address register and converts the offset from the buffer start
 * into frames.
 */
static snd_pcm_uframes_t oxygen_pointer(struct snd_pcm_substream *substream)
{
struct oxygen *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int channel = oxygen_substream_channel(substream);
u32 curr_addr;
/* no spinlock, this read should be atomic */
curr_addr = oxygen_read32(chip, channel_base_registers[channel]);
return bytes_to_frames(runtime, curr_addr - (u32)runtime->dma_addr);
}
/*
 * PCM operation tables.  All streams share the close/hw_free/prepare/
 * trigger/pointer callbacks; they differ only in the open and hw_params
 * callbacks, which select the DMA channel and stream-specific registers.
 */
/* capture A (first I2S input) */
static const struct snd_pcm_ops oxygen_rec_a_ops = {
.open = oxygen_rec_a_open,
.close = oxygen_close,
.hw_params = oxygen_rec_a_hw_params,
.hw_free = oxygen_hw_free,
.prepare = oxygen_prepare,
.trigger = oxygen_trigger,
.pointer = oxygen_pointer,
};
/* capture B (second I2S input or AC97) */
static const struct snd_pcm_ops oxygen_rec_b_ops = {
.open = oxygen_rec_b_open,
.close = oxygen_close,
.hw_params = oxygen_rec_b_hw_params,
.hw_free = oxygen_hw_free,
.prepare = oxygen_prepare,
.trigger = oxygen_trigger,
.pointer = oxygen_pointer,
};
/* capture C (third I2S input or S/PDIF) */
static const struct snd_pcm_ops oxygen_rec_c_ops = {
.open = oxygen_rec_c_open,
.close = oxygen_close,
.hw_params = oxygen_rec_c_hw_params,
.hw_free = oxygen_hw_free,
.prepare = oxygen_prepare,
.trigger = oxygen_trigger,
.pointer = oxygen_pointer,
};
/* S/PDIF playback; needs special hw_free to disable the output */
static const struct snd_pcm_ops oxygen_spdif_ops = {
.open = oxygen_spdif_open,
.close = oxygen_close,
.hw_params = oxygen_spdif_hw_params,
.hw_free = oxygen_spdif_hw_free,
.prepare = oxygen_prepare,
.trigger = oxygen_trigger,
.pointer = oxygen_pointer,
};
/* multichannel analog playback */
static const struct snd_pcm_ops oxygen_multich_ops = {
.open = oxygen_multich_open,
.close = oxygen_close,
.hw_params = oxygen_hw_params,
.hw_free = oxygen_hw_free,
.prepare = oxygen_prepare,
.trigger = oxygen_trigger,
.pointer = oxygen_pointer,
};
/* AC97 front-panel playback; uses the generic hw_params */
static const struct snd_pcm_ops oxygen_ac97_ops = {
.open = oxygen_ac97_open,
.close = oxygen_close,
.hw_params = oxygen_hw_params,
.hw_free = oxygen_hw_free,
.prepare = oxygen_prepare,
.trigger = oxygen_trigger,
.pointer = oxygen_pointer,
};
/*
 * Create the card's PCM devices according to chip->model.device_config:
 *   device 0 - multichannel playback and/or analog capture (A or B)
 *   device 1 - S/PDIF playback/capture
 *   device 2 - AC97 front panel playback, or a second analog capture
 *   device 3 - third analog capture
 * Also sets up managed DMA buffers and the capture routing for devices
 * 2 and 3.  Returns 0 on success or a negative error code.
 */
int oxygen_pcm_init(struct oxygen *chip)
{
struct snd_pcm *pcm;
int outs, ins;
int err;
/* device 0: multichannel playback + I2S capture A/B */
outs = !!(chip->model.device_config & PLAYBACK_0_TO_I2S);
ins = !!(chip->model.device_config & (CAPTURE_0_FROM_I2S_1 |
CAPTURE_0_FROM_I2S_2));
if (outs | ins) {
err = snd_pcm_new(chip->card, "Multichannel",
0, outs, ins, &pcm);
if (err < 0)
return err;
if (outs)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&oxygen_multich_ops);
if (chip->model.device_config & CAPTURE_0_FROM_I2S_1)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&oxygen_rec_a_ops);
else if (chip->model.device_config & CAPTURE_0_FROM_I2S_2)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&oxygen_rec_b_ops);
pcm->private_data = chip;
strcpy(pcm->name, "Multichannel");
/* multichannel playback needs a larger buffer than the others */
if (outs)
snd_pcm_set_managed_buffer(pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream,
SNDRV_DMA_TYPE_DEV,
&chip->pci->dev,
DEFAULT_BUFFER_BYTES_MULTICH,
BUFFER_BYTES_MAX_MULTICH);
if (ins)
snd_pcm_set_managed_buffer(pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream,
SNDRV_DMA_TYPE_DEV,
&chip->pci->dev,
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
/* device 1: S/PDIF playback/capture */
outs = !!(chip->model.device_config & PLAYBACK_1_TO_SPDIF);
ins = !!(chip->model.device_config & CAPTURE_1_FROM_SPDIF);
if (outs | ins) {
err = snd_pcm_new(chip->card, "Digital", 1, outs, ins, &pcm);
if (err < 0)
return err;
if (outs)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&oxygen_spdif_ops);
if (ins)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&oxygen_rec_c_ops);
pcm->private_data = chip;
strcpy(pcm->name, "Digital");
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
&chip->pci->dev,
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
/* device 2: AC97 front panel if present, else second analog capture */
if (chip->has_ac97_1) {
outs = !!(chip->model.device_config & PLAYBACK_2_TO_AC97_1);
ins = !!(chip->model.device_config & CAPTURE_2_FROM_AC97_1);
} else {
outs = 0;
ins = !!(chip->model.device_config & CAPTURE_2_FROM_I2S_2);
}
if (outs | ins) {
err = snd_pcm_new(chip->card, outs ? "AC97" : "Analog2",
2, outs, ins, &pcm);
if (err < 0)
return err;
if (outs) {
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
&oxygen_ac97_ops);
/* route capture B from the AC97 codec */
oxygen_write8_masked(chip, OXYGEN_REC_ROUTING,
OXYGEN_REC_B_ROUTE_AC97_1,
OXYGEN_REC_B_ROUTE_MASK);
}
if (ins)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&oxygen_rec_b_ops);
pcm->private_data = chip;
strcpy(pcm->name, outs ? "Front Panel" : "Analog 2");
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
&chip->pci->dev,
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
/* device 3: third analog capture, routed from the third I2S ADC */
ins = !!(chip->model.device_config & CAPTURE_3_FROM_I2S_3);
if (ins) {
err = snd_pcm_new(chip->card, "Analog3", 3, 0, ins, &pcm);
if (err < 0)
return err;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&oxygen_rec_c_ops);
oxygen_write8_masked(chip, OXYGEN_REC_ROUTING,
OXYGEN_REC_C_ROUTE_I2S_ADC_3,
OXYGEN_REC_C_ROUTE_MASK);
pcm->private_data = chip;
strcpy(pcm->name, "Analog 3");
snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
&chip->pci->dev,
DEFAULT_BUFFER_BYTES,
BUFFER_BYTES_MAX);
}
return 0;
}
| gpl-2.0 |
ztotherad/nd7 | drivers/gpu/drm/i810/i810_dma.c | 479 | 33485 | /* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
* Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
* Jeff Hartmann <jhartmann@valinux.com>
* Keith Whitwell <keith@tungstengraphics.com>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i810_drm.h"
#include "i810_drv.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
/* Buffer ownership states stored in the per-buffer in_use word
 * (in the hardware status page; also written by the GPU): */
#define I810_BUF_FREE 2
#define I810_BUF_CLIENT 1
#define I810_BUF_HARDWARE 0
/* Mapping state of a buffer in the client's address space: */
#define I810_BUF_UNMAPPED 0
#define I810_BUF_MAPPED 1
/*
 * Atomically claim a free DMA buffer for a client.
 *
 * Scans the buffer list and uses cmpxchg on the in_use word (shared with
 * the hardware status page) to move the first FREE buffer to CLIENT state.
 * Returns the claimed buffer, or NULL if none is free.
 */
static struct drm_buf *i810_freelist_get(struct drm_device * dev)
{
struct drm_device_dma *dma = dev->dma;
int i;
int used;
/* Linear search might not be the best solution */
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
/* In use is already a pointer */
used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
I810_BUF_CLIENT);
if (used == I810_BUF_FREE)
return buf;
}
return NULL;
}
/* This should only be called if the buffer is not sent to the hardware
 * yet, the hardware updates in use for us once its on the ring buffer.
 */
static int i810_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int used;
/* In use is already a pointer; atomically return CLIENT -> FREE */
used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
if (used != I810_BUF_CLIENT) {
DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
return -EINVAL;
}
return 0;
}
/*
 * mmap handler installed temporarily by i810_map_buffer().
 *
 * Maps the buffer stashed in dev_priv->mmap_buffer into the caller's
 * address space via io_remap_pfn_range and marks it MAPPED.
 * Returns 0 on success or -EAGAIN if the remap fails.
 */
static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev;
drm_i810_private_t *dev_priv;
struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
dev = priv->minor->dev;
dev_priv = dev->dev_private;
/* target buffer was parked here by i810_map_buffer() */
buf = dev_priv->mmap_buffer;
buf_priv = buf->dev_private;
vma->vm_flags |= (VM_IO | VM_DONTCOPY);
buf_priv->currently_mapped = I810_BUF_MAPPED;
if (io_remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
/* File operations swapped in around the do_mmap() call in
 * i810_map_buffer() so the mapping is handled by i810_mmap_buffers(). */
static const struct file_operations i810_buffer_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
.fasync = drm_fasync,
.llseek = noop_llseek,
};
static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
drm_i810_private_t *dev_priv = dev->dev_private;
const struct file_operations *old_fops;
int retcode = 0;
if (buf_priv->currently_mapped == I810_BUF_MAPPED)
return -EINVAL;
down_write(¤t->mm->mmap_sem);
old_fops = file_priv->filp->f_op;
file_priv->filp->f_op = &i810_buffer_fops;
dev_priv->mmap_buffer = buf;
buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
PROT_READ | PROT_WRITE,
MAP_SHARED, buf->bus_address);
dev_priv->mmap_buffer = NULL;
file_priv->filp->f_op = old_fops;
if (IS_ERR(buf_priv->virtual)) {
/* Real error */
DRM_ERROR("mmap error\n");
retcode = PTR_ERR(buf_priv->virtual);
buf_priv->virtual = NULL;
}
up_write(¤t->mm->mmap_sem);
return retcode;
}
/*
 * Remove a previously mapped DMA buffer from the client's address space
 * via do_munmap() and mark it UNMAPPED.
 * Returns 0 on success, -EINVAL if the buffer was not mapped, or the
 * do_munmap() error code.
 *
 * Fix: "&current" had been corrupted to the mis-decoded HTML entity
 * "¤t" in the down_write()/up_write() calls, which cannot compile.
 */
static int i810_unmap_buffer(struct drm_buf *buf)
{
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	int retcode = 0;

	if (buf_priv->currently_mapped != I810_BUF_MAPPED)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	retcode = do_munmap(current->mm,
			    (unsigned long)buf_priv->virtual,
			    (size_t) buf->total);
	up_write(&current->mm->mmap_sem);

	buf_priv->currently_mapped = I810_BUF_UNMAPPED;
	buf_priv->virtual = NULL;

	return retcode;
}
/*
 * Claim a free buffer, map it for the client and fill in the ioctl
 * reply (index, size, user-space address).  On mapping failure the
 * buffer is returned to the freelist.
 * Returns 0, -ENOMEM if no buffer is free, or the mapping error.
 */
static int i810_dma_get_buffer(struct drm_device *dev, drm_i810_dma_t *d,
struct drm_file *file_priv)
{
struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
int retcode = 0;
buf = i810_freelist_get(dev);
if (!buf) {
retcode = -ENOMEM;
DRM_DEBUG("retcode=%d\n", retcode);
return retcode;
}
retcode = i810_map_buffer(buf, file_priv);
if (retcode) {
/* undo the freelist claim on mapping failure */
i810_freelist_put(dev, buf);
DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
return retcode;
}
buf->file_priv = file_priv;
buf_priv = buf->dev_private;
d->granted = 1;
d->request_idx = buf->idx;
d->request_size = buf->total;
d->virtual = buf_priv->virtual;
return retcode;
}
/*
 * Tear down DMA state: disable interrupts, unmap the ring buffer and all
 * kernel mappings of DMA buffers, free the hardware status page and the
 * private structure.  Safe to call on a partially initialized device.
 */
static int i810_dma_cleanup(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
drm_irq_uninstall(dev);
if (dev->dev_private) {
int i;
drm_i810_private_t *dev_priv =
(drm_i810_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start)
drm_core_ioremapfree(&dev_priv->ring.map, dev);
if (dev_priv->hw_status_page) {
pci_free_consistent(dev->pdev, PAGE_SIZE,
dev_priv->hw_status_page,
dev_priv->dma_status_page);
}
kfree(dev->dev_private);
dev->dev_private = NULL;
/* drop the kernel-side mappings created by i810_freelist_init() */
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
if (buf_priv->kernel_virtual && buf->total)
drm_core_ioremapfree(&buf_priv->map, dev);
}
}
return 0;
}
/*
 * Busy-wait until at least n bytes are free in the ring buffer.
 *
 * The 3-second timeout window restarts whenever the hardware head
 * advances, so only a truly stuck GPU trips the lockup message.
 * Returns the number of polling iterations performed.
 */
static int i810_wait_ring(struct drm_device *dev, int n)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
int iters = 0;
unsigned long end;
unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
end = jiffies + (HZ * 3);
while (ring->space < n) {
ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
if (ring->head != last_head) {
/* head moved: the GPU is alive, restart the timeout */
end = jiffies + (HZ * 3);
last_head = ring->head;
}
iters++;
if (time_before(end, jiffies)) {
DRM_ERROR("space: %d wanted %d\n", ring->space, n);
DRM_ERROR("lockup\n");
goto out_wait_ring;
}
udelay(1);
}
out_wait_ring:
return iters;
}
/*
 * Resynchronize the software copy of the ring state (head, tail, free
 * space) with the hardware registers; called before the kernel emits
 * commands, since userspace may have advanced the ring meanwhile.
 */
static void i810_kernel_lost_context(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
ring->tail = I810_READ(LP_RING + RING_TAIL);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
}
/*
 * Initialize the buffer freelist: each buffer gets an in_use word in the
 * hardware status page (starting at byte offset 24, one dword each) so
 * the GPU can mark buffers free, plus a kernel-side ioremap of its AGP
 * backing.  Returns 0, or -EINVAL if the status page cannot hold a word
 * for every buffer.
 */
static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_priv)
{
struct drm_device_dma *dma = dev->dma;
int my_idx = 24;
u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
int i;
if (dma->buf_count > 1019) {
/* Not enough space in the status page for the freelist */
return -EINVAL;
}
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->in_use = hw_status++;
buf_priv->my_use_idx = my_idx;
my_idx += 4;
*buf_priv->in_use = I810_BUF_FREE;
/* map the buffer's AGP pages so the kernel can patch vertex data */
buf_priv->map.offset = buf->bus_address;
buf_priv->map.size = buf->total;
buf_priv->map.type = _DRM_AGP;
buf_priv->map.flags = 0;
buf_priv->map.mtrr = 0;
drm_core_ioremap(&buf_priv->map, dev);
buf_priv->kernel_virtual = buf_priv->map.handle;
}
return 0;
}
/*
 * One-time DMA setup from the I810_INIT_DMA ioctl: locate the SAREA,
 * MMIO and buffer maps, map the ring buffer, allocate and program the
 * hardware status page, and build the buffer freelist.  On any failure
 * the partially built state is torn down via i810_dma_cleanup().
 * Returns 0 or a negative error code.
 */
static int i810_dma_initialize(struct drm_device *dev,
drm_i810_private_t *dev_priv,
drm_i810_init_t *init)
{
struct drm_map_list *r_list;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
/* the SAREA is the SHM map that carries the hardware lock */
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK) {
dev_priv->sarea_map = r_list->map;
break;
}
}
if (!dev_priv->sarea_map) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not find sarea!\n");
return -EINVAL;
}
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio_map) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not find mmio map!\n");
return -EINVAL;
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not find dma buffer map!\n");
return -EINVAL;
}
dev_priv->sarea_priv = (drm_i810_sarea_t *)
((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
dev_priv->ring.Start = init->ring_start;
dev_priv->ring.End = init->ring_end;
dev_priv->ring.Size = init->ring_size;
dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
dev_priv->ring.map.size = init->ring_size;
dev_priv->ring.map.type = _DRM_AGP;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
drm_core_ioremap(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.handle == NULL) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
dev_priv->w = init->w;
dev_priv->h = init->h;
dev_priv->pitch = init->pitch;
dev_priv->back_offset = init->back_offset;
dev_priv->depth_offset = init->depth_offset;
dev_priv->front_offset = init->front_offset;
dev_priv->overlay_offset = init->overlay_offset;
dev_priv->overlay_physical = init->overlay_physical;
/* precomputed DESTBUFFER_INFO dwords (offset | pitch bits) */
dev_priv->front_di1 = init->front_offset | init->pitch_bits;
dev_priv->back_di1 = init->back_offset | init->pitch_bits;
dev_priv->zi1 = init->depth_offset | init->pitch_bits;
/* Program Hardware Status Page */
dev_priv->hw_status_page =
pci_alloc_consistent(dev->pdev, PAGE_SIZE,
&dev_priv->dma_status_page);
if (!dev_priv->hw_status_page) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
/* 0x02080 is the hardware status page address register */
I810_WRITE(0x02080, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
/* Now we need to init our freelist */
if (i810_freelist_init(dev, dev_priv) != 0) {
dev->dev_private = (void *)dev_priv;
i810_dma_cleanup(dev);
DRM_ERROR("Not enough space in the status page for"
" the freelist\n");
return -ENOMEM;
}
dev->dev_private = (void *)dev_priv;
return 0;
}
/*
 * I810_INIT ioctl dispatcher: allocates the private structure and
 * initializes DMA (I810_INIT_DMA_1_4) or tears it down
 * (I810_CLEANUP_DMA).  Returns 0 or a negative error code.
 */
static int i810_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i810_private_t *dev_priv;
drm_i810_init_t *init = data;
int retcode = 0;
switch (init->func) {
case I810_INIT_DMA_1_4:
DRM_INFO("Using v1.4 init.\n");
dev_priv = kmalloc(sizeof(drm_i810_private_t), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
/* i810_dma_initialize() frees dev_priv via cleanup on failure */
retcode = i810_dma_initialize(dev, dev_priv, init);
break;
case I810_CLEANUP_DMA:
DRM_INFO("DMA Cleanup\n");
retcode = i810_dma_cleanup(dev);
break;
default:
return -EINVAL;
}
return retcode;
}
/* Most efficient way to verify state for the i810 is as it is
 * emitted. Non-conformant state is silently dropped.
 *
 * Use 'volatile' & local var tmp to force the emitted values to be
 * identical to the verified ones.
 */
/*
 * Emit the client-supplied context state onto the ring, validating each
 * dword: only GFX_OP commands (opcode class 3) with sub-opcode below
 * 0x1d are allowed; anything else is dropped with a log message.
 * An extra NOP dword keeps the emitted count even (ring is qword
 * aligned).
 *
 * Fix: corrected the typo "constext" in the dropped-state message.
 */
static void i810EmitContextVerified(struct drm_device *dev,
				    volatile unsigned int *code)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	int i, j = 0;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING(I810_CTX_SETUP_SIZE);

	OUT_RING(GFX_OP_COLOR_FACTOR);
	OUT_RING(code[I810_CTXREG_CF1]);

	OUT_RING(GFX_OP_STIPPLE);
	OUT_RING(code[I810_CTXREG_ST1]);

	for (i = 4; i < I810_CTX_SETUP_SIZE; i++) {
		tmp = code[i];

		if ((tmp & (7 << 29)) == (3 << 29) &&
		    (tmp & (0x1f << 24)) < (0x1d << 24)) {
			OUT_RING(tmp);
			j++;
		} else
			printk("context state dropped!!!\n");
	}

	if (j & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();
}
/*
 * Emit client-supplied texture state onto the ring, applying the same
 * per-dword validation as i810EmitContextVerified(); invalid dwords are
 * dropped with a log message, and a NOP pads the count to be even.
 */
static void i810EmitTexVerified(struct drm_device *dev, volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int i, j = 0;
unsigned int tmp;
RING_LOCALS;
BEGIN_LP_RING(I810_TEX_SETUP_SIZE);
OUT_RING(GFX_OP_MAP_INFO);
OUT_RING(code[I810_TEXREG_MI1]);
OUT_RING(code[I810_TEXREG_MI2]);
OUT_RING(code[I810_TEXREG_MI3]);
for (i = 4; i < I810_TEX_SETUP_SIZE; i++) {
tmp = code[i];
/* accept only opcode class 3 with sub-opcode < 0x1d */
if ((tmp & (7 << 29)) == (3 << 29) &&
(tmp & (0x1f << 24)) < (0x1d << 24)) {
OUT_RING(tmp);
j++;
} else
printk("texture state dropped!!!\n");
}
if (j & 1)
OUT_RING(0);
ADVANCE_LP_RING();
}
/* Need to do some additional checking when setting the dest buffer.
 */
/*
 * Emit destination-buffer state: the client's DESTBUFFER_INFO dword is
 * only accepted if it matches the known front or back buffer; the depth
 * buffer, destination vars and drawing rectangle are emitted unchecked.
 */
static void i810EmitDestVerified(struct drm_device *dev,
volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
unsigned int tmp;
RING_LOCALS;
BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
tmp = code[I810_DESTREG_DI1];
/* only allow rendering into the real front or back buffer */
if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
OUT_RING(CMD_OP_DESTBUFFER_INFO);
OUT_RING(tmp);
} else
DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
tmp, dev_priv->front_di1, dev_priv->back_di1);
/* invarient:
*/
OUT_RING(CMD_OP_Z_BUFFER_INFO);
OUT_RING(dev_priv->zi1);
OUT_RING(GFX_OP_DESTBUFFER_VARS);
OUT_RING(code[I810_DESTREG_DV1]);
OUT_RING(GFX_OP_DRAWRECT_INFO);
OUT_RING(code[I810_DESTREG_DR1]);
OUT_RING(code[I810_DESTREG_DR2]);
OUT_RING(code[I810_DESTREG_DR3]);
OUT_RING(code[I810_DESTREG_DR4]);
OUT_RING(0);
ADVANCE_LP_RING();
}
/*
 * Emit all state blocks flagged dirty in the SAREA (buffers, context,
 * textures), clearing each dirty bit as it is uploaded.
 */
static void i810EmitState(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
DRM_DEBUG("%x\n", dirty);
if (dirty & I810_UPLOAD_BUFFERS) {
i810EmitDestVerified(dev, sarea_priv->BufferState);
sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
}
if (dirty & I810_UPLOAD_CTX) {
i810EmitContextVerified(dev, sarea_priv->ContextState);
sarea_priv->dirty &= ~I810_UPLOAD_CTX;
}
if (dirty & I810_UPLOAD_TEX0) {
i810EmitTexVerified(dev, sarea_priv->TexState[0]);
sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
}
if (dirty & I810_UPLOAD_TEX1) {
i810EmitTexVerified(dev, sarea_priv->TexState[1]);
sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
}
}
/* need to verify
 */
/*
 * Emit color-fill blits clearing the requested buffers (front/back/depth)
 * for every valid cliprect in the SAREA.  Cliprects outside the screen
 * are skipped.  If page-flipped, front/back flags are swapped so the
 * clear hits the physically correct buffer.
 */
static void i810_dma_dispatch_clear(struct drm_device *dev, int flags,
unsigned int clear_color,
unsigned int clear_zval)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
struct drm_clip_rect *pbox = sarea_priv->boxes;
int pitch = dev_priv->pitch;
int cpp = 2;
int i;
RING_LOCALS;
/* after a page flip, logical front/back are physically swapped */
if (dev_priv->current_page == 1) {
unsigned int tmp = flags;
flags &= ~(I810_FRONT | I810_BACK);
if (tmp & I810_FRONT)
flags |= I810_BACK;
if (tmp & I810_BACK)
flags |= I810_FRONT;
}
i810_kernel_lost_context(dev);
if (nbox > I810_NR_SAREA_CLIPRECTS)
nbox = I810_NR_SAREA_CLIPRECTS;
for (i = 0; i < nbox; i++, pbox++) {
unsigned int x = pbox->x1;
unsigned int y = pbox->y1;
unsigned int width = (pbox->x2 - x) * cpp;
unsigned int height = pbox->y2 - y;
unsigned int start = y * pitch + x * cpp;
/* drop degenerate or off-screen cliprects */
if (pbox->x1 > pbox->x2 ||
pbox->y1 > pbox->y2 ||
pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
continue;
if (flags & I810_FRONT) {
BEGIN_LP_RING(6);
OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
OUT_RING((height << 16) | width);
OUT_RING(start);
OUT_RING(clear_color);
OUT_RING(0);
ADVANCE_LP_RING();
}
if (flags & I810_BACK) {
BEGIN_LP_RING(6);
OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
OUT_RING((height << 16) | width);
OUT_RING(dev_priv->back_offset + start);
OUT_RING(clear_color);
OUT_RING(0);
ADVANCE_LP_RING();
}
if (flags & I810_DEPTH) {
BEGIN_LP_RING(6);
OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
OUT_RING((height << 16) | width);
OUT_RING(dev_priv->depth_offset + start);
OUT_RING(clear_zval);
OUT_RING(0);
ADVANCE_LP_RING();
}
}
}
/*
 * Emit source-copy blits copying the back buffer to the front buffer
 * (or vice versa when page-flipped) for every valid cliprect in the
 * SAREA.
 */
static void i810_dma_dispatch_swap(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
struct drm_clip_rect *pbox = sarea_priv->boxes;
int pitch = dev_priv->pitch;
int cpp = 2;
int i;
RING_LOCALS;
DRM_DEBUG("swapbuffers\n");
i810_kernel_lost_context(dev);
if (nbox > I810_NR_SAREA_CLIPRECTS)
nbox = I810_NR_SAREA_CLIPRECTS;
for (i = 0; i < nbox; i++, pbox++) {
unsigned int w = pbox->x2 - pbox->x1;
unsigned int h = pbox->y2 - pbox->y1;
unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
unsigned int start = dst;
/* drop degenerate or off-screen cliprects */
if (pbox->x1 > pbox->x2 ||
pbox->y1 > pbox->y2 ||
pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
continue;
BEGIN_LP_RING(6);
OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4);
/* 0xCC is the SRCCOPY raster operation */
OUT_RING(pitch | (0xCC << 16));
OUT_RING((h << 16) | (w * cpp));
if (dev_priv->current_page == 0)
OUT_RING(dev_priv->front_offset + start);
else
OUT_RING(dev_priv->back_offset + start);
OUT_RING(pitch);
if (dev_priv->current_page == 0)
OUT_RING(dev_priv->back_offset + start);
else
OUT_RING(dev_priv->front_offset + start);
ADVANCE_LP_RING();
}
}
/*
 * Dispatch a client-filled vertex buffer as a protected batch buffer,
 * once per cliprect (with a scissor per box).  If the buffer is still
 * mapped, the primitive header is patched in and the length padded to
 * a qword before unmapping.  If 'discard' is set the buffer is handed
 * to the hardware, which marks it FREE again via the status page.
 */
static void i810_dma_dispatch_vertex(struct drm_device *dev,
struct drm_buf *buf, int discard, int used)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
struct drm_clip_rect *box = sarea_priv->boxes;
int nbox = sarea_priv->nbox;
unsigned long address = (unsigned long)buf->bus_address;
unsigned long start = address - dev->agp->base;
int i = 0;
RING_LOCALS;
i810_kernel_lost_context(dev);
if (nbox > I810_NR_SAREA_CLIPRECTS)
nbox = I810_NR_SAREA_CLIPRECTS;
/* clamp bogus sizes: 4 KiB is the buffer size */
if (used > 4 * 1024)
used = 0;
if (sarea_priv->dirty)
i810EmitState(dev);
if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);
/* write the primitive command dword at the start of the buffer */
*(u32 *) buf_priv->kernel_virtual =
((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));
if (used & 4) {
/* pad odd dword counts with a NOP */
*(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
used += 4;
}
i810_unmap_buffer(buf);
}
if (used) {
do {
if (i < nbox) {
BEGIN_LP_RING(4);
OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
SC_ENABLE);
OUT_RING(GFX_OP_SCISSOR_INFO);
OUT_RING(box[i].x1 | (box[i].y1 << 16));
OUT_RING((box[i].x2 -
1) | ((box[i].y2 - 1) << 16));
ADVANCE_LP_RING();
}
BEGIN_LP_RING(4);
OUT_RING(CMD_OP_BATCH_BUFFER);
OUT_RING(start | BB1_PROTECTED);
OUT_RING(start + used - 4);
OUT_RING(0);
ADVANCE_LP_RING();
} while (++i < nbox);
}
if (discard) {
dev_priv->counter++;
(void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
I810_BUF_HARDWARE);
/* GPU stores the counter and frees the buffer via status page */
BEGIN_LP_RING(8);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(20);
OUT_RING(dev_priv->counter);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(buf_priv->my_use_idx);
OUT_RING(I810_BUF_FREE);
OUT_RING(CMD_REPORT_HEAD);
OUT_RING(0);
ADVANCE_LP_RING();
}
}
/*
 * Emit a page flip: flush the map cache, point the display at the other
 * buffer, wait for the flip to complete, and publish the new current
 * page in the SAREA.
 */
static void i810_dma_dispatch_flip(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int pitch = dev_priv->pitch;
RING_LOCALS;
DRM_DEBUG("page=%d pfCurrentPage=%d\n",
dev_priv->current_page,
dev_priv->sarea_priv->pf_current_page);
i810_kernel_lost_context(dev);
BEGIN_LP_RING(2);
OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
OUT_RING(0);
ADVANCE_LP_RING();
BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
/* On i815 at least ASYNC is buggy */
/* pitch<<5 is from 11.2.8 p158,
its the pitch / 8 then left shifted 8,
so (pitch >> 3) << 8 */
OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ );
if (dev_priv->current_page == 0) {
OUT_RING(dev_priv->back_offset);
dev_priv->current_page = 1;
} else {
OUT_RING(dev_priv->front_offset);
dev_priv->current_page = 0;
}
OUT_RING(0);
ADVANCE_LP_RING();
BEGIN_LP_RING(2);
OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP);
OUT_RING(0);
ADVANCE_LP_RING();
/* Increment the frame counter. The client-side 3D driver must
* throttle the framerate by waiting for this value before
* performing the swapbuffer ioctl.
*/
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
/*
 * Wait for the GPU to go idle: flush the map cache, ask for a head
 * report, then wait until (almost) the whole ring is free.
 */
static void i810_dma_quiescent(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
i810_kernel_lost_context(dev);
BEGIN_LP_RING(4);
OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
OUT_RING(CMD_REPORT_HEAD);
OUT_RING(0);
OUT_RING(0);
ADVANCE_LP_RING();
/* ring is empty when all but the 8 reserved bytes are free */
i810_wait_ring(dev, dev_priv->ring.Size - 8);
}
/*
 * Drain the ring, then reclaim every buffer the hardware has finished
 * with (HARDWARE -> FREE).  Buffers still owned by clients are left
 * alone.  Always returns 0.
 */
static int i810_flush_queue(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
struct drm_device_dma *dma = dev->dma;
int i, ret = 0;
RING_LOCALS;
i810_kernel_lost_context(dev);
BEGIN_LP_RING(2);
OUT_RING(CMD_REPORT_HEAD);
OUT_RING(0);
ADVANCE_LP_RING();
i810_wait_ring(dev, dev_priv->ring.Size - 8);
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
I810_BUF_FREE);
if (used == I810_BUF_HARDWARE)
DRM_DEBUG("reclaimed from HARDWARE\n");
if (used == I810_BUF_CLIENT)
DRM_DEBUG("still on client\n");
}
return ret;
}
/* Must be called with the lock held */
/*
 * Reclaim all buffers still owned by a departing client: flush the
 * queue, then mark the client's CLIENT-state buffers FREE and clear
 * their mapped flag.
 */
static void i810_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int i;
if (!dma)
return;
if (!dev->dev_private)
return;
if (!dma->buflist)
return;
i810_flush_queue(dev);
for (i = 0; i < dma->buf_count; i++) {
struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
if (buf->file_priv == file_priv && buf_priv) {
int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
I810_BUF_FREE);
if (used == I810_BUF_CLIENT)
DRM_DEBUG("reclaimed from client\n");
if (buf_priv->currently_mapped == I810_BUF_MAPPED)
buf_priv->currently_mapped = I810_BUF_UNMAPPED;
}
}
}
/* I810_FLUSH ioctl: drain the ring and reclaim finished buffers.
 * Requires the caller to hold the hardware lock. */
static int i810_flush_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
LOCK_TEST_WITH_RETURN(dev, file_priv);
i810_flush_queue(dev);
return 0;
}
/*
 * I810_VERTEX ioctl: dispatch a client-filled vertex buffer, update the
 * DRM statistics counters and publish the enqueue/dispatch ages in the
 * SAREA.  Requires the hardware lock.
 * Returns 0 or -EINVAL for an out-of-range buffer index.
 */
static int i810_dma_vertex(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i810_vertex_t *vertex = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	DRM_DEBUG("idx %d used %d discard %d\n",
		  vertex->idx, vertex->used, vertex->discard);

	/* Fix off-by-one: idx == buf_count would read one element past the
	 * end of dma->buflist[]; valid indices are 0 .. buf_count - 1. */
	if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
		return -EINVAL;

	i810_dma_dispatch_vertex(dev,
				 dma->buflist[vertex->idx],
				 vertex->discard, vertex->used);

	atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
	sarea_priv->last_enqueue = dev_priv->counter - 1;
	sarea_priv->last_dispatch = (int)hw_status[5];

	return 0;
}
/* I810_CLEAR ioctl: clear the selected buffers within all cliprects.
 * Requires the hardware lock; rejects calls before DMA init. */
static int i810_clear_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i810_clear_t *clear = data;
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* GH: Someone's doing nasty things... */
if (!dev->dev_private)
return -EINVAL;
i810_dma_dispatch_clear(dev, clear->flags,
clear->clear_color, clear->clear_depth);
return 0;
}
/*
 * DRM_IOCTL_I810_SWAP handler: queue a front/back buffer swap blit.
 */
static int i810_swap_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	i810_dma_dispatch_swap(dev);
	return 0;
}
static int i810_getage(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
sarea_priv->last_dispatch = (int)hw_status[5];
return 0;
}
/*
 * DRM_IOCTL_I810_GETBUF handler: try to hand a free DMA buffer to the
 * caller.  d->granted is set by i810_dma_get_buffer() on success.
 */
static int i810_getbuf(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	int retcode = 0;
	drm_i810_dma_t *d = data;
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
	    dev_priv->sarea_priv;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	d->granted = 0;

	retcode = i810_dma_get_buffer(dev, d, file_priv);

	DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
		  task_pid_nr(current), retcode, d->granted);

	/* Keep the shared-area dispatch age current for userspace. */
	sarea_priv->last_dispatch = (int)hw_status[5];

	return retcode;
}
static int i810_copybuf(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	/* Deliberate no-op: buffer copying was only needed pre-2.4.x. */
	return 0;
}
static int i810_docopy(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	/* Deliberate no-op: buffer copying was only needed pre-2.4.x. */
	return 0;
}
/*
 * Dispatch a client-filled buffer to the hardware as a protected batch
 * buffer, then emit commands that mark the buffer free and record
 * 'last_render' once the GPU has consumed it.
 */
static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, int used,
				 unsigned int last_render)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;
	int u;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	/* Take ownership of the buffer: CLIENT -> HARDWARE. */
	u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
	if (u != I810_BUF_CLIENT)
		DRM_DEBUG("MC found buffer that isn't mine!\n");

	/* Clamp obviously bogus sizes rather than letting the GPU run wild. */
	if (used > 4 * 1024)
		used = 0;

	sarea_priv->dirty = 0x7f;

	DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);

	dev_priv->counter++;
	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
	DRM_DEBUG("start : %lx\n", start);
	DRM_DEBUG("used : %d\n", used);
	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		/* Pad to a qword boundary with a nop dword if needed. */
		if (used & 4) {
			*(u32 *) ((char *) buf_priv->virtual + used) = 0;
			used += 4;
		}

		i810_unmap_buffer(buf);
	}
	BEGIN_LP_RING(4);
	OUT_RING(CMD_OP_BATCH_BUFFER);
	OUT_RING(start | BB1_PROTECTED);
	OUT_RING(start + used - 4);
	OUT_RING(0);
	ADVANCE_LP_RING();

	/* After the batch: mark the buffer free and store last_render. */
	BEGIN_LP_RING(8);
	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(buf_priv->my_use_idx);
	OUT_RING(I810_BUF_FREE);
	OUT_RING(0);

	OUT_RING(CMD_STORE_DWORD_IDX);
	OUT_RING(16);
	OUT_RING(last_render);
	OUT_RING(0);
	ADVANCE_LP_RING();
}
/*
 * DRM_IOCTL_I810_MC handler (root/master only): dispatch a motion-comp
 * buffer and update the DMA statistics and shared-area bookkeeping.
 */
static int i810_dma_mc(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i810_mc_t *mc = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Reject out-of-range buffer indices from userspace. */
	if (mc->idx >= dma->buf_count || mc->idx < 0)
		return -EINVAL;

	i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
			     mc->last_render);

	atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
	sarea_priv->last_enqueue = dev_priv->counter - 1;
	sarea_priv->last_dispatch = (int)hw_status[5];

	return 0;
}
static int i810_rstatus(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
}
static int i810_ov0_info(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
drm_i810_overlay_t *ov = data;
ov->offset = dev_priv->overlay_offset;
ov->physical = dev_priv->overlay_physical;
return 0;
}
/*
 * DRM_IOCTL_I810_FSTATUS handler: return the raw contents of the
 * hardware register at offset 0x30008.
 */
static int i810_fstatus(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;

	LOCK_TEST_WITH_RETURN(dev, file_priv);
	return I810_READ(0x30008);
}
/*
 * DRM_IOCTL_I810_OV0FLIP handler: kick the hardware overlay so it
 * re-reads its register page.
 */
static int i810_ov0_flip(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Tell the overlay to update */
	I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);

	return 0;
}
/*
 * Enable page flipping, starting on page 0, and publish the current
 * page through the shared area.  (Original note: not sure why this
 * isn't set all the time.)
 */
static void i810_do_init_pageflip(struct drm_device *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");
	dev_priv->page_flipping = 1;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = 0;
}
/*
 * Disable page flipping.  If the back page is currently displayed,
 * flip back to page 0 first so the screen ends up in a sane state.
 */
static int i810_do_cleanup_pageflip(struct drm_device *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");
	if (dev_priv->current_page != 0)
		i810_dma_dispatch_flip(dev);

	dev_priv->page_flipping = 0;
	return 0;
}
/*
 * DRM_IOCTL_I810_FLIP handler: lazily enable page flipping on first
 * use, then dispatch a flip.
 */
static int i810_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = dev->dev_private;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv->page_flipping)
		i810_do_init_pageflip(dev);

	i810_dma_dispatch_flip(dev);
	return 0;
}
/*
 * Driver load callback: register the four i810-specific statistics
 * counters and enable PCI bus mastering.
 */
int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
	/* i810 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	pci_set_master(dev->pdev);

	return 0;
}
/* Last-close callback: tear down all DMA state for the device. */
void i810_driver_lastclose(struct drm_device *dev)
{
	i810_dma_cleanup(dev);
}
/*
 * Per-file preclose callback: if page flipping was ever enabled,
 * restore page 0 before the file goes away.
 */
void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
	drm_i810_private_t *dev_priv = dev->dev_private;

	if (dev_priv && dev_priv->page_flipping)
		i810_do_cleanup_pageflip(dev);
}
/* Reclaim callback (lock already held): free this client's DMA buffers. */
void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
					struct drm_file *file_priv)
{
	i810_reclaim_buffers(dev, file_priv);
}
/* DMA-quiescent callback: wait for the hardware to go idle. */
int i810_driver_dma_quiescent(struct drm_device *dev)
{
	i810_dma_quiescent(dev);
	return 0;
}
/*
 * ioctl dispatch table.  All entries require authentication; INIT and
 * MC are additionally restricted to the master and root.
 */
struct drm_ioctl_desc i810_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
};

int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i810 is AGP.
 */
int i810_driver_device_is_agp(struct drm_device *dev)
{
	return 1;
}
| gpl-2.0 |
drod2169/Linux-Kernel | net/netfilter/nft_immediate.c | 735 | 3488 | /*
* Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
/*
 * Private state of the "immediate" expression: a constant value that is
 * copied into a destination register for every evaluated packet.
 */
struct nft_immediate_expr {
	struct nft_data		data;	/* constant loaded at init time */
	enum nft_registers	dreg:8;	/* destination register */
	u8			dlen;	/* length of 'data' in bytes */
};
/* Per-packet evaluation: copy the stored constant into the register. */
static void nft_immediate_eval(const struct nft_expr *expr,
			       struct nft_data data[NFT_REG_MAX + 1],
			       const struct nft_pktinfo *pkt)
{
	const struct nft_immediate_expr *priv = nft_expr_priv(expr);

	nft_data_copy(&data[priv->dreg], &priv->data);
}
/* Netlink attribute policy for NFTA_IMMEDIATE_* attributes. */
static const struct nla_policy nft_immediate_policy[NFTA_IMMEDIATE_MAX + 1] = {
	[NFTA_IMMEDIATE_DREG]	= { .type = NLA_U32 },
	[NFTA_IMMEDIATE_DATA]	= { .type = NLA_NESTED },
};
/*
 * Parse and validate the netlink attributes, load the immediate data,
 * and validate the destination register.  On validation failure the
 * already-initialized data is released on the err1 path.
 */
static int nft_immediate_init(const struct nft_ctx *ctx,
			      const struct nft_expr *expr,
			      const struct nlattr * const tb[])
{
	struct nft_immediate_expr *priv = nft_expr_priv(expr);
	struct nft_data_desc desc;
	int err;

	/* Both the register and the data attribute are mandatory. */
	if (tb[NFTA_IMMEDIATE_DREG] == NULL ||
	    tb[NFTA_IMMEDIATE_DATA] == NULL)
		return -EINVAL;

	priv->dreg = ntohl(nla_get_be32(tb[NFTA_IMMEDIATE_DREG]));
	err = nft_validate_output_register(priv->dreg);
	if (err < 0)
		return err;

	err = nft_data_init(ctx, &priv->data, &desc, tb[NFTA_IMMEDIATE_DATA]);
	if (err < 0)
		return err;
	priv->dlen = desc.len;

	err = nft_validate_data_load(ctx, priv->dreg, &priv->data, desc.type);
	if (err < 0)
		goto err1;

	return 0;

err1:
	/* Undo nft_data_init() so we do not leak verdict chain refs. */
	nft_data_uninit(&priv->data, desc.type);
	return err;
}
/*
 * Release the immediate data loaded at init time.
 *
 * Fix: the old body used "return nft_data_uninit(...)" -- returning a
 * void expression from a void function is a C constraint violation
 * (C99 6.8.6.4), even though GCC accepts it as an extension.
 */
static void nft_immediate_destroy(const struct nft_ctx *ctx,
				  const struct nft_expr *expr)
{
	const struct nft_immediate_expr *priv = nft_expr_priv(expr);

	nft_data_uninit(&priv->data, nft_dreg_to_type(priv->dreg));
}
/* Dump the expression back to userspace; returns -1 on nlmsg overflow. */
static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_immediate_expr *priv = nft_expr_priv(expr);

	if (nla_put_be32(skb, NFTA_IMMEDIATE_DREG, htonl(priv->dreg)))
		goto nla_put_failure;

	return nft_data_dump(skb, NFTA_IMMEDIATE_DATA, &priv->data,
			     nft_dreg_to_type(priv->dreg), priv->dlen);

nla_put_failure:
	return -1;
}
/*
 * Chain validation hook.  When the destination is the verdict register,
 * expose the immediate data so the core can follow jumps and gotos.
 */
static int nft_immediate_validate(const struct nft_ctx *ctx,
				  const struct nft_expr *expr,
				  const struct nft_data **data)
{
	const struct nft_immediate_expr *priv = nft_expr_priv(expr);

	if (priv->dreg == NFT_REG_VERDICT)
		*data = &priv->data;

	return 0;
}
/* Forward declaration: the ops below and the type refer to each other. */
static struct nft_expr_type nft_imm_type;
static const struct nft_expr_ops nft_imm_ops = {
	.type		= &nft_imm_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
	.eval		= nft_immediate_eval,
	.init		= nft_immediate_init,
	.destroy	= nft_immediate_destroy,
	.dump		= nft_immediate_dump,
	.validate	= nft_immediate_validate,
};
/* Expression type registered with the nf_tables core as "immediate". */
static struct nft_expr_type nft_imm_type __read_mostly = {
	.name		= "immediate",
	.ops		= &nft_imm_ops,
	.policy		= nft_immediate_policy,
	.maxattr	= NFTA_IMMEDIATE_MAX,
	.owner		= THIS_MODULE,
};
/* Register the "immediate" expression type with the nf_tables core. */
int __init nft_immediate_module_init(void)
{
	return nft_register_expr(&nft_imm_type);
}
/* Unregister the "immediate" expression type. */
void nft_immediate_module_exit(void)
{
	nft_unregister_expr(&nft_imm_type);
}
| gpl-2.0 |
Kirmaniarslan/xaak_sense_kernel | drivers/net/wireless/iwlwifi/iwl-led.c | 2271 | 6336 | /******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
/*
 * LED behavior module parameter (read-only via sysfs).
 * default: IWL_LED_BLINK(0) using blinking index table
 */
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode, "0=system default, "
		"1=On(RF On)/Off(RF Off), 2=blinking");
/* Throughput		OFF time(ms)	ON time (ms)
 *	>300			25		25
 *	>200 to 300		40		40
 *	>100 to 200		55		55
 *	>70 to 100		65		65
 *	>50 to 70		75		75
 *	>20 to 50		85		85
 *	>10 to 20		95		95
 *	>5 to 10		110		110
 *	>1 to 5			130		130
 *	>0 to 1			167		167
 *	<=0					SOLID ON
 */
/*
 * NOTE(review): blink_time below appears to be the full on+off period
 * (twice the per-phase time in the table above) -- confirm against the
 * mac80211 tpt LED trigger documentation.  Throughputs are in Kbit/s.
 */
static const struct ieee80211_tpt_blink iwl_blink[] = {
	{ .throughput = 0, .blink_time = 334 },
	{ .throughput = 1 * 1024 - 1, .blink_time = 260 },
	{ .throughput = 5 * 1024 - 1, .blink_time = 220 },
	{ .throughput = 10 * 1024 - 1, .blink_time = 190 },
	{ .throughput = 20 * 1024 - 1, .blink_time = 170 },
	{ .throughput = 50 * 1024 - 1, .blink_time = 150 },
	{ .throughput = 70 * 1024 - 1, .blink_time = 130 },
	{ .throughput = 100 * 1024 - 1, .blink_time = 110 },
	{ .throughput = 200 * 1024 - 1, .blink_time = 80 },
	{ .throughput = 300 * 1024 - 1, .blink_time = 50 },
};
/* Turn the LED on via the CSR LED register (writes CSR_LED_REG_TRUN_ON;
 * the old comment claiming "off" contradicted the code). */
void iwlagn_led_enable(struct iwl_priv *priv)
{
	iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
}
/*
 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
 * Led blink rate analysis showed an average deviation of 20% on 5000 series
 * and up.
 * Need to compensate on the led on/off time per HW according to the deviation
 * to achieve the desired led frequency
 * The calculation is: (100-averageDeviation)/100 * blinkTime
 * For code efficiency the calculation will be:
 *     compensation = (100 - averageDeviation) * 64 / 100
 *     NewBlinkTime = (compensation * BlinkTime) / 64
 */
static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
				    u8 time, u16 compensation)
{
	/* A zero compensation would divide the time away entirely; fall
	 * back to the uncompensated value and complain. */
	if (!compensation) {
		IWL_ERR(priv, "undefined blink compensation: "
			"use pre-defined blinking time\n");
		return time;
	}

	/* (time * compensation) / 64, truncated to u8. */
	return (u8)((time * compensation) >> 6);
}
/*
 * Send an asynchronous REPLY_LEDS_CMD to the firmware.  Before sending,
 * clear every CSR LED register bit outside the BSM control mask so the
 * firmware has full control of the LED.
 */
static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_LEDS_CMD,
		.len = { sizeof(struct iwl_led_cmd), },
		.data = { led_cmd, },
		.flags = CMD_ASYNC,
		.callback = NULL,
	};
	u32 reg;

	reg = iwl_read32(priv, CSR_LED_REG);
	if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
		iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);

	return iwl_send_cmd(priv, &cmd);
}
/*
 * Set led pattern command.
 *
 * @on/@off: blink phase durations in ms; off == 0 requests solid on.
 * The per-hardware clock-deviation compensation is applied to both
 * phases before the command is sent.  The last pattern is cached in
 * priv->blink_on/blink_off so identical requests are skipped.
 */
static int iwl_led_cmd(struct iwl_priv *priv,
		       unsigned long on,
		       unsigned long off)
{
	struct iwl_led_cmd led_cmd = {
		.id = IWL_LED_LINK,
		.interval = IWL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(STATUS_READY, &priv->status))
		return -EBUSY;

	/* Skip the firmware round-trip if nothing changed. */
	if (priv->blink_on == on && priv->blink_off == off)
		return 0;

	if (off == 0) {
		/* led is SOLID_ON */
		on = IWL_LED_SOLID;
	}

	IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
			priv->cfg->base_params->led_compensation);
	led_cmd.on = iwl_blink_compensation(priv, on,
				priv->cfg->base_params->led_compensation);
	led_cmd.off = iwl_blink_compensation(priv, off,
				priv->cfg->base_params->led_compensation);

	ret = iwl_send_led_cmd(priv, &led_cmd);
	if (!ret) {
		/* Only cache the pattern once the firmware accepted it. */
		priv->blink_on = on;
		priv->blink_off = off;
	}
	return ret;
}
/*
 * LED class brightness hook: any non-zero brightness maps to solid on,
 * zero turns the LED off.
 */
static void iwl_led_brightness_set(struct led_classdev *led_cdev,
				   enum led_brightness brightness)
{
	struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);

	iwl_led_cmd(priv, brightness > 0 ? IWL_LED_SOLID : 0, 0);
}
/* LED class blink hook: forward the requested on/off times as a pattern. */
static int iwl_led_blink_set(struct led_classdev *led_cdev,
			     unsigned long *delay_on,
			     unsigned long *delay_off)
{
	struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);

	return iwl_led_cmd(priv, *delay_on, *delay_off);
}
/*
 * Register this device's LED with the LED class subsystem, wiring up
 * the brightness/blink hooks and the default trigger chosen by
 * led_mode (module parameter) or the per-device config.
 *
 * Fix: kasprintf() can fail; the old code passed a NULL .name straight
 * into led_classdev_register().  Bail out early on allocation failure
 * (LED support is best-effort, like the registration-failure path).
 */
void iwl_leds_init(struct iwl_priv *priv)
{
	int mode = led_mode;
	int ret;

	if (mode == IWL_LED_DEFAULT)
		mode = priv->cfg->led_mode;

	priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
				   wiphy_name(priv->hw->wiphy));
	if (!priv->led.name)
		return;

	priv->led.brightness_set = iwl_led_brightness_set;
	priv->led.blink_set = iwl_led_blink_set;
	priv->led.max_brightness = 1;

	switch (mode) {
	case IWL_LED_DEFAULT:
		WARN_ON(1);
		break;
	case IWL_LED_BLINK:
		priv->led.default_trigger =
			ieee80211_create_tpt_led_trigger(priv->hw,
					IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
					iwl_blink, ARRAY_SIZE(iwl_blink));
		break;
	case IWL_LED_RF_STATE:
		priv->led.default_trigger =
			ieee80211_get_radio_led_name(priv->hw);
		break;
	}

	ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
	if (ret) {
		kfree(priv->led.name);
		return;
	}

	priv->led_registered = true;
}
/*
 * Undo iwl_leds_init(): unregister the LED classdev and free its name.
 * A no-op when registration never happened.
 */
void iwl_leds_exit(struct iwl_priv *priv)
{
	if (priv->led_registered) {
		led_classdev_unregister(&priv->led);
		kfree(priv->led.name);
	}
}
| gpl-2.0 |
draekko/android_kernel_ba2x_2.0 | drivers/infiniband/hw/qib/qib_mad.c | 2527 | 62689 | /*
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
* All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_smi.h>
#include "qib.h"
#include "qib_mad.h"
/*
 * Turn an incoming SMP into a GetResp reply in place and return the MAD
 * result flags telling the core to send it back.
 */
static int reply(struct ib_smp *smp)
{
	/*
	 * The verbs framework will handle the directed/LID route
	 * packet changes.
	 */
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
/*
 * Send a trap MAD containing 'data' (len bytes) towards the SM.
 * Silently drops the trap if the link is not active, if trap rate
 * limiting is in effect (o14-2), or if no address handle to the SM can
 * be built.  On success, arms the rate-limit timeout from the port's
 * subnet timeout.
 */
static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent;
	struct ib_smp *smp;
	int ret;
	unsigned long flags;
	unsigned long timeout;

	agent = ibp->send_agent;
	if (!agent)
		return;

	/* o14-3.2.1 */
	if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE))
		return;

	/* o14-2 */
	if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout))
		return;

	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
				      IB_MGMT_MAD_DATA, GFP_ATOMIC);
	if (IS_ERR(send_buf))
		return;

	smp = send_buf->mad;
	smp->base_version = IB_MGMT_BASE_VERSION;
	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	smp->class_version = 1;
	smp->method = IB_MGMT_METHOD_TRAP;
	ibp->tid++;
	smp->tid = cpu_to_be64(ibp->tid);
	smp->attr_id = IB_SMP_ATTR_NOTICE;
	/* o14-1: smp->mkey = 0; */
	memcpy(smp->data, data, len);

	/* Build (or reuse) the cached address handle to the SM under lock. */
	spin_lock_irqsave(&ibp->lock, flags);
	if (!ibp->sm_ah) {
		if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
			struct ib_ah *ah;
			struct ib_ah_attr attr;

			memset(&attr, 0, sizeof attr);
			attr.dlid = ibp->sm_lid;
			attr.port_num = ppd_from_ibp(ibp)->port;
			ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
			if (IS_ERR(ah))
				ret = -EINVAL;
			else {
				send_buf->ah = ah;
				ibp->sm_ah = to_iah(ah);
				ret = 0;
			}
		} else
			ret = -EINVAL;
	} else {
		send_buf->ah = &ibp->sm_ah->ibah;
		ret = 0;
	}
	spin_unlock_irqrestore(&ibp->lock, flags);

	if (!ret)
		ret = ib_post_send_mad(send_buf, NULL);
	if (!ret) {
		/* 4.096 usec. */
		timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000;
		ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout);
	} else {
		ib_free_send_mad(send_buf);
		ibp->trap_timeout = 0;
	}
}
/*
 * Send a bad [PQ]_Key trap (ch. 14.3.8).
 * Also bumps the matching violation counter and the packet-drop count.
 */
void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
		   u32 qp1, u32 qp2, __be16 lid1, __be16 lid2)
{
	struct ib_mad_notice_attr data;

	if (trap_num == IB_NOTICE_TRAP_BAD_PKEY)
		ibp->pkey_violations++;
	else
		ibp->qkey_violations++;
	ibp->n_pkt_drops++;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = trap_num;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_257_258.lid1 = lid1;
	data.details.ntc_257_258.lid2 = lid2;
	data.details.ntc_257_258.key = cpu_to_be32(key);
	data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
	data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);

	qib_send_trap(ibp, &data, sizeof data);
}
/*
 * Send a bad M_Key trap (ch. 14.3.9).
 * For directed-route SMPs the (possibly truncated) return path is
 * included in the notice.
 */
static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
{
	struct ib_mad_notice_attr data;

	/* Send violation trap */
	data.generic_type = IB_NOTICE_TYPE_SECURITY;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_256.lid = data.issuer_lid;
	data.details.ntc_256.method = smp->method;
	data.details.ntc_256.attr_id = smp->attr_id;
	data.details.ntc_256.attr_mod = smp->attr_mod;
	data.details.ntc_256.mkey = smp->mkey;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		u8 hop_cnt;

		data.details.ntc_256.dr_slid = smp->dr_slid;
		data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
		hop_cnt = smp->hop_cnt;
		/* Clamp the hop count to what fits in the notice and flag
		 * the truncation. */
		if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) {
			data.details.ntc_256.dr_trunc_hop |=
				IB_NOTICE_TRAP_DR_TRUNC;
			hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path);
		}
		data.details.ntc_256.dr_trunc_hop |= hop_cnt;
		memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path,
		       hop_cnt);
	}

	qib_send_trap(ibp, &data, sizeof data);
}
/*
 * Send a Port Capability Mask Changed trap (ch. 14.3.11).
 */
void qib_cap_mask_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);

	qib_send_trap(ibp, &data, sizeof data);
}
/*
 * Send a System Image GUID Changed trap (ch. 14.3.12).
 */
void qib_sys_guid_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_145.lid = data.issuer_lid;
	data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;

	qib_send_trap(ibp, &data, sizeof data);
}
/*
 * Send a Node Description Changed trap (ch. 14.3.13).
 * Uses notice 144 (same as cap-mask change) with the local_changes bit
 * and the NODE_DESC_CHG change flag set.
 */
void qib_node_desc_chg(struct qib_ibport *ibp)
{
	struct ib_mad_notice_attr data;

	data.generic_type = IB_NOTICE_TYPE_INFO;
	data.prod_type_msb = 0;
	data.prod_type_lsb = IB_NOTICE_PROD_CA;
	data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
	data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
	data.toggle_count = 0;
	memset(&data.details, 0, sizeof data.details);
	data.details.ntc_144.lid = data.issuer_lid;
	data.details.ntc_144.local_changes = 1;
	data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;

	qib_send_trap(ibp, &data, sizeof data);
}
/*
 * SubnGet(NodeDescription): copy the device's node description into the
 * reply.  The attribute modifier must be zero.
 */
static int subn_get_nodedescription(struct ib_smp *smp,
				    struct ib_device *ibdev)
{
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
}
/*
 * SubnGet(NodeInfo): fill in the NodeInfo attribute for the requested
 * port.
 *
 * Fix: the old code read dd->vendorid into a local 'vendor' that was
 * never used (the vendor_id bytes are the fixed QIB_SRC_OUI_* source
 * OUI constants); the dead store and unused variable are removed.
 */
static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 majrev, minrev;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* GUID 0 is illegal */
	if (smp->attr_mod || pidx >= dd->num_pports ||
	    dd->pport[pidx].guid == 0)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		nip->port_guid = dd->pport[pidx].guid;

	nip->base_version = 1;
	nip->class_version = 1;
	nip->node_type = 1;     /* channel adapter */
	nip->num_ports = ibdev->phys_port_cnt;
	/* This is already in network order */
	nip->sys_guid = ib_qib_sys_image_guid;
	nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */
	nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd));
	nip->device_id = cpu_to_be16(dd->deviceid);
	majrev = dd->majrev;
	minrev = dd->minrev;
	nip->revision = cpu_to_be32((majrev << 16) | minrev);
	nip->local_port_num = port;
	nip->vendor_id[0] = QIB_SRC_OUI_1;
	nip->vendor_id[1] = QIB_SRC_OUI_2;
	nip->vendor_id[2] = QIB_SRC_OUI_3;

	return reply(smp);
}
/*
 * SubnGet(GUIDInfo): return the first block of 8 GUIDs for the port.
 * Only block 0 is supported; any other attribute modifier is invalid.
 */
static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *p = (__be64 *) smp->data;
	unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block */

	memset(smp->data, 0, sizeof(smp->data));

	if (startgx == 0 && pidx < dd->num_pports) {
		struct qib_pportdata *ppd = dd->pport + pidx;
		struct qib_ibport *ibp = &ppd->ibport_data;
		__be64 g = ppd->guid;
		unsigned i;

		/* GUID 0 is illegal */
		if (g == 0)
			smp->status |= IB_SMP_INVALID_FIELD;
		else {
			/* The first is a copy of the read-only HW GUID. */
			p[0] = g;
			for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
				p[i] = ibp->guids[i - 1];
		}
	} else
		smp->status |= IB_SMP_INVALID_FIELD;

	return reply(smp);
}
/* Set the enabled link-width mask via the chip-specific config hook. */
static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w);
}
/* Set the enabled link-speed mask via the chip-specific config hook. */
static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s);
}
/* Read the current overrun threshold from the chip-specific config. */
static int get_overrunthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH);
}
/**
 * set_overrunthreshold - set the overrun threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 *
 * Always returns 0; the chip hook's result is deliberately ignored.
 */
static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH,
				     (u32)n);
	return 0;
}
/* Read the current physical-error threshold from the chip config. */
static int get_phyerrthreshold(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH);
}
/**
 * set_phyerrthreshold - set the physical error threshold
 * @ppd: the physical port data
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 *
 * Always returns 0; the chip hook's result is deliberately ignored.
 */
static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n)
{
	(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH,
				     (u32)n);
	return 0;
}
/**
 * get_linkdowndefaultstate - get the default linkdown state
 * @ppd: the physical port data
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
static int get_linkdowndefaultstate(struct qib_pportdata *ppd)
{
	return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) ==
		IB_LINKINITCMD_SLEEP;
}
/*
 * Enforce M_Key protection on an incoming SMP (IBA ch. 14.2.4).
 *
 * Returns 0 if the packet may be processed, or
 * IB_MAD_RESULT_SUCCESS|IB_MAD_RESULT_CONSUMED when the M_Key check
 * fails and the packet must be dropped (a BadMKey trap is sent and the
 * violation counter/lease timer are updated).
 */
static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags)
{
	int ret = 0;

	/* Is the mkey in the process of expiring? */
	if (ibp->mkey_lease_timeout &&
	    time_after_eq(jiffies, ibp->mkey_lease_timeout)) {
		/* Clear timeout and mkey protection field. */
		ibp->mkey_lease_timeout = 0;
		ibp->mkeyprot = 0;
	}

	/* M_Key checking depends on Portinfo:M_Key_protect_bits */
	if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && ibp->mkey != 0 &&
	    ibp->mkey != smp->mkey &&
	    (smp->method == IB_MGMT_METHOD_SET ||
	     smp->method == IB_MGMT_METHOD_TRAP_REPRESS ||
	     (smp->method == IB_MGMT_METHOD_GET && ibp->mkeyprot >= 2))) {
		if (ibp->mkey_violations != 0xFFFF)
			++ibp->mkey_violations;
		if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period)
			ibp->mkey_lease_timeout = jiffies +
				ibp->mkey_lease_period * HZ;
		/* Generate a trap notice. */
		qib_bad_mkey(ibp, smp);
		ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	} else if (ibp->mkey_lease_timeout)
		ibp->mkey_lease_timeout = 0;

	return ret;
}
/*
 * SubnGet(PortInfo): build the PortInfo attribute for the requested
 * port (attr_mod selects the port; 0 means the arrival port).  Requests
 * for another port are subject to that port's M_Key check.
 */
static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	u16 lid;
	u8 mtu;
	int ret;
	u32 state;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt) {
			smp->status |= IB_SMP_INVALID_FIELD;
			ret = reply(smp);
			goto bail;
		}
		if (port_num != port) {
			ibp = to_iport(ibdev, port_num);
			ret = check_mkey(ibp, smp, 0);
			if (ret)
				goto bail;
		}
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;

	/* Clear all fields.  Only set the non-zero fields. */
	memset(smp->data, 0, sizeof(smp->data));

	/* Only return the mkey if the protection field allows it. */
	if (!(smp->method == IB_MGMT_METHOD_GET &&
	      ibp->mkey != smp->mkey &&
	      ibp->mkeyprot == 1))
		pip->mkey = ibp->mkey;
	pip->gid_prefix = ibp->gid_prefix;
	lid = ppd->lid;
	pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
	pip->sm_lid = cpu_to_be16(ibp->sm_lid);
	pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
	/* pip->diag_code; */
	pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period);
	pip->local_port_num = port;
	pip->link_width_enabled = ppd->link_width_enabled;
	pip->link_width_supported = ppd->link_width_supported;
	pip->link_width_active = ppd->link_width_active;
	state = dd->f_iblink_state(ppd->lastibcstat);
	pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state;

	pip->portphysstate_linkdown =
		(dd->f_ibphys_portstate(ppd->lastibcstat) << 4) |
		(get_linkdowndefaultstate(ppd) ? 1 : 2);
	pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc;
	pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) |
		ppd->link_speed_enabled;
	/* Translate the byte MTU into the PortInfo MTU enum. */
	switch (ppd->ibmtu) {
	default: /* something is wrong; fall through */
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	}
	pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl;
	pip->vlcap_inittype = ppd->vls_supported << 4;  /* InitType = 0 */
	pip->vl_high_limit = ibp->vl_high_limit;
	pip->vl_arb_high_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP);
	pip->vl_arb_low_cap =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP);
	/* InitTypeReply = 0 */
	pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	/* HCAs ignore VLStallCount and HOQLife */
	/* pip->vlstallcnt_hoqlife; */
	pip->operationalvl_pei_peo_fpi_fpo =
		dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4;
	pip->mkey_violations = cpu_to_be16(ibp->mkey_violations);
	/* P_KeyViolations are counted by hardware. */
	pip->pkey_violations = cpu_to_be16(ibp->pkey_violations);
	pip->qkey_violations = cpu_to_be16(ibp->qkey_violations);
	/* Only the hardware GUID is supported for now */
	pip->guid_cap = QIB_GUIDS_PER_PORT;
	pip->clientrereg_resv_subnetto = ibp->subnet_timeout;
	/* 32.768 usec. response time (guessing) */
	pip->resv_resptimevalue = 3;
	pip->localphyerrors_overrunerrors =
		(get_phyerrthreshold(ppd) << 4) |
		get_overrunthreshold(ppd);
	/* pip->max_credit_hint; */
	if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
		u32 v;

		v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY);
		pip->link_roundtrip_latency[0] = v >> 16;
		pip->link_roundtrip_latency[1] = v >> 8;
		pip->link_roundtrip_latency[2] = v;
	}

	ret = reply(smp);

bail:
	return ret;
}
/**
 * get_pkeys - return the PKEY table
 * @dd: the qlogic_ib device
 * @port: the IB port number
 * @pkeys: the pkey table is placed here
 */
static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd = dd->pport + port - 1;
	/*
	 * always a kernel context, no locking needed.
	 * If we get here with ppd setup, no need to check
	 * that pd is valid.
	 */
	struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx];

	memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys));

	return 0;
}
static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	u16 *host = (u16 *) smp->data;
	__be16 *wire = (__be16 *) smp->data;

	memset(smp->data, 0, sizeof(smp->data));

	/* 64 blocks of 32 16-bit P_Key entries; only block 0 exists here */
	if (startpx != 0) {
		smp->status |= IB_SMP_INVALID_FIELD;
	} else {
		unsigned i, n = qib_get_npkeys(dd);

		get_pkeys(dd, port, host);
		/* convert in place to wire (big-endian) order */
		for (i = 0; i < n; i++)
			wire[i] = cpu_to_be16(host[i]);
	}

	return reply(smp);
}
static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
	__be64 *guids = (__be64 *) smp->data;
	unsigned pidx = port - 1;	/* IB number port from 1, hdw from 0 */

	/* 32 blocks of 8 64-bit GUIDs per block; only block 0 is writable */
	if (startgx != 0 || pidx >= dd->num_pports) {
		smp->status |= IB_SMP_INVALID_FIELD;
	} else {
		struct qib_ibport *ibp = &dd->pport[pidx].ibport_data;
		unsigned i;

		/* Entry 0 is the read-only hardware GUID; skip it. */
		for (i = 1; i < QIB_GUIDS_PER_PORT; i++)
			ibp->guids[i - 1] = guids[i];
	}

	/* The only GUID we support is the first read-only entry. */
	return subn_get_guidinfo(smp, ibdev, port);
}
/**
 * subn_set_portinfo - set port information
 * @smp: the incoming SM packet
 * @ibdev: the infiniband device
 * @port: the port on the device
 *
 * Set Portinfo (see ch. 14.2.5.6).  The reply carries the resulting
 * PortInfo, with the ClientReregister bit echoed back per o14-12.2.1.
 */
static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct ib_port_info *pip = (struct ib_port_info *)smp->data;
	struct ib_event event;
	struct qib_devdata *dd;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	char clientrereg = 0;
	unsigned long flags;
	u16 lid, smlid;
	u8 lwe;
	u8 lse;
	u8 state;
	u8 vls;
	u8 msl;
	u16 lstate;
	int ret, ore, mtu;
	u32 port_num = be32_to_cpu(smp->attr_mod);

	if (port_num == 0)
		port_num = port;
	else {
		if (port_num > ibdev->phys_port_cnt)
			goto err;
		/* Port attributes can only be set on the receiving port */
		if (port_num != port)
			goto get_only;
	}

	dd = dd_from_ibdev(ibdev);
	/* IB numbers ports from 1, hdw from 0 */
	ppd = dd->pport + (port_num - 1);
	ibp = &ppd->ibport_data;
	event.device = ibdev;
	event.element.port_num = port;

	ibp->mkey = pip->mkey;
	ibp->gid_prefix = pip->gid_prefix;
	ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);

	lid = be16_to_cpu(pip->lid);
	/* Must be a valid unicast LID address. */
	if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
		if (ppd->lid != lid)
			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
			qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT);
		qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7);
		event.event = IB_EVENT_LID_CHANGE;
		ib_dispatch_event(&event);
	}

	smlid = be16_to_cpu(pip->sm_lid);
	msl = pip->neighbormtu_mastersmsl & 0xF;
	/* Must be a valid unicast LID address. */
	if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
		/* Update the cached SM address handle under the lock. */
		spin_lock_irqsave(&ibp->lock, flags);
		if (ibp->sm_ah) {
			if (smlid != ibp->sm_lid)
				ibp->sm_ah->attr.dlid = smlid;
			if (msl != ibp->sm_sl)
				ibp->sm_ah->attr.sl = msl;
		}
		spin_unlock_irqrestore(&ibp->lock, flags);
		if (smlid != ibp->sm_lid)
			ibp->sm_lid = smlid;
		if (msl != ibp->sm_sl)
			ibp->sm_sl = msl;
		event.event = IB_EVENT_SM_CHANGE;
		ib_dispatch_event(&event);
	}

	/* Allow 1x or 4x to be set (see 14.2.6.6). */
	lwe = pip->link_width_enabled;
	if (lwe) {
		if (lwe == 0xFF)
			set_link_width_enabled(ppd, ppd->link_width_supported);
		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lwe != ppd->link_width_enabled)
			set_link_width_enabled(ppd, lwe);
	}

	lse = pip->linkspeedactive_enabled & 0xF;
	if (lse) {
		/*
		 * The IB 1.2 spec. only allows link speed values
		 * 1, 3, 5, 7, 15.  1.2.1 extended to allow specific
		 * speeds.
		 */
		if (lse == 15)
			set_link_speed_enabled(ppd,
					       ppd->link_speed_supported);
		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
			smp->status |= IB_SMP_INVALID_FIELD;
		else if (lse != ppd->link_speed_enabled)
			set_link_speed_enabled(ppd, lse);
	}

	/* Set link down default state. */
	switch (pip->portphysstate_linkdown & 0xF) {
	case 0: /* NOP */
		break;
	case 1: /* SLEEP */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_SLEEP);
		break;
	case 2: /* POLL */
		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT,
					IB_LINKINITCMD_POLL);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
	ibp->vl_high_limit = pip->vl_high_limit;
	(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT,
				    ibp->vl_high_limit);

	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
	if (mtu == -1)
		smp->status |= IB_SMP_INVALID_FIELD;
	else
		qib_set_mtu(ppd, mtu);

	/* Set operational VLs */
	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
	if (vls) {
		if (vls > ppd->vls_supported)
			smp->status |= IB_SMP_INVALID_FIELD;
		else
			(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
	}

	/* Writing zero clears the respective violation counter. */
	if (pip->mkey_violations == 0)
		ibp->mkey_violations = 0;

	if (pip->pkey_violations == 0)
		ibp->pkey_violations = 0;

	if (pip->qkey_violations == 0)
		ibp->qkey_violations = 0;

	ore = pip->localphyerrors_overrunerrors;
	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
		smp->status |= IB_SMP_INVALID_FIELD;

	if (set_overrunthreshold(ppd, (ore & 0xF)))
		smp->status |= IB_SMP_INVALID_FIELD;

	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;

	if (pip->clientrereg_resv_subnetto & 0x80) {
		clientrereg = 1;
		event.event = IB_EVENT_CLIENT_REREGISTER;
		ib_dispatch_event(&event);
	}

	/*
	 * Do the port state change now that the other link parameters
	 * have been set.
	 * Changing the port physical state only makes sense if the link
	 * is down or is being set to down.
	 */
	state = pip->linkspeed_portstate & 0xF;
	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
		smp->status |= IB_SMP_INVALID_FIELD;

	/*
	 * Only state changes of DOWN, ARM, and ACTIVE are valid
	 * and must be in the correct state to take effect (see 7.2.6).
	 */
	switch (state) {
	case IB_PORT_NOP:
		if (lstate == 0)
			break;
		/* FALLTHROUGH */
	case IB_PORT_DOWN:
		if (lstate == 0)
			lstate = QIB_IB_LINKDOWN_ONLY;
		else if (lstate == 1)
			lstate = QIB_IB_LINKDOWN_SLEEP;
		else if (lstate == 2)
			lstate = QIB_IB_LINKDOWN;
		else if (lstate == 3)
			lstate = QIB_IB_LINKDOWN_DISABLE;
		else {
			smp->status |= IB_SMP_INVALID_FIELD;
			break;
		}
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~QIBL_LINKV;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
		qib_set_linkstate(ppd, lstate);
		/*
		 * Don't send a reply if the response would be sent
		 * through the disabled port.
		 */
		if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) {
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
			goto done;
		}
		qib_wait_linkstate(ppd, QIBL_LINKV, 10);
		break;
	case IB_PORT_ARMED:
		qib_set_linkstate(ppd, QIB_IB_LINKARM);
		break;
	case IB_PORT_ACTIVE:
		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
		break;
	default:
		smp->status |= IB_SMP_INVALID_FIELD;
	}

	ret = subn_get_portinfo(smp, ibdev, port);

	/*
	 * subn_get_portinfo() rebuilt smp->data and cleared the
	 * ClientReregister bit; restore it in the reply per o14-12.2.1.
	 * (The old code jumped to get_only here, which rebuilt the reply
	 * a second time and lost the bit.)
	 */
	if (clientrereg)
		pip->clientrereg_resv_subnetto |= 0x80;

	goto done;

err:
	smp->status |= IB_SMP_INVALID_FIELD;
get_only:
	ret = subn_get_portinfo(smp, ibdev, port);
done:
	return ret;
}
/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @ppd: the qlogic_ib port data
 * @key: the PKEY value to drop
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (ppd->pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&ppd->pkeyrefs[i])) {
			/* Last user gone: free the slot for reuse. */
			ppd->pkeys[i] = 0;
			return 1;
		}
		break;
	}
	return 0;
}
/**
 * add_pkey - add the given PKEY to the hardware table
 * @ppd: the qlogic_ib port data
 * @key: the PKEY (full 16-bit value, including the membership bit)
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct qib_pportdata *ppd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;	/* key with membership bit stripped */
	int any = 0;			/* count of empty slots seen */
	int ret;

	/* 0x7FFF (invalid PKEY) is silently ignored: no change needed. */
	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (ppd->pkeys[i] == key) {
			if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&ppd->pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	/*
	 * Claim the first slot that is still empty; the atomic inc-to-1
	 * is what wins the race against concurrent adders.
	 */
	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			/* for qibstats, etc. */
			ppd->pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;
bail:
	return ret;
}
/**
 * set_pkeys - set the PKEY table for ctxt 0
 * @dd: the qlogic_ib device
 * @port: the IB port number (1-based)
 * @pkeys: the new PKEY table (host byte order)
 *
 * Returns 0.  Reprograms the hardware PKEY registers and dispatches an
 * IB_EVENT_PKEY_CHANGE event if any entry actually changed.
 */
static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys)
{
	struct qib_pportdata *ppd;
	struct qib_ctxtdata *rcd;
	int i;
	int changed = 0;

	/*
	 * IB port one/two always maps to context zero/one,
	 * always a kernel context, no locking needed
	 * If we get here with ppd setup, no need to check
	 * that rcd is valid.
	 */
	ppd = dd->pport + (port - 1);
	rcd = dd->rcd[ppd->hw_pidx];

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = rcd->pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(ppd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(ppd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		rcd->pkeys[i] = key;
	}
	if (changed) {
		struct ib_event event;

		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);

		event.event = IB_EVENT_PKEY_CHANGE;
		event.device = &dd->verbs_dev.ibdev;
		/*
		 * Report the event on the port whose table changed, not
		 * unconditionally on port 1 (mattered on dual-port HCAs).
		 */
		event.element.port_num = port;
		ib_dispatch_event(&event);
	}
	return 0;
}
static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev,
			      u8 port)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
	__be16 *wire = (__be16 *) smp->data;
	u16 *host = (u16 *) smp->data;
	unsigned i, n = qib_get_npkeys(dd);

	/* Convert the incoming table to host order in place. */
	for (i = 0; i < n; i++)
		host[i] = be16_to_cpu(wire[i]);

	/* Only block 0 is implemented; apply the table only for it. */
	if (startpx != 0)
		smp->status |= IB_SMP_INVALID_FIELD;
	else if (set_pkeys(dd, port, host) != 0)
		smp->status |= IB_SMP_INVALID_FIELD;

	return subn_get_pkeytable(smp, ibdev, port);
}
static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *out = (u8 *) smp->data;
	unsigned i;

	memset(smp->data, 0, sizeof(smp->data));

	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
		smp->status |= IB_SMP_UNSUP_METHOD;
	} else {
		/* Pack two 4-bit SL->VL entries into each reply byte. */
		for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2)
			out[i / 2] = (ibp->sl_to_vl[i] << 4) |
				     ibp->sl_to_vl[i + 1];
	}

	return reply(smp);
}
/*
 * Set the SL->VL map from the packed 4-bit pairs in smp->data and
 * notify user space, then reply with the resulting table.
 */
static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	u8 *p = (u8 *) smp->data;
	unsigned i;

	if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) {
		smp->status |= IB_SMP_UNSUP_METHOD;
		return reply(smp);
	}

	/* Each input byte carries two 4-bit SL->VL entries. */
	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) {
		ibp->sl_to_vl[i] = *p >> 4;
		ibp->sl_to_vl[i + 1] = *p & 0xF;
	}
	/* Reuse ibp instead of doing a redundant second to_iport(). */
	qib_set_uevent_bits(ppd_from_ibp(ibp),
			    _QIB_EVENT_SL2VL_CHANGE_BIT);

	return subn_get_sl_to_vl(smp, ibdev, port);
}
static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;

	memset(smp->data, 0, sizeof(smp->data));

	if (ppd->vls_supported == IB_VL_VL0) {
		/* Single-VL hardware has no VLArbitration tables. */
		smp->status |= IB_SMP_UNSUP_METHOD;
	} else {
		switch (which) {
		case IB_VLARB_LOWPRI_0_31:
			(void) ppd->dd->f_get_ib_table(ppd,
						       QIB_IB_TBL_VL_LOW_ARB,
						       smp->data);
			break;
		case IB_VLARB_HIGHPRI_0_31:
			(void) ppd->dd->f_get_ib_table(ppd,
						       QIB_IB_TBL_VL_HIGH_ARB,
						       smp->data);
			break;
		default:
			smp->status |= IB_SMP_INVALID_FIELD;
		}
	}

	return reply(smp);
}
static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev,
			   u8 port)
{
	struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
	unsigned which = be32_to_cpu(smp->attr_mod) >> 16;

	if (ppd->vls_supported == IB_VL_VL0) {
		/* Single-VL hardware has no VLArbitration tables. */
		smp->status |= IB_SMP_UNSUP_METHOD;
	} else {
		switch (which) {
		case IB_VLARB_LOWPRI_0_31:
			(void) ppd->dd->f_set_ib_table(ppd,
						       QIB_IB_TBL_VL_LOW_ARB,
						       smp->data);
			break;
		case IB_VLARB_HIGHPRI_0_31:
			(void) ppd->dd->f_set_ib_table(ppd,
						       QIB_IB_TBL_VL_HIGH_ARB,
						       smp->data);
			break;
		default:
			smp->status |= IB_SMP_INVALID_FIELD;
		}
	}

	/* Echo back the table as it now stands. */
	return subn_get_vl_arb(smp, ibdev, port);
}
/* Consume a SubnTrapRepress() without generating any reply. */
static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
			     u8 port)
{
	/*
	 * For now, we only send the trap once so no need to process this.
	 * o13-6, o13-7,
	 * o14-3.a4 The SMA shall not send any message in response to a valid
	 * SubnTrapRepress() message.
	 */
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}
/* PMA GetClassPortInfo: advertise capabilities and response timing. */
static int pma_get_classportinfo(struct ib_perf *pmp,
				 struct ib_device *ibdev)
{
	struct ib_pma_classportinfo *p =
		(struct ib_pma_classportinfo *)pmp->data;
	struct qib_devdata *dd = dd_from_ibdev(ibdev);

	memset(pmp->data, 0, sizeof(pmp->data));

	/* ClassPortInfo takes no attribute modifier. */
	if (pmp->attr_mod != 0)
		pmp->status |= IB_SMP_INVALID_FIELD;

	/* Note that AllPortSelect is not valid */
	p->base_version = 1;
	p->class_version = 1;
	p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
	/*
	 * Set the most significant bit of CM2 to indicate support for
	 * congestion statistics
	 */
	p->reserved[0] = dd->psxmitwait_supported << 7;
	/*
	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
	 */
	p->resp_time_value = 18;

	return reply((struct ib_smp *) pmp);
}
/*
 * PMA GetPortSamplesControl: report the current sampling setup (tick,
 * status, counter width/mask and the cached select values) under
 * ibp->lock so a concurrent Set cannot tear the state.
 */
static int pma_get_portsamplescontrol(struct ib_perf *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 port_select = p->port_select;

	/* Save port_select before the reply buffer is cleared. */
	memset(pmp->data, 0, sizeof(pmp->data));

	p->port_select = port_select;
	/* AllPortSelect (attr_mod) and foreign ports are not supported. */
	if (pmp->attr_mod != 0 || port_select != port) {
		pmp->status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}
	spin_lock_irqsave(&ibp->lock, flags);
	p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS);
	p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	p->counter_width = 4;	/* 32 bit counters */
	p->counter_mask0_9 = COUNTER_MASK0_9;
	p->sample_start = cpu_to_be32(ibp->pma_sample_start);
	p->sample_interval = cpu_to_be32(ibp->pma_sample_interval);
	p->tag = cpu_to_be16(ibp->pma_tag);
	p->counter_select[0] = ibp->pma_counter_select[0];
	p->counter_select[1] = ibp->pma_counter_select[1];
	p->counter_select[2] = ibp->pma_counter_select[2];
	p->counter_select[3] = ibp->pma_counter_select[3];
	p->counter_select[4] = ibp->pma_counter_select[4];
	spin_unlock_irqrestore(&ibp->lock, flags);
bail:
	return reply((struct ib_smp *) pmp);
}
/*
 * PMA SetPortSamplesControl: program a new sampling run.  The new
 * parameters are accepted only when the previous sample has finished,
 * or when the hardware PS* counters were owned by the congestion timer.
 */
static int pma_set_portsamplescontrol(struct ib_perf *pmp,
				      struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplescontrol *p =
		(struct ib_pma_portsamplescontrol *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status, xmit_flags;
	int ret;

	if (pmp->attr_mod != 0 || p->port_select != port) {
		pmp->status |= IB_SMP_INVALID_FIELD;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	spin_lock_irqsave(&ibp->lock, flags);

	/* Port Sampling code owns the PS* HW counters */
	xmit_flags = ppd->cong_stats.flags;
	ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE;
	status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
	if (status == IB_PMA_SAMPLE_STATUS_DONE ||
	    (status == IB_PMA_SAMPLE_STATUS_RUNNING &&
	     xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) {
		/* Latch the requested parameters and start the sample. */
		ibp->pma_sample_start = be32_to_cpu(p->sample_start);
		ibp->pma_sample_interval = be32_to_cpu(p->sample_interval);
		ibp->pma_tag = be16_to_cpu(p->tag);
		ibp->pma_counter_select[0] = p->counter_select[0];
		ibp->pma_counter_select[1] = p->counter_select[1];
		ibp->pma_counter_select[2] = p->counter_select[2];
		ibp->pma_counter_select[3] = p->counter_select[3];
		ibp->pma_counter_select[4] = p->counter_select[4];
		dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval,
				      ibp->pma_sample_start);
	}
	spin_unlock_irqrestore(&ibp->lock, flags);

	/* Reply with the (possibly updated) control attribute. */
	ret = pma_get_portsamplescontrol(pmp, ibdev, port);

bail:
	return ret;
}
/*
 * Read the hardware PS* port counter selected by a PMA counter-select
 * code; unknown codes read as zero.
 */
static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd,
		       __be16 sel)
{
	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		return ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA);
	case IB_PMA_PORT_RCV_DATA:
		return ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA);
	case IB_PMA_PORT_XMIT_PKTS:
		return ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS);
	case IB_PMA_PORT_RCV_PKTS:
		return ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS);
	case IB_PMA_PORT_XMIT_WAIT:
		return ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT);
	default:
		return 0;
	}
}
/* This function assumes that the xmit_wait lock is already held */
static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd)
{
	u32 hw_delta = get_counter(&ppd->ibport_data, ppd,
				   IB_PMA_PORT_XMIT_WAIT);

	/* Running software total plus what the hardware has accrued. */
	return ppd->cong_stats.counter + hw_delta;
}
/*
 * Snapshot all five PS* hardware sample counters into
 * ppd->cong_stats.counter_cache so GetPortSamplesResult can later
 * report a consistent set from the cache.
 */
static void cache_hw_sample_counters(struct qib_pportdata *ppd)
{
	struct qib_ibport *ibp = &ppd->ibport_data;

	ppd->cong_stats.counter_cache.psxmitdata =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA);
	ppd->cong_stats.counter_cache.psrcvdata =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA);
	ppd->cong_stats.counter_cache.psxmitpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS);
	ppd->cong_stats.counter_cache.psrcvpkts =
		get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS);
	ppd->cong_stats.counter_cache.psxmitwait =
		get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT);
}
/*
 * Return the value previously cached by cache_hw_sample_counters() for
 * the given PMA counter-select code; unknown codes read as zero.
 */
static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
					__be16 sel)
{
	switch (sel) {
	case IB_PMA_PORT_XMIT_DATA:
		return ppd->cong_stats.counter_cache.psxmitdata;
	case IB_PMA_PORT_RCV_DATA:
		return ppd->cong_stats.counter_cache.psrcvdata;
	case IB_PMA_PORT_XMIT_PKTS:
		return ppd->cong_stats.counter_cache.psxmitpkts;
	case IB_PMA_PORT_RCV_PKTS:
		return ppd->cong_stats.counter_cache.psrcvpkts;
	case IB_PMA_PORT_XMIT_WAIT:
		return ppd->cong_stats.counter_cache.psxmitwait;
	default:
		return 0;
	}
}
/*
 * PMA GetPortSamplesResult: report the 32-bit sample counters.  When
 * the congestion timer owns the PS* counters the sample is reported as
 * done; otherwise a freshly finished sample is cached and the hardware
 * is handed back to the congestion timer.
 */
static int pma_get_portsamplesresult(struct ib_perf *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult *p =
		(struct ib_pma_portsamplesresult *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->lock, flags);
	p->tag = cpu_to_be16(ibp->pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			/* Cache results, then re-arm the congestion timer. */
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
		p->counter[i] = cpu_to_be32(
			get_cache_hw_sample_counters(
				ppd, ibp->pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->lock, flags);

	return reply((struct ib_smp *) pmp);
}
/*
 * PMA GetPortSamplesResultExtended: like pma_get_portsamplesresult()
 * but reports 64-bit counters and advertises the extended width.
 */
static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
					 struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portsamplesresult_ext *p =
		(struct ib_pma_portsamplesresult_ext *)pmp->data;
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned long flags;
	u8 status;
	int i;

	/* Port Sampling code owns the PS* HW counters */
	memset(pmp->data, 0, sizeof(pmp->data));
	spin_lock_irqsave(&ibp->lock, flags);
	p->tag = cpu_to_be16(ibp->pma_tag);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER)
		p->sample_status = IB_PMA_SAMPLE_STATUS_DONE;
	else {
		status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT);
		p->sample_status = cpu_to_be16(status);
		/* 64 bits */
		p->extended_width = cpu_to_be32(0x80000000);
		if (status == IB_PMA_SAMPLE_STATUS_DONE) {
			/* Cache results, then re-arm the congestion timer. */
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.counter =
				xmit_wait_get_value_delta(ppd);
			dd->f_set_cntr_sample(ppd,
					      QIB_CONG_TIMER_PSINTERVAL, 0);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		}
	}
	for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++)
		p->counter[i] = cpu_to_be64(
			get_cache_hw_sample_counters(
				ppd, ibp->pma_counter_select[i]));
	spin_unlock_irqrestore(&ibp->lock, flags);

	return reply((struct ib_smp *) pmp);
}
/*
 * PMA GetPortCounters: report the classic 16/32-bit counters.  The
 * hardware cannot clear counters, so values are reported relative to
 * the z_* snapshots taken at the last Set, and each field saturates at
 * its wire width.
 */
static int pma_get_portcounters(struct ib_perf *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;
	u8 port_select = p->port_select;

	qib_get_counters(ppd, &cntrs);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->n_vl15_dropped;

	memset(pmp->data, 0, sizeof(pmp->data));

	/* port_select was saved before the clear above. */
	p->port_select = port_select;
	if (pmp->attr_mod != 0 || port_select != port)
		pmp->status |= IB_SMP_INVALID_FIELD;

	/* Saturate each counter at the width of its reply field. */
	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16((u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter = (u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	/* Two 4-bit counters share one byte. */
	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
	if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
		p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
	if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
		p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
	if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
		p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_xmit_packets =
			cpu_to_be32((u32)cntrs.port_xmit_packets);
	if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
		p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
	else
		p->port_rcv_packets =
			cpu_to_be32((u32) cntrs.port_rcv_packets);

	return reply((struct ib_smp *) pmp);
}
/*
 * PMA GetPortCountersCong (vendor congestion attribute): like
 * pma_get_portcounters() but with 64-bit data/packet counters and the
 * accumulated XmitWait value.  Note the attribute starts at offset 24
 * (pmp->reserved), not at the normal 64-byte data offset.
 */
static int pma_get_portcounters_cong(struct ib_perf *pmp,
				     struct ib_device *ibdev, u8 port)
{
	/* Congestion PMA packets start at offset 24 not 64 */
	struct ib_pma_portcounters_cong *p =
		(struct ib_pma_portcounters_cong *)pmp->reserved;
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
	u64 xmit_wait_counter;
	unsigned long flags;

	/*
	 * This check is performed only in the GET method because the
	 * SET method ends up calling this anyway.
	 */
	if (!dd->psxmitwait_supported)
		pmp->status |= IB_SMP_UNSUP_METH_ATTR;
	if (port_select != port)
		pmp->status |= IB_SMP_INVALID_FIELD;

	qib_get_counters(ppd, &cntrs);
	/* The XmitWait total is maintained under the ibport lock. */
	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
	xmit_wait_counter = xmit_wait_get_value_delta(ppd);
	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);

	/* Adjust counters for any resets done. */
	cntrs.symbol_error_counter -= ibp->z_symbol_error_counter;
	cntrs.link_error_recovery_counter -=
		ibp->z_link_error_recovery_counter;
	cntrs.link_downed_counter -= ibp->z_link_downed_counter;
	cntrs.port_rcv_errors -= ibp->z_port_rcv_errors;
	cntrs.port_rcv_remphys_errors -=
		ibp->z_port_rcv_remphys_errors;
	cntrs.port_xmit_discards -= ibp->z_port_xmit_discards;
	cntrs.local_link_integrity_errors -=
		ibp->z_local_link_integrity_errors;
	cntrs.excessive_buffer_overrun_errors -=
		ibp->z_excessive_buffer_overrun_errors;
	cntrs.vl15_dropped -= ibp->z_vl15_dropped;
	cntrs.vl15_dropped += ibp->n_vl15_dropped;
	cntrs.port_xmit_data -= ibp->z_port_xmit_data;
	cntrs.port_rcv_data -= ibp->z_port_rcv_data;
	cntrs.port_xmit_packets -= ibp->z_port_xmit_packets;
	cntrs.port_rcv_packets -= ibp->z_port_rcv_packets;

	/* Clear both the reserved area and the normal data area. */
	memset(pmp->reserved, 0, sizeof(pmp->reserved) +
	       sizeof(pmp->data));

	/*
	 * Set top 3 bits to indicate interval in picoseconds in
	 * remaining bits.
	 */
	p->port_check_rate =
		cpu_to_be16((QIB_XMIT_RATE_PICO << 13) |
			    (dd->psxmitwait_check_rate &
			     ~(QIB_XMIT_RATE_PICO << 13)));
	p->port_adr_events = cpu_to_be64(0);
	p->port_xmit_wait = cpu_to_be64(xmit_wait_counter);
	p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data);
	p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data);
	p->port_xmit_packets =
		cpu_to_be64(cntrs.port_xmit_packets);
	p->port_rcv_packets =
		cpu_to_be64(cntrs.port_rcv_packets);
	/* Saturate the narrow error counters at their field width. */
	if (cntrs.symbol_error_counter > 0xFFFFUL)
		p->symbol_error_counter = cpu_to_be16(0xFFFF);
	else
		p->symbol_error_counter =
			cpu_to_be16(
				(u16)cntrs.symbol_error_counter);
	if (cntrs.link_error_recovery_counter > 0xFFUL)
		p->link_error_recovery_counter = 0xFF;
	else
		p->link_error_recovery_counter =
			(u8)cntrs.link_error_recovery_counter;
	if (cntrs.link_downed_counter > 0xFFUL)
		p->link_downed_counter = 0xFF;
	else
		p->link_downed_counter =
			(u8)cntrs.link_downed_counter;
	if (cntrs.port_rcv_errors > 0xFFFFUL)
		p->port_rcv_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_errors =
			cpu_to_be16((u16) cntrs.port_rcv_errors);
	if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
	else
		p->port_rcv_remphys_errors =
			cpu_to_be16(
				(u16)cntrs.port_rcv_remphys_errors);
	if (cntrs.port_xmit_discards > 0xFFFFUL)
		p->port_xmit_discards = cpu_to_be16(0xFFFF);
	else
		p->port_xmit_discards =
			cpu_to_be16((u16)cntrs.port_xmit_discards);
	if (cntrs.local_link_integrity_errors > 0xFUL)
		cntrs.local_link_integrity_errors = 0xFUL;
	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
		cntrs.excessive_buffer_overrun_errors = 0xFUL;
	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
		cntrs.excessive_buffer_overrun_errors;
	if (cntrs.vl15_dropped > 0xFFFFUL)
		p->vl15_dropped = cpu_to_be16(0xFFFF);
	else
		p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);

	return reply((struct ib_smp *)pmp);
}
/*
 * PMA GetPortCountersExtended: report the 64-bit data/packet counters,
 * adjusted by the z_* snapshots taken at the last counter reset.
 */
static int pma_get_portcounters_ext(struct ib_perf *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters_ext *p =
		(struct ib_pma_portcounters_ext *)pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 tx_words, rx_words, tx_pkts, rx_pkts, xwait;
	u8 port_select = p->port_select;

	/* Preserve port_select across the reply-buffer clear. */
	memset(pmp->data, 0, sizeof(pmp->data));
	p->port_select = port_select;

	if (pmp->attr_mod != 0 || port_select != port) {
		pmp->status |= IB_SMP_INVALID_FIELD;
		goto bail;
	}

	qib_snapshot_counters(ppd, &tx_words, &rx_words, &tx_pkts,
			      &rx_pkts, &xwait);

	/* Adjust counters for any resets done. */
	tx_words -= ibp->z_port_xmit_data;
	rx_words -= ibp->z_port_rcv_data;
	tx_pkts -= ibp->z_port_xmit_packets;
	rx_pkts -= ibp->z_port_rcv_packets;

	p->port_xmit_data = cpu_to_be64(tx_words);
	p->port_rcv_data = cpu_to_be64(rx_words);
	p->port_xmit_packets = cpu_to_be64(tx_pkts);
	p->port_rcv_packets = cpu_to_be64(rx_pkts);
	p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit);
	p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv);
	p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit);
	p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv);

bail:
	return reply((struct ib_smp *) pmp);
}
/*
 * PMA SetPortCounters: "clear" the selected counters.  The hardware
 * cannot actually be cleared, so the current values are snapshotted
 * into the z_* fields and subtracted from all future Get responses.
 */
static int pma_set_portcounters(struct ib_perf *pmp,
				struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_counters cntrs;

	/*
	 * Since the HW doesn't support clearing counters, we save the
	 * current count and subtract it from future responses.
	 */
	qib_get_counters(ppd, &cntrs);

	if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
		ibp->z_symbol_error_counter = cntrs.symbol_error_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;

	if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
		ibp->z_link_downed_counter = cntrs.link_downed_counter;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
		ibp->z_port_xmit_discards = cntrs.port_xmit_discards;

	if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;

	if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;

	if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
		/* n_vl15_dropped is a software count on top of the HW one. */
		ibp->n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = cntrs.port_xmit_data;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
		ibp->z_port_rcv_data = cntrs.port_rcv_data;

	if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;

	if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;

	/* The reply reports the counters as seen after the clear. */
	return pma_get_portcounters(pmp, ibdev, port);
}
/*
 * PMA SetPortCountersCong: clear congestion counters.  The reply is
 * built first (with pre-clear values) and then the z_* snapshots are
 * updated so subsequent Gets start from zero.
 */
static int pma_set_portcounters_cong(struct ib_perf *pmp,
				     struct ib_device *ibdev, u8 port)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = dd_from_ppd(ppd);
	struct qib_verbs_counters cntrs;
	/* counter_select lives in the top byte of attr_mod here. */
	u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
	int ret = 0;
	unsigned long flags;

	qib_get_counters(ppd, &cntrs);
	/* Get counter values before we save them */
	ret = pma_get_portcounters_cong(pmp, ibdev, port);

	if (counter_select & IB_PMA_SEL_CONG_XMIT) {
		/* Reset the XmitWait accumulation and restart sampling. */
		spin_lock_irqsave(&ppd->ibport_data.lock, flags);
		ppd->cong_stats.counter = 0;
		dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL,
				      0x0);
		spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
	}
	if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) {
		ibp->z_port_xmit_data = cntrs.port_xmit_data;
		ibp->z_port_rcv_data = cntrs.port_rcv_data;
		ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
		ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	}
	if (counter_select & IB_PMA_SEL_CONG_ALL) {
		ibp->z_symbol_error_counter =
			cntrs.symbol_error_counter;
		ibp->z_link_error_recovery_counter =
			cntrs.link_error_recovery_counter;
		ibp->z_link_downed_counter =
			cntrs.link_downed_counter;
		ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
		ibp->z_port_rcv_remphys_errors =
			cntrs.port_rcv_remphys_errors;
		ibp->z_port_xmit_discards =
			cntrs.port_xmit_discards;
		ibp->z_local_link_integrity_errors =
			cntrs.local_link_integrity_errors;
		ibp->z_excessive_buffer_overrun_errors =
			cntrs.excessive_buffer_overrun_errors;
		ibp->n_vl15_dropped = 0;
		ibp->z_vl15_dropped = cntrs.vl15_dropped;
	}
	return ret;
}
/*
 * Handle a Set(PortCountersExtended) request: snapshot the selected
 * 64-bit counters into their z_* baselines (the software-maintained
 * unicast/multicast packet counters are simply zeroed), then reply via
 * pma_get_portcounters_ext().
 */
static int pma_set_portcounters_ext(struct ib_perf *pmp,
				    struct ib_device *ibdev, u8 port)
{
	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
		pmp->data;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 tx_words, rx_words, tx_pkts, rx_pkts, xmit_wait;

	qib_snapshot_counters(ppd, &tx_words, &rx_words, &tx_pkts, &rx_pkts,
			      &xmit_wait);

	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
		ibp->z_port_xmit_data = tx_words;
	if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
		ibp->z_port_rcv_data = rx_words;
	if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
		ibp->z_port_xmit_packets = tx_pkts;
	if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
		ibp->z_port_rcv_packets = rx_pkts;
	if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
		ibp->n_unicast_xmit = 0;
	if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
		ibp->n_unicast_rcv = 0;
	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
		ibp->n_multicast_xmit = 0;
	if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
		ibp->n_multicast_rcv = 0;

	return pma_get_portcounters_ext(pmp, ibdev, port);
}
/*
 * process_subn - handle an incoming Subnet Management Packet (SMP)
 * @ibdev: the infiniband device
 * @mad_flags: MAD flags from the dispatcher, forwarded to check_mkey()
 * @port: port number the MAD arrived on
 * @in_mad: the received MAD
 * @out_mad: the reply, built in place from a copy of @in_mad
 *
 * Validates class version and M_Key, then dispatches Get/Set/
 * TrapRepress requests to the subn_* helpers.  Returns an
 * IB_MAD_RESULT_* code for the ib_mad layer.
 */
static int process_subn(struct ib_device *ibdev, int mad_flags,
			u8 port, struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_smp *smp = (struct ib_smp *)out_mad;
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int ret;

	/* the reply is the request, modified in place by the handlers */
	*out_mad = *in_mad;
	if (smp->class_version != 1) {
		smp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply(smp);
		goto bail;
	}

	ret = check_mkey(ibp, smp, mad_flags);
	if (ret) {
		u32 port_num = be32_to_cpu(smp->attr_mod);

		/*
		 * If this is a get/set portinfo, we already check the
		 * M_Key if the MAD is for another port and the M_Key
		 * is OK on the receiving port. This check is needed
		 * to increment the error counters when the M_Key
		 * fails to match on *both* ports.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
		    (smp->method == IB_MGMT_METHOD_GET ||
		     smp->method == IB_MGMT_METHOD_SET) &&
		    port_num && port_num <= ibdev->phys_port_cnt &&
		    port != port_num)
			(void) check_mkey(to_iport(ibdev, port_num), smp, 0);
		goto bail;
	}

	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_NODE_DESC:
			ret = subn_get_nodedescription(smp, ibdev);
			goto bail;
		case IB_SMP_ATTR_NODE_INFO:
			ret = subn_get_nodeinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_get_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_get_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_get_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_get_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_get_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			/* SMInfo is only answered when an SM runs on us */
			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (smp->attr_id) {
		case IB_SMP_ATTR_GUID_INFO:
			ret = subn_set_guidinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PORT_INFO:
			ret = subn_set_portinfo(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_PKEY_TABLE:
			ret = subn_set_pkeytable(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			ret = subn_set_sl_to_vl(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_VL_ARB_TABLE:
			ret = subn_set_vl_arb(smp, ibdev, port);
			goto bail;
		case IB_SMP_ATTR_SM_INFO:
			/* same SM gating as the Get path above */
			if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) {
				ret = IB_MAD_RESULT_SUCCESS |
					IB_MAD_RESULT_CONSUMED;
				goto bail;
			}
			if (ibp->port_cap_flags & IB_PORT_SM) {
				ret = IB_MAD_RESULT_SUCCESS;
				goto bail;
			}
			/* FALLTHROUGH */
		default:
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (smp->attr_id == IB_SMP_ATTR_NOTICE)
			ret = subn_trap_repress(smp, ibdev, port);
		else {
			smp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply(smp);
		}
		goto bail;

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_REPORT_RESP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	case IB_MGMT_METHOD_SEND:
		/* vendor-specific inter-packet-gap setting via SMP send */
		if (ib_get_smp_direction(smp) &&
		    smp->attr_id == QIB_VENDOR_IPG) {
			ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT,
					      smp->data[0]);
			ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
		} else
			ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		smp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply(smp);
	}

bail:
	return ret;
}
/*
 * process_perf - handle an incoming Performance Management MAD
 * @ibdev: the infiniband device
 * @port: port number the MAD arrived on
 * @in_mad: the received MAD
 * @out_mad: the reply, built in place from a copy of @in_mad
 *
 * Dispatches Get/Set requests to the pma_* helpers and returns an
 * IB_MAD_RESULT_* code for the ib_mad layer.
 */
static int process_perf(struct ib_device *ibdev, u8 port,
			struct ib_mad *in_mad,
			struct ib_mad *out_mad)
{
	struct ib_perf *pmp = (struct ib_perf *)out_mad;
	int ret;

	/* the reply is the request, modified in place by the handlers */
	*out_mad = *in_mad;
	if (pmp->class_version != 1) {
		pmp->status |= IB_SMP_UNSUP_VERSION;
		ret = reply((struct ib_smp *) pmp);
		goto bail;
	}

	switch (pmp->method) {
	case IB_MGMT_METHOD_GET:
		switch (pmp->attr_id) {
		case IB_PMA_CLASS_PORT_INFO:
			ret = pma_get_classportinfo(pmp, ibdev);
			goto bail;
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_get_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT:
			ret = pma_get_portsamplesresult(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_SAMPLES_RESULT_EXT:
			ret = pma_get_portsamplesresult_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_get_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_get_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_get_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_SET:
		switch (pmp->attr_id) {
		case IB_PMA_PORT_SAMPLES_CONTROL:
			ret = pma_set_portsamplescontrol(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS:
			ret = pma_set_portcounters(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_EXT:
			ret = pma_set_portcounters_ext(pmp, ibdev, port);
			goto bail;
		case IB_PMA_PORT_COUNTERS_CONG:
			ret = pma_set_portcounters_cong(pmp, ibdev, port);
			goto bail;
		default:
			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
			ret = reply((struct ib_smp *) pmp);
			goto bail;
		}

	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_GET_RESP:
		/*
		 * The ib_mad module will call us to process responses
		 * before checking for other consumers.
		 * Just tell the caller to process it normally.
		 */
		ret = IB_MAD_RESULT_SUCCESS;
		goto bail;

	default:
		pmp->status |= IB_SMP_UNSUP_METHOD;
		ret = reply((struct ib_smp *) pmp);
	}

bail:
	return ret;
}
/**
 * qib_process_mad - process an incoming MAD packet
 * @ibdev: the infiniband device this packet came in on
 * @mad_flags: MAD flags
 * @port: the port number this packet came in on
 * @in_wc: the work completion entry for this packet
 * @in_grh: the global route header for this packet
 * @in_mad: the incoming MAD
 * @out_mad: any outgoing MAD reply
 *
 * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
 * interested in processing.
 *
 * Note that the verbs framework has already done the MAD sanity checks,
 * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
 * MADs.
 *
 * This is called by the ib_mad module.
 */
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
		    struct ib_wc *in_wc, struct ib_grh *in_grh,
		    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	switch (in_mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		return process_subn(ibdev, mad_flags, port, in_mad, out_mad);
	case IB_MGMT_CLASS_PERF_MGMT:
		return process_perf(ibdev, port, in_mad, out_mad);
	default:
		/* not a class we handle; let ib_mad process it normally */
		return IB_MAD_RESULT_SUCCESS;
	}
}
/* MAD send-completion callback: just release the send buffer. */
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	ib_free_send_mad(mad_send_wc->send_buf);
}
/*
 * Per-port congestion-statistics timer.  If a hardware sample is in
 * flight, the counter update is deferred until the sample completes;
 * once it does, the counters are cached and the next sample interval
 * is started.  Re-arms itself one HZ later in all cases.
 */
static void xmit_wait_timer_func(unsigned long opaque)
{
	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
	struct qib_devdata *dd = dd_from_ppd(ppd);
	unsigned long flags;
	int sample_pending = 0;

	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
	if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) {
		if (dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT) ==
		    IB_PMA_SAMPLE_STATUS_DONE) {
			/* save counter cache */
			cache_hw_sample_counters(ppd);
			ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER;
		} else
			sample_pending = 1;
	}
	if (!sample_pending) {
		ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd);
		dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0);
	}
	spin_unlock_irqrestore(&ppd->ibport_data.lock, flags);
	mod_timer(&ppd->cong_stats.timer, jiffies + HZ);
}
int qib_create_agents(struct qib_ibdev *dev)
{
struct qib_devdata *dd = dd_from_dev(dev);
struct ib_mad_agent *agent;
struct qib_ibport *ibp;
int p;
int ret;
for (p = 0; p < dd->num_pports; p++) {
ibp = &dd->pport[p].ibport_data;
agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
NULL, 0, send_handler,
NULL, NULL);
if (IS_ERR(agent)) {
ret = PTR_ERR(agent);
goto err;
}
/* Initialize xmit_wait structure */
dd->pport[p].cong_stats.counter = 0;
init_timer(&dd->pport[p].cong_stats.timer);
dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func;
dd->pport[p].cong_stats.timer.data =
(unsigned long)(&dd->pport[p]);
dd->pport[p].cong_stats.timer.expires = 0;
add_timer(&dd->pport[p].cong_stats.timer);
ibp->send_agent = agent;
}
return 0;
err:
for (p = 0; p < dd->num_pports; p++) {
ibp = &dd->pport[p].ibport_data;
if (ibp->send_agent) {
agent = ibp->send_agent;
ibp->send_agent = NULL;
ib_unregister_mad_agent(agent);
}
}
return ret;
}
void qib_free_agents(struct qib_ibdev *dev)
{
struct qib_devdata *dd = dd_from_dev(dev);
struct ib_mad_agent *agent;
struct qib_ibport *ibp;
int p;
for (p = 0; p < dd->num_pports; p++) {
ibp = &dd->pport[p].ibport_data;
if (ibp->send_agent) {
agent = ibp->send_agent;
ibp->send_agent = NULL;
ib_unregister_mad_agent(agent);
}
if (ibp->sm_ah) {
ib_destroy_ah(&ibp->sm_ah->ibah);
ibp->sm_ah = NULL;
}
if (dd->pport[p].cong_stats.timer.data)
del_timer_sync(&dd->pport[p].cong_stats.timer);
}
}
| gpl-2.0 |
guylamar2006/android_kernel_samsung_smdk4412 | drivers/scsi/fnic/fnic_scsi.c | 2527 | 48985 | /*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic.h"
/*
 * Printable names for enum fnic_state values, indexed by state.
 * Non-static: shared with other fnic source files.
 */
const char *fnic_state_str[] = {
	[FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
	[FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};
/* Printable names for the per-I/O request states, indexed by state. */
static const char *fnic_ioreq_state_str[] = {
	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};
/*
 * Printable names for FCPIO firmware status codes, indexed by status.
 * Two entries previously did not match the constants they mirror
 * ("FCPIO_INVALID_PARAM]" had a stray ']' and "FCPIO_LUNHMAP_CHNG_PEND"
 * misspelled LUNMAP); corrected so logs show the exact enum names.
 */
static const char *fcpio_status_str[] = {
	[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
	[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
	[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
	[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
	[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
	[FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
	[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
	[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
	[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
	[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
	[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
	[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
	[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
	[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
	[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
	[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
	[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};
const char *fnic_state_to_str(unsigned int state)
{
if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
return "unknown";
return fnic_state_str[state];
}
static const char *fnic_ioreq_state_to_str(unsigned int state)
{
if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
!fnic_ioreq_state_str[state])
return "unknown";
return fnic_ioreq_state_str[state];
}
static const char *fnic_fcpio_status_to_str(unsigned int status)
{
if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
return "unknown";
return fcpio_status_str[status];
}
static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
/*
 * Pick the per-I/O spinlock for a command by hashing its request tag
 * into the FNIC_IO_LOCKS-entry lock array (FNIC_IO_LOCKS is a power
 * of two, so the mask is a cheap modulo).
 */
static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
					    struct scsi_cmnd *sc)
{
	u32 bucket = sc->request->tag & (FNIC_IO_LOCKS - 1);

	return &fnic->io_req_lock[bucket];
}
/*
 * Tear down the DMA state of a finished I/O: unmap the driver-private
 * scatter/gather list and the sense buffer, unmap the command's data
 * buffers, and return the SGL allocation to its mempool.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
				   struct fnic_io_req *io_req,
				   struct scsi_cmnd *sc)
{
	if (io_req->sgl_list_pa)
		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
				 PCI_DMA_TODEVICE);
	if (io_req->sense_buf_pa)
		pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);

	scsi_dma_unmap(sc);

	if (io_req->sgl_cnt)
		mempool_free(io_req->sgl_list_alloc,
			     fnic->io_sgl_pool[io_req->sgl_type]);
}
/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
/* if no Ack received from firmware, then nothing to clean */
if (!fnic->fw_ack_recd[0])
return 1;
/*
* Update desc_available count based on number of freed descriptors
* Account for wraparound
*/
if (wq->to_clean_index <= fnic->fw_ack_index[0])
wq->ring.desc_avail += (fnic->fw_ack_index[0]
- wq->to_clean_index + 1);
else
wq->ring.desc_avail += (wq->ring.desc_count
- wq->to_clean_index
+ fnic->fw_ack_index[0] + 1);
/*
* just bump clean index to ack_index+1 accounting for wraparound
* this will essentially free up all descriptors between
* to_clean_index and fw_ack_index, both inclusive
*/
wq->to_clean_index =
(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
/* we have processed the acks received so far */
fnic->fw_ack_recd[0] = 0;
return 0;
}
/*
 * fnic_fw_reset_handler
 * Ask the firmware to reset.  Frames queued for transmit are stale
 * once the firmware resets, so they are purged first.  Returns 0 when
 * the reset request was posted, -EAGAIN when no copy-WQ descriptor
 * was available to carry it.
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	unsigned long flags;
	int ret = 0;

	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	/* reclaim acked descriptors if we are running low */
	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (vnic_wq_copy_desc_avail(wq))
		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
	else
		ret = -EAGAIN;

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

	if (!ret)
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Issued fw reset\n");
	else
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Failed to issue fw reset\n");
	return ret;
}
/*
 * fnic_flogi_reg_handler
 * Register the assigned FC_ID (and the gateway MAC to address frames
 * to) with the firmware after FLOGI.  Uses the FIP registration form
 * when the adapter is FIP-capable and not in default-destination
 * mode.  Returns 0 on success, -EAGAIN when no copy-WQ descriptor is
 * available.
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	enum fcpio_flogi_reg_format_type format;
	struct fc_lport *lp = fnic->lport;
	u8 gw_mac[ETH_ALEN];
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	/* reclaim acked descriptors if we are running low */
	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		ret = -EAGAIN;
	} else {
		if (fnic->ctlr.map_dest) {
			/* no gateway: flood destination MAC */
			memset(gw_mac, 0xff, ETH_ALEN);
			format = FCPIO_FLOGI_REG_DEF_DEST;
		} else {
			memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
			format = FCPIO_FLOGI_REG_GW_DEST;
		}

		if ((fnic->config.flags & VFCF_FIP_CAPABLE) &&
		    !fnic->ctlr.map_dest) {
			fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
							fc_id, gw_mac,
							fnic->data_src_addr,
							lp->r_a_tov,
							lp->e_d_tov);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
				      fc_id, fnic->data_src_addr, gw_mac);
		} else {
			fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
							  format, fc_id,
							  gw_mac);
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "FLOGI reg issued fcid %x map %d dest %pM\n",
				      fc_id, fnic->ctlr.map_dest, gw_mac);
		}
	}

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	return ret;
}
/*
 * fnic_queue_wq_copy_desc
 * Build and enqueue one FCP initiator-command descriptor on the copy
 * WQ: fill the driver-private SG list from the already-mapped
 * scatterlist, DMA-map the SG list and the sense buffer, derive the
 * task priority from the SCSI tag message, and post the descriptor.
 * Returns 0 on success, SCSI_MLQUEUE_HOST_BUSY when no WQ descriptor
 * is available.
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
					  struct vnic_wq_copy *wq,
					  struct fnic_io_req *io_req,
					  struct scsi_cmnd *sc,
					  int sg_count)
{
	struct scatterlist *sg;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct host_sg_desc *desc;
	u8 pri_tag = 0;
	unsigned int i;
	unsigned long intr_flags;
	int flags;
	u8 exch_flags;
	struct scsi_lun fc_lun;
	char msg[2];

	if (sg_count) {
		/* For each SGE, create a device desc entry */
		desc = io_req->sgl_list;
		for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
			desc->addr = cpu_to_le64(sg_dma_address(sg));
			desc->len = cpu_to_le32(sg_dma_len(sg));
			desc->_resvd = 0;
			desc++;
		}

		/* hand the whole SG list to the device in one mapping */
		io_req->sgl_list_pa = pci_map_single
			(fnic->pdev,
			 io_req->sgl_list,
			 sizeof(io_req->sgl_list[0]) * sg_count,
			 PCI_DMA_TODEVICE);
	}

	io_req->sense_buf_pa = pci_map_single(fnic->pdev,
					      sc->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      PCI_DMA_FROMDEVICE);

	int_to_scsilun(sc->device->lun, &fc_lun);

	/* default to a simple tag; upgrade to ordered if SCSI-ML asks */
	pri_tag = FCPIO_ICMND_PTA_SIMPLE;
	msg[0] = MSG_SIMPLE_TAG;
	scsi_populate_tag_msg(sc, msg);
	if (msg[0] == MSG_ORDERED_TAG)
		pri_tag = FCPIO_ICMND_PTA_ORDERED;

	/* Enqueue the descriptor in the Copy WQ */
	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	/* reclaim acked descriptors if we are running low */
	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	flags = 0;
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags = FCPIO_ICMND_RDDATA;
	else if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags = FCPIO_ICMND_WRDATA;

	/* request sequence-level retry when both ends support it */
	exch_flags = 0;
	if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
	    (rp->flags & FC_RP_FLAGS_RETRY))
		exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

	fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
					 0, exch_flags, io_req->sgl_cnt,
					 SCSI_SENSE_BUFFERSIZE,
					 io_req->sgl_list_pa,
					 io_req->sense_buf_pa,
					 0, /* scsi cmd ref, always 0 */
					 pri_tag, /* scsi pri and tag */
					 flags,	/* command flags */
					 sc->cmnd, sc->cmd_len,
					 scsi_bufflen(sc),
					 fc_lun.scsi_lun, io_req->port_id,
					 rport->maxframe_size, rp->r_a_tov,
					 rp->e_d_tov);

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	return 0;
}
/*
 * fnic_queuecommand
 * Routine to send a scsi cdb
 * Called with host_lock held and interrupts disabled.
 *
 * Returns 0 when the command has been accepted (or failed back through
 * done()), or SCSI_MLQUEUE_HOST_BUSY to make the midlayer retry.
 */
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
	struct fc_lport *lp;
	struct fc_rport *rport;
	struct fnic_io_req *io_req;
	struct fnic *fnic;
	struct vnic_wq_copy *wq;
	int ret;
	int sg_count;
	unsigned long flags;
	unsigned long ptr;

	/* fail the command back immediately if the rport is not ready */
	rport = starget_to_rport(scsi_target(sc->device));
	ret = fc_remote_port_chkready(rport);
	if (ret) {
		sc->result = ret;
		done(sc);
		return 0;
	}

	lp = shost_priv(sc->device->host);
	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Release host lock, use driver resource specific locks from here.
	 * Don't re-enable interrupts in case they were disabled prior to the
	 * caller disabling them.
	 */
	spin_unlock(lp->host->host_lock);

	/* Get a new io_req for this SCSI IO */
	fnic = lport_priv(lp);

	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
	if (!io_req) {
		ret = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	memset(io_req, 0, sizeof(*io_req));

	/* Map the data buffer */
	sg_count = scsi_dma_map(sc);
	if (sg_count < 0) {
		/*
		 * Fix: this path previously left ret == 0 (the value from
		 * fc_remote_port_chkready), so the midlayer believed the
		 * command was queued although it would never complete.
		 * Report busy so the command is retried instead.
		 */
		ret = SCSI_MLQUEUE_HOST_BUSY;
		mempool_free(io_req, fnic->io_req_pool);
		goto out;
	}

	/* Determine the type of scatter/gather list we need */
	io_req->sgl_cnt = sg_count;
	io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
	if (sg_count > FNIC_DFLT_SG_DESC_CNT)
		io_req->sgl_type = FNIC_SGL_CACHE_MAX;

	if (sg_count) {
		io_req->sgl_list =
			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
				      GFP_ATOMIC | GFP_DMA);
		if (!io_req->sgl_list) {
			ret = SCSI_MLQUEUE_HOST_BUSY;
			scsi_dma_unmap(sc);
			mempool_free(io_req, fnic->io_req_pool);
			goto out;
		}

		/* Cache sgl list allocated address before alignment */
		io_req->sgl_list_alloc = io_req->sgl_list;
		ptr = (unsigned long) io_req->sgl_list;
		if (ptr % FNIC_SG_DESC_ALIGN) {
			io_req->sgl_list = (struct host_sg_desc *)
				(((unsigned long) ptr
				  + FNIC_SG_DESC_ALIGN - 1)
				 & ~(FNIC_SG_DESC_ALIGN - 1));
		}
	}

	/* initialize rest of io_req */
	io_req->port_id = rport->port_id;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_SP(sc) = (char *)io_req;
	sc->scsi_done = done;

	/* create copy wq desc and enqueue it */
	wq = &fnic->wq_copy[0];
	ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
	if (ret) {
		/*
		 * In case another thread cancelled the request,
		 * refetch the pointer under the lock.
		 */
		spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);

		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		CMD_SP(sc) = NULL;
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		spin_unlock_irqrestore(io_lock, flags);
		if (io_req) {
			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
		}
	}
out:
	/* acquire host lock before returning to SCSI */
	spin_lock(lp->host->host_lock);
	return ret;
}

DEF_SCSI_QCMD(fnic_queuecommand)
/*
 * fnic_fcpio_fw_reset_cmpl_handler
 * Routine to handle fw reset completion
 *
 * Cleans up all outstanding I/O, then advances the driver state
 * machine: on success the fnic moves to eth mode, ready to FLOGI; on
 * failure it drops back to FC mode so subsequent FLOGI attempts from
 * libFC trigger further firmware resets.  Also wakes a thread blocked
 * in device removal, if any.  Returns 0 on a successful, expected
 * completion; -1 otherwise.
 */
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
					    struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	/* Clean up all outstanding io requests */
	fnic_cleanup_io(fnic, SCSI_NO_TAG);

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	/* fnic should be in FC_TRANS_ETH_MODE */
	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
		/* Check status of reset completion */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "reset cmpl success\n");
			/* Ready to send flogi out */
			fnic->state = FNIC_IN_ETH_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic fw_reset : failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));
			/*
			 * Unable to change to eth mode, cannot send out flogi
			 * Change state to fc mode, so that subsequent Flogi
			 * requests from libFC will cause more attempts to
			 * reset the firmware. Free the cached flogi
			 */
			fnic->state = FNIC_IN_FC_MODE;
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Unexpected state %s while processing"
			      " reset cmpl\n", fnic_state_to_str(fnic->state));
		ret = -1;
	}

	/* Thread removing device blocks till firmware reset is complete */
	if (fnic->remove_wait)
		complete(fnic->remove_wait);

	/*
	 * If fnic is being removed, or fw reset failed
	 * free the flogi frame. Else, send it out
	 */
	if (fnic->remove_wait || ret) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		skb_queue_purge(&fnic->tx_queue);
		goto reset_cmpl_handler_end;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fnic_flush_tx(fnic);

reset_cmpl_handler_end:
	return ret;
}
/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 *
 * On success, moves the state machine to FC mode and flushes frames
 * queued while registration was pending (unless link events were
 * stopped, e.g. during teardown); on failure, drops back to eth mode.
 * Returns 0 on success, -1 on failure or unexpected state.
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
					     struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	/* Update fnic state based on status of flogi reg completion */
	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
		/* Check flogi registration completion status */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "flog reg succeeded\n");
			fnic->state = FNIC_IN_FC_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic flogi reg :failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));
			fnic->state = FNIC_IN_ETH_MODE;
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Unexpected fnic state %s while"
			      " processing flogi reg completion\n",
			      fnic_state_to_str(fnic->state));
		ret = -1;
	}

	if (!ret) {
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			goto reg_cmpl_handler_end;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		/* drain frames queued while registration was in flight */
		fnic_flush_tx(fnic);
		queue_work(fnic_event_queue, &fnic->frame_work);
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}

reg_cmpl_handler_end:
	return ret;
}
/*
 * Validate a firmware ack index against the copy WQ's in-flight window
 * [to_clean_index, to_use_index), accounting for ring wraparound.
 * Returns 1 when the index is current, 0 when it is stale.
 */
static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
					u16 request_out)
{
	int in_range;

	if (wq->to_clean_index <= wq->to_use_index)
		/* window is contiguous */
		in_range = (request_out >= wq->to_clean_index &&
			    request_out < wq->to_use_index);
	else
		/* window wraps around the end of the ring */
		in_range = (request_out >= wq->to_clean_index ||
			    request_out < wq->to_use_index);

	return in_range;
}
/*
 * Mark that ack received and store the Ack index. If there are multiple
 * acks received before Tx thread cleans it up, the latest value will be
 * used which is correct behavior. This state should be in the copy Wq
 * instead of in the fnic
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
					  unsigned int cq_index,
					  struct fcpio_fw_req *desc)
{
	struct vnic_wq_copy *wq;
	u16 request_out = desc->u.ack.request_out;
	unsigned long flags;

	/* mark the ack state */
	wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
	/* stale (out-of-window) ack indexes are ignored */
	if (is_ack_index_in_range(wq, request_out)) {
		fnic->fw_ack_index[0] = request_out;
		fnic->fw_ack_recd[0] = 1;
	}
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
}
/*
 * fnic_fcpio_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 *
 * Looks up the SCSI command by the tag carried in the completion,
 * translates the firmware status into a SCSI result, releases the
 * per-I/O DMA resources, updates transport statistics, and completes
 * the command back to the midlayer.  A completion that races with an
 * in-progress abort is ignored; the abort path cleans up instead.
 */
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
					  struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	u64 xfer_len = 0;
	struct fcpio_icmnd_cmpl *icmnd_cmpl;
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	unsigned long flags;
	spinlock_t *io_lock;

	/* Decode the cmpl description to get the io_req id */
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);

	if (id >= FNIC_MAX_IO_REQ)
		return;

	sc = scsi_host_find_tag(fnic->lport->host, id);
	WARN_ON_ONCE(!sc);
	if (!sc)
		return;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		return;
	}

	/* firmware completed the io */
	io_req->io_completed = 1;

	/*
	 * if SCSI-ML has already issued abort on this command,
	 * ignore completion of the IO. The abts path will clean it up
	 */
	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		return;
	}

	/* Mark the IO as complete */
	CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;

	icmnd_cmpl = &desc->u.icmnd_cmpl;

	switch (hdr_status) {
	case FCPIO_SUCCESS:
		sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
		xfer_len = scsi_bufflen(sc);
		scsi_set_resid(sc, icmnd_cmpl->residual);

		/* count only bytes actually transferred on underrun */
		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
			xfer_len -= icmnd_cmpl->residual;

		/*
		 * If queue_full, then try to reduce queue depth for all
		 * LUNS on the target. Todo: this should be accompanied
		 * by a periodic queue_depth rampup based on successful
		 * IO completion.
		 */
		if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
			struct scsi_device *t_sdev;
			int qd = 0;

			shost_for_each_device(t_sdev, sc->device->host) {
				if (t_sdev->id != sc->device->id)
					continue;

				if (t_sdev->queue_depth > 1) {
					qd = scsi_track_queue_full
						(t_sdev,
						 t_sdev->queue_depth - 1);
					if (qd == -1)
						qd = t_sdev->host->cmd_per_lun;
					shost_printk(KERN_INFO,
						     fnic->lport->host,
						     "scsi[%d:%d:%d:%d"
						     "] queue full detected,"
						     "new depth = %d\n",
						     t_sdev->host->host_no,
						     t_sdev->channel,
						     t_sdev->id, t_sdev->lun,
						     t_sdev->queue_depth);
				}
			}
		}
		break;

	case FCPIO_TIMEOUT:          /* request was timed out */
		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_ABORTED:          /* request was aborted */
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
		scsi_set_resid(sc, icmnd_cmpl->residual);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
		break;
	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
	case FCPIO_FW_ERR:           /* request was terminated due fw error */
	default:
		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
			     fnic_fcpio_status_to_str(hdr_status));
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;
	}

	/* Break link with the SCSI command */
	CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

	/* account transferred bytes against the transport statistics */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		fnic->lport->host_stats.fcp_input_requests++;
		fnic->fcp_input_bytes += xfer_len;
	} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
		fnic->lport->host_stats.fcp_output_requests++;
		fnic->fcp_output_bytes += xfer_len;
	} else
		fnic->lport->host_stats.fcp_control_requests++;

	/* Call SCSI completion function to complete the IO */
	if (sc->scsi_done)
		sc->scsi_done(sc);
}
/*
 * fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 *
 * Handles firmware completions for task-management requests (abort and
 * LUN/device reset).  The request type is encoded in the tag: FNIC_TAG_ABORT
 * for aborts, FNIC_TAG_DEV_RST for device resets; the low bits
 * (FNIC_TAG_MASK) carry the SCSI command tag.
 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
                                         struct fcpio_fw_req *desc)
{
    u8 type;
    u8 hdr_status;
    struct fcpio_tag tag;
    u32 id;
    struct scsi_cmnd *sc;
    struct fnic_io_req *io_req;
    unsigned long flags;
    spinlock_t *io_lock;

    /* Decode the firmware header and recover the driver tag. */
    fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
    fcpio_tag_id_dec(&tag, &id);

    /* Bogus tag from firmware -- nothing we can map it to. */
    if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
        return;

    /* Map the tag back to the SCSI command it belongs to. */
    sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
    WARN_ON_ONCE(!sc);
    if (!sc)
        return;

    /* Per-command lock (hashed by tag) serializes against the issue path. */
    io_lock = fnic_io_lock_hash(fnic, sc);
    spin_lock_irqsave(io_lock, flags);
    io_req = (struct fnic_io_req *)CMD_SP(sc);
    WARN_ON_ONCE(!io_req);
    if (!io_req) {
        spin_unlock_irqrestore(io_lock, flags);
        return;
    }

    if (id & FNIC_TAG_ABORT) {
        /* Completion of abort cmd */
        if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
            /* This is a late completion. Ignore it */
            spin_unlock_irqrestore(io_lock, flags);
            return;
        }
        CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
        CMD_ABTS_STATUS(sc) = hdr_status;

        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "abts cmpl recd. id %d status %s\n",
                      (int)(id & FNIC_TAG_MASK),
                      fnic_fcpio_status_to_str(hdr_status));

        /*
         * If scsi_eh thread is blocked waiting for abts to complete,
         * signal completion to it. IO will be cleaned in the thread
         * else clean it in this context
         */
        if (io_req->abts_done) {
            complete(io_req->abts_done);
            spin_unlock_irqrestore(io_lock, flags);
        } else {
            FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                          "abts cmpl, completing IO\n");
            /* Break the SP link before dropping the lock, then free. */
            CMD_SP(sc) = NULL;
            sc->result = (DID_ERROR << 16);

            spin_unlock_irqrestore(io_lock, flags);

            fnic_release_ioreq_buf(fnic, io_req, sc);
            mempool_free(io_req, fnic->io_req_pool);
            if (sc->scsi_done)
                sc->scsi_done(sc);
        }

    } else if (id & FNIC_TAG_DEV_RST) {
        /* Completion of device reset: record status and wake the waiter
         * (fnic_device_reset blocks on dr_done). */
        CMD_LR_STATUS(sc) = hdr_status;
        CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "dev reset cmpl recd. id %d status %s\n",
                      (int)(id & FNIC_TAG_MASK),
                      fnic_fcpio_status_to_str(hdr_status));
        if (io_req->dr_done)
            complete(io_req->dr_done);
        spin_unlock_irqrestore(io_lock, flags);

    } else {
        /* Neither abort nor reset bit set -- unexpected tag encoding. */
        shost_printk(KERN_ERR, fnic->lport->host,
                     "Unexpected itmf io state %s tag %x\n",
                     fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
        spin_unlock_irqrestore(io_lock, flags);
    }
}
/*
 * fnic_fcpio_cmpl_handler
 * Routine to service the cq for wq_copy
 *
 * Dispatches one firmware completion descriptor to the handler matching
 * its header type.  Only the flogi-registration and fw-reset completions
 * can report an error; all other types always yield 0.
 */
static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
                                   unsigned int cq_index,
                                   struct fcpio_fw_req *desc)
{
    struct fnic *fnic = vnic_dev_priv(vdev);

    switch (desc->hdr.type) {
    case FCPIO_ACK: /* fw copied copy wq desc to its queue */
        fnic_fcpio_ack_handler(fnic, cq_index, desc);
        return 0;

    case FCPIO_ICMND_CMPL: /* fw completed a command */
        fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
        return 0;

    case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
        fnic_fcpio_itmf_cmpl_handler(fnic, desc);
        return 0;

    case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
    case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
        return fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);

    case FCPIO_RESET_CMPL: /* fw completed reset */
        return fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);

    default:
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "firmware completion type %d\n",
                      desc->hdr.type);
        return 0;
    }
}
/*
 * fnic_wq_copy_cmpl_handler
 * Routine to process wq copy
 *
 * Services the completion queue of every copy WQ (they sit after the raw
 * WQs and RQs in the cq array) and returns the total number of
 * completions processed.
 */
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
{
    unsigned int total_done = 0;
    unsigned int wq;

    for (wq = 0; wq < fnic->wq_copy_count; wq++) {
        /* copy-WQ cqs are indexed after the raw WQ and RQ cqs */
        unsigned int cq = wq + fnic->raw_wq_count + fnic->rq_count;

        total_done += vnic_cq_copy_service(&fnic->cq[cq],
                                           fnic_fcpio_cmpl_handler,
                                           copy_work_to_do);
    }

    return total_done;
}
/*
 * fnic_cleanup_io - fail every outstanding I/O back to the midlayer.
 *
 * Walks all possible tags (optionally skipping @exclude_id), frees the
 * driver-side io_req state where present, and completes each command with
 * DID_TRANSPORT_DISRUPTED.  Called when firmware state is being torn down,
 * so no completion from hardware is expected for these commands.
 */
static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
    unsigned int i;
    struct fnic_io_req *io_req;
    unsigned long flags = 0;
    struct scsi_cmnd *sc;
    spinlock_t *io_lock;

    for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
        if (i == exclude_id)
            continue;

        sc = scsi_host_find_tag(fnic->lport->host, i);
        if (!sc)
            continue;

        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        if (!io_req) {
            /* No driver state attached; still complete the command. */
            spin_unlock_irqrestore(io_lock, flags);
            goto cleanup_scsi_cmd;
        }
        /* Unlink under the lock, free outside it. */
        CMD_SP(sc) = NULL;
        spin_unlock_irqrestore(io_lock, flags);

        /*
         * If there is a scsi_cmnd associated with this io_req, then
         * free the corresponding state
         */
        fnic_release_ioreq_buf(fnic, io_req, sc);
        mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
        sc->result = DID_TRANSPORT_DISRUPTED << 16;
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
                      " DID_TRANSPORT_DISRUPTED\n");

        /* Complete the command to SCSI */
        if (sc->scsi_done)
            sc->scsi_done(sc);
    }
}
/*
 * fnic_wq_copy_cleanup_handler - clean up one in-flight copy-WQ descriptor.
 *
 * Invoked while draining a copy work queue (interrupts are already off at
 * this point).  Frees the io_req tied to the descriptor's tag, if any, and
 * completes the SCSI command with DID_NO_CONNECT.
 */
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
                                  struct fcpio_host_req *desc)
{
    u32 id;
    struct fnic *fnic = vnic_dev_priv(wq->vdev);
    struct fnic_io_req *io_req;
    struct scsi_cmnd *sc;
    unsigned long flags;
    spinlock_t *io_lock;

    /* get the tag reference */
    fcpio_tag_id_dec(&desc->hdr.tag, &id);
    id &= FNIC_TAG_MASK;

    if (id >= FNIC_MAX_IO_REQ)
        return;

    sc = scsi_host_find_tag(fnic->lport->host, id);
    if (!sc)
        return;

    io_lock = fnic_io_lock_hash(fnic, sc);
    spin_lock_irqsave(io_lock, flags);

    /* Get the IO context which this desc refers to */
    io_req = (struct fnic_io_req *)CMD_SP(sc);

    /* fnic interrupts are turned off by now */

    if (!io_req) {
        spin_unlock_irqrestore(io_lock, flags);
        goto wq_copy_cleanup_scsi_cmd;
    }

    /* Unlink under the lock, free after dropping it. */
    CMD_SP(sc) = NULL;

    spin_unlock_irqrestore(io_lock, flags);

    fnic_release_ioreq_buf(fnic, io_req, sc);
    mempool_free(io_req, fnic->io_req_pool);

wq_copy_cleanup_scsi_cmd:
    sc->result = DID_NO_CONNECT << 16;
    FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
                  " DID_NO_CONNECT\n");

    if (sc->scsi_done)
        sc->scsi_done(sc);
}
/*
 * fnic_queue_abort_io_req - post an abort/terminate ITMF to firmware.
 *
 * @tag:      SCSI tag of the command being aborted (FNIC_TAG_ABORT is
 *            OR-ed in so the completion is routed to the abort path).
 * @task_req: FCPIO_ITMF_ABT_TASK or FCPIO_ITMF_ABT_TASK_TERM.
 *
 * Returns 0 on success, 1 if no WQ descriptor was available (caller is
 * expected to roll back its state).  Always uses copy WQ 0.
 */
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
                                          u32 task_req, u8 *fc_lun,
                                          struct fnic_io_req *io_req)
{
    struct vnic_wq_copy *wq = &fnic->wq_copy[0];
    unsigned long flags;

    spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

    /* Reclaim completed descriptors when running low before giving up. */
    if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
        free_wq_copy_descs(fnic, wq);

    if (!vnic_wq_copy_desc_avail(wq)) {
        spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
        return 1;
    }
    fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
                                 0, task_req, tag, fc_lun, io_req->port_id,
                                 fnic->config.ra_tov, fnic->config.ed_tov);

    spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
    return 0;
}
/*
 * fnic_rport_exch_reset - terminate all I/O pending to a departed rport.
 *
 * For every outstanding command whose io_req targets @port_id, marks it
 * ABTS_PENDING and queues a local terminate (ABT_TASK_TERM) to firmware.
 * If queueing fails, the command state is reverted so scsi_eh or a later
 * LUN reset can clean it up.
 */
void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
    int tag;
    struct fnic_io_req *io_req;
    spinlock_t *io_lock;
    unsigned long flags;
    struct scsi_cmnd *sc;
    struct scsi_lun fc_lun;
    enum fnic_ioreq_state old_ioreq_state;

    FNIC_SCSI_DBG(KERN_DEBUG,
                  fnic->lport->host,
                  "fnic_rport_reset_exch called portid 0x%06x\n",
                  port_id);

    /* Device removal path does its own cleanup. */
    if (fnic->in_remove)
        return;

    for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
        sc = scsi_host_find_tag(fnic->lport->host, tag);
        if (!sc)
            continue;

        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);

        io_req = (struct fnic_io_req *)CMD_SP(sc);

        if (!io_req || io_req->port_id != port_id) {
            spin_unlock_irqrestore(io_lock, flags);
            continue;
        }

        /*
         * Found IO that is still pending with firmware and
         * belongs to rport that went away
         */
        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
            /* An abort is already in flight for this command. */
            spin_unlock_irqrestore(io_lock, flags);
            continue;
        }
        old_ioreq_state = CMD_STATE(sc);
        CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
        CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

        BUG_ON(io_req->abts_done);

        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "fnic_rport_reset_exch: Issuing abts\n");

        spin_unlock_irqrestore(io_lock, flags);

        /* Now queue the abort command to firmware */
        int_to_scsilun(sc->device->lun, &fc_lun);

        if (fnic_queue_abort_io_req(fnic, tag,
                                    FCPIO_ITMF_ABT_TASK_TERM,
                                    fc_lun.scsi_lun, io_req)) {
            /*
             * Revert the cmd state back to old state, if
             * it hasn't changed in between. This cmd will get
             * aborted later by scsi_eh, or cleaned up during
             * lun reset
             */
            io_lock = fnic_io_lock_hash(fnic, sc);

            spin_lock_irqsave(io_lock, flags);
            if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
                CMD_STATE(sc) = old_ioreq_state;
            spin_unlock_irqrestore(io_lock, flags);
        }
    }
}
/*
 * fnic_terminate_rport_io - terminate all I/O routed through a dead rport.
 *
 * fc_transport callback invoked when @rport's dev_loss timer fires.  For
 * every outstanding command targeting this rport, marks it ABTS_PENDING
 * and queues a local terminate (ABT_TASK_TERM) to firmware.  If queueing
 * fails, the command state is reverted so scsi_eh or a later LUN reset
 * can clean it up.
 */
void fnic_terminate_rport_io(struct fc_rport *rport)
{
    int tag;
    struct fnic_io_req *io_req;
    spinlock_t *io_lock;
    unsigned long flags;
    struct scsi_cmnd *sc;
    struct scsi_lun fc_lun;
    struct fc_rport_libfc_priv *rdata = rport->dd_data;
    struct fc_lport *lport = rdata->local_port;
    struct fnic *fnic = lport_priv(lport);
    struct fc_rport *cmd_rport;
    enum fnic_ioreq_state old_ioreq_state;

    FNIC_SCSI_DBG(KERN_DEBUG,
                  fnic->lport->host, "fnic_terminate_rport_io called"
                  " wwpn 0x%llx, wwnn0x%llx, portid 0x%06x\n",
                  rport->port_name, rport->node_name,
                  rport->port_id);

    /* Device removal path does its own cleanup. */
    if (fnic->in_remove)
        return;

    for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
        sc = scsi_host_find_tag(fnic->lport->host, tag);
        if (!sc)
            continue;

        cmd_rport = starget_to_rport(scsi_target(sc->device));
        if (rport != cmd_rport)
            continue;

        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);

        io_req = (struct fnic_io_req *)CMD_SP(sc);

        /*
         * Fix: dropped the redundant "rport != cmd_rport" re-test here.
         * Both are function-local and were already compared (and the
         * mismatch case skipped) before the lock was taken, so the
         * condition could never be true at this point.
         */
        if (!io_req) {
            spin_unlock_irqrestore(io_lock, flags);
            continue;
        }

        /*
         * Found IO that is still pending with firmware and
         * belongs to rport that went away
         */
        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
            /* An abort is already in flight for this command. */
            spin_unlock_irqrestore(io_lock, flags);
            continue;
        }
        old_ioreq_state = CMD_STATE(sc);
        CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
        CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

        BUG_ON(io_req->abts_done);

        FNIC_SCSI_DBG(KERN_DEBUG,
                      fnic->lport->host,
                      "fnic_terminate_rport_io: Issuing abts\n");

        spin_unlock_irqrestore(io_lock, flags);

        /* Now queue the abort command to firmware */
        int_to_scsilun(sc->device->lun, &fc_lun);

        if (fnic_queue_abort_io_req(fnic, tag,
                                    FCPIO_ITMF_ABT_TASK_TERM,
                                    fc_lun.scsi_lun, io_req)) {
            /*
             * Revert the cmd state back to old state, if
             * it hasn't changed in between. This cmd will get
             * aborted later by scsi_eh, or cleaned up during
             * lun reset
             */
            io_lock = fnic_io_lock_hash(fnic, sc);

            spin_lock_irqsave(io_lock, flags);
            if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
                CMD_STATE(sc) = old_ioreq_state;
            spin_unlock_irqrestore(io_lock, flags);
        }
    }
}
/*
 * This function is exported to SCSI for sending abort cmnds.
 * A SCSI IO is represented by a io_req in the driver.
 * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
 *
 * eh_abort_handler: returns SUCCESS if the command was aborted (or already
 * completed by firmware), FAILED otherwise.  Runs in the scsi_eh thread
 * and may sleep waiting for the firmware abort completion.
 */
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
    struct fc_lport *lp;
    struct fnic *fnic;
    struct fnic_io_req *io_req;
    struct fc_rport *rport;
    spinlock_t *io_lock;
    unsigned long flags;
    int ret = SUCCESS;
    u32 task_req;
    struct scsi_lun fc_lun;
    /* Signaled by fnic_fcpio_itmf_cmpl_handler via io_req->abts_done. */
    DECLARE_COMPLETION_ONSTACK(tm_done);

    /* Wait for rport to unblock */
    fc_block_scsi_eh(sc);

    /* Get local-port, check ready and link up */
    lp = shost_priv(sc->device->host);

    fnic = lport_priv(lp);
    rport = starget_to_rport(scsi_target(sc->device));
    FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                  "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
                  rport->port_id, sc->device->lun, sc->request->tag);

    if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
        ret = FAILED;
        goto fnic_abort_cmd_end;
    }

    /*
     * Avoid a race between SCSI issuing the abort and the device
     * completing the command.
     *
     * If the command is already completed by the fw cmpl code,
     * we just return SUCCESS from here. This means that the abort
     * succeeded. In the SCSI ML, since the timeout for command has
     * happened, the completion wont actually complete the command
     * and it will be considered as an aborted command
     *
     * The CMD_SP will not be cleared except while holding io_req_lock.
     */
    io_lock = fnic_io_lock_hash(fnic, sc);
    spin_lock_irqsave(io_lock, flags);
    io_req = (struct fnic_io_req *)CMD_SP(sc);
    if (!io_req) {
        /* Already completed and cleaned up; abort trivially succeeds. */
        spin_unlock_irqrestore(io_lock, flags);
        goto fnic_abort_cmd_end;
    }

    io_req->abts_done = &tm_done;

    if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
        /* An abort was already issued; just wait for its completion. */
        spin_unlock_irqrestore(io_lock, flags);
        goto wait_pending;
    }
    /*
     * Command is still pending, need to abort it
     * If the firmware completes the command after this point,
     * the completion wont be done till mid-layer, since abort
     * has already started.
     */
    CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
    CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

    spin_unlock_irqrestore(io_lock, flags);

    /*
     * Check readiness of the remote port. If the path to remote
     * port is up, then send abts to the remote port to terminate
     * the IO. Else, just locally terminate the IO in the firmware
     */
    if (fc_remote_port_chkready(rport) == 0)
        task_req = FCPIO_ITMF_ABT_TASK;
    else
        task_req = FCPIO_ITMF_ABT_TASK_TERM;

    /* Now queue the abort command to firmware */
    int_to_scsilun(sc->device->lun, &fc_lun);

    if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
                                fc_lun.scsi_lun, io_req)) {
        /* Could not queue: unhook the completion and fail the abort.
         * Re-read CMD_SP since the lock was dropped in between. */
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        if (io_req)
            io_req->abts_done = NULL;
        spin_unlock_irqrestore(io_lock, flags);
        ret = FAILED;
        goto fnic_abort_cmd_end;
    }

    /*
     * We queued an abort IO, wait for its completion.
     * Once the firmware completes the abort command, it will
     * wake up this thread.
     */
wait_pending:
    wait_for_completion_timeout(&tm_done,
                                msecs_to_jiffies
                                (2 * fnic->config.ra_tov +
                                 fnic->config.ed_tov));

    /* Check the abort status */
    spin_lock_irqsave(io_lock, flags);

    io_req = (struct fnic_io_req *)CMD_SP(sc);
    if (!io_req) {
        spin_unlock_irqrestore(io_lock, flags);
        ret = FAILED;
        goto fnic_abort_cmd_end;
    }
    io_req->abts_done = NULL;

    /* fw did not complete abort, timed out */
    if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
        spin_unlock_irqrestore(io_lock, flags);
        ret = FAILED;
        goto fnic_abort_cmd_end;
    }

    /*
     * firmware completed the abort, check the status,
     * free the io_req irrespective of failure or success
     */
    if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
        ret = FAILED;

    CMD_SP(sc) = NULL;

    spin_unlock_irqrestore(io_lock, flags);

    fnic_release_ioreq_buf(fnic, io_req, sc);
    mempool_free(io_req, fnic->io_req_pool);

fnic_abort_cmd_end:
    FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                  "Returning from abort cmd %s\n",
                  (ret == SUCCESS) ?
                  "SUCCESS" : "FAILED");
    return ret;
}
/*
 * fnic_queue_dr_io_req - post a LUN-reset ITMF to firmware.
 *
 * Tags the descriptor with FNIC_TAG_DEV_RST so its completion is routed
 * to the device-reset path.  Returns 0 on success, -EAGAIN if no WQ
 * descriptor is available.  Always uses copy WQ 0.
 */
static inline int fnic_queue_dr_io_req(struct fnic *fnic,
                                       struct scsi_cmnd *sc,
                                       struct fnic_io_req *io_req)
{
    struct vnic_wq_copy *wq = &fnic->wq_copy[0];
    struct scsi_lun fc_lun;
    int ret = 0;
    unsigned long intr_flags;

    spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

    /* Reclaim completed descriptors when running low before giving up. */
    if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
        free_wq_copy_descs(fnic, wq);

    if (!vnic_wq_copy_desc_avail(wq)) {
        ret = -EAGAIN;
        goto lr_io_req_end;
    }

    /* fill in the lun info */
    int_to_scsilun(sc->device->lun, &fc_lun);

    fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
                                 0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
                                 fc_lun.scsi_lun, io_req->port_id,
                                 fnic->config.ra_tov, fnic->config.ed_tov);

lr_io_req_end:
    spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);

    return ret;
}
/*
 * Clean up any pending aborts on the lun
 * For each outstanding IO on this lun, whose abort is not completed by fw,
 * issue a local abort. Wait for abort to complete. Return 0 if all commands
 * successfully aborted, 1 otherwise
 *
 * Called from fnic_device_reset() after the LUN reset itself succeeded;
 * @lr_sc (the reset command) is deliberately skipped.
 */
static int fnic_clean_pending_aborts(struct fnic *fnic,
                                     struct scsi_cmnd *lr_sc)
{
    int tag;
    struct fnic_io_req *io_req;
    spinlock_t *io_lock;
    unsigned long flags;
    int ret = 0;
    struct scsi_cmnd *sc;
    struct scsi_lun fc_lun;
    struct scsi_device *lun_dev = lr_sc->device;
    /* Reused for every abort we wait on, one at a time. */
    DECLARE_COMPLETION_ONSTACK(tm_done);

    for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
        sc = scsi_host_find_tag(fnic->lport->host, tag);
        /*
         * ignore this lun reset cmd or cmds that do not belong to
         * this lun
         */
        if (!sc || sc == lr_sc || sc->device != lun_dev)
            continue;

        io_lock = fnic_io_lock_hash(fnic, sc);
        spin_lock_irqsave(io_lock, flags);

        io_req = (struct fnic_io_req *)CMD_SP(sc);

        if (!io_req || sc->device != lun_dev) {
            spin_unlock_irqrestore(io_lock, flags);
            continue;
        }

        /*
         * Found IO that is still pending with firmware and
         * belongs to the LUN that we are resetting
         */
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "Found IO in %s on lun\n",
                      fnic_ioreq_state_to_str(CMD_STATE(sc)));

        /* After a successful LUN reset, everything left on this LUN
         * must already be in the abort-pending state. */
        BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);

        CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
        io_req->abts_done = &tm_done;
        spin_unlock_irqrestore(io_lock, flags);

        /* Now queue the abort command to firmware */
        int_to_scsilun(sc->device->lun, &fc_lun);

        if (fnic_queue_abort_io_req(fnic, tag,
                                    FCPIO_ITMF_ABT_TASK_TERM,
                                    fc_lun.scsi_lun, io_req)) {
            /* Could not queue the abort: unhook and fail. */
            spin_lock_irqsave(io_lock, flags);
            io_req = (struct fnic_io_req *)CMD_SP(sc);
            if (io_req)
                io_req->abts_done = NULL;
            spin_unlock_irqrestore(io_lock, flags);
            ret = 1;
            goto clean_pending_aborts_end;
        }

        wait_for_completion_timeout(&tm_done,
                                    msecs_to_jiffies
                                    (fnic->config.ed_tov));

        /* Recheck cmd state to check if it is now aborted */
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        if (!io_req) {
            spin_unlock_irqrestore(io_lock, flags);
            ret = 1;
            goto clean_pending_aborts_end;
        }

        io_req->abts_done = NULL;

        /* if abort is still pending with fw, fail */
        if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
            spin_unlock_irqrestore(io_lock, flags);
            ret = 1;
            goto clean_pending_aborts_end;
        }
        CMD_SP(sc) = NULL;
        spin_unlock_irqrestore(io_lock, flags);

        fnic_release_ioreq_buf(fnic, io_req, sc);
        mempool_free(io_req, fnic->io_req_pool);
    }

clean_pending_aborts_end:
    return ret;
}
/*
 * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
 * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
 * on the LUN.
 *
 * eh_device_reset_handler: returns SUCCESS only if the firmware LUN reset
 * completed with FCPIO_SUCCESS and every leftover I/O on the LUN could be
 * cleaned up; FAILED otherwise.  Runs in the scsi_eh thread and may sleep.
 */
int fnic_device_reset(struct scsi_cmnd *sc)
{
    struct fc_lport *lp;
    struct fnic *fnic;
    struct fnic_io_req *io_req;
    struct fc_rport *rport;
    int status;
    int ret = FAILED;
    spinlock_t *io_lock;
    unsigned long flags;
    /* Signaled by fnic_fcpio_itmf_cmpl_handler via io_req->dr_done. */
    DECLARE_COMPLETION_ONSTACK(tm_done);

    /* Wait for rport to unblock */
    fc_block_scsi_eh(sc);

    /* Get local-port, check ready and link up */
    lp = shost_priv(sc->device->host);

    fnic = lport_priv(lp);

    rport = starget_to_rport(scsi_target(sc->device));
    FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                  "Device reset called FCID 0x%x, LUN 0x%x\n",
                  rport->port_id, sc->device->lun);

    if (lp->state != LPORT_ST_READY || !(lp->link_up))
        goto fnic_device_reset_end;

    /* Check if remote port up */
    if (fc_remote_port_chkready(rport))
        goto fnic_device_reset_end;

    io_lock = fnic_io_lock_hash(fnic, sc);
    spin_lock_irqsave(io_lock, flags);
    io_req = (struct fnic_io_req *)CMD_SP(sc);

    /*
     * If there is a io_req attached to this command, then use it,
     * else allocate a new one.
     */
    if (!io_req) {
        io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
        if (!io_req) {
            spin_unlock_irqrestore(io_lock, flags);
            goto fnic_device_reset_end;
        }
        memset(io_req, 0, sizeof(*io_req));
        io_req->port_id = rport->port_id;
        CMD_SP(sc) = (char *)io_req;
    }
    io_req->dr_done = &tm_done;
    CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
    CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
    spin_unlock_irqrestore(io_lock, flags);

    FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
                  sc->request->tag);

    /*
     * issue the device reset, if enqueue failed, clean up the ioreq
     * and break assoc with scsi cmd
     */
    if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
        /* NOTE: lock is taken here and released at
         * fnic_device_reset_clean after the unlink. */
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        if (io_req)
            io_req->dr_done = NULL;
        goto fnic_device_reset_clean;
    }

    /*
     * Wait on the local completion for LUN reset. The io_req may be
     * freed while we wait since we hold no lock.
     */
    wait_for_completion_timeout(&tm_done,
                                msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));

    spin_lock_irqsave(io_lock, flags);
    io_req = (struct fnic_io_req *)CMD_SP(sc);
    if (!io_req) {
        spin_unlock_irqrestore(io_lock, flags);
        goto fnic_device_reset_end;
    }
    io_req->dr_done = NULL;

    status = CMD_LR_STATUS(sc);
    spin_unlock_irqrestore(io_lock, flags);

    /*
     * If lun reset not completed, bail out with failed. io_req
     * gets cleaned up during higher levels of EH
     */
    if (status == FCPIO_INVALID_CODE) {
        /* Still the sentinel value set above -> firmware never replied. */
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "Device reset timed out\n");
        goto fnic_device_reset_end;
    }

    /* Completed, but not successful, clean up the io_req, return fail */
    if (status != FCPIO_SUCCESS) {
        spin_lock_irqsave(io_lock, flags);
        FNIC_SCSI_DBG(KERN_DEBUG,
                      fnic->lport->host,
                      "Device reset completed - failed\n");
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        goto fnic_device_reset_clean;
    }

    /*
     * Clean up any aborts on this lun that have still not
     * completed. If any of these fail, then LUN reset fails.
     * clean_pending_aborts cleans all cmds on this lun except
     * the lun reset cmd. If all cmds get cleaned, the lun reset
     * succeeds
     */
    if (fnic_clean_pending_aborts(fnic, sc)) {
        spin_lock_irqsave(io_lock, flags);
        io_req = (struct fnic_io_req *)CMD_SP(sc);
        FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                      "Device reset failed"
                      " since could not abort all IOs\n");
        goto fnic_device_reset_clean;
    }

    /* Clean lun reset command */
    spin_lock_irqsave(io_lock, flags);
    io_req = (struct fnic_io_req *)CMD_SP(sc);
    if (io_req)
        /* Completed, and successful */
        ret = SUCCESS;

fnic_device_reset_clean:
    /* io_lock is held on every path reaching this label. */
    if (io_req)
        CMD_SP(sc) = NULL;

    spin_unlock_irqrestore(io_lock, flags);

    if (io_req) {
        fnic_release_ioreq_buf(fnic, io_req, sc);
        mempool_free(io_req, fnic->io_req_pool);
    }

fnic_device_reset_end:
    FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                  "Returning from device reset %s\n",
                  (ret == SUCCESS) ?
                  "SUCCESS" : "FAILED");
    return ret;
}
/* Clean up all IOs, clean up libFC local port */
int fnic_reset(struct Scsi_Host *shost)
{
    struct fc_lport *lp = shost_priv(shost);
    struct fnic *fnic = lport_priv(lp);
    int ret = SUCCESS;

    FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                  "fnic_reset called\n");

    /*
     * Reset local port, this will clean up libFC exchanges,
     * reset remote port sessions, and if link is up, begin flogi
     */
    if (lp->tt.lport_reset(lp))
        ret = FAILED;

    FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                  "Returning from fnic reset %s\n",
                  (ret == SUCCESS) ?
                  "SUCCESS" : "FAILED");

    return ret;
}
/*
 * SCSI Error handling calls driver's eh_host_reset if all prior
 * error handling levels return FAILED. If host reset completes
 * successfully, and if link is up, then Fabric login begins.
 *
 * Host Reset is the highest level of error recovery. If this fails, then
 * host is offlined by SCSI.
 *
 */
int fnic_host_reset(struct scsi_cmnd *sc)
{
    struct Scsi_Host *shost = sc->device->host;
    struct fc_lport *lp = shost_priv(shost);
    unsigned long settle_deadline;
    int ret;

    /*
     * If fnic_reset is successful, wait for fabric login to complete
     * scsi-ml tries to send a TUR to every device if host reset is
     * successful, so before returning to scsi, fabric should be up
     */
    ret = fnic_reset(shost);
    if (ret != SUCCESS)
        return ret;

    /* Poll once a second until the lport is logged in or we give up. */
    settle_deadline = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
    ret = FAILED;
    while (time_before(jiffies, settle_deadline)) {
        if ((lp->state == LPORT_ST_READY) && (lp->link_up)) {
            ret = SUCCESS;
            break;
        }
        ssleep(1);
    }

    return ret;
}
/*
 * This fxn is called from libFC when host is removed
 *
 * Issues a firmware reset and blocks (up to FNIC_RMDEVICE_TIMEOUT ms)
 * until the reset completion handler signals remove_wait.  On queueing
 * failure the fnic state is rolled back if no one else changed it.
 */
void fnic_scsi_abort_io(struct fc_lport *lp)
{
    int err = 0;
    unsigned long flags;
    enum fnic_state old_state;
    struct fnic *fnic = lport_priv(lp);
    /* Completed by the fw-reset completion path via fnic->remove_wait. */
    DECLARE_COMPLETION_ONSTACK(remove_wait);

    /* Issue firmware reset for fnic, wait for reset to complete */
    spin_lock_irqsave(&fnic->fnic_lock, flags);
    fnic->remove_wait = &remove_wait;
    old_state = fnic->state;
    fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
    fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
    spin_unlock_irqrestore(&fnic->fnic_lock, flags);

    err = fnic_fw_reset_handler(fnic);
    if (err) {
        /* Reset could not be queued: restore state (only if still ours)
         * and clear the wait pointer before returning. */
        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
            fnic->state = old_state;
        fnic->remove_wait = NULL;
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        return;
    }

    /* Wait for firmware reset to complete */
    wait_for_completion_timeout(&remove_wait,
                                msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));

    spin_lock_irqsave(&fnic->fnic_lock, flags);
    fnic->remove_wait = NULL;
    FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
                  "fnic_scsi_abort_io %s\n",
                  (fnic->state == FNIC_IN_ETH_MODE) ?
                  "SUCCESS" : "FAILED");
    spin_unlock_irqrestore(&fnic->fnic_lock, flags);

}
/*
 * This fxn called from libFC to clean up driver IO state on link down
 *
 * Fire-and-forget variant of fnic_scsi_abort_io(): issues a firmware
 * reset without waiting for its completion, rolling back the fnic state
 * only if queueing the reset fails.
 */
void fnic_scsi_cleanup(struct fc_lport *lp)
{
    unsigned long flags;
    enum fnic_state old_state;
    struct fnic *fnic = lport_priv(lp);

    /* issue fw reset */
    spin_lock_irqsave(&fnic->fnic_lock, flags);
    old_state = fnic->state;
    fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
    fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
    spin_unlock_irqrestore(&fnic->fnic_lock, flags);

    if (fnic_fw_reset_handler(fnic)) {
        /* Restore state only if no one else changed it meanwhile. */
        spin_lock_irqsave(&fnic->fnic_lock, flags);
        if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
            fnic->state = old_state;
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
    }

}
/* Intentionally empty: libFC requires this callback but fnic has no
 * per-exchange SCSI state to clean up here. */
void fnic_empty_scsi_cleanup(struct fc_lport *lp)
{
}
/*
 * fnic_exch_mgr_reset - libFC exchange-manager reset hook.
 *
 * Performs driver-side I/O cleanup appropriate to the (sid, did) pair
 * before delegating to libFC's fc_exch_mgr_reset():
 *   - sid != 0:            nothing driver-specific to do;
 *   - sid == 0, did != 0:  a single rport went away -> terminate its I/O;
 *   - sid == 0, did == 0:  link down or device removal -> full cleanup.
 */
void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
{
    struct fnic *fnic = lport_priv(lp);

    /* Non-zero sid, nothing to do */
    if (sid)
        goto call_fc_exch_mgr_reset;

    if (did) {
        fnic_rport_exch_reset(fnic, did);
        goto call_fc_exch_mgr_reset;
    }

    /*
     * sid = 0, did = 0
     * link down or device being removed
     */
    if (!fnic->in_remove)
        fnic_scsi_cleanup(lp);
    else
        fnic_scsi_abort_io(lp);

    /* call libFC exch mgr reset to reset its exchanges */
call_fc_exch_mgr_reset:
    fc_exch_mgr_reset(lp, sid, did);

}
| gpl-2.0 |
TREX-ROM/android_kernel_asus_grouper | net/mac80211/debugfs.c | 2783 | 15724 |
/*
* mac80211 debugfs for wireless PHYs
*
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
*
* GPLv2
*
*/
#include <linux/debugfs.h>
#include <linux/rtnetlink.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "debugfs.h"
/* Generic debugfs open: stash the inode's private pointer (set at file
 * creation time) so read/write handlers can retrieve it. */
int mac80211_open_file_generic(struct inode *inode, struct file *file)
{
    file->private_data = inode->i_private;
    return 0;
}
/* Upper bound on a single formatted debugfs read value. */
#define DEBUGFS_FORMAT_BUFFER_SIZE 100

/*
 * mac80211_format_buffer - printf-style helper for debugfs read handlers.
 *
 * Formats into a fixed stack buffer (truncating at
 * DEBUGFS_FORMAT_BUFFER_SIZE) and copies the result to userspace with
 * the usual simple_read_from_buffer() offset semantics.
 */
int mac80211_format_buffer(char __user *userbuf, size_t count,
                           loff_t *ppos, char *fmt, ...)
{
    va_list args;
    char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
    int res;

    va_start(args, fmt);
    res = vscnprintf(buf, sizeof(buf), fmt, args);
    va_end(args);

    return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
/* Generate a read handler named <name>_read that formats `value...`
 * with `fmt` plus a trailing newline. */
#define DEBUGFS_READONLY_FILE_FN(name, fmt, value...)			\
static ssize_t name## _read(struct file *file, char __user *userbuf,	\
			    size_t count, loff_t *ppos)			\
{									\
	struct ieee80211_local *local = file->private_data;		\
									\
	return mac80211_format_buffer(userbuf, count, ppos,		\
				      fmt "\n", ##value);		\
}

/* Generate the matching read-only file_operations named <name>_ops. */
#define DEBUGFS_READONLY_FILE_OPS(name)			\
static const struct file_operations name## _ops = {	\
	.read = name## _read,				\
	.open = mac80211_open_file_generic,		\
	.llseek = generic_file_llseek,			\
};

/* Convenience: emit both the handler and its file_operations. */
#define DEBUGFS_READONLY_FILE(name, fmt, value...)	\
	DEBUGFS_READONLY_FILE_FN(name, fmt, value)	\
	DEBUGFS_READONLY_FILE_OPS(name)

/* Register a file in the phy debugfs dir; default mode is owner-read. */
#define DEBUGFS_ADD(name)						\
	debugfs_create_file(#name, 0400, phyd, local, &name## _ops);

#define DEBUGFS_ADD_MODE(name, mode)					\
	debugfs_create_file(#name, mode, phyd, local, &name## _ops);
/* Simple read-only debugfs files exposing fields of ieee80211_local. */
DEBUGFS_READONLY_FILE(user_power, "%d",
		      local->user_power_level);
DEBUGFS_READONLY_FILE(power, "%d",
		      local->hw.conf.power_level);
DEBUGFS_READONLY_FILE(frequency, "%d",
		      local->hw.conf.channel->center_freq);
DEBUGFS_READONLY_FILE(total_ps_buffered, "%d",
		      local->total_ps_buffered);
DEBUGFS_READONLY_FILE(wep_iv, "%#08x",
		      local->wep_iv & 0xffffff);
DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s",
	local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver");
/* Read the current TSF (64-bit timing sync value) from the driver. */
static ssize_t tsf_read(struct file *file, char __user *user_buf,
			size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;
	u64 tsf;

	tsf = drv_get_tsf(local);

	return mac80211_format_buffer(user_buf, count, ppos, "0x%016llx\n",
				      (unsigned long long) tsf);
}

/*
 * Write handler: "reset" resets the TSF (if the driver supports it);
 * any other input is parsed as a number and written to the TSF.
 */
static ssize_t tsf_write(struct file *file,
			 const char __user *user_buf,
			 size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;
	unsigned long long tsf;
	char buf[100];
	size_t len;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;
	buf[len] = '\0';

	if (strncmp(buf, "reset", 5) == 0) {
		if (local->ops->reset_tsf) {
			drv_reset_tsf(local);
			wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
		}
	} else {
		/*
		 * Fix: was simple_strtoul(), whose unsigned long return
		 * truncates the 64-bit TSF to 32 bits on 32-bit
		 * architectures; simple_strtoull() preserves all 64 bits.
		 */
		tsf = simple_strtoull(buf, NULL, 0);
		if (local->ops->set_tsf) {
			drv_set_tsf(local, tsf);
			wiphy_info(local->hw.wiphy,
				   "debugfs set TSF to %#018llx\n", tsf);
		}
	}

	return count;
}

static const struct file_operations tsf_ops = {
	.read = tsf_read,
	.write = tsf_write,
	.open = mac80211_open_file_generic,
	.llseek = default_llseek,
};
/* Any write to this file forces a full suspend/resume cycle of the
 * hardware (used to exercise the restart path from userspace). */
static ssize_t reset_write(struct file *file, const char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;

	/* suspend/resume must run under RTNL like the real PM paths */
	rtnl_lock();
	__ieee80211_suspend(&local->hw, NULL);
	__ieee80211_resume(&local->hw);
	rtnl_unlock();

	return count;
}

/* write-only trigger file */
static const struct file_operations reset_ops = {
	.write = reset_write,
	.open = mac80211_open_file_generic,
	.llseek = noop_llseek,
};
/* Read the WME no-ack test flag (0 or 1). */
static ssize_t noack_read(struct file *file, char __user *user_buf,
			  size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;

	return mac80211_format_buffer(user_buf, count, ppos, "%d\n",
				      local->wifi_wme_noack_test);
}

/* Write handler: any nonzero numeric input enables the no-ack test. */
static ssize_t noack_write(struct file *file,
			   const char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;
	char buf[10];
	size_t len;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;
	buf[len] = '\0';

	local->wifi_wme_noack_test = !!simple_strtoul(buf, NULL, 0);

	return count;
}

static const struct file_operations noack_ops = {
	.read = noack_read,
	.write = noack_write,
	.open = mac80211_open_file_generic,
	.llseek = default_llseek,
};
/* Read the U-APSD AC bitmap as hex. */
static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;

	return mac80211_format_buffer(user_buf, count, ppos, "0x%x\n",
				      local->uapsd_queues);
}

/*
 * Write handler: accepts a numeric AC bitmap; bits outside the
 * QOSINFO AC mask are rejected with -ERANGE.
 */
static ssize_t uapsd_queues_write(struct file *file,
				  const char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;
	unsigned long val;
	char buf[10];
	size_t len;
	int ret;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;
	buf[len] = '\0';

	ret = strict_strtoul(buf, 0, &val);

	if (ret)
		return -EINVAL;

	if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
		return -ERANGE;

	local->uapsd_queues = val;

	return count;
}

static const struct file_operations uapsd_queues_ops = {
	.read = uapsd_queues_read,
	.write = uapsd_queues_write,
	.open = mac80211_open_file_generic,
	.llseek = default_llseek,
};
/* Read the U-APSD maximum service-period length field as hex. */
static ssize_t uapsd_max_sp_len_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;

	return mac80211_format_buffer(user_buf, count, ppos, "0x%x\n",
				      local->uapsd_max_sp_len);
}

/*
 * Write handler: accepts a numeric SP-length value; bits outside the
 * QOSINFO SP mask are rejected with -ERANGE.
 */
static ssize_t uapsd_max_sp_len_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;
	unsigned long val;
	char buf[10];
	size_t len;
	int ret;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;
	buf[len] = '\0';

	ret = strict_strtoul(buf, 0, &val);

	if (ret)
		return -EINVAL;

	if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
		return -ERANGE;

	local->uapsd_max_sp_len = val;

	return count;
}

static const struct file_operations uapsd_max_sp_len_ops = {
	.read = uapsd_max_sp_len_read,
	.write = uapsd_max_sp_len_write,
	.open = mac80211_open_file_generic,
	.llseek = default_llseek,
};
/* Report the current operating channel type as a one-line string. */
static ssize_t channel_type_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;
	const char *buf;

	switch (local->hw.conf.channel_type) {
	case NL80211_CHAN_NO_HT:
		buf = "no ht\n";
		break;
	case NL80211_CHAN_HT20:
		buf = "ht20\n";
		break;
	case NL80211_CHAN_HT40MINUS:
		buf = "ht40-\n";
		break;
	case NL80211_CHAN_HT40PLUS:
		buf = "ht40+\n";
		break;
	default:
		/* Fix: was "???" with no '\n', unlike every other case of
		 * this line-oriented read. */
		buf = "???\n";
		break;
	}

	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
}
/*
 * Dump the hardware capability flags, one symbolic name per line, plus
 * the raw hex value.  The buffer is heap-allocated because the full set
 * of flag names exceeds what is reasonable on the stack.
 *
 * Fix: the kzalloc() result was previously used unchecked, so an
 * allocation failure dereferenced NULL inside snprintf(); return
 * -ENOMEM instead.
 */
static ssize_t hwflags_read(struct file *file, char __user *user_buf,
			    size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;
	int mxln = 500;
	ssize_t rv;
	char *buf = kzalloc(mxln, GFP_KERNEL);
	int sf = 0; /* how many written so far */

	if (!buf)
		return -ENOMEM;

	sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
		sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
		sf += snprintf(buf + sf, mxln - sf, "RX_INCLUDES_FCS\n");
	if (local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)
		sf += snprintf(buf + sf, mxln - sf,
			       "HOST_BCAST_PS_BUFFERING\n");
	if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)
		sf += snprintf(buf + sf, mxln - sf,
			       "2GHZ_SHORT_SLOT_INCAPABLE\n");
	if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE)
		sf += snprintf(buf + sf, mxln - sf,
			       "2GHZ_SHORT_PREAMBLE_INCAPABLE\n");
	if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
		sf += snprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
		sf += snprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
	if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
		sf += snprintf(buf + sf, mxln - sf, "NEED_DTIM_PERIOD\n");
	if (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)
		sf += snprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
	if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
		sf += snprintf(buf + sf, mxln - sf, "AMPDU_AGGREGATION\n");
	if (local->hw.flags & IEEE80211_HW_SUPPORTS_PS)
		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PS\n");
	if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
		sf += snprintf(buf + sf, mxln - sf, "PS_NULLFUNC_STACK\n");
	if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
	if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
		sf += snprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
	if (local->hw.flags & IEEE80211_HW_BEACON_FILTER)
		sf += snprintf(buf + sf, mxln - sf, "BEACON_FILTER\n");
	if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS)
		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
	if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_SMPS\n");
	if (local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_UAPSD\n");
	if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
		sf += snprintf(buf + sf, mxln - sf, "REPORTS_TX_ACK_STATUS\n");
	if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
		sf += snprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
	if (local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)
		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_CQM_RSSI\n");
	if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)
		sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
	if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
		sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");

	rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
	kfree(buf);
	return rv;
}
/*
 * Dump per-queue state: stop-reason bitmask and pending-frame count for
 * each hardware queue, one line per queue.  The stop-reason lock is held
 * while snapshotting so the two values for a queue are consistent.
 */
static ssize_t queues_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_local *local = file->private_data;
unsigned long flags;
char buf[IEEE80211_MAX_QUEUES * 20];
int q, res = 0;
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
for (q = 0; q < local->hw.queues; q++)
res += sprintf(buf + res, "%02d: %#.8lx/%d\n", q,
local->queue_stop_reasons[q],
skb_queue_len(&local->pending[q]));
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
return simple_read_from_buffer(user_buf, count, ppos, buf, res);
}
/* Generate read-only file_operations for the three dump files above. */
DEBUGFS_READONLY_FILE_OPS(hwflags);
DEBUGFS_READONLY_FILE_OPS(channel_type);
DEBUGFS_READONLY_FILE_OPS(queues);
/* statistics stuff */
/*
 * Fetch the low-level stats from the driver (under RTNL) and format one
 * counter via the caller-supplied printvalue() callback.  Returns the
 * driver's error code if drv_get_stats() fails.
 */
static ssize_t format_devstat_counter(struct ieee80211_local *local,
char __user *userbuf,
size_t count, loff_t *ppos,
int (*printvalue)(struct ieee80211_low_level_stats *stats, char *buf,
int buflen))
{
struct ieee80211_low_level_stats stats;
char buf[20];
int res;
rtnl_lock();
res = drv_get_stats(local, &stats);
rtnl_unlock();
if (res)
return res;
res = printvalue(&stats, buf, sizeof(buf));
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
/*
 * Expand to a formatter, a read() handler and file_operations for one
 * device-statistics counter named after the struct member.
 */
#define DEBUGFS_DEVSTATS_FILE(name) \
static int print_devstats_##name(struct ieee80211_low_level_stats *stats,\
char *buf, int buflen) \
{ \
return scnprintf(buf, buflen, "%u\n", stats->name); \
} \
static ssize_t stats_ ##name## _read(struct file *file, \
char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
return format_devstat_counter(file->private_data, \
userbuf, \
count, \
ppos, \
print_devstats_##name); \
} \
\
static const struct file_operations stats_ ##name## _ops = { \
.read = stats_ ##name## _read, \
.open = mac80211_open_file_generic, \
.llseek = generic_file_llseek, \
};
/* Create a read-only u32 debugfs entry backed directly by 'field'. */
#define DEBUGFS_STATS_ADD(name, field) \
debugfs_create_u32(#name, 0400, statsd, (u32 *) &field);
/* Create a debugfs file served by the generated stats_<name>_ops. */
#define DEBUGFS_DEVSTATS_ADD(name) \
debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops);
DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount);
DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount);
DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount);
DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount);
/*
 * Populate the per-wiphy debugfs directory: miscellaneous hw files, the
 * "keys" subdirectory, and a "statistics" subdirectory holding both the
 * software-maintained dot11 counters and the driver-reported ones.
 * Silently does nothing if the wiphy has no debugfs dir.
 */
void debugfs_hw_add(struct ieee80211_local *local)
{
struct dentry *phyd = local->hw.wiphy->debugfsdir;
struct dentry *statsd;
if (!phyd)
return;
local->debugfs.keys = debugfs_create_dir("keys", phyd);
DEBUGFS_ADD(frequency);
DEBUGFS_ADD(total_ps_buffered);
DEBUGFS_ADD(wep_iv);
DEBUGFS_ADD(tsf);
DEBUGFS_ADD(queues);
DEBUGFS_ADD_MODE(reset, 0200);
DEBUGFS_ADD(noack);
DEBUGFS_ADD(uapsd_queues);
DEBUGFS_ADD(uapsd_max_sp_len);
DEBUGFS_ADD(channel_type);
DEBUGFS_ADD(hwflags);
DEBUGFS_ADD(user_power);
DEBUGFS_ADD(power);
statsd = debugfs_create_dir("statistics", phyd);
/* if the dir failed, don't put all the other things into the root! */
if (!statsd)
return;
DEBUGFS_STATS_ADD(transmitted_fragment_count,
local->dot11TransmittedFragmentCount);
DEBUGFS_STATS_ADD(multicast_transmitted_frame_count,
local->dot11MulticastTransmittedFrameCount);
DEBUGFS_STATS_ADD(failed_count, local->dot11FailedCount);
DEBUGFS_STATS_ADD(retry_count, local->dot11RetryCount);
DEBUGFS_STATS_ADD(multiple_retry_count,
local->dot11MultipleRetryCount);
DEBUGFS_STATS_ADD(frame_duplicate_count,
local->dot11FrameDuplicateCount);
DEBUGFS_STATS_ADD(received_fragment_count,
local->dot11ReceivedFragmentCount);
DEBUGFS_STATS_ADD(multicast_received_frame_count,
local->dot11MulticastReceivedFrameCount);
DEBUGFS_STATS_ADD(transmitted_frame_count,
local->dot11TransmittedFrameCount);
#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
/* extra software path counters, only compiled in for debug builds */
DEBUGFS_STATS_ADD(tx_handlers_drop, local->tx_handlers_drop);
DEBUGFS_STATS_ADD(tx_handlers_queued, local->tx_handlers_queued);
DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted,
local->tx_handlers_drop_unencrypted);
DEBUGFS_STATS_ADD(tx_handlers_drop_fragment,
local->tx_handlers_drop_fragment);
DEBUGFS_STATS_ADD(tx_handlers_drop_wep,
local->tx_handlers_drop_wep);
DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc,
local->tx_handlers_drop_not_assoc);
DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port,
local->tx_handlers_drop_unauth_port);
DEBUGFS_STATS_ADD(rx_handlers_drop, local->rx_handlers_drop);
DEBUGFS_STATS_ADD(rx_handlers_queued, local->rx_handlers_queued);
DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc,
local->rx_handlers_drop_nullfunc);
DEBUGFS_STATS_ADD(rx_handlers_drop_defrag,
local->rx_handlers_drop_defrag);
DEBUGFS_STATS_ADD(rx_handlers_drop_short,
local->rx_handlers_drop_short);
DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan,
local->rx_handlers_drop_passive_scan);
DEBUGFS_STATS_ADD(tx_expand_skb_head,
local->tx_expand_skb_head);
DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned,
local->tx_expand_skb_head_cloned);
DEBUGFS_STATS_ADD(rx_expand_skb_head,
local->rx_expand_skb_head);
DEBUGFS_STATS_ADD(rx_expand_skb_head2,
local->rx_expand_skb_head2);
DEBUGFS_STATS_ADD(rx_handlers_fragments,
local->rx_handlers_fragments);
DEBUGFS_STATS_ADD(tx_status_drop,
local->tx_status_drop);
#endif
/* counters the hardware/driver maintains, fetched via drv_get_stats() */
DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount);
DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount);
DEBUGFS_DEVSTATS_ADD(dot11FCSErrorCount);
DEBUGFS_DEVSTATS_ADD(dot11RTSSuccessCount);
}
| gpl-2.0 |
CyanogenMod/hardkernel-kernel-4412 | drivers/hwmon/ad7414.c | 3039 | 7247 | /*
* An hwmon driver for the Analog Devices AD7414
*
* Copyright 2006 Stefan Roese <sr at denx.de>, DENX Software Engineering
*
* Copyright (c) 2008 PIKA Technologies
* Sean MacLennan <smaclennan@pikatech.com>
*
* Copyright (c) 2008 Spansion Inc.
* Frank Edelhaeuser <frank.edelhaeuser at spansion.com>
* (converted to "new style" I2C driver model, removed checkpatch.pl warnings)
*
* Based on ad7418.c
* Copyright 2006 Tower Technologies, Alessandro Zummo <a.zummo at towertech.it>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
/* AD7414 registers */
#define AD7414_REG_TEMP 0x00
#define AD7414_REG_CONF 0x01
#define AD7414_REG_T_HIGH 0x02
#define AD7414_REG_T_LOW 0x03
/* limit registers, indexed 0 = high (max), 1 = low (min) */
static u8 AD7414_REG_LIMIT[] = { AD7414_REG_T_HIGH, AD7414_REG_T_LOW };
/* per-client driver state, cached register snapshot included */
struct ad7414_data {
struct device *hwmon_dev;
struct mutex lock; /* atomic read data updates */
char valid; /* !=0 if following fields are valid */
unsigned long next_update; /* In jiffies */
s16 temp_input; /* Register values */
s8 temps[ARRAY_SIZE(AD7414_REG_LIMIT)];
};
/*
 * Convert a raw temperature register value (0.25 degC per bit, two's
 * complement, left-shifted by 6) into millidegrees Celsius.
 */
static inline int ad7414_temp_from_reg(s16 reg)
{
	/*
	 * Integer division by 64 instead of ">> 6": a right shift of a
	 * negative value is implementation-defined, division keeps the
	 * sign correct on every compiler.
	 */
	int quarter_degrees = (int)reg / 64;

	return quarter_degrees * 250;
}
/*
 * Read one register.  The 16-bit temperature register needs a byte swap
 * (SMBus word reads are little-endian, the chip is big-endian); all
 * other registers are 8-bit.  Negative return = I2C error.
 */
static inline int ad7414_read(struct i2c_client *client, u8 reg)
{
if (reg == AD7414_REG_TEMP) {
int value = i2c_smbus_read_word_data(client, reg);
return (value < 0) ? value : swab16(value);
} else
return i2c_smbus_read_byte_data(client, reg);
}
/* Write one 8-bit register; returns 0 or a negative I2C error. */
static inline int ad7414_write(struct i2c_client *client, u8 reg, u8 value)
{
return i2c_smbus_write_byte_data(client, reg, value);
}
/*
 * Refresh the cached register values if they are stale (older than
 * 1.5 s) or never read.  Individual read failures keep the previous
 * cached value and are only logged at debug level.  Returns the
 * (locked-then-unlocked) client data with up-to-date fields.
 */
static struct ad7414_data *ad7414_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct ad7414_data *data = i2c_get_clientdata(client);
mutex_lock(&data->lock);
if (time_after(jiffies, data->next_update) || !data->valid) {
int value, i;
dev_dbg(&client->dev, "starting ad7414 update\n");
value = ad7414_read(client, AD7414_REG_TEMP);
if (value < 0)
dev_dbg(&client->dev, "AD7414_REG_TEMP err %d\n",
value);
else
data->temp_input = value;
for (i = 0; i < ARRAY_SIZE(AD7414_REG_LIMIT); ++i) {
value = ad7414_read(client, AD7414_REG_LIMIT[i]);
if (value < 0)
dev_dbg(&client->dev, "AD7414 reg %d err %d\n",
AD7414_REG_LIMIT[i], value);
else
data->temps[i] = value;
}
/* next refresh no sooner than 1.5 seconds from now */
data->next_update = jiffies + HZ + HZ / 2;
data->valid = 1;
}
mutex_unlock(&data->lock);
return data;
}
/* sysfs: current temperature in millidegrees Celsius. */
static ssize_t show_temp_input(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ad7414_data *data = ad7414_update_device(dev);
return sprintf(buf, "%d\n", ad7414_temp_from_reg(data->temp_input));
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0);
/* sysfs: high/low limit (index selects which) in millidegrees. */
static ssize_t show_max_min(struct device *dev, struct device_attribute *attr,
char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct ad7414_data *data = ad7414_update_device(dev);
return sprintf(buf, "%d\n", data->temps[index] * 1000);
}
static ssize_t set_max_min(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct ad7414_data *data = i2c_get_clientdata(client);
int index = to_sensor_dev_attr(attr)->index;
u8 reg = AD7414_REG_LIMIT[index];
long temp = simple_strtol(buf, NULL, 10);
temp = SENSORS_LIMIT(temp, -40000, 85000);
temp = (temp + (temp < 0 ? -500 : 500)) / 1000;
mutex_lock(&data->lock);
data->temps[index] = temp;
ad7414_write(client, reg, temp);
mutex_unlock(&data->lock);
return count;
}
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
show_max_min, set_max_min, 0);
static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
show_max_min, set_max_min, 1);
/*
 * sysfs: alarm flags.  The status bits live in the low bits of the raw
 * temperature word (bit 3 = low alarm, bit 4 = high alarm); index
 * selects which bit to expose.
 */
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct ad7414_data *data = ad7414_update_device(dev);
int value = (data->temp_input >> bitnr) & 1;
return sprintf(buf, "%d\n", value);
}
static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 4);
/* all attributes registered as one sysfs group in probe() */
static struct attribute *ad7414_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
NULL
};
static const struct attribute_group ad7414_group = {
.attrs = ad7414_attributes,
};
/*
 * Bind to an AD7414: verify SMBus capabilities, allocate state, wake
 * the chip (clear the power-down bit in the config register), then
 * register the sysfs group and hwmon device.  Errors unwind via the
 * goto-cleanup chain below.
 */
static int ad7414_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
struct ad7414_data *data;
int conf;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_READ_WORD_DATA)) {
err = -EOPNOTSUPP;
goto exit;
}
data = kzalloc(sizeof(struct ad7414_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(client, data);
mutex_init(&data->lock);
dev_info(&client->dev, "chip found\n");
/* Make sure the chip is powered up. */
conf = i2c_smbus_read_byte_data(client, AD7414_REG_CONF);
if (conf < 0)
dev_warn(&client->dev,
"ad7414_probe unable to read config register.\n");
else {
/* bit 7 is the power-down bit; clear it to run conversions */
conf &= ~(1 << 7);
i2c_smbus_write_byte_data(client, AD7414_REG_CONF, conf);
}
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &ad7414_group);
if (err)
goto exit_free;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
return 0;
exit_remove:
sysfs_remove_group(&client->dev.kobj, &ad7414_group);
exit_free:
kfree(data);
exit:
return err;
}
/*
 * Unbind: tear down in the reverse order of probe() — hwmon device
 * first, then the sysfs group, then the client state.
 */
static int __devexit ad7414_remove(struct i2c_client *client)
{
	struct ad7414_data *priv = i2c_get_clientdata(client);

	hwmon_device_unregister(priv->hwmon_dev);
	sysfs_remove_group(&client->dev.kobj, &ad7414_group);
	kfree(priv);

	return 0;
}
/* devices this driver binds to */
static const struct i2c_device_id ad7414_id[] = {
{ "ad7414", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, ad7414_id);
static struct i2c_driver ad7414_driver = {
.driver = {
.name = "ad7414",
},
.probe = ad7414_probe,
.remove = __devexit_p(ad7414_remove),
.id_table = ad7414_id,
};
/* module entry point: register the I2C driver */
static int __init ad7414_init(void)
{
return i2c_add_driver(&ad7414_driver);
}
module_init(ad7414_init);
/* module exit point: unregister the I2C driver */
static void __exit ad7414_exit(void)
{
i2c_del_driver(&ad7414_driver);
}
module_exit(ad7414_exit);
MODULE_AUTHOR("Stefan Roese <sr at denx.de>, "
"Frank Edelhaeuser <frank.edelhaeuser at spansion.com>");
MODULE_DESCRIPTION("AD7414 driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
kim6515516/fastmodel-on-kvm | drivers/media/dvb-frontends/au8522_common.c | 4319 | 7569 | /*
Auvitek AU8522 QAM/8VSB demodulator driver
Copyright (C) 2008 Steven Toth <stoth@linuxtv.org>
Copyright (C) 2008 Devin Heitmueller <dheitmueller@linuxtv.org>
Copyright (C) 2005-2008 Auvitek International, Ltd.
Copyright (C) 2012 Michael Krufky <mkrufky@linuxtv.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/i2c.h>
#include "dvb_frontend.h"
#include "au8522_priv.h"
/* module parameter (registered below): non-zero enables dprintk output */
static int debug;
#define dprintk(arg...)\
do { if (debug)\
printk(arg);\
} while (0)
/* Despite the name "hybrid_tuner", the framework works just as well for
hybrid demodulators as well... */
static LIST_HEAD(hybrid_tuner_instance_list);
/* protects hybrid_tuner_instance_list and instance refcounting */
static DEFINE_MUTEX(au8522_list_mutex);
int au8522_writereg(struct au8522_state *state, u16 reg, u8 data)
{
int ret;
u8 buf[] = { (reg >> 8) | 0x80, reg & 0xff, data };
struct i2c_msg msg = { .addr = state->config->demod_address,
.flags = 0, .buf = buf, .len = 3 };
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
printk("%s: writereg error (reg == 0x%02x, val == 0x%04x, "
"ret == %i)\n", __func__, reg, data, ret);
return (ret != 1) ? -1 : 0;
}
EXPORT_SYMBOL(au8522_writereg);
/*
 * Read one 8-bit value from a 16-bit demod register (write address,
 * then read back one byte).  On transfer failure the error is logged
 * and the (zero-initialized) buffer value is returned.
 */
u8 au8522_readreg(struct au8522_state *state, u16 reg)
{
int ret;
u8 b0[] = { (reg >> 8) | 0x40, reg & 0xff };
u8 b1[] = { 0 };
struct i2c_msg msg[] = {
{ .addr = state->config->demod_address, .flags = 0,
.buf = b0, .len = 2 },
{ .addr = state->config->demod_address, .flags = I2C_M_RD,
.buf = b1, .len = 1 } };
ret = i2c_transfer(state->i2c, msg, 2);
if (ret != 2)
printk(KERN_ERR "%s: readreg error (ret == %i)\n",
__func__, ret);
return b1[0];
}
EXPORT_SYMBOL(au8522_readreg);
/*
 * Open/close the I2C gate (register 0x106) for the digital frontend.
 * Requests are ignored while the chip is in analog mode, which can
 * happen if the dvb_frontend thread is still winding down after a
 * switch to analog operation.
 */
int au8522_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
	struct au8522_state *state = fe->demodulator_priv;

	dprintk("%s(%d)\n", __func__, enable);

	if (state->operational_mode == AU8522_ANALOG_MODE)
		return 0;

	return au8522_writereg(state, 0x106, enable ? 1 : 0);
}
EXPORT_SYMBOL(au8522_i2c_gate_ctrl);
/*
 * Open/close the I2C gate (register 0x106) on behalf of the analog
 * side; unlike the digital variant this is honored unconditionally.
 */
int au8522_analog_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
{
	struct au8522_state *state = fe->demodulator_priv;

	dprintk("%s(%d)\n", __func__, enable);

	return au8522_writereg(state, 0x106, enable ? 1 : 0);
}
EXPORT_SYMBOL(au8522_analog_i2c_gate_ctrl);
/* Look up (or allocate) the shared per-chip state for the demod at the
given I2C address, refcounted via the hybrid tuner framework so the
analog and digital drivers can share one instance. Returns the
instance count from hybrid_tuner_request_state(). */
int au8522_get_state(struct au8522_state **state, struct i2c_adapter *i2c,
u8 client_address)
{
int ret;
mutex_lock(&au8522_list_mutex);
ret = hybrid_tuner_request_state(struct au8522_state, (*state),
hybrid_tuner_instance_list,
i2c, client_address, "au8522");
mutex_unlock(&au8522_list_mutex);
return ret;
}
EXPORT_SYMBOL(au8522_get_state);
/* Drop one reference on the shared state; freed when the last user goes. */
void au8522_release_state(struct au8522_state *state)
{
mutex_lock(&au8522_list_mutex);
if (state != NULL)
hybrid_tuner_release_state(state);
mutex_unlock(&au8522_list_mutex);
}
EXPORT_SYMBOL(au8522_release_state);
/*
 * Enable or disable the GPIO pin that drives the LED, using the
 * board-specific masks from the LED config (high byte = bits to clear,
 * low byte = bits to set).  No-op when the board has no LED wiring.
 */
static int au8522_led_gpio_enable(struct au8522_state *state, int onoff)
{
struct au8522_led_config *led_config = state->config->led_cfg;
u8 val;
/* bail out if we can't control an LED */
if (!led_config || !led_config->gpio_output ||
!led_config->gpio_output_enable || !led_config->gpio_output_disable)
return 0;
/* 0x4000 = read bank; mask off the bank select bits of the address */
val = au8522_readreg(state, 0x4000 |
(led_config->gpio_output & ~0xc000));
if (onoff) {
/* enable GPIO output */
val &= ~((led_config->gpio_output_enable >> 8) & 0xff);
val |= (led_config->gpio_output_enable & 0xff);
} else {
/* disable GPIO output */
val &= ~((led_config->gpio_output_disable >> 8) & 0xff);
val |= (led_config->gpio_output_disable & 0xff);
}
/* 0x8000 = write bank for the same register */
return au8522_writereg(state, 0x8000 |
(led_config->gpio_output & ~0xc000), val);
}
/* led = 0 | off
 * led = 1 | signal ok
 * led = 2 | signal strong
 * led < 0 | only light led if leds are currently off
 */
/*
 * Drive the signal-quality LED.  A negative argument is a "soft"
 * request: it is honored only if no LED is currently lit, otherwise
 * the existing state is kept.  The GPIO is enabled before and disabled
 * after the all-off state to save power.
 */
int au8522_led_ctrl(struct au8522_state *state, int led)
{
struct au8522_led_config *led_config = state->config->led_cfg;
int i, ret = 0;
/* bail out if we can't control an LED */
if (!led_config || !led_config->gpio_leds ||
!led_config->num_led_states || !led_config->led_states)
return 0;
if (led < 0) {
/* if LED is already lit, then leave it as-is */
if (state->led_state)
return 0;
else
led *= -1;
}
/* toggle LED if changing state */
if (state->led_state != led) {
u8 val;
dprintk("%s: %d\n", __func__, led);
au8522_led_gpio_enable(state, 1);
val = au8522_readreg(state, 0x4000 |
(led_config->gpio_leds & ~0xc000));
/* start with all leds off */
for (i = 0; i < led_config->num_led_states; i++)
val &= ~led_config->led_states[i];
/* set selected LED state */
if (led < led_config->num_led_states)
val |= led_config->led_states[led];
else if (led_config->num_led_states)
val |=
led_config->led_states[led_config->num_led_states - 1];
ret = au8522_writereg(state, 0x8000 |
(led_config->gpio_leds & ~0xc000), val);
if (ret < 0)
return ret;
state->led_state = led;
if (led == 0)
au8522_led_gpio_enable(state, 0);
}
return 0;
}
EXPORT_SYMBOL(au8522_led_ctrl);
/*
 * Digital-side init: switch the chip to digital mode, clear stale tune
 * state, pulse the power-control bit (reg 0xa4) and open the I2C gate.
 */
int au8522_init(struct dvb_frontend *fe)
{
struct au8522_state *state = fe->demodulator_priv;
dprintk("%s()\n", __func__);
state->operational_mode = AU8522_DIGITAL_MODE;
/* Clear out any state associated with the digital side of the
chip, so that when it gets powered back up it won't think
that it is already tuned */
state->current_frequency = 0;
au8522_writereg(state, 0xa4, 1 << 5);
au8522_i2c_gate_ctrl(fe, 1);
return 0;
}
EXPORT_SYMBOL(au8522_init);
/*
 * Digital-side sleep: power the chip down unless the analog frontend
 * has taken over, in which case the request is ignored.
 */
int au8522_sleep(struct dvb_frontend *fe)
{
struct au8522_state *state = fe->demodulator_priv;
dprintk("%s()\n", __func__);
/* Only power down if the digital side is currently using the chip */
if (state->operational_mode == AU8522_ANALOG_MODE) {
/* We're not in one of the expected power modes, which means
that the DVB thread is probably telling us to go to sleep
even though the analog frontend has already started using
the chip. So ignore the request */
return 0;
}
/* turn off led */
au8522_led_ctrl(state, 0);
/* Power down the chip */
au8522_writereg(state, 0xa4, 1 << 5);
state->current_frequency = 0;
return 0;
}
EXPORT_SYMBOL(au8522_sleep);
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable verbose debug messages");
MODULE_DESCRIPTION("Auvitek AU8522 QAM-B/ATSC Demodulator driver");
MODULE_AUTHOR("Steven Toth");
MODULE_LICENSE("GPL");
| gpl-2.0 |
wurikiji/ttFS | ulinux/linux-3.10.61/drivers/media/platform/marvell-ccic/cafe-driver.c | 7903 | 17526 | /*
* A driver for the CMOS camera controller in the Marvell 88ALP01 "cafe"
* multifunction chip. Currently works with the Omnivision OV7670
* sensor.
*
* The data sheet for this device can be found at:
* http://www.marvell.com/products/pc_connectivity/88alp01/
*
* Copyright 2006-11 One Laptop Per Child Association, Inc.
* Copyright 2006-11 Jonathan Corbet <corbet@lwn.net>
*
* Written by Jonathan Corbet, corbet@lwn.net.
*
* v4l2_device/v4l2_subdev conversion by:
* Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
*
* This file may be distributed under the terms of the GNU General
* Public License, version 2.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/io.h>
#include "mcam-core.h"
#define CAFE_VERSION 0x000002
/*
* Parameters.
*/
MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("Video");
/* per-device state: the generic mcam core plus Cafe-specific pieces */
struct cafe_camera {
int registered; /* Fully initialized? */
struct mcam_camera mcam;
struct pci_dev *pdev;
wait_queue_head_t smbus_wait; /* Waiting on i2c events */
};
/*
* Most of the camera controller registers are defined in mcam-core.h,
* but the Cafe platform has some additional registers of its own;
* they are described here.
*/
/*
* "General purpose register" has a couple of GPIOs used for sensor
* power and reset on OLPC XO 1.0 systems.
*/
#define REG_GPR 0xb4
#define GPR_C1EN 0x00000020 /* Pad 1 (power down) enable */
#define GPR_C0EN 0x00000010 /* Pad 0 (reset) enable */
#define GPR_C1 0x00000002 /* Control 1 value */
/*
* Control 0 is wired to reset on OLPC machines. For ov7x sensors,
* it is active low.
*/
#define GPR_C0 0x00000001 /* Control 0 value */
/*
* These registers control the SMBUS module for communicating
* with the sensor.
*/
#define REG_TWSIC0 0xb8 /* TWSI (smbus) control 0 */
#define TWSIC0_EN 0x00000001 /* TWSI enable */
#define TWSIC0_MODE 0x00000002 /* 1 = 16-bit, 0 = 8-bit */
#define TWSIC0_SID 0x000003fc /* Slave ID */
/*
* Subtle trickery: the slave ID field starts with bit 2. But the
* Linux i2c stack wants to treat the bottommost bit as a separate
* read/write bit, which is why slave ID's are usually presented
* >>1. For consistency with that behavior, we shift over three
* bits instead of two.
*/
#define TWSIC0_SID_SHIFT 3
#define TWSIC0_CLKDIV 0x0007fc00 /* Clock divider */
#define TWSIC0_MASKACK 0x00400000 /* Mask ack from sensor */
#define TWSIC0_OVMAGIC 0x00800000 /* Make it work on OV sensors */
#define REG_TWSIC1 0xbc /* TWSI control 1 */
#define TWSIC1_DATA 0x0000ffff /* Data to/from camchip */
#define TWSIC1_ADDR 0x00ff0000 /* Address (register) */
#define TWSIC1_ADDR_SHIFT 16
#define TWSIC1_READ 0x01000000 /* Set for read op */
#define TWSIC1_WSTAT 0x02000000 /* Write status */
#define TWSIC1_RVALID 0x04000000 /* Read data valid */
#define TWSIC1_ERROR 0x08000000 /* Something screwed up */
/*
* Here's the weird global control registers
*/
#define REG_GL_CSR 0x3004 /* Control/status register */
#define GCSR_SRS 0x00000001 /* SW Reset set */
#define GCSR_SRC 0x00000002 /* SW Reset clear */
#define GCSR_MRS 0x00000004 /* Master reset set */
#define GCSR_MRC 0x00000008 /* HW Reset clear */
#define GCSR_CCIC_EN 0x00004000 /* CCIC Clock enable */
#define REG_GL_IMASK 0x300c /* Interrupt mask register */
#define GIMSK_CCIC_EN 0x00000004 /* CCIC Interrupt enable */
#define REG_GL_FCR 0x3038 /* GPIO functional control register */
#define GFCR_GPIO_ON 0x08 /* Camera GPIO enabled */
#define REG_GL_GPIOR 0x315c /* GPIO register */
#define GGPIO_OUT 0x80000 /* GPIO output */
#define GGPIO_VAL 0x00008 /* Output pin value */
#define REG_LEN (REG_GL_IMASK + 4)
/*
* Debugging and related.
*/
#define cam_err(cam, fmt, arg...) \
dev_err(&(cam)->pdev->dev, fmt, ##arg);
#define cam_warn(cam, fmt, arg...) \
dev_warn(&(cam)->pdev->dev, fmt, ##arg);
/* -------------------------------------------------------------------- */
/*
* The I2C/SMBUS interface to the camera itself starts here. The
* controller handles SMBUS itself, presenting a relatively simple register
* interface; all we have to do is to tell it where to route the data.
*/
#define CAFE_SMBUS_TIMEOUT (HZ) /* generous */
/* Map the embedded v4l2_device back to its enclosing cafe_camera. */
static inline struct cafe_camera *to_cam(struct v4l2_device *dev)
{
struct mcam_camera *m = container_of(dev, struct mcam_camera, v4l2_dev);
return container_of(m, struct cafe_camera, mcam);
}
/* Non-zero when the TWSI write has finished (or errored). */
static int cafe_smbus_write_done(struct mcam_camera *mcam)
{
unsigned long flags;
int c1;
/*
 * We must delay after the interrupt, or the controller gets confused
 * and never does give us good status. Fortunately, we don't do this
 * often.
 */
udelay(20);
spin_lock_irqsave(&mcam->dev_lock, flags);
c1 = mcam_reg_read(mcam, REG_TWSIC1);
spin_unlock_irqrestore(&mcam->dev_lock, flags);
return (c1 & (TWSIC1_WSTAT|TWSIC1_ERROR)) != TWSIC1_WSTAT;
}
/*
 * Write one byte to a sensor register through the controller's TWSI
 * (SMBus) engine.  Returns 0 on success, -EIO on timeout or controller
 * error.  See the inline comments for the delicate timing involved.
 */
static int cafe_smbus_write_data(struct cafe_camera *cam,
u16 addr, u8 command, u8 value)
{
unsigned int rval;
unsigned long flags;
struct mcam_camera *mcam = &cam->mcam;
spin_lock_irqsave(&mcam->dev_lock, flags);
rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
/*
 * Marvell sez set clkdiv to all 1's for now.
 */
rval |= TWSIC0_CLKDIV;
mcam_reg_write(mcam, REG_TWSIC0, rval);
(void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
rval = value | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
mcam_reg_write(mcam, REG_TWSIC1, rval);
spin_unlock_irqrestore(&mcam->dev_lock, flags);
/* Unfortunately, reading TWSIC1 too soon after sending a command
 * causes the device to die.
 * Use a busy-wait because we often send a large quantity of small
 * commands at-once; using msleep() would cause a lot of context
 * switches which take longer than 2ms, resulting in a noticeable
 * boot-time and capture-start delays.
 */
mdelay(2);
/*
 * Another sad fact is that sometimes, commands silently complete but
 * cafe_smbus_write_done() never becomes aware of this.
 * This happens at random and appears to possible occur with any
 * command.
 * We don't understand why this is. We work around this issue
 * with the timeout in the wait below, assuming that all commands
 * complete within the timeout.
 */
wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(mcam),
CAFE_SMBUS_TIMEOUT);
spin_lock_irqsave(&mcam->dev_lock, flags);
rval = mcam_reg_read(mcam, REG_TWSIC1);
spin_unlock_irqrestore(&mcam->dev_lock, flags);
if (rval & TWSIC1_WSTAT) {
cam_err(cam, "SMBUS write (%02x/%02x/%02x) timed out\n", addr,
command, value);
return -EIO;
}
if (rval & TWSIC1_ERROR) {
cam_err(cam, "SMBUS write (%02x/%02x/%02x) error\n", addr,
command, value);
return -EIO;
}
return 0;
}
/*
 * Non-zero once the TWSI read has produced valid data or flagged an
 * error.  The short delay is required: polling the controller too soon
 * after the interrupt yields bogus status.
 */
static int cafe_smbus_read_done(struct mcam_camera *mcam)
{
	unsigned long flags;
	int status;

	udelay(20);

	spin_lock_irqsave(&mcam->dev_lock, flags);
	status = mcam_reg_read(mcam, REG_TWSIC1);
	spin_unlock_irqrestore(&mcam->dev_lock, flags);

	return status & (TWSIC1_RVALID | TWSIC1_ERROR);
}
/*
 * Read one byte from a sensor register through the TWSI engine.
 * Returns 0 with *value filled in, or -EIO on controller error or
 * timeout (RVALID never asserted).
 */
static int cafe_smbus_read_data(struct cafe_camera *cam,
u16 addr, u8 command, u8 *value)
{
unsigned int rval;
unsigned long flags;
struct mcam_camera *mcam = &cam->mcam;
spin_lock_irqsave(&mcam->dev_lock, flags);
rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
/*
 * Marvel sez set clkdiv to all 1's for now.
 */
rval |= TWSIC0_CLKDIV;
mcam_reg_write(mcam, REG_TWSIC0, rval);
(void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
rval = TWSIC1_READ | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
mcam_reg_write(mcam, REG_TWSIC1, rval);
spin_unlock_irqrestore(&mcam->dev_lock, flags);
wait_event_timeout(cam->smbus_wait,
cafe_smbus_read_done(mcam), CAFE_SMBUS_TIMEOUT);
spin_lock_irqsave(&mcam->dev_lock, flags);
rval = mcam_reg_read(mcam, REG_TWSIC1);
spin_unlock_irqrestore(&mcam->dev_lock, flags);
if (rval & TWSIC1_ERROR) {
cam_err(cam, "SMBUS read (%02x/%02x) error\n", addr, command);
return -EIO;
}
if (!(rval & TWSIC1_RVALID)) {
cam_err(cam, "SMBUS read (%02x/%02x) timed out\n", addr,
command);
return -EIO;
}
*value = rval & 0xff;
return 0;
}
/*
 * Perform a transfer over SMBUS. This thing is called under
 * the i2c bus lock, so we shouldn't race with ourselves...
 *
 * Only byte-data operations are supported; anything else (or an
 * unrecognized direction) is rejected with -EINVAL.
 */
static int cafe_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
			   unsigned short flags, char rw, u8 command,
			   int size, union i2c_smbus_data *data)
{
	struct cafe_camera *cam = i2c_get_adapdata(adapter);

	if (size != I2C_SMBUS_BYTE_DATA) {
		cam_err(cam, "funky xfer size %d\n", size);
		return -EINVAL;
	}

	switch (rw) {
	case I2C_SMBUS_WRITE:
		return cafe_smbus_write_data(cam, addr, command, data->byte);
	case I2C_SMBUS_READ:
		return cafe_smbus_read_data(cam, addr, command, &data->byte);
	default:
		return -EINVAL;
	}
}
static void cafe_smbus_enable_irq(struct cafe_camera *cam)
{
unsigned long flags;
spin_lock_irqsave(&cam->mcam.dev_lock, flags);
mcam_reg_set_bit(&cam->mcam, REG_IRQMASK, TWSIIRQS);
spin_unlock_irqrestore(&cam->mcam.dev_lock, flags);
}
static u32 cafe_smbus_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_SMBUS_READ_BYTE_DATA |
I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
}
/* i2c algorithm hooks: byte-data SMBUS transfers handled in-driver. */
static struct i2c_algorithm cafe_smbus_algo = {
.smbus_xfer = cafe_smbus_xfer,
.functionality = cafe_smbus_func
};
/*
 * Allocate and register the i2c/SMBUS adapter used to talk to the
 * sensor.  Returns 0 on success or a negative errno.
 *
 * On registration failure the adapter is freed and the back-pointer
 * cleared, so nothing is leaked even though the probe error path
 * never calls cafe_smbus_shutdown().
 */
static int cafe_smbus_setup(struct cafe_camera *cam)
{
	struct i2c_adapter *adap;
	int ret;

	adap = kzalloc(sizeof(*adap), GFP_KERNEL);
	if (adap == NULL)
		return -ENOMEM;
	cam->mcam.i2c_adapter = adap;
	cafe_smbus_enable_irq(cam);
	adap->owner = THIS_MODULE;
	adap->algo = &cafe_smbus_algo;
	strcpy(adap->name, "cafe_ccic");
	adap->dev.parent = &cam->pdev->dev;
	i2c_set_adapdata(adap, cam);
	ret = i2c_add_adapter(adap);
	if (ret) {
		printk(KERN_ERR "Unable to register cafe i2c adapter\n");
		/* Fix: don't leak the adapter; probe's error path won't free it */
		kfree(adap);
		cam->mcam.i2c_adapter = NULL;
	}
	return ret;
}
/* Unregister and free the i2c adapter created by cafe_smbus_setup(). */
static void cafe_smbus_shutdown(struct cafe_camera *cam)
{
i2c_del_adapter(cam->mcam.i2c_adapter);
kfree(cam->mcam.i2c_adapter);
}
/*
 * Controller-level stuff
 */
/*
 * Bring the CCIC controller out of reset and leave it enabled with all
 * interrupts masked.  The register write sequence is hardware "magic";
 * do not reorder.  The dev_lock is dropped around a 5 ms settle sleep,
 * since msleep() cannot run under a spinlock.
 */
static void cafe_ctlr_init(struct mcam_camera *mcam)
{
unsigned long flags;
spin_lock_irqsave(&mcam->dev_lock, flags);
/*
 * Added magic to bring up the hardware on the B-Test board
 */
mcam_reg_write(mcam, 0x3038, 0x8);
mcam_reg_write(mcam, 0x315c, 0x80008);
/*
 * Go through the dance needed to wake the device up.
 * Note that these registers are global and shared
 * with the NAND and SD devices. Interaction between the
 * three still needs to be examined.
 */
mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */
mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRC);
mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRS);
/*
 * Here we must wait a bit for the controller to come around.
 */
spin_unlock_irqrestore(&mcam->dev_lock, flags);
msleep(5);
spin_lock_irqsave(&mcam->dev_lock, flags);
mcam_reg_write(mcam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC);
mcam_reg_set_bit(mcam, REG_GL_IMASK, GIMSK_CCIC_EN);
/*
 * Mask all interrupts.
 */
mcam_reg_write(mcam, REG_IRQMASK, 0);
spin_unlock_irqrestore(&mcam->dev_lock, flags);
}
/* Power the sensor up via the global GPIO and the per-sensor GPR lines. */
static void cafe_ctlr_power_up(struct mcam_camera *mcam)
{
/*
 * Part one of the sensor dance: turn the global
 * GPIO signal on.
 */
mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT|GGPIO_VAL);
/*
 * Put the sensor into operational mode (assumes OLPC-style
 * wiring). Control 0 is reset - set to 1 to operate.
 * Control 1 is power down, set to 0 to operate.
 */
mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
}
/* Reverse of cafe_ctlr_power_up(): assert sensor power-down, drop GPIO. */
static void cafe_ctlr_power_down(struct mcam_camera *mcam)
{
mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C1);
mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT);
}
/*
 * The platform interrupt handler.
 *
 * Dispatches camera interrupts to the mcam core (only once the device
 * is registered) and handles the TWSI/SMBUS completion interrupt here,
 * waking any sleeper in the SMBUS transfer path.
 */
static irqreturn_t cafe_irq(int irq, void *data)
{
struct cafe_camera *cam = data;
struct mcam_camera *mcam = &cam->mcam;
unsigned int irqs, handled;
spin_lock(&mcam->dev_lock);
irqs = mcam_reg_read(mcam, REG_IRQSTAT);
handled = cam->registered && mccic_irq(mcam, irqs);
if (irqs & TWSIIRQS) {
/* Ack the TWSI interrupt and wake the SMBUS waiter */
mcam_reg_write(mcam, REG_IRQSTAT, TWSIIRQS);
wake_up(&cam->smbus_wait);
handled = 1;
}
spin_unlock(&mcam->dev_lock);
return IRQ_RETVAL(handled);
}
/* -------------------------------------------------------------------------- */
/*
 * PCI interface stuff.
 */
/*
 * Probe one CAFE CCIC PCI function: allocate the camera structure,
 * map the device, hook the IRQ, power up the controller, set up
 * SMBUS and register with the mcam core.  Errors unwind in reverse
 * order via the goto chain below.
 */
static int cafe_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
struct cafe_camera *cam;
struct mcam_camera *mcam;
/*
 * Start putting together one of our big camera structures.
 */
ret = -ENOMEM;
cam = kzalloc(sizeof(struct cafe_camera), GFP_KERNEL);
if (cam == NULL)
goto out;
cam->pdev = pdev;
mcam = &cam->mcam;
mcam->chip_id = V4L2_IDENT_CAFE;
spin_lock_init(&mcam->dev_lock);
init_waitqueue_head(&cam->smbus_wait);
mcam->plat_power_up = cafe_ctlr_power_up;
mcam->plat_power_down = cafe_ctlr_power_down;
mcam->dev = &pdev->dev;
/*
 * Set the clock speed for the XO 1; I don't believe this
 * driver has ever run anywhere else.
 */
mcam->clock_speed = 45;
mcam->use_smbus = 1;
/*
 * Vmalloc mode for buffers is traditional with this driver.
 * We *might* be able to run DMA_contig, especially on a system
 * with CMA in it.
 */
mcam->buffer_mode = B_vmalloc;
/*
 * Get set up on the PCI bus.
 */
ret = pci_enable_device(pdev);
if (ret)
goto out_free;
pci_set_master(pdev);
ret = -EIO;
mcam->regs = pci_iomap(pdev, 0, 0);
if (!mcam->regs) {
printk(KERN_ERR "Unable to ioremap cafe-ccic regs\n");
goto out_disable;
}
ret = request_irq(pdev->irq, cafe_irq, IRQF_SHARED, "cafe-ccic", cam);
if (ret)
goto out_iounmap;
/*
 * Initialize the controller and leave it powered up. It will
 * stay that way until the sensor driver shows up.
 */
cafe_ctlr_init(mcam);
cafe_ctlr_power_up(mcam);
/*
 * Set up I2C/SMBUS communications.  Note the sensor could
 * attach within this call chain, so no locks may be held here.
 */
ret = cafe_smbus_setup(cam);
if (ret)
goto out_pdown;
ret = mccic_register(mcam);
if (ret == 0) {
cam->registered = 1;
return 0;
}
cafe_smbus_shutdown(cam);
out_pdown:
cafe_ctlr_power_down(mcam);
free_irq(pdev->irq, cam);
out_iounmap:
pci_iounmap(pdev, mcam->regs);
out_disable:
pci_disable_device(pdev);
out_free:
kfree(cam);
out:
return ret;
}
/*
 * Shut down an initialized device: mcam core teardown, SMBUS adapter
 * removal, then release the IRQ and the register mapping.
 */
static void cafe_shutdown(struct cafe_camera *cam)
{
mccic_shutdown(&cam->mcam);
cafe_smbus_shutdown(cam);
free_irq(cam->pdev->irq, cam);
pci_iounmap(cam->pdev, cam->mcam.regs);
}
/* PCI remove callback: recover our camera from drvdata and tear it down. */
static void cafe_pci_remove(struct pci_dev *pdev)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
struct cafe_camera *cam = to_cam(v4l2_dev);
if (cam == NULL) {
printk(KERN_WARNING "pci_remove on unknown pdev %p\n", pdev);
return;
}
cafe_shutdown(cam);
kfree(cam);
}
#ifdef CONFIG_PM
/*
 * Basic power management.
 */
/* Legacy PCI suspend: save PCI state, quiesce mcam, disable the device. */
static int cafe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
struct cafe_camera *cam = to_cam(v4l2_dev);
int ret;
ret = pci_save_state(pdev);
if (ret)
return ret;
mccic_suspend(&cam->mcam);
pci_disable_device(pdev);
return 0;
}
/*
 * Legacy PCI resume: restore and re-enable the device, reinitialize
 * the controller and hand off to the mcam core resume path.
 */
static int cafe_pci_resume(struct pci_dev *pdev)
{
struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
struct cafe_camera *cam = to_cam(v4l2_dev);
int ret = 0;
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
if (ret) {
cam_warn(cam, "Unable to re-enable device on resume!\n");
return ret;
}
cafe_ctlr_init(&cam->mcam);
return mccic_resume(&cam->mcam);
}
#endif /* CONFIG_PM */
/* PCI IDs we claim: the CCIC function of the Marvell 88ALP01 "CAFE" chip. */
static struct pci_device_id cafe_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL,
PCI_DEVICE_ID_MARVELL_88ALP01_CCIC) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, cafe_ids);
/* PCI driver glue; legacy suspend/resume hooks only when CONFIG_PM is set. */
static struct pci_driver cafe_pci_driver = {
.name = "cafe1000-ccic",
.id_table = cafe_ids,
.probe = cafe_pci_probe,
.remove = cafe_pci_remove,
#ifdef CONFIG_PM
.suspend = cafe_pci_suspend,
.resume = cafe_pci_resume,
#endif
};
/*
 * Module init: announce the driver and register with the PCI core.
 * Returns 0 on success or the pci_register_driver() error code.
 * (Simplified: the old ret=0/goto-out scaffolding was redundant for
 * a single error path.)
 */
static int __init cafe_init(void)
{
	int ret;

	printk(KERN_NOTICE "Marvell M88ALP01 'CAFE' Camera Controller version %d\n",
			CAFE_VERSION);
	ret = pci_register_driver(&cafe_pci_driver);
	if (ret)
		printk(KERN_ERR "Unable to register cafe_ccic driver\n");
	return ret;
}
/* Module teardown: unregister from the PCI core. */
static void __exit cafe_exit(void)
{
pci_unregister_driver(&cafe_pci_driver);
}
module_init(cafe_init);
module_exit(cafe_exit);
| gpl-2.0 |
lyapota/enru-3.1.10-g7f360be | drivers/infiniband/hw/ehca/hcp_if.c | 8415 | 28897 | /*
* IBM eServer eHCA Infiniband device driver for Linux on POWER
*
* Firmware Infiniband Interface code for POWER
*
* Authors: Christoph Raisch <raisch@de.ibm.com>
* Hoang-Nam Nguyen <hnguyen@de.ibm.com>
* Joachim Fenkes <fenkes@de.ibm.com>
* Gerd Bayer <gerd.bayer@de.ibm.com>
* Waleri Fomin <fomin@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
*
* All rights reserved.
*
* This source code is distributed under a dual license of GPL v2.0 and OpenIB
* BSD.
*
* OpenIB BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <asm/hvcall.h>
#include "ehca_tools.h"
#include "hcp_if.h"
#include "hcp_phyp.h"
#include "hipz_fns.h"
#include "ipz_pt_fn.h"
/* Input-register bit fields (IBM bit numbering) for H_ALLOC_RESOURCE (QP) */
#define H_ALL_RES_QP_ENHANCED_OPS EHCA_BMASK_IBM(9, 11)
#define H_ALL_RES_QP_PTE_PIN EHCA_BMASK_IBM(12, 12)
#define H_ALL_RES_QP_SERVICE_TYPE EHCA_BMASK_IBM(13, 15)
#define H_ALL_RES_QP_STORAGE EHCA_BMASK_IBM(16, 17)
#define H_ALL_RES_QP_LL_RQ_CQE_POSTING EHCA_BMASK_IBM(18, 18)
#define H_ALL_RES_QP_LL_SQ_CQE_POSTING EHCA_BMASK_IBM(19, 21)
#define H_ALL_RES_QP_SIGNALING_TYPE EHCA_BMASK_IBM(22, 23)
#define H_ALL_RES_QP_UD_AV_LKEY_CTRL EHCA_BMASK_IBM(31, 31)
#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
#define H_ALL_RES_QP_RESOURCE_TYPE EHCA_BMASK_IBM(56, 63)
/* Work-request / SGE limits packed into the max register (r10) */
#define H_ALL_RES_QP_MAX_OUTST_SEND_WR EHCA_BMASK_IBM(0, 15)
#define H_ALL_RES_QP_MAX_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_MAX_SEND_SGE EHCA_BMASK_IBM(32, 39)
#define H_ALL_RES_QP_MAX_RECV_SGE EHCA_BMASK_IBM(40, 47)
#define H_ALL_RES_QP_UD_AV_LKEY EHCA_BMASK_IBM(32, 63)
/* SRQ-related fields (r11/r12) */
#define H_ALL_RES_QP_SRQ_QP_TOKEN EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_SRQ_QP_HANDLE EHCA_BMASK_IBM(0, 64)
#define H_ALL_RES_QP_SRQ_LIMIT EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_SRQ_QPN EHCA_BMASK_IBM(40, 63)
/* Fields of the H_ALLOC_RESOURCE (QP) output registers */
#define H_ALL_RES_QP_ACT_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
#define H_ALL_RES_QP_ACT_OUTST_RECV_WR EHCA_BMASK_IBM(48, 63)
#define H_ALL_RES_QP_ACT_SEND_SGE EHCA_BMASK_IBM(8, 15)
#define H_ALL_RES_QP_ACT_RECV_SGE EHCA_BMASK_IBM(24, 31)
#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63)
/* H_MODIFY_PORT attribute bits */
#define H_MP_INIT_TYPE EHCA_BMASK_IBM(44, 47)
#define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48)
#define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49)
/* Register-dump format strings used in hCall tracing/error messages */
#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"
/* Serializes all hCalls when ehca_lock_hcalls is set (firmware workaround) */
static DEFINE_SPINLOCK(hcall_lock);
/*
 * Map an H_LONG_BUSY_* return code to the number of milliseconds the
 * caller should sleep before retrying the hCall.  Unrecognized codes
 * fall back to a minimal 1 ms delay.
 */
static u32 get_longbusy_msecs(int longbusy_rc)
{
	if (longbusy_rc == H_LONG_BUSY_ORDER_1_MSEC)
		return 1;
	if (longbusy_rc == H_LONG_BUSY_ORDER_10_MSEC)
		return 10;
	if (longbusy_rc == H_LONG_BUSY_ORDER_100_MSEC)
		return 100;
	if (longbusy_rc == H_LONG_BUSY_ORDER_1_SEC)
		return 1000;
	if (longbusy_rc == H_LONG_BUSY_ORDER_10_SEC)
		return 10000;
	if (longbusy_rc == H_LONG_BUSY_ORDER_100_SEC)
		return 100000;
	return 1;
}
/*
 * Issue a no-output hCall, retrying up to 5 times when the hypervisor
 * reports "long busy" (sleeping the suggested interval between tries).
 * Returns the hCall status, or H_BUSY if all retries were consumed.
 * When ehca_lock_hcalls is set, the actual call is serialized under
 * hcall_lock to work around a firmware issue.
 */
static long ehca_plpar_hcall_norets(unsigned long opcode,
unsigned long arg1,
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
unsigned long arg5,
unsigned long arg6,
unsigned long arg7)
{
long ret;
int i, sleep_msecs;
unsigned long flags = 0;
if (unlikely(ehca_debug_level >= 2))
ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
for (i = 0; i < 5; i++) {
/* serialize hCalls to work around firmware issue */
if (ehca_lock_hcalls)
spin_lock_irqsave(&hcall_lock, flags);
ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
arg5, arg6, arg7);
if (ehca_lock_hcalls)
spin_unlock_irqrestore(&hcall_lock, flags);
if (H_IS_LONG_BUSY(ret)) {
sleep_msecs = get_longbusy_msecs(ret);
msleep_interruptible(sleep_msecs);
continue;
}
if (ret < H_SUCCESS)
ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
opcode, ret, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
else
if (unlikely(ehca_debug_level >= 2))
ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
return ret;
}
return H_BUSY;
}
/*
 * Issue a 9-output hCall with the same retry/long-busy/locking policy
 * as ehca_plpar_hcall_norets().  The nine output registers are stored
 * into the caller-supplied outs[] array.  Returns the hCall status,
 * or H_BUSY after 5 exhausted retries.
 */
static long ehca_plpar_hcall9(unsigned long opcode,
unsigned long *outs, /* array of 9 outputs */
unsigned long arg1,
unsigned long arg2,
unsigned long arg3,
unsigned long arg4,
unsigned long arg5,
unsigned long arg6,
unsigned long arg7,
unsigned long arg8,
unsigned long arg9)
{
long ret;
int i, sleep_msecs;
unsigned long flags = 0;
if (unlikely(ehca_debug_level >= 2))
ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
arg1, arg2, arg3, arg4, arg5,
arg6, arg7, arg8, arg9);
for (i = 0; i < 5; i++) {
/* serialize hCalls to work around firmware issue */
if (ehca_lock_hcalls)
spin_lock_irqsave(&hcall_lock, flags);
ret = plpar_hcall9(opcode, outs,
arg1, arg2, arg3, arg4, arg5,
arg6, arg7, arg8, arg9);
if (ehca_lock_hcalls)
spin_unlock_irqrestore(&hcall_lock, flags);
if (H_IS_LONG_BUSY(ret)) {
sleep_msecs = get_longbusy_msecs(ret);
msleep_interruptible(sleep_msecs);
continue;
}
if (ret < H_SUCCESS) {
ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
opcode, arg1, arg2, arg3, arg4, arg5,
arg6, arg7, arg8, arg9);
ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
ret, outs[0], outs[1], outs[2], outs[3],
outs[4], outs[5], outs[6], outs[7],
outs[8]);
} else if (unlikely(ehca_debug_level >= 2))
ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
ret, outs[0], outs[1], outs[2], outs[3],
outs[4], outs[5], outs[6], outs[7],
outs[8]);
return ret;
}
return H_BUSY;
}
/*
 * Allocate an event queue (resource type 3) from the firmware.
 * Outputs: the EQ handle, the actual number of entries/pages granted,
 * and the interrupt source token (eq_ist).
 */
u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
struct ehca_pfeq *pfeq,
const u32 neq_control,
const u32 number_of_entries,
struct ipz_eq_handle *eq_handle,
u32 *act_nr_of_entries,
u32 *act_pages,
u32 *eq_ist)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
u64 allocate_controls;
/* resource type */
allocate_controls = 3ULL;
/* ISN is associated */
if (neq_control != 1)
allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
else /* notification event queue */
allocate_controls = (1ULL << 63) | allocate_controls;
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
allocate_controls, /* r5 */
number_of_entries, /* r6 */
0, 0, 0, 0, 0, 0);
eq_handle->handle = outs[0];
*act_nr_of_entries = (u32)outs[3];
*act_pages = (u32)outs[4];
*eq_ist = (u32)outs[5];
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resource - ret=%lli ", ret);
return ret;
}
/* Re-arm (reset) the given event mask on an event queue via H_RESET_EVENTS. */
u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
struct ipz_eq_handle eq_handle,
const u64 event_mask)
{
return ehca_plpar_hcall_norets(H_RESET_EVENTS,
adapter_handle.handle, /* r4 */
eq_handle.handle, /* r5 */
event_mask, /* r6 */
0, 0, 0, 0);
}
/*
 * Allocate a completion queue (resource type 2).  On success, map the
 * returned galpa addresses for HW access; if that mapping fails the CQ
 * is freed again and H_NO_MEM is returned.
 */
u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
struct ehca_cq *cq,
struct ehca_alloc_cq_parms *param)
{
int rc;
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
2, /* r5 */
param->eq_handle.handle, /* r6 */
cq->token, /* r7 */
param->nr_cqe, /* r8 */
0, 0, 0, 0);
cq->ipz_cq_handle.handle = outs[0];
param->act_nr_of_entries = (u32)outs[3];
param->act_pages = (u32)outs[4];
if (ret == H_SUCCESS) {
rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
if (rc) {
ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
rc, outs[5]);
/* undo the allocation; caller only sees H_NO_MEM */
ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
cq->ipz_cq_handle.handle, /* r5 */
0, 0, 0, 0, 0);
ret = H_NO_MEM;
}
}
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%lli", ret);
return ret;
}
/*
 * Allocate a queue pair (resource type 1).  The requested properties
 * are packed into bit fields per the H_ALL_RES_QP_* layout; the actual
 * granted sizes are unpacked from the output registers into *parms.
 * On galpa mapping failure the QP is freed and H_NO_MEM returned.
 */
u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
struct ehca_alloc_qp_parms *parms, int is_user)
{
int rc;
u64 ret;
u64 allocate_controls, max_r10_reg, r11, r12;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
allocate_controls =
EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
| EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
| EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
| EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
| EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
parms->squeue.page_size)
| EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
parms->rqueue.page_size)
| EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
!!(parms->ll_comp_flags & LLQP_RECV_COMP))
| EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
!!(parms->ll_comp_flags & LLQP_SEND_COMP))
| EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
parms->ud_av_l_key_ctl)
| EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
max_r10_reg =
EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
parms->squeue.max_wr + 1)
| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
parms->rqueue.max_wr + 1)
| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
parms->squeue.max_sge)
| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
parms->rqueue.max_sge);
r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
if (parms->ext_type == EQPT_SRQ)
r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
else
r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
allocate_controls, /* r5 */
parms->send_cq_handle.handle,
parms->recv_cq_handle.handle,
parms->eq_handle.handle,
((u64)parms->token << 32) | parms->pd.value,
max_r10_reg, r11, r12);
parms->qp_handle.handle = outs[0];
parms->real_qp_num = (u32)outs[1];
parms->squeue.act_nr_wqes =
(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
parms->rqueue.act_nr_wqes =
(u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
parms->squeue.act_nr_sges =
(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
parms->rqueue.act_nr_sges =
(u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
parms->squeue.queue_size =
(u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
parms->rqueue.queue_size =
(u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
if (ret == H_SUCCESS) {
/* NOTE(review): outs[6] is used for both galpa args here, while
 * the CQ path uses outs[5]/outs[6] -- presumably intentional for
 * QPs, but worth confirming against the firmware interface spec. */
rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
if (rc) {
ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
rc, outs[6]);
ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
parms->qp_handle.handle, /* r5 */
0, 0, 0, 0, 0);
ret = H_NO_MEM;
}
}
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%lli", ret);
return ret;
}
/*
 * Query one HCA port into the caller's response block, which must be
 * page aligned (the firmware writes the whole page).
 */
u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
const u8 port_id,
struct hipz_query_port *query_port_response_block)
{
u64 ret;
u64 r_cb = virt_to_abs(query_port_response_block);
if (r_cb & (EHCA_PAGESIZE-1)) {
ehca_gen_err("response block not page aligned");
return H_PARAMETER;
}
ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
adapter_handle.handle, /* r4 */
port_id, /* r5 */
r_cb, /* r6 */
0, 0, 0, 0);
if (ehca_debug_level >= 2)
ehca_dmp(query_port_response_block, 64, "response_block");
return ret;
}
/*
 * Modify HCA port attributes: translate the IB_PORT_* modify_mask bits
 * into the firmware's H_MP_* attribute encoding on top of port_cap.
 */
u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
const u8 port_id, const u32 port_cap,
const u8 init_type, const int modify_mask)
{
u64 port_attributes = port_cap;
if (modify_mask & IB_PORT_SHUTDOWN)
port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
if (modify_mask & IB_PORT_INIT_TYPE)
port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);
return ehca_plpar_hcall_norets(H_MODIFY_PORT,
adapter_handle.handle, /* r4 */
port_id, /* r5 */
port_attributes, /* r6 */
0, 0, 0, 0);
}
/* Query HCA attributes into a page-aligned response block. */
u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
struct hipz_query_hca *query_hca_rblock)
{
u64 r_cb = virt_to_abs(query_hca_rblock);
if (r_cb & (EHCA_PAGESIZE-1)) {
ehca_gen_err("response_block=%p not page aligned",
query_hca_rblock);
return H_PARAMETER;
}
return ehca_plpar_hcall_norets(H_QUERY_HCA,
adapter_handle.handle, /* r4 */
r_cb, /* r5 */
0, 0, 0, 0, 0);
}
/*
 * Generic H_REGISTER_RPAGES wrapper: register `count` pages at
 * logical_address_of_page with the given resource.  queue_type and
 * pagesize are packed together into r5.
 */
u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
const u8 pagesize,
const u8 queue_type,
const u64 resource_handle,
const u64 logical_address_of_page,
u64 count)
{
return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
adapter_handle.handle, /* r4 */
(u64)queue_type | ((u64)pagesize) << 8,
/* r5 */
resource_handle, /* r6 */
logical_address_of_page, /* r7 */
count, /* r8 */
0, 0);
}
/*
 * Register a queue page with an event queue.  Only one EQ page may be
 * registered per call, so any other count is rejected up front.
 * (Also fixes the "Ppage" typo in the error message, matching the
 * sibling CQ/QP variants.)
 */
u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
			     const struct ipz_eq_handle eq_handle,
			     struct ehca_pfeq *pfeq,
			     const u8 pagesize,
			     const u8 queue_type,
			     const u64 logical_address_of_page,
			     const u64 count)
{
	if (count != 1) {
		ehca_gen_err("Page counter=%llx", count);
		return H_PARAMETER;
	}
	return hipz_h_register_rpage(adapter_handle,
				     pagesize,
				     queue_type,
				     eq_handle.handle,
				     logical_address_of_page, count);
}
/* Query the state of interrupt source `ist`; H_BUSY is not treated as error. */
u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
u32 ist)
{
u64 ret;
ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
adapter_handle.handle, /* r4 */
ist, /* r5 */
0, 0, 0, 0, 0);
if (ret != H_SUCCESS && ret != H_BUSY)
ehca_gen_err("Could not query interrupt state.");
return ret;
}
/* Register a single queue page with a completion queue (one page per call). */
u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
const struct ipz_cq_handle cq_handle,
struct ehca_pfcq *pfcq,
const u8 pagesize,
const u8 queue_type,
const u64 logical_address_of_page,
const u64 count,
const struct h_galpa gal)
{
if (count != 1) {
ehca_gen_err("Page counter=%llx", count);
return H_PARAMETER;
}
return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
cq_handle.handle, logical_address_of_page,
count);
}
/*
 * Register queue page(s) with a queue pair.
 * NOTE(review): this checks `count > 1` while the EQ/CQ variants check
 * `count != 1` -- presumably count==0 is acceptable for QPs; confirm
 * against the firmware interface before unifying.
 */
u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct ehca_pfqp *pfqp,
const u8 pagesize,
const u8 queue_type,
const u64 logical_address_of_page,
const u64 count,
const struct h_galpa galpa)
{
if (count > 1) {
ehca_gen_err("Page counter=%llx", count);
return H_PARAMETER;
}
return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
qp_handle.handle, logical_address_of_page,
count);
}
/*
 * H_DISABLE_AND_GETC: disable a QP and retrieve pointers to the next
 * unprocessed send/receive WQEs.  Either output pointer may be NULL
 * if the caller is not interested.
 */
u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct ehca_pfqp *pfqp,
void **log_addr_next_sq_wqe2processed,
void **log_addr_next_rq_wqe2processed,
int dis_and_get_function_code)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
adapter_handle.handle, /* r4 */
dis_and_get_function_code, /* r5 */
qp_handle.handle, /* r6 */
0, 0, 0, 0, 0, 0);
if (log_addr_next_sq_wqe2processed)
*log_addr_next_sq_wqe2processed = (void *)outs[0];
if (log_addr_next_rq_wqe2processed)
*log_addr_next_rq_wqe2processed = (void *)outs[1];
return ret;
}
/* Modify QP attributes per update_mask from the given control block. */
u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct ehca_pfqp *pfqp,
const u64 update_mask,
struct hcp_modify_qp_control_block *mqpcb,
struct h_galpa gal)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
adapter_handle.handle, /* r4 */
qp_handle.handle, /* r5 */
update_mask, /* r6 */
virt_to_abs(mqpcb), /* r7 */
0, 0, 0, 0, 0);
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Insufficient resources ret=%lli", ret);
return ret;
}
/* Query QP attributes into the caller's control block. */
u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct ehca_pfqp *pfqp,
struct hcp_modify_qp_control_block *qqpcb,
struct h_galpa gal)
{
return ehca_plpar_hcall_norets(H_QUERY_QP,
adapter_handle.handle, /* r4 */
qp_handle.handle, /* r5 */
virt_to_abs(qqpcb), /* r6 */
0, 0, 0, 0);
}
/*
 * Destroy a queue pair: tear down its galpa mappings, disable it via
 * H_DISABLE_AND_GETC (function code 1), then free the firmware resource.
 * Returns H_RESOURCE if the galpa destructor fails.
 */
u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
struct ehca_qp *qp)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = hcp_galpas_dtor(&qp->galpas);
if (ret) {
ehca_gen_err("Could not destruct qp->galpas");
return H_RESOURCE;
}
ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
adapter_handle.handle, /* r4 */
/* function code */
1, /* r5 */
qp->ipz_qp_handle.handle, /* r6 */
0, 0, 0, 0, 0, 0);
if (ret == H_HARDWARE)
ehca_gen_err("HCA not operational. ret=%lli", ret);
ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
qp->ipz_qp_handle.handle, /* r5 */
0, 0, 0, 0, 0);
if (ret == H_RESOURCE)
ehca_gen_err("Resource still in use. ret=%lli", ret);
return ret;
}
/* Bind a QP as the special alias QP0 (SMI) for the given port. */
u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct h_galpa gal,
u32 port)
{
return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
adapter_handle.handle, /* r4 */
qp_handle.handle, /* r5 */
port, /* r6 */
0, 0, 0, 0);
}
/*
 * Bind a QP as alias QP1 (GSI) for the given port; also returns the
 * firmware-assigned PMA and BMA QP numbers.
 */
u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct h_galpa gal,
u32 port, u32 * pma_qp_nr,
u32 * bma_qp_nr)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
adapter_handle.handle, /* r4 */
qp_handle.handle, /* r5 */
port, /* r6 */
0, 0, 0, 0, 0, 0);
*pma_qp_nr = (u32)outs[0];
*bma_qp_nr = (u32)outs[1];
if (ret == H_ALIAS_EXIST)
ehca_gen_err("AQP1 already exists. ret=%lli", ret);
return ret;
}
/* Attach a QP to the multicast group identified by DLID and GID parts. */
u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct h_galpa gal,
u16 mcg_dlid,
u64 subnet_prefix, u64 interface_id)
{
u64 ret;
ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
adapter_handle.handle, /* r4 */
qp_handle.handle, /* r5 */
mcg_dlid, /* r6 */
interface_id, /* r7 */
subnet_prefix, /* r8 */
0, 0);
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%lli", ret);
return ret;
}
/* Detach a QP from a multicast group (inverse of hipz_h_attach_mcqp). */
u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
const struct ipz_qp_handle qp_handle,
struct h_galpa gal,
u16 mcg_dlid,
u64 subnet_prefix, u64 interface_id)
{
return ehca_plpar_hcall_norets(H_DETACH_MCQP,
adapter_handle.handle, /* r4 */
qp_handle.handle, /* r5 */
mcg_dlid, /* r6 */
interface_id, /* r7 */
subnet_prefix, /* r8 */
0, 0);
}
/*
 * Destroy a completion queue: tear down its galpa mappings, then free
 * the firmware resource (optionally forced).  Returns H_RESOURCE if
 * the galpa destructor fails or the CQ is still in use.
 * (Fixes the "cp->galpas" typo in the error message.)
 */
u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
		      struct ehca_cq *cq,
		      u8 force_flag)
{
	u64 ret;

	ret = hcp_galpas_dtor(&cq->galpas);
	if (ret) {
		ehca_gen_err("Could not destruct cq->galpas");
		return H_RESOURCE;
	}
	ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
				      adapter_handle.handle, /* r4 */
				      cq->ipz_cq_handle.handle, /* r5 */
				      force_flag != 0 ? 1L : 0L, /* r6 */
				      0, 0, 0, 0);
	if (ret == H_RESOURCE)
		ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
	return ret;
}
/* Destroy an event queue: drop galpa mappings, then free the resource. */
u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
struct ehca_eq *eq)
{
u64 ret;
ret = hcp_galpas_dtor(&eq->galpas);
if (ret) {
ehca_gen_err("Could not destruct eq->galpas");
return H_RESOURCE;
}
ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
eq->ipz_eq_handle.handle, /* r5 */
0, 0, 0, 0, 0);
if (ret == H_RESOURCE)
ehca_gen_err("Resource in use. ret=%lli ", ret);
return ret;
}
/*
 * Allocate a memory region (resource type 5) covering [vaddr, vaddr+length)
 * with the given access rights and PD.  Outputs the MR handle and keys.
 */
u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
const u64 vaddr,
const u64 length,
const u32 access_ctrl,
const struct ipz_pd pd,
struct ehca_mr_hipzout_parms *outparms)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
5, /* r5 */
vaddr, /* r6 */
length, /* r7 */
(((u64)access_ctrl) << 32ULL), /* r8 */
pd.value, /* r9 */
0, 0, 0);
outparms->handle.handle = outs[0];
outparms->lkey = (u32)outs[2];
outparms->rkey = (u32)outs[3];
return ret;
}
/*
 * Register page(s) with a memory region.  When count > 1,
 * logical_address_of_page points at a kpage list and must itself be
 * 4k aligned; otherwise it is the page address directly.  With debug
 * level >= 3 the page list is dumped.
 */
u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
const u8 pagesize,
const u8 queue_type,
const u64 logical_address_of_page,
const u64 count)
{
u64 ret;
if (unlikely(ehca_debug_level >= 3)) {
if (count > 1) {
u64 *kpage;
int i;
kpage = (u64 *)abs_to_virt(logical_address_of_page);
for (i = 0; i < count; i++)
ehca_gen_dbg("kpage[%d]=%p",
i, (void *)kpage[i]);
} else
ehca_gen_dbg("kpage=%p",
(void *)logical_address_of_page);
}
if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
ehca_gen_err("logical_address_of_page not on a 4k boundary "
"adapter_handle=%llx mr=%p mr_handle=%llx "
"pagesize=%x queue_type=%x "
"logical_address_of_page=%llx count=%llx",
adapter_handle.handle, mr,
mr->ipz_mr_handle.handle, pagesize, queue_type,
logical_address_of_page, count);
ret = H_PARAMETER;
} else
ret = hipz_h_register_rpage(adapter_handle, pagesize,
queue_type,
mr->ipz_mr_handle.handle,
logical_address_of_page, count);
return ret;
}
/* Query MR properties; unpack length, vaddr, ACL and l/r keys from outs[]. */
u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
struct ehca_mr_hipzout_parms *outparms)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
adapter_handle.handle, /* r4 */
mr->ipz_mr_handle.handle, /* r5 */
0, 0, 0, 0, 0, 0, 0);
outparms->len = outs[0];
outparms->vaddr = outs[1];
outparms->acl = outs[4] >> 32;
outparms->lkey = (u32)(outs[5] >> 32);
outparms->rkey = (u32)(outs[5] & (0xffffffff));
return ret;
}
/* Free a memory region's firmware resource. */
u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr)
{
return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
mr->ipz_mr_handle.handle, /* r5 */
0, 0, 0, 0, 0);
}
/*
 * Re-register a physical MR with new address/length/access/PD using the
 * control block at mr_addr_cb.  Outputs the new vaddr and l/r keys.
 */
u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
const u64 vaddr_in,
const u64 length,
const u32 access_ctrl,
const struct ipz_pd pd,
const u64 mr_addr_cb,
struct ehca_mr_hipzout_parms *outparms)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
adapter_handle.handle, /* r4 */
mr->ipz_mr_handle.handle, /* r5 */
vaddr_in, /* r6 */
length, /* r7 */
/* r8 */
((((u64)access_ctrl) << 32ULL) | pd.value),
mr_addr_cb, /* r9 */
0, 0, 0);
outparms->vaddr = outs[1];
outparms->lkey = (u32)outs[2];
outparms->rkey = (u32)outs[3];
return ret;
}
/*
 * Register a shared MR derived from orig_mr at a new virtual address
 * with its own access rights and PD.  Outputs handle and l/r keys.
 */
u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mr *mr,
const struct ehca_mr *orig_mr,
const u64 vaddr_in,
const u32 access_ctrl,
const struct ipz_pd pd,
struct ehca_mr_hipzout_parms *outparms)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
adapter_handle.handle, /* r4 */
orig_mr->ipz_mr_handle.handle, /* r5 */
vaddr_in, /* r6 */
(((u64)access_ctrl) << 32ULL), /* r7 */
pd.value, /* r8 */
0, 0, 0, 0);
outparms->handle.handle = outs[0];
outparms->lkey = (u32)outs[2];
outparms->rkey = (u32)outs[3];
return ret;
}
/* Allocate a memory window (resource type 6); outputs handle and rkey. */
u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mw *mw,
const struct ipz_pd pd,
struct ehca_mw_hipzout_parms *outparms)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
6, /* r5 */
pd.value, /* r6 */
0, 0, 0, 0, 0, 0);
outparms->handle.handle = outs[0];
outparms->rkey = (u32)outs[3];
return ret;
}
/* Query a memory window; only the rkey is extracted from the outputs. */
u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mw *mw,
struct ehca_mw_hipzout_parms *outparms)
{
u64 ret;
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
adapter_handle.handle, /* r4 */
mw->ipz_mw_handle.handle, /* r5 */
0, 0, 0, 0, 0, 0, 0);
outparms->rkey = (u32)outs[3];
return ret;
}
/* Free a memory window's firmware resource. */
u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
const struct ehca_mw *mw)
{
return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
mw->ipz_mw_handle.handle, /* r5 */
0, 0, 0, 0, 0);
}
/*
 * Retrieve firmware error data for a resource into rblock, which must
 * be page aligned.  byte_count is currently unused by this wrapper.
 * (Internal parameter renamed from misspelled "ressource_handle".)
 */
u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
		      const u64 resource_handle,
		      void *rblock,
		      unsigned long *byte_count)
{
	u64 r_cb = virt_to_abs(rblock);

	if (r_cb & (EHCA_PAGESIZE-1)) {
		ehca_gen_err("rblock not page aligned.");
		return H_PARAMETER;
	}
	return ehca_plpar_hcall_norets(H_ERROR_DATA,
				       adapter_handle.handle,
				       resource_handle,
				       r_cb,
				       0, 0, 0, 0);
}
/*
 * Issue an end-of-interrupt for `irq` via the H_EOI hCall; the XIRR
 * value combines a 0xff CPPR byte with the interrupt number.  iosync()
 * orders prior MMIO before the EOI.
 */
u64 hipz_h_eoi(int irq)
{
unsigned long xirr;
iosync();
xirr = (0xffULL << 24) | irq;
return plpar_hcall_norets(H_EOI, xirr);
}
| gpl-2.0 |
Jimmyk422/android_kernel_samsung_iconvmu | drivers/s390/char/tape_proc.c | 9183 | 3401 | /*
* drivers/s390/char/tape.c
* tape device driver for S/390 and zSeries tapes.
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
*
* PROCFS Functions
*/
#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#define TAPE_DBF_AREA tape_core_dbf
#include "tape.h"
/* Fixed-width, human-readable names for the tape medium states shown in
 * the MedState column of /proc/tapedevices. */
static const char *tape_med_st_verbose[MS_SIZE] =
{
	[MS_UNKNOWN] = "UNKNOWN ",
	[MS_LOADED] = "LOADED ",
	[MS_UNLOADED] = "UNLOADED"
};
/* our proc tapedevices entry */
static struct proc_dir_entry *tape_proc_devices;
/*
* Show function for /proc/tapedevices
*/
/*
 * seq_file ->show: print one line of /proc/tapedevices for the device with
 * minor group @v - 1.  Emits the column header before the first device.
 * Silently skips minors that have no device attached (tape_find_device()
 * returns an ERR_PTR).  Device fields are sampled under the ccw device
 * lock to get a consistent snapshot.
 */
static int tape_proc_show(struct seq_file *m, void *v)
{
	struct tape_device *device;
	struct tape_request *request;
	const char *str;
	unsigned long n;
	/* v is the 1-based iterator cookie handed out by tape_proc_start() */
	n = (unsigned long) v - 1;
	if (!n) {
		seq_printf(m, "TapeNo\tBusID CuType/Model\t"
			"DevType/Model\tBlkSize\tState\tOp\tMedState\n");
	}
	device = tape_find_device(n);
	if (IS_ERR(device))
		return 0;	/* no device at this index — print nothing */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	seq_printf(m, "%d\t", (int) n);
	seq_printf(m, "%-10.10s ", dev_name(&device->cdev->dev));
	seq_printf(m, "%04X/", device->cdev->id.cu_type);
	seq_printf(m, "%02X\t", device->cdev->id.cu_model);
	seq_printf(m, "%04X/", device->cdev->id.dev_type);
	seq_printf(m, "%02X\t\t", device->cdev->id.dev_model);
	if (device->char_data.block_size == 0)
		seq_printf(m, "auto\t");
	else
		seq_printf(m, "%i\t", device->char_data.block_size);
	if (device->tape_state >= 0 &&
	    device->tape_state < TS_SIZE)
		str = tape_state_verbose[device->tape_state];
	else
		str = "UNKNOWN";
	seq_printf(m, "%s\t", str);
	/* Current operation: first queued request, or "---" when idle. */
	if (!list_empty(&device->req_queue)) {
		request = list_entry(device->req_queue.next,
				     struct tape_request, list);
		str = tape_op_verbose[request->op];
	} else
		str = "---";
	seq_printf(m, "%s\t", str);
	seq_printf(m, "%s\n", tape_med_st_verbose[device->medium_state]);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Drop the reference taken by tape_find_device(). */
	tape_put_device(device);
	return 0;
}
/*
 * seq_file ->start: map the file position onto a non-NULL iterator cookie
 * (position + 1, so that position 0 does not look like end-of-sequence).
 * Iteration ends once every possible minor group has been visited.
 */
static void *tape_proc_start(struct seq_file *m, loff_t *pos)
{
	unsigned long index = (unsigned long) *pos;

	if (index >= 256 / TAPE_MINORS_PER_DEV)
		return NULL;

	return (void *) (index + 1);
}
/*
 * seq_file ->next: advance the position and re-derive the cookie via
 * tape_proc_start(), which also handles the end-of-sequence check.
 */
static void *tape_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
	*pos += 1;
	return tape_proc_start(m, pos);
}
/* seq_file ->stop: nothing to release — no state is held across iteration. */
static void tape_proc_stop(struct seq_file *m, void *v)
{
}
/* Iterator operations for /proc/tapedevices. */
static const struct seq_operations tape_proc_seq = {
	.start		= tape_proc_start,
	.next		= tape_proc_next,
	.stop		= tape_proc_stop,
	.show		= tape_proc_show,
};
/* Stateless open: all iteration state lives inside the seq_file itself. */
static int tape_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &tape_proc_seq);
}
/* File operations for /proc/tapedevices (read-only seq_file; note no
 * .write handler is installed despite S_IWUSR in tape_proc_init()). */
static const struct file_operations tape_proc_ops =
{
	.owner		= THIS_MODULE,
	.open		= tape_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
/*
* Initialize procfs stuff on startup
*/
/*
 * Create /proc/tapedevices at driver start-up.
 *
 * Failure is non-fatal: the driver works without its proc file, and
 * tape_proc_cleanup() checks the pointer before removing the entry, so
 * no explicit error handling is needed here.  (The original code tested
 * for NULL only to execute an empty `return;` — dead code, removed.)
 */
void
tape_proc_init(void)
{
	tape_proc_devices =
		proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL,
			    &tape_proc_ops);
}
/*
* Cleanup all stuff registered to the procfs
*/
/*
 * Remove /proc/tapedevices on driver shutdown.  A NULL pointer means
 * tape_proc_init() never managed to create the entry.
 */
void
tape_proc_cleanup(void)
{
	if (!tape_proc_devices)
		return;
	remove_proc_entry("tapedevices", NULL);
}
| gpl-2.0 |
mgbotoe/GT-I8552-kernel-source | tools/power/cpupower/bench/benchmark.c | 9951 | 5607 | /* cpufreq-bench CPUFreq microbenchmark
*
* Copyright (C) 2008 Christian Kornacker <ckornacker@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include "config.h"
#include "system.h"
#include "benchmark.h"
/* Print out progress if we log into a file */
/* Wrapped in do { } while (0) so the macro expands to a single statement
 * and is safe inside an unbraced if/else (the bare `if` form was a
 * dangling-else hazard).  Relies on `config` being in the caller's scope. */
#define show_progress(total_time, progress_time) \
do { \
	if (config->output != stdout) { \
		fprintf(stdout, "Progress: %02lu %%\r", \
			(progress_time * 100) / total_time); \
		fflush(stdout); \
	} \
} while (0)
/**
* compute how many rounds of calculation we should do
* to get the given load time
*
* @param load aimed load time in µs
*
* @retval rounds of calculation
**/
/**
 * compute how many rounds of calculation we should do
 * to get the given load time
 *
 * Calibrates by timing a fixed number of ROUNDS() iterations and then
 * iteratively scaling the round count toward the requested load time.
 *
 * @param load aimed load time in µs
 * @param config benchmark configuration (used for verbose output)
 *
 * @retval rounds of calculation
 **/
unsigned int calculate_timespace(long load, struct config *config)
{
	int i;
	long long now, then;
	unsigned int estimated = GAUGECOUNT;
	unsigned int rounds = 0;
	unsigned int timed = 0;

	if (config->verbose)
		printf("calibrating load of %lius, please wait...\n", load);

	/* get the initial calculation time for a specific number of rounds */
	now = get_time();
	ROUNDS(estimated);
	then = get_time();
	timed = (unsigned int)(then - now);
	if (timed == 0)
		timed = 1;	/* fast machine: avoid division by zero below */

	/* approximation of the wanted load time by comparing with the
	 * initial calculation time */
	for (i = 0; i < 4; i++) {
		rounds = (unsigned int)(load * estimated / timed);
		dprintf("calibrating with %u rounds\n", rounds);
		now = get_time();
		ROUNDS(rounds);
		then = get_time();
		timed = (unsigned int)(then - now);
		if (timed == 0)
			timed = 1;	/* same guard for the refinement steps */
		estimated = rounds;
	}
	if (config->verbose)
		printf("calibration done\n");

	return estimated;
}
/**
* benchmark
* generates a specific sleep an load time with the performance
* governor and compares the used time for same calculations done
* with the configured powersave governor
*
* @param config config values for the benchmark
*
**/
/**
 * benchmark
 * generates a specific sleep and load time with the performance
 * governor and compares the used time for same calculations done
 * with the configured powersave governor
 *
 * For each round: calibrate, run config->cycles sleep/load pairs under
 * "performance", then the same under config->governor, and write both
 * averages plus their ratio to config->output.  sleep/load times grow
 * by the configured step sizes each round.
 *
 * Fixes vs. original: "_round"/"avarage" typos in user-visible output.
 *
 * @param config config values for the benchmark
 **/
void start_benchmark(struct config *config)
{
	unsigned int _round, cycle;
	long long now, then;
	long sleep_time = 0, load_time = 0;
	long performance_time = 0, powersave_time = 0;
	unsigned int calculations;
	unsigned long total_time = 0, progress_time = 0;

	sleep_time = config->sleep;
	load_time = config->load;

	/* For the progress bar */
	for (_round = 1; _round <= config->rounds; _round++)
		total_time += _round * (config->sleep + config->load);
	total_time *= 2; /* powersave and performance cycles */

	for (_round = 0; _round < config->rounds; _round++) {
		performance_time = 0LL;
		powersave_time = 0LL;

		show_progress(total_time, progress_time);

		/* set the cpufreq governor to "performance" which disables
		 * P-State switching. */
		if (set_cpufreq_governor("performance", config->cpu) != 0)
			return;

		/* calibrate the calculation time. the resulting calculation
		 * rounds should produce a load which matches the configured
		 * load time */
		calculations = calculate_timespace(load_time, config);

		if (config->verbose)
			printf("round %i: doing %u cycles with %u calculations"
			       " for %lius\n", _round + 1, config->cycles,
			       calculations, load_time);

		fprintf(config->output, "%u %li %li ",
			_round, load_time, sleep_time);

		if (config->verbose)
			printf("average: %lius, rps:%li\n",
				load_time / calculations,
				1000000 * calculations / load_time);

		/* do some sleep/load cycles with the performance governor */
		for (cycle = 0; cycle < config->cycles; cycle++) {
			now = get_time();
			usleep(sleep_time);
			ROUNDS(calculations);
			then = get_time();
			performance_time += then - now - sleep_time;
			if (config->verbose)
				printf("performance cycle took %lius, "
					"sleep: %lius, "
					"load: %lius, rounds: %u\n",
					(long)(then - now), sleep_time,
					load_time, calculations);
		}
		fprintf(config->output, "%li ",
			performance_time / config->cycles);

		progress_time += sleep_time + load_time;
		show_progress(total_time, progress_time);

		/* set the powersave governor which activates P-State switching
		 * again */
		if (set_cpufreq_governor(config->governor, config->cpu) != 0)
			return;

		/* again, do some sleep/load cycles with the
		 * powersave governor */
		for (cycle = 0; cycle < config->cycles; cycle++) {
			now = get_time();
			usleep(sleep_time);
			ROUNDS(calculations);
			then = get_time();
			powersave_time += then - now - sleep_time;
			if (config->verbose)
				printf("powersave cycle took %lius, "
					"sleep: %lius, "
					"load: %lius, rounds: %u\n",
					(long)(then - now), sleep_time,
					load_time, calculations);
		}

		progress_time += sleep_time + load_time;

		/* compare the average sleep/load cycles */
		fprintf(config->output, "%li ",
			powersave_time / config->cycles);
		fprintf(config->output, "%.3f\n",
			performance_time * 100.0 / powersave_time);
		fflush(config->output);

		if (config->verbose)
			printf("performance is at %.2f%%\n",
				performance_time * 100.0 / powersave_time);

		sleep_time += config->sleep_step;
		load_time += config->load_step;
	}
}
| gpl-2.0 |
Xanwar/android_kernel_asus_a500cg | drivers/oprofile/oprof.c | 9951 | 5163 | /**
* @file oprof.c
*
* @remark Copyright 2002 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/oprofile.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include "oprof.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprofile_stats.h"
struct oprofile_operations oprofile_ops;
unsigned long oprofile_started;
unsigned long oprofile_backtrace_depth;
static unsigned long is_setup;
static DEFINE_MUTEX(start_mutex);
/* timer
0 - use performance monitoring hardware if available
1 - use the timer int mechanism regardless
*/
static int timer = 0;
int oprofile_setup(void)
{
int err;
mutex_lock(&start_mutex);
if ((err = alloc_cpu_buffers()))
goto out;
if ((err = alloc_event_buffer()))
goto out1;
if (oprofile_ops.setup && (err = oprofile_ops.setup()))
goto out2;
/* Note even though this starts part of the
* profiling overhead, it's necessary to prevent
* us missing task deaths and eventually oopsing
* when trying to process the event buffer.
*/
if (oprofile_ops.sync_start) {
int sync_ret = oprofile_ops.sync_start();
switch (sync_ret) {
case 0:
goto post_sync;
case 1:
goto do_generic;
case -1:
goto out3;
default:
goto out3;
}
}
do_generic:
if ((err = sync_start()))
goto out3;
post_sync:
is_setup = 1;
mutex_unlock(&start_mutex);
return 0;
out3:
if (oprofile_ops.shutdown)
oprofile_ops.shutdown();
out2:
free_event_buffer();
out1:
free_cpu_buffers();
out:
mutex_unlock(&start_mutex);
return err;
}
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
static void switch_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(switch_work, switch_worker);
/* Schedule the next event-multiplexing switch after one time slice,
 * but only if the backend supports event switching. */
static void start_switch_worker(void)
{
	if (oprofile_ops.switch_events)
		schedule_delayed_work(&switch_work, oprofile_time_slice);
}
/* Cancel the multiplexing work and wait for a running instance to finish. */
static void stop_switch_worker(void)
{
	cancel_delayed_work_sync(&switch_work);
}
/* Periodic worker: switch the active event set, count the switch, and
 * re-arm itself.  A non-zero return from switch_events() stops the cycle. */
static void switch_worker(struct work_struct *work)
{
	if (oprofile_ops.switch_events())
		return;

	atomic_inc(&oprofile_stats.multiplex_counter);
	start_switch_worker();
}
/* User inputs in ms, converts to jiffies */
int oprofile_set_timeout(unsigned long val_msec)
{
int err = 0;
unsigned long time_slice;
mutex_lock(&start_mutex);
if (oprofile_started) {
err = -EBUSY;
goto out;
}
if (!oprofile_ops.switch_events) {
err = -EINVAL;
goto out;
}
time_slice = msecs_to_jiffies(val_msec);
if (time_slice == MAX_JIFFY_OFFSET) {
err = -EINVAL;
goto out;
}
oprofile_time_slice = time_slice;
out:
mutex_unlock(&start_mutex);
return err;
}
#else
/* No-op stubs when CONFIG_OPROFILE_EVENT_MULTIPLEX is disabled. */
static inline void start_switch_worker(void) { }
static inline void stop_switch_worker(void) { }
#endif
/* Actually start profiling (echo 1>/dev/oprofile/enable) */
/* Actually start profiling (echo 1>/dev/oprofile/enable).
 * Requires a prior successful oprofile_setup(); starting twice is a no-op
 * success.  Returns 0 or a negative errno from the backend start hook. */
int oprofile_start(void)
{
	int err = -EINVAL;

	mutex_lock(&start_mutex);

	if (!is_setup)
		goto unlock;

	err = 0;
	if (oprofile_started)
		goto unlock;	/* already running: report success */

	oprofile_reset_stats();

	err = oprofile_ops.start();
	if (err)
		goto unlock;

	start_switch_worker();

	oprofile_started = 1;
unlock:
	mutex_unlock(&start_mutex);
	return err;
}
/* echo 0>/dev/oprofile/enable */
void oprofile_stop(void)
{
mutex_lock(&start_mutex);
if (!oprofile_started)
goto out;
oprofile_ops.stop();
oprofile_started = 0;
stop_switch_worker();
/* wake up the daemon to read what remains */
wake_up_buffer_waiter();
out:
mutex_unlock(&start_mutex);
}
/*
 * Tear down profiling state: stop buffer synchronisation (backend-specific
 * or generic, chosen by sync_stop()'s return value), shut the backend down,
 * and free event and per-cpu buffers.  Mirrors oprofile_setup() in reverse.
 */
void oprofile_shutdown(void)
{
	mutex_lock(&start_mutex);
	if (oprofile_ops.sync_stop) {
		int sync_ret = oprofile_ops.sync_stop();
		switch (sync_ret) {
		case 0:		/* backend handled its own sync teardown */
			goto post_sync;
		case 1:		/* backend requests the generic teardown */
			goto do_generic;
		default:	/* treat unknown results as handled */
			goto post_sync;
		}
	}
do_generic:
	sync_stop();
post_sync:
	if (oprofile_ops.shutdown)
		oprofile_ops.shutdown();
	/* allow a subsequent oprofile_setup() to run again */
	is_setup = 0;
	free_event_buffer();
	free_cpu_buffers();
	mutex_unlock(&start_mutex);
}
/*
 * Store @val into @addr, but only while profiling is stopped — settings
 * must not change under a running session.  Returns -EBUSY otherwise.
 */
int oprofile_set_ulong(unsigned long *addr, unsigned long val)
{
	int err;

	mutex_lock(&start_mutex);
	if (oprofile_started) {
		err = -EBUSY;
	} else {
		*addr = val;
		err = 0;
	}
	mutex_unlock(&start_mutex);

	return err;
}
static int timer_mode;
/*
 * Module init: prefer hardware profiling via the architecture backend;
 * fall back to NMI-timer or plain timer mode.  timer_mode records which
 * path won so oprofile_exit() knows whether arch teardown is needed.
 */
static int __init oprofile_init(void)
{
	int err;

	/* always init architecture to setup backtrace support */
	timer_mode = 0;
	err = oprofile_arch_init(&oprofile_ops);
	if (!err) {
		/* hardware mode: done unless the fs registration fails */
		if (!timer && !oprofilefs_register())
			return 0;
		oprofile_arch_exit();
	}

	/* setup timer mode: */
	timer_mode = 1;
	/* no nmi timer mode if oprofile.timer is set */
	if (timer || op_nmi_timer_init(&oprofile_ops)) {
		err = oprofile_timer_init(&oprofile_ops);
		if (err)
			return err;
	}

	return oprofilefs_register();
}
/* Module exit: unregister the fs; arch teardown only applies when the
 * hardware backend (not timer mode) was initialised. */
static void __exit oprofile_exit(void)
{
	oprofilefs_unregister();
	if (timer_mode)
		return;
	oprofile_arch_exit();
}
module_init(oprofile_init);
module_exit(oprofile_exit);
module_param_named(timer, timer, int, 0644);
MODULE_PARM_DESC(timer, "force use of timer interrupt");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Levon <levon@movementarian.org>");
MODULE_DESCRIPTION("OProfile system profiler");
| gpl-2.0 |
StarKissed/starkissed-kernel-mecha | drivers/atm/nicstarmac.c | 13023 | 6098 | /*
* this file included by nicstar.c
*/
/*
* nicstarmac.c
* Read this ForeRunner's MAC address from eprom/eeprom
*/
#include <linux/kernel.h>
typedef void __iomem *virt_addr_t;
#define CYCLE_DELAY 5
/*
This was the original definition
#define osp_MicroDelay(microsec) \
do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
*/
/* do { } while (0) so `osp_MicroDelay(x);` is one statement — the original
 * brace-block form left a stray `;` that broke unbraced if/else callers. */
#define osp_MicroDelay(microsec) do { unsigned long useconds = (microsec); \
				      udelay((useconds)); } while (0)
/*
* The following tables represent the timing diagrams found in
* the Data Sheet for the Xicor X25020 EEProm. The #defines below
* represent the bits in the NICStAR's General Purpose register
* that must be toggled for the corresponding actions on the EEProm
* to occur.
*/
/* Write Data To EEProm from SI line on rising edge of CLK */
/* Read Data From EEProm on falling edge of CLK */
#define CS_HIGH 0x0002 /* Chip select high */
#define CS_LOW 0x0000 /* Chip select low (active low) */
#define CLK_HIGH 0x0004 /* Clock high */
#define CLK_LOW 0x0000 /* Clock low */
#define SI_HIGH 0x0001 /* Serial input data high */
#define SI_LOW 0x0000 /* Serial input data low */
/* Read Status Register = 0000 0101b */
#if 0
static u_int32_t rdsrtab[] = {
CS_HIGH | CLK_HIGH,
CS_LOW | CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW | SI_HIGH,
CLK_HIGH | SI_HIGH, /* 1 */
CLK_LOW | SI_LOW,
CLK_HIGH, /* 0 */
CLK_LOW | SI_HIGH,
CLK_HIGH | SI_HIGH /* 1 */
};
#endif /* 0 */
/* Read from EEPROM = 0000 0011b */
/* GP-register bit patterns that clock the EEPROM READ opcode (0000 0011b),
 * MSB first, into the X25020: one table entry per clock edge. */
static u_int32_t readtab[] = {
/*
	CS_HIGH | CLK_HIGH,
*/
	CS_LOW | CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW,
	CLK_HIGH,		/* 0 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH,	/* 1 */
	CLK_LOW | SI_HIGH,
	CLK_HIGH | SI_HIGH	/* 1 */
};
/* Clock to read from/write to the eeprom */
/* Alternating low/high clock edges used to shift address and data bits
 * to/from the EEPROM; indexed pairwise by the bit-banging loops. */
static u_int32_t clocktab[] = {
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW,
	CLK_HIGH,
	CLK_LOW
};
/* Spin until the command register is free (STAT bit 9 clear), then write.
 * Fixes: the body referenced the caller-scope identifier `base` instead of
 * the `bs` parameter (worked only because every caller passes `base`), and
 * the unwrapped while+writel pair was unsafe inside an unbraced if/else —
 * now hygienic and wrapped in do { } while (0). */
#define NICSTAR_REG_WRITE(bs, reg, val) \
	do { \
		while (readl((bs) + STAT) & 0x0200) \
			; \
		writel((val), (bs) + (reg)); \
	} while (0)
#define NICSTAR_REG_READ(bs, reg) \
	readl((bs) + (reg))
#define NICSTAR_REG_GENERAL_PURPOSE GP
/*
* This routine will clock the Read_Status_reg function into the X2520
* eeprom, then pull the result from bit 16 of the NicSTaR's General Purpose
* register.
*/
#if 0
u_int32_t nicstar_read_eprom_status(virt_addr_t base)
{
u_int32_t val;
u_int32_t rbyte;
int32_t i, j;
/* Send read instruction */
val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;
for (i = 0; i < ARRAY_SIZE(rdsrtab); i++) {
NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
(val | rdsrtab[i]));
osp_MicroDelay(CYCLE_DELAY);
}
/* Done sending instruction - now pull data off of bit 16, MSB first */
/* Data clocked out of eeprom on falling edge of clock */
rbyte = 0;
for (i = 7, j = 0; i >= 0; i--) {
NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
(val | clocktab[j++]));
rbyte |= (((NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE)
& 0x00010000) >> 16) << i);
NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
(val | clocktab[j++]));
osp_MicroDelay(CYCLE_DELAY);
}
NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, 2);
osp_MicroDelay(CYCLE_DELAY);
return rbyte;
}
#endif /* 0 */
/*
* This routine will clock the Read_data function into the X2520
* eeprom, followed by the address to read from, through the NicSTaR's General
* Purpose register.
*/
/*
 * Bit-bang one byte out of the X25020 EEPROM through the NICStAR's
 * General Purpose register: send the READ opcode, clock out the 8-bit
 * address MSB-first, then sample 8 data bits from GP bit 16 on the
 * falling clock edges.  The statement order and delays follow the
 * EEPROM timing diagram — do not reorder.
 */
static u_int8_t read_eprom_byte(virt_addr_t base, u_int8_t offset)
{
	u_int32_t val = 0;
	int i, j = 0;
	u_int8_t tempread = 0;
	/* preserve the non-EEPROM GP bits; low nibble drives CS/CLK/SI */
	val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;
	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | readtab[i]));
		osp_MicroDelay(CYCLE_DELAY);
	}
	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | clocktab[j++] | ((offset >> i) & 1)));
		osp_MicroDelay(CYCLE_DELAY);
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | clocktab[j++] | ((offset >> i) & 1)));
		osp_MicroDelay(CYCLE_DELAY);
	}
	j = 0;
	/* Now, we can read data from the eeprom by clocking it in */
	for (i = 7; i >= 0; i--) {
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | clocktab[j++]));
		osp_MicroDelay(CYCLE_DELAY);
		/* data appears on GP bit 16 */
		tempread |=
		    (((NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE)
		       & 0x00010000) >> 16) << i);
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | clocktab[j++]));
		osp_MicroDelay(CYCLE_DELAY);
	}
	/* deselect the chip (CS high) when done */
	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, 2);
	osp_MicroDelay(CYCLE_DELAY);
	return tempread;
}
/*
 * Put the EEPROM interface into a known idle state by toggling the clock
 * twice with chip select held high (deselected), per the device's timing
 * requirements.
 */
static void nicstar_init_eprom(virt_addr_t base)
{
	u_int32_t val;

	/*
	 * turn chip select off
	 */
	val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;

	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
			  (val | CS_HIGH | CLK_HIGH));
	osp_MicroDelay(CYCLE_DELAY);

	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
			  (val | CS_HIGH | CLK_LOW));
	osp_MicroDelay(CYCLE_DELAY);

	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
			  (val | CS_HIGH | CLK_HIGH));
	osp_MicroDelay(CYCLE_DELAY);

	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
			  (val | CS_HIGH | CLK_LOW));
	osp_MicroDelay(CYCLE_DELAY);
}
/*
* This routine will be the interface to the ReadPromByte function
* above.
*/
/*
 * Read @nbytes sequential bytes from the EEPROM starting at @prom_offset
 * into @buffer, pausing one cycle delay between byte reads.
 */
static void
nicstar_read_eprom(virt_addr_t base,
		   u_int8_t prom_offset, u_int8_t * buffer, u_int32_t nbytes)
{
	u_int i;

	for (i = 0; i < nbytes; i++, prom_offset++) {
		buffer[i] = read_eprom_byte(base, prom_offset);
		osp_MicroDelay(CYCLE_DELAY);
	}
}
| gpl-2.0 |
gwlim/barrier-breaker-hw550-3g | target/linux/amazon/files/drivers/net/ethernet/admmod.c | 224 | 41442 | /******************************************************************************
Copyright (c) 2004, Infineon Technologies. All rights reserved.
No Warranty
Because the program is licensed free of charge, there is no warranty for
the program, to the extent permitted by applicable law. Except when
otherwise stated in writing the copyright holders and/or other parties
provide the program "as is" without warranty of any kind, either
expressed or implied, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose. The
entire risk as to the quality and performance of the program is with
you. should the program prove defective, you assume the cost of all
necessary servicing, repair or correction.
In no event unless required by applicable law or agreed to in writing
will any copyright holder, or any other party who may modify and/or
redistribute the program as permitted above, be liable to you for
damages, including any general, special, incidental or consequential
damages arising out of the use or inability to use the program
(including but not limited to loss of data or data being rendered
inaccurate or losses sustained by you or third parties or a failure of
the program to operate with any other programs), even if such holder or
other party has been advised of the possibility of such damages.
******************************************************************************
Module : admmod.c
Date : 2004-09-01
Description : JoeLin
Remarks:
Revision:
MarsLin, add to support VLAN
*****************************************************************************/
//000001.joelin 2005/06/02 add"ADM6996_MDC_MDIO_MODE" define,
// if define ADM6996_MDC_MDIO_MODE==> ADM6996LC and ADM6996I will be in MDIO/MDC(SMI)(16 bit) mode,
// amazon should contrl ADM6996 by MDC/MDIO pin
// if undef ADM6996_MDC_MDIO_MODE==> ADM6996 will be in EEProm(32 bit) mode,
// amazon should contrl ADM6996 by GPIO15,16,17,18 pin
/* 507281:linmars 2005/07/28 support MDIO/EEPROM config mode */
/* 509201:linmars remove driver testing codes */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <asm/atomic.h>
#include <asm-mips/amazon/amazon.h>
#include <asm-mips/amazon/adm6996.h>
//#include <linux/amazon/adm6996.h>
/* Per-port default configuration words for ADM6996 ports 0-5. */
unsigned int ifx_sw_conf[ADM_SW_MAX_PORT_NUM+1] = \
	{ADM_SW_PORT0_CONF, ADM_SW_PORT1_CONF, ADM_SW_PORT2_CONF, \
	ADM_SW_PORT3_CONF, ADM_SW_PORT4_CONF, ADM_SW_PORT5_CONF};
/* Masks for the lowest 1..8 bits, indexed by width - 1. */
unsigned int ifx_sw_bits[8] = \
	{0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff};
/* VLAN member bit position for each switch port. */
unsigned int ifx_sw_vlan_port[6] = {0, 2, 4, 6, 7, 8};
//050613:fchang
/* 507281:linmars start */
#ifdef CONFIG_SWITCH_ADM6996_MDIO
#define ADM6996_MDC_MDIO_MODE 1 //000001.joelin
#else
#undef ADM6996_MDC_MDIO_MODE
#endif
/* 507281:linmars end */
#define adm6996i 0
#define adm6996lc 1
#define adm6996l 2
/* Detected/assumed switch variant; selects the bit-bang access routines. */
unsigned int adm6996_mode=adm6996i;
/*
initialize GPIO pins.
output mode, low
*/
/*
 * initialize GPIO pins used for the software MDIO/EEPROM interface:
 * MDIO, MDCS and MDC as outputs, driven low.
 */
void ifx_gpio_init(void)
{
	//GPIO16,17,18 direction:output
	//GPIO16,17,18 output 0
	AMAZON_SW_REG(AMAZON_GPIO_P1_DIR) |= (GPIO_MDIO|GPIO_MDCS|GPIO_MDC);
	/* read-modify-write via the input latch to leave other pins alone */
	AMAZON_SW_REG(AMAZON_GPIO_P1_OUT) =AMAZON_SW_REG(AMAZON_GPIO_P1_IN)& ~(GPIO_MDIO|GPIO_MDCS|GPIO_MDC);
}
/* read one bit from mdio port */
int ifx_sw_mdio_readbit(void)
{
//int val;
//val = (AMAZON_SW_REG(GPIO_conf0_REG) & GPIO0_INPUT_MASK) >> 8;
//return val;
//GPIO16
return AMAZON_SW_REG(AMAZON_GPIO_P1_IN)&1;
}
/*
MDIO mode selection
1 -> output
0 -> input
switch input/output mode of GPIO 0
*/
/*
 * Switch the direction of the MDIO GPIO pin:
 * mode != 0 -> output (driving), mode == 0 -> input (sampling).
 */
void ifx_mdio_mode(int mode)
{
//	AMAZON_SW_REG(GPIO_conf0_REG) = mode ? GPIO_ENABLEBITS :
//		((GPIO_ENABLEBITS | MDIO_INPUT) & ~MDIO_OUTPUT_EN);
	mode?(AMAZON_SW_REG(AMAZON_GPIO_P1_DIR)|=GPIO_MDIO):
		(AMAZON_SW_REG(AMAZON_GPIO_P1_DIR)&=~GPIO_MDIO);
	/*int r=AMAZON_SW_REG(AMAZON_GPIO_P1_DIR);
	mode?(r|=GPIO_MDIO):(r&=~GPIO_MDIO);
	AMAZON_SW_REG(AMAZON_GPIO_P1_DIR)=r;*/
}
/* Drive the MDC (clock) pin high; RMW via the input latch preserves
 * the other GPIO outputs. */
void ifx_mdc_hi(void)
{
	//GPIO_SET_HI(GPIO_MDC);
	//AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)|=GPIO_MDC;
	/*int r=AMAZON_SW_REG(AMAZON_GPIO_P1_OUT);
	r|=GPIO_MDC;
	AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)=r;*/
	AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)=AMAZON_SW_REG(AMAZON_GPIO_P1_IN)|GPIO_MDC;
}
/* Drive the MDIO (data) pin high. */
void ifx_mdio_hi(void)
{
	//GPIO_SET_HI(GPIO_MDIO);
	//AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)|=GPIO_MDIO;
	/*int r=AMAZON_SW_REG(AMAZON_GPIO_P1_OUT);
	r|=GPIO_MDIO;
	AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)=r;*/
	AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)=AMAZON_SW_REG(AMAZON_GPIO_P1_IN)|GPIO_MDIO;
}
/* Drive the MDCS (chip select) pin high. */
void ifx_mdcs_hi(void)
{
	//GPIO_SET_HI(GPIO_MDCS);
	//AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)|=GPIO_MDCS;
	/*int r=AMAZON_SW_REG(AMAZON_GPIO_P1_OUT);
	r|=GPIO_MDCS;
	AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)=r;*/
	AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)=AMAZON_SW_REG(AMAZON_GPIO_P1_IN)|GPIO_MDCS;
}
/* Drive the MDC (clock) pin low. */
void ifx_mdc_lo(void)
{
	//GPIO_SET_LOW(GPIO_MDC);
	//AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)&=~GPIO_MDC;
	/*int r=AMAZON_SW_REG(AMAZON_GPIO_P1_OUT);
	r&=~GPIO_MDC;
	AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)=r;*/
	AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)=AMAZON_SW_REG(AMAZON_GPIO_P1_IN)&(~GPIO_MDC);
}
/* Drive the MDIO (data) pin low. */
void ifx_mdio_lo(void)
{
	//GPIO_SET_LOW(GPIO_MDIO);
	//AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)&=~GPIO_MDIO;
	/*int r=AMAZON_SW_REG(AMAZON_GPIO_P1_OUT);
	r&=~GPIO_MDIO;
	AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)=r;*/
	AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)=AMAZON_SW_REG(AMAZON_GPIO_P1_IN)&(~GPIO_MDIO);
}
/* Drive the MDCS (chip select) pin low. */
void ifx_mdcs_lo(void)
{
	//GPIO_SET_LOW(GPIO_MDCS);
	//AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)&=~GPIO_MDCS;
	/*int r=AMAZON_SW_REG(AMAZON_GPIO_P1_OUT);
	r&=~GPIO_MDCS;
	AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)=r;*/
	AMAZON_SW_REG(AMAZON_GPIO_P1_OUT)=AMAZON_SW_REG(AMAZON_GPIO_P1_IN)&(~GPIO_MDCS);
}
/*
mdc pulse
0 -> 1 -> 0
*/
/*
 * mdc pulse
 * 0 -> 1 -> 0; one full clock cycle, ending low.
 */
static void ifx_sw_mdc_pulse(void)
{
	ifx_mdc_lo();
	udelay(ADM_SW_MDC_DOWN_DELAY);
	ifx_mdc_hi();
	udelay(ADM_SW_MDC_UP_DELAY);
	ifx_mdc_lo();
}
/*
mdc toggle
1 -> 0
*/
/*
 * mdc toggle
 * 1 -> 0; rising edge first, used when data is sampled on the falling edge.
 */
static void ifx_sw_mdc_toggle(void)
{
	ifx_mdc_hi();
	udelay(ADM_SW_MDC_UP_DELAY);
	ifx_mdc_lo();
	udelay(ADM_SW_MDC_DOWN_DELAY);
}
/*
enable eeprom write
For ATC 93C66 type EEPROM; accessing ADM6996 internal EEPROM type registers
*/
/*
 * enable eeprom write
 * For ATC 93C66 type EEPROM; clocks the WEN opcode plus the required
 * number of don't-care address bits (derived from EEPROM_TYPE) into the
 * ADM6996's EEPROM-style interface.  Sequence follows the part's timing
 * diagram — do not reorder.
 */
static void ifx_sw_eeprom_write_enable(void)
{
	unsigned int op;
	ifx_mdcs_lo();
	ifx_mdc_lo();
	ifx_mdio_hi();
	udelay(ADM_SW_CS_DELAY);
	/* enable chip select */
	ifx_mdcs_hi();
	udelay(ADM_SW_CS_DELAY);
	/* start bit */
	ifx_mdio_hi();
	ifx_sw_mdc_pulse();
	/* eeprom write enable */
	op = ADM_SW_BIT_MASK_4;
	while (op)
	{
		if (op & ADM_SW_EEPROM_WRITE_ENABLE)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* pad with zero bits to fill the address field */
	op = ADM_SW_BIT_MASK_1 << (EEPROM_TYPE - 3);
	while (op)
	{
		ifx_mdio_lo();
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* disable chip select */
	ifx_mdcs_lo();
	udelay(ADM_SW_CS_DELAY);
	ifx_sw_mdc_pulse();
}
/*
disable eeprom write
*/
/*
 * disable eeprom write
 * Mirror image of ifx_sw_eeprom_write_enable(): clocks the WDS opcode
 * plus zero-padded address bits, then deselects the chip.
 */
static void ifx_sw_eeprom_write_disable(void)
{
	unsigned int op;
	ifx_mdcs_lo();
	ifx_mdc_lo();
	ifx_mdio_hi();
	udelay(ADM_SW_CS_DELAY);
	/* enable chip select */
	ifx_mdcs_hi();
	udelay(ADM_SW_CS_DELAY);
	/* start bit */
	ifx_mdio_hi();
	ifx_sw_mdc_pulse();
	/* eeprom write disable */
	op = ADM_SW_BIT_MASK_4;
	while (op)
	{
		if (op & ADM_SW_EEPROM_WRITE_DISABLE)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* pad with zero bits to fill the address field */
	op = ADM_SW_BIT_MASK_1 << (EEPROM_TYPE - 3);
	while (op)
	{
		ifx_mdio_lo();
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* disable chip select */
	ifx_mdcs_lo();
	udelay(ADM_SW_CS_DELAY);
	ifx_sw_mdc_pulse();
}
/*
read registers from ADM6996
serial registers start at 0x200 (addr bit 9 = 1b)
EEPROM registers -> 16bits; Serial registers -> 32bits
*/
#ifdef ADM6996_MDC_MDIO_MODE //smi mode//000001.joelin
/* Read a 16-bit switch register through the Amazon's hardware MDIO
 * controller (SMI mode).  Bit 31 of the access register is the busy
 * flag; bit 30 requests a read.  NOTE(review): the busy-wait has no
 * timeout — a hung controller would spin forever. */
static int ifx_sw_read_adm6996i_smi(unsigned int addr, unsigned int *dat)
{
	/* register address goes into bits 16-25 */
	addr=(addr<<16)&0x3ff0000;
	AMAZON_SW_REG(AMAZON_SW_MDIO_ACC) =(0xC0000000|addr);
	while ((AMAZON_SW_REG(AMAZON_SW_MDIO_ACC))&0x80000000){};
	*dat=((AMAZON_SW_REG(AMAZON_SW_MDIO_ACC))&0x0FFFF);
	return 0;
}
#endif
/*
 * Bit-banged SMI read for the ADM6996I variant: 32-bit preamble, start
 * (01b), read opcode (10b), 10 address bits, turnaround, then 16 data
 * bits sampled from the MDIO pin.  Always returns 0; the value lands in
 * *dat.  Timing and statement order follow the switch's SMI frame format
 * — do not reorder.
 */
static int ifx_sw_read_adm6996i(unsigned int addr, unsigned int *dat)
{
	unsigned int op;
	ifx_gpio_init();
	ifx_mdcs_hi();
	udelay(ADM_SW_CS_DELAY);
	ifx_mdcs_lo();
	ifx_mdc_lo();
	ifx_mdio_lo();
	udelay(ADM_SW_CS_DELAY);
	/* preamble, 32 bit 1 */
	ifx_mdio_hi();
	op = ADM_SW_BIT_MASK_32;
	while (op)
	{
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* command start (01b) */
	op = ADM_SW_BIT_MASK_2;
	while (op)
	{
		if (op & ADM_SW_SMI_START)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* read command (10b) */
	op = ADM_SW_BIT_MASK_2;
	while (op)
	{
		if (op & ADM_SW_SMI_READ)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* send address A9 ~ A0 */
	op = ADM_SW_BIT_MASK_10;
	while (op)
	{
		if (op & addr)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* turnaround bits */
	op = ADM_SW_BIT_MASK_2;
	ifx_mdio_hi();
	while (op)
	{
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	udelay(ADM_SW_MDC_DOWN_DELAY);
	/* set MDIO pin to input mode */
	ifx_mdio_mode(ADM_SW_MDIO_INPUT);
	/* start read data (16 bits, MSB first) */
	*dat = 0;
//adm6996i	op = ADM_SW_BIT_MASK_32;
	op = ADM_SW_BIT_MASK_16;//adm6996i
	while (op)
	{
		*dat <<= 1;
		if (ifx_sw_mdio_readbit()) *dat |= 1;
		ifx_sw_mdc_toggle();
		op >>= 1;
	}
	/* set MDIO to output mode */
	ifx_mdio_mode(ADM_SW_MDIO_OUTPUT);
	/* dummy clock */
	op = ADM_SW_BIT_MASK_4;
	ifx_mdio_lo();
	while(op)
	{
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	ifx_mdc_lo();
	ifx_mdio_lo();
	ifx_mdcs_hi();
	/* EEPROM registers */
//adm6996i	if (!(addr & 0x200))
//adm6996i	{
//adm6996i		if (addr % 2)
//adm6996i			*dat >>= 16;
//adm6996i		else
//adm6996i			*dat &= 0xffff;
//adm6996i	}
	return 0;
}
//adm6996
/*
 * Bit-banged SMI read for the ADM6996L variant.  Same frame as the
 * 6996I routine but shifts in a full 32-bit word; for EEPROM-mapped
 * registers (addr bit 9 clear) the wanted 16-bit half is then selected
 * by address parity.  Always returns 0; the value lands in *dat.
 */
static int ifx_sw_read_adm6996l(unsigned int addr, unsigned int *dat)
{
	unsigned int op;
	ifx_gpio_init();
	ifx_mdcs_hi();
	udelay(ADM_SW_CS_DELAY);
	ifx_mdcs_lo();
	ifx_mdc_lo();
	ifx_mdio_lo();
	udelay(ADM_SW_CS_DELAY);
	/* preamble, 32 bit 1 */
	ifx_mdio_hi();
	op = ADM_SW_BIT_MASK_32;
	while (op)
	{
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* command start (01b) */
	op = ADM_SW_BIT_MASK_2;
	while (op)
	{
		if (op & ADM_SW_SMI_START)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* read command (10b) */
	op = ADM_SW_BIT_MASK_2;
	while (op)
	{
		if (op & ADM_SW_SMI_READ)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* send address A9 ~ A0 */
	op = ADM_SW_BIT_MASK_10;
	while (op)
	{
		if (op & addr)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* turnaround bits */
	op = ADM_SW_BIT_MASK_2;
	ifx_mdio_hi();
	while (op)
	{
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	udelay(ADM_SW_MDC_DOWN_DELAY);
	/* set MDIO pin to input mode */
	ifx_mdio_mode(ADM_SW_MDIO_INPUT);
	/* start read data (full 32-bit word, MSB first) */
	*dat = 0;
	op = ADM_SW_BIT_MASK_32;
	while (op)
	{
		*dat <<= 1;
		if (ifx_sw_mdio_readbit()) *dat |= 1;
		ifx_sw_mdc_toggle();
		op >>= 1;
	}
	/* set MDIO to output mode */
	ifx_mdio_mode(ADM_SW_MDIO_OUTPUT);
	/* dummy clock */
	op = ADM_SW_BIT_MASK_4;
	ifx_mdio_lo();
	while(op)
	{
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	ifx_mdc_lo();
	ifx_mdio_lo();
	ifx_mdcs_hi();
	/* EEPROM registers: pick the 16-bit half selected by addr parity */
	if (!(addr & 0x200))
	{
		if (addr % 2)
			*dat >>= 16;
		else
			*dat &= 0xffff;
	}
	return 0;
}
/*
 * Front end for register reads: dispatch to the hardware SMI controller
 * when built in MDC/MDIO mode, otherwise to the GPIO bit-bang routine
 * matching the detected switch variant.  Always returns 0.
 */
static int ifx_sw_read(unsigned int addr, unsigned int *dat)
{
#ifdef ADM6996_MDC_MDIO_MODE	/* hardware SMI controller */ //000001.joelin
	ifx_sw_read_adm6996i_smi(addr, dat);
#else				/* GPIO bit-banged access */
	if (adm6996_mode == adm6996i)
		ifx_sw_read_adm6996i(addr, dat);
	else
		ifx_sw_read_adm6996l(addr, dat);
#endif
	return 0;
}
/*
write register to ADM6996 eeprom registers
*/
//for adm6996i -start
#ifdef ADM6996_MDC_MDIO_MODE //smi mode //000001.joelin
/* Write a 16-bit switch register through the hardware MDIO controller:
 * address in bits 16-25, data in the low 16 bits, bit 31 starts the
 * transaction and doubles as the busy flag.  NOTE(review): busy-wait
 * has no timeout. */
static int ifx_sw_write_adm6996i_smi(unsigned int addr, unsigned int dat)
{
	AMAZON_SW_REG(AMAZON_SW_MDIO_ACC) = ((addr<<16)&0x3ff0000)|dat|0x80000000;
	while ((AMAZON_SW_REG(AMAZON_SW_MDIO_ACC))&0x80000000){};
	return 0;
}
#endif //ADM6996_MDC_MDIO_MODE //000001.joelin
/*
 * Bit-banged SMI write for the ADM6996I variant: 32-bit preamble, start
 * (01b), write opcode (01b), 10 address bits, turnaround, then 16 data
 * bits.  Always returns 0.  Timing and statement order follow the SMI
 * frame format — do not reorder.
 */
static int ifx_sw_write_adm6996i(unsigned int addr, unsigned int dat)
{
	unsigned int op;
	ifx_gpio_init();
	ifx_mdcs_hi();
	udelay(ADM_SW_CS_DELAY);
	ifx_mdcs_lo();
	ifx_mdc_lo();
	ifx_mdio_lo();
	udelay(ADM_SW_CS_DELAY);
	/* preamble, 32 bit 1 */
	ifx_mdio_hi();
	op = ADM_SW_BIT_MASK_32;
	while (op)
	{
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* command start (01b) */
	op = ADM_SW_BIT_MASK_2;
	while (op)
	{
		if (op & ADM_SW_SMI_START)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* write command (01b) */
	op = ADM_SW_BIT_MASK_2;
	while (op)
	{
		if (op & ADM_SW_SMI_WRITE)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* send address A9 ~ A0 */
	op = ADM_SW_BIT_MASK_10;
	while (op)
	{
		if (op & addr)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* turnaround bits */
	op = ADM_SW_BIT_MASK_2;
	ifx_mdio_hi();
	while (op)
	{
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	udelay(ADM_SW_MDC_DOWN_DELAY);
	/* set MDIO pin to output mode */
	ifx_mdio_mode(ADM_SW_MDIO_OUTPUT);
	/* start write data (16 bits, MSB first) */
	op = ADM_SW_BIT_MASK_16;
	while (op)
	{
		if (op & dat)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_toggle();
		op >>= 1;
	}
//	/* set MDIO to output mode */
//	ifx_mdio_mode(ADM_SW_MDIO_OUTPUT);
	/* dummy clock */
	op = ADM_SW_BIT_MASK_4;
	ifx_mdio_lo();
	while(op)
	{
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	ifx_mdc_lo();
	ifx_mdio_lo();
	ifx_mdcs_hi();
	/* EEPROM registers */
//adm6996i	if (!(addr & 0x200))
//adm6996i	{
//adm6996i		if (addr % 2)
//adm6996i			*dat >>= 16;
//adm6996i		else
//adm6996i			*dat &= 0xffff;
//adm6996i	}
	return 0;
}
//for adm6996i-end
/*
 * Write 16-bit value @dat to ADM6996L EEPROM register @addr by
 * bit-banging the EEPROM protocol: write-enable sequence, chip select,
 * start bit, EEPROM write opcode, EEPROM_TYPE address bits, then
 * sixteen data bits, finishing with a chip-select release and a final
 * clock.  Always returns 0 - no acknowledgement is checked.
 */
static int ifx_sw_write_adm6996l(unsigned int addr, unsigned int dat)
{
	unsigned int op;
	ifx_gpio_init();
	/* enable write */
	ifx_sw_eeprom_write_enable();
	/* chip select */
	ifx_mdcs_hi();
	udelay(ADM_SW_CS_DELAY);
	/* issue write command */
	/* start bit */
	ifx_mdio_hi();
	ifx_sw_mdc_pulse();
	/* EEPROM write command */
	op = ADM_SW_BIT_MASK_2;
	while (op)
	{
		if (op & ADM_SW_EEPROM_WRITE)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_pulse();
		op >>= 1;
	}
	/* send address A7 ~ A0 (width depends on EEPROM_TYPE) */
	op = ADM_SW_BIT_MASK_1 << (EEPROM_TYPE - 1);
	while (op)
	{
		if (op & addr)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_toggle();
		op >>= 1;
	}
	/* start write data */
	op = ADM_SW_BIT_MASK_16;
	while (op)
	{
		if (op & dat)
			ifx_mdio_hi();
		else
			ifx_mdio_lo();
		ifx_sw_mdc_toggle();
		op >>= 1;
	}
	/* disable cs & wait 1 clock */
	ifx_mdcs_lo();
	udelay(ADM_SW_CS_DELAY);
	ifx_sw_mdc_toggle();
	ifx_sw_eeprom_write_disable();
	return 0;
}
/*
 * Write @dat to switch register @addr, dispatching to the SMI engine
 * or to the variant-specific bit-bang routine exactly as ifx_sw_read()
 * does for reads.  Always returns 0.
 */
static int ifx_sw_write(unsigned int addr, unsigned int dat)
{
#ifdef ADM6996_MDC_MDIO_MODE //smi mode ////000001.joelin
	ifx_sw_write_adm6996i_smi(addr,dat);
#else //000001.joelin
	if (adm6996_mode==adm6996i) ifx_sw_write_adm6996i(addr,dat);
	else ifx_sw_write_adm6996l(addr,dat);
#endif //000001.joelin
	return 0;
}
/*
do switch PHY reset
*/
/*
 * Reset the switch PHYs by writing 0 to the PHY reset register.
 * Always returns 0.
 */
int ifx_sw_reset(void)
{
	/* reset PHY */
	ifx_sw_write(ADM_SW_PHY_RESET, 0);
	return 0;
}
/* 509201:linmars start */
#if 0
/*
check port status
*/
/*
 * Print a human-readable decode of one port's status register
 * (flow control, autoneg, speed, duplex, enable, TOS, priority,
 * MDIX and PVID bits).  Returns 0, or -1 for an out-of-range port.
 * Note: currently compiled out (enclosing "#if 0").
 */
int ifx_check_port_status(int port)
{
	unsigned int val;
	if ((port < 0) || (port > ADM_SW_MAX_PORT_NUM))
	{
		ifx_printf(("error on port number (%d)!!\n", port));
		return -1;
	}
	ifx_sw_read(ifx_sw_conf[port], &val);
	/* odd register addresses carry the port's bits in the upper half */
	if (ifx_sw_conf[port]%2) val >>= 16;
	/* only 16bits are effective */
	val &= 0xFFFF;
	ifx_printf(("Port %d status (%.8x): \n", port, val));
	if (val & ADM_SW_PORT_FLOWCTL)
		ifx_printf(("\t802.3x flow control supported!\n"));
	else
		ifx_printf(("\t802.3x flow control not supported!\n"));
	if (val & ADM_SW_PORT_AN)
		ifx_printf(("\tAuto negotiation ON!\n"));
	else
		ifx_printf(("\tAuto negotiation OFF!\n"));
	if (val & ADM_SW_PORT_100M)
		ifx_printf(("\tLink at 100M!\n"));
	else
		ifx_printf(("\tLink at 10M!\n"));
	if (val & ADM_SW_PORT_FULL)
		ifx_printf(("\tFull duplex!\n"));
	else
		ifx_printf(("\tHalf duplex!\n"));
	if (val & ADM_SW_PORT_DISABLE)
		ifx_printf(("\tPort disabled!\n"));
	else
		ifx_printf(("\tPort enabled!\n"));
	if (val & ADM_SW_PORT_TOS)
		ifx_printf(("\tTOS enabled!\n"));
	else
		ifx_printf(("\tTOS disabled!\n"));
	if (val & ADM_SW_PORT_PPRI)
		ifx_printf(("\tPort priority first!\n"));
	else
		ifx_printf(("\tVLAN or TOS priority first!\n"));
	if (val & ADM_SW_PORT_MDIX)
		ifx_printf(("\tAuto MDIX!\n"));
	else
		ifx_printf(("\tNo auto MDIX\n"));
	ifx_printf(("\tPVID: %d\n", \
	((val >> ADM_SW_PORT_PVID_SHIFT)&ifx_sw_bits[ADM_SW_PORT_PVID_BITS])));
	return 0;
}
/*
initialize a VLAN
clear all VLAN bits
*/
/*
 * Clear all member bits of VLAN @vlanid.
 * NOTE(review): unlike ifx_sw_vlan_add/del, @vlanid is not
 * range-checked here - callers must pass a valid id.
 */
int ifx_sw_vlan_init(int vlanid)
{
	ifx_sw_write(ADM_SW_VLAN0_CONF + vlanid, 0);
	return 0;
}
/*
add a port to certain vlan
*/
/*
 * Add @port to VLAN @vlanid by setting the port's bit in the VLAN's
 * member-map register.  Returns 0, or -1 on a bad port/vlan number.
 *
 * Fixes: the register-pointer argument had been corrupted to the "(R)"
 * glyph by HTML-entity mangling of "&reg"; restored.  'reg' is now
 * unsigned int to match the ifx_sw_read(unsigned int, unsigned int *)
 * prototype and the sibling ifx_sw_vlan_del().
 */
int ifx_sw_vlan_add(int port, int vlanid)
{
	unsigned int reg = 0;
	if ((port < 0) || (port > ADM_SW_MAX_PORT_NUM) || (vlanid < 0) ||
		(vlanid > ADM_SW_MAX_VLAN_NUM))
	{
		ifx_printf(("Port number or VLAN number ERROR!!\n"));
		return -1;
	}
	/* read-modify-write the VLAN member map */
	ifx_sw_read(ADM_SW_VLAN0_CONF + vlanid, &reg);
	reg |= (1 << ifx_sw_vlan_port[port]);
	ifx_sw_write(ADM_SW_VLAN0_CONF + vlanid, reg);
	return 0;
}
/*
delete a given port from certain vlan
*/
/*
 * Remove @port from VLAN @vlanid by clearing the port's bit in the
 * VLAN's member-map register.  Returns 0, or -1 on a bad port/vlan
 * number.
 *
 * Fixes: the register-pointer argument had been corrupted to the "(R)"
 * glyph by HTML-entity mangling of "&reg"; restored.
 */
int ifx_sw_vlan_del(int port, int vlanid)
{
	unsigned int reg = 0;
	if ((port < 0) || (port > ADM_SW_MAX_PORT_NUM) || (vlanid < 0) || (vlanid > ADM_SW_MAX_VLAN_NUM))
	{
		ifx_printf(("Port number or VLAN number ERROR!!\n"));
		return -1;
	}
	/* read-modify-write the VLAN member map */
	ifx_sw_read(ADM_SW_VLAN0_CONF + vlanid, &reg);
	reg &= ~(1 << ifx_sw_vlan_port[port]);
	ifx_sw_write(ADM_SW_VLAN0_CONF + vlanid, reg);
	return 0;
}
/*
default VLAN setting
port 0~3 as untag port and PVID = 1
VLAN1: port 0~3 and port 5 (MII)
*/
/*
 * Program a default VLAN configuration: 802.1q mode, ports 0-3
 * untagged with PVID 1, MII port 5 untagged with PVID 2, and ports
 * 0-3 plus 5 grouped as VLAN1.  Currently compiled out ("#if 0").
 */
static int ifx_sw_init(void)
{
	ifx_printf(("Setting default ADM6996 registers... \n"));
	/* MAC clone, 802.1q based VLAN */
	ifx_sw_write(ADM_SW_VLAN_MODE, 0xff30);
	/* auto MDIX, PVID=1, untag */
	ifx_sw_write(ADM_SW_PORT0_CONF, 0x840f);
	ifx_sw_write(ADM_SW_PORT1_CONF, 0x840f);
	ifx_sw_write(ADM_SW_PORT2_CONF, 0x840f);
	ifx_sw_write(ADM_SW_PORT3_CONF, 0x840f);
	/* auto MDIX, PVID=2, untag */
	ifx_sw_write(ADM_SW_PORT5_CONF, 0x880f);
	/* port 0~3 & 5 as VLAN1 */
	ifx_sw_write(ADM_SW_VLAN0_CONF+1, 0x0155);
	return 0;
}
#endif
/* 509201:linmars end */
/* open: nothing to set up for this device */
int adm_open(struct inode *node, struct file *filp)
{
	return 0;
}
/* read: no data is produced; reports the whole request as satisfied */
ssize_t adm_read(struct file *filep, char *buf, size_t count, loff_t *ppos)
{
	return count;
}
/* write: data is discarded; reports the whole request as consumed */
ssize_t adm_write(struct file *filep, const char *buf, size_t count, loff_t *ppos)
{
	return count;
}
/* close */
/* release: nothing to tear down */
int adm_release(struct inode *inode, struct file *filp)
{
	return 0;
}
/* IOCTL function */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
static long adm_ioctl(struct file *filp, unsigned int cmd, unsigned long args)
#else
static int adm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long args)
#endif
{
PREGRW uREGRW;
unsigned int rtval;
unsigned int val; //6996i
unsigned int control[6] ; //6996i
unsigned int status[6] ; //6996i
PMACENTRY mMACENTRY;//adm6996i
PPROTOCOLFILTER uPROTOCOLFILTER ;///adm6996i
if (_IOC_TYPE(cmd) != ADM_MAGIC)
{
printk("adm_ioctl: IOC_TYPE(%x) != ADM_MAGIC(%x)! \n", _IOC_TYPE(cmd), ADM_MAGIC);
return (-EINVAL);
}
if(_IOC_NR(cmd) >= KEY_IOCTL_MAX_KEY)
{
printk(KERN_WARNING "adm_ioctl: IOC_NR(%x) invalid! \n", _IOC_NR(cmd));
return (-EINVAL);
}
switch (cmd)
{
case ADM_IOCTL_REGRW:
{
uREGRW = (PREGRW)kmalloc(sizeof(REGRW), GFP_KERNEL);
rtval = copy_from_user(uREGRW, (PREGRW)args, sizeof(REGRW));
if (rtval != 0)
{
printk("ADM_IOCTL_REGRW: copy from user FAILED!! \n");
return (-EFAULT);
}
switch(uREGRW->mode)
{
case REG_READ:
uREGRW->value = 0x12345678;//inl(uREGRW->addr);
copy_to_user((PREGRW)args, uREGRW, sizeof(REGRW));
break;
case REG_WRITE:
//outl(uREGRW->value, uREGRW->addr);
break;
default:
printk("No such Register Read/Write function!! \n");
return (-EFAULT);
}
kfree(uREGRW);
break;
}
case ADM_SW_IOCTL_REGRW:
{
unsigned int val = 0xff;
uREGRW = (PREGRW)kmalloc(sizeof(REGRW), GFP_KERNEL);
rtval = copy_from_user(uREGRW, (PREGRW)args, sizeof(REGRW));
if (rtval != 0)
{
printk("ADM_IOCTL_REGRW: copy from user FAILED!! \n");
return (-EFAULT);
}
switch(uREGRW->mode)
{
case REG_READ:
ifx_sw_read(uREGRW->addr, &val);
uREGRW->value = val;
copy_to_user((PREGRW)args, uREGRW, sizeof(REGRW));
break;
case REG_WRITE:
ifx_sw_write(uREGRW->addr, uREGRW->value);
break;
default:
printk("No such Register Read/Write function!! \n");
return (-EFAULT);
}
kfree(uREGRW);
break;
}
/* 509201:linmars start */
#if 0
case ADM_SW_IOCTL_PORTSTS:
for (rtval = 0; rtval < ADM_SW_MAX_PORT_NUM+1; rtval++)
ifx_check_port_status(rtval);
break;
case ADM_SW_IOCTL_INIT:
ifx_sw_init();
break;
#endif
/* 509201:linmars end */
//adm6996i
case ADM_SW_IOCTL_MACENTRY_ADD:
case ADM_SW_IOCTL_MACENTRY_DEL:
case ADM_SW_IOCTL_MACENTRY_GET_INIT:
case ADM_SW_IOCTL_MACENTRY_GET_MORE:
mMACENTRY = (PMACENTRY)kmalloc(sizeof(MACENTRY), GFP_KERNEL);
rtval = copy_from_user(mMACENTRY, (PMACENTRY)args, sizeof(MACENTRY));
if (rtval != 0)
{
printk("ADM_SW_IOCTL_MACENTRY: copy from user FAILED!! \n");
return (-EFAULT);
}
control[0]=(mMACENTRY->mac_addr[1]<<8)+mMACENTRY->mac_addr[0] ;
control[1]=(mMACENTRY->mac_addr[3]<<8)+mMACENTRY->mac_addr[2] ;
control[2]=(mMACENTRY->mac_addr[5]<<8)+mMACENTRY->mac_addr[4] ;
control[3]=(mMACENTRY->fid&0xf)+((mMACENTRY->portmap&0x3f)<<4);
if (((mMACENTRY->info_type)&0x01)) control[4]=(mMACENTRY->ctrl.info_ctrl)+0x1000; //static ,info control
else control[4]=((mMACENTRY->ctrl.age_timer)&0xff);//not static ,agetimer
if (cmd==ADM_SW_IOCTL_MACENTRY_GET_INIT) {
//initial the pointer to the first address
val=0x8000;//busy ,status5[15]
while(val&0x8000){ //check busy ?
ifx_sw_read(0x125, &val);
}
control[5]=0x030;//initial the first address
ifx_sw_write(0x11f,control[5]);
val=0x8000;//busy ,status5[15]
while(val&0x8000){ //check busy ?
ifx_sw_read(0x125, &val);
}
} //if (cmd==ADM_SW_IOCTL_MACENTRY_GET_INIT)
if (cmd==ADM_SW_IOCTL_MACENTRY_ADD) control[5]=0x07;//create a new address
else if (cmd==ADM_SW_IOCTL_MACENTRY_DEL) control[5]=0x01f;//erased an existed address
else if ((cmd==ADM_SW_IOCTL_MACENTRY_GET_INIT)||(cmd==ADM_SW_IOCTL_MACENTRY_GET_MORE))
control[5]=0x02c;//search by the mac address field
val=0x8000;//busy ,status5[15]
while(val&0x8000){ //check busy ?
ifx_sw_read(0x125, &val);
}
ifx_sw_write(0x11a,control[0]);
ifx_sw_write(0x11b,control[1]);
ifx_sw_write(0x11c,control[2]);
ifx_sw_write(0x11d,control[3]);
ifx_sw_write(0x11e,control[4]);
ifx_sw_write(0x11f,control[5]);
val=0x8000;//busy ,status5[15]
while(val&0x8000){ //check busy ?
ifx_sw_read(0x125, &val);
}
val=((val&0x7000)>>12);//result ,status5[14:12]
mMACENTRY->result=val;
if (!val) {
printk(" Command OK!! \n");
if ((cmd==ADM_SW_IOCTL_MACENTRY_GET_INIT)||(cmd==ADM_SW_IOCTL_MACENTRY_GET_MORE)) {
ifx_sw_read(0x120,&(status[0]));
ifx_sw_read(0x121,&(status[1]));
ifx_sw_read(0x122,&(status[2]));
ifx_sw_read(0x123,&(status[3]));
ifx_sw_read(0x124,&(status[4]));
ifx_sw_read(0x125,&(status[5]));
mMACENTRY->mac_addr[0]=(status[0]&0x00ff) ;
mMACENTRY->mac_addr[1]=(status[0]&0xff00)>>8 ;
mMACENTRY->mac_addr[2]=(status[1]&0x00ff) ;
mMACENTRY->mac_addr[3]=(status[1]&0xff00)>>8 ;
mMACENTRY->mac_addr[4]=(status[2]&0x00ff) ;
mMACENTRY->mac_addr[5]=(status[2]&0xff00)>>8 ;
mMACENTRY->fid=(status[3]&0xf);
mMACENTRY->portmap=((status[3]>>4)&0x3f);
if (status[5]&0x2) {//static info_ctrl //status5[1]????
mMACENTRY->ctrl.info_ctrl=(status[4]&0x00ff);
mMACENTRY->info_type=1;
}
else {//not static age_timer
mMACENTRY->ctrl.age_timer=(status[4]&0x00ff);
mMACENTRY->info_type=0;
}
//status5[13]???? mMACENTRY->occupy=(status[5]&0x02)>>1;//status5[1]
mMACENTRY->occupy=(status[5]&0x02000)>>13;//status5[13] ???
mMACENTRY->bad=(status[5]&0x04)>>2;//status5[2]
}//if ((cmd==ADM_SW_IOCTL_MACENTRY_GET_INIT)||(cmd==ADM_SW_IOCTL_MACENTRY_GET_MORE))
}
else if (val==0x001)
printk(" All Entry Used!! \n");
else if (val==0x002)
printk(" Entry Not Found!! \n");
else if (val==0x003)
printk(" Try Next Entry!! \n");
else if (val==0x005)
printk(" Command Error!! \n");
else
printk(" UnKnown Error!! \n");
copy_to_user((PMACENTRY)args, mMACENTRY,sizeof(MACENTRY));
break;
case ADM_SW_IOCTL_FILTER_ADD:
case ADM_SW_IOCTL_FILTER_DEL:
case ADM_SW_IOCTL_FILTER_GET:
uPROTOCOLFILTER = (PPROTOCOLFILTER)kmalloc(sizeof(PROTOCOLFILTER), GFP_KERNEL);
rtval = copy_from_user(uPROTOCOLFILTER, (PPROTOCOLFILTER)args, sizeof(PROTOCOLFILTER));
if (rtval != 0)
{
printk("ADM_SW_IOCTL_FILTER_ADD: copy from user FAILED!! \n");
return (-EFAULT);
}
if(cmd==ADM_SW_IOCTL_FILTER_DEL) { //delete filter
uPROTOCOLFILTER->ip_p=00; //delet filter
uPROTOCOLFILTER->action=00; //delete filter
} //delete filter
ifx_sw_read(((uPROTOCOLFILTER->protocol_filter_num/2)+0x68), &val);//rx68~rx6b,protocol filter0~7
if (((uPROTOCOLFILTER->protocol_filter_num)%2)==00){
if(cmd==ADM_SW_IOCTL_FILTER_GET) uPROTOCOLFILTER->ip_p= val&0x00ff;//get filter ip_p
else val=(val&0xff00)|(uPROTOCOLFILTER->ip_p);//set filter ip_p
}
else {
if(cmd==ADM_SW_IOCTL_FILTER_GET) uPROTOCOLFILTER->ip_p= (val>>8);//get filter ip_p
else val=(val&0x00ff)|((uPROTOCOLFILTER->ip_p)<<8);//set filter ip_p
}
if(cmd!=ADM_SW_IOCTL_FILTER_GET) ifx_sw_write(((uPROTOCOLFILTER->protocol_filter_num/2)+0x68), val);//write rx68~rx6b,protocol filter0~7
ifx_sw_read(0x95, &val); //protocol filter action
if(cmd==ADM_SW_IOCTL_FILTER_GET) {
uPROTOCOLFILTER->action= ((val>>(uPROTOCOLFILTER->protocol_filter_num*2))&0x3);//get filter action
copy_to_user((PPROTOCOLFILTER)args, uPROTOCOLFILTER, sizeof(PROTOCOLFILTER));
}
else {
val=(val&(~(0x03<<(uPROTOCOLFILTER->protocol_filter_num*2))))|(((uPROTOCOLFILTER->action)&0x03)<<(uPROTOCOLFILTER->protocol_filter_num*2));
// printk("%d----\n",val);
ifx_sw_write(0x95, val); //write protocol filter action
}
break;
//adm6996i
/* others */
default:
return -EFAULT;
}
/* end of switch */
return 0;
}
/* Santosh: handle IGMP protocol filter ADD/DEL/GET */
/* Santosh: handle IGMP protocol filter ADD/DEL/GET */
/*
 * Program or query one of the eight protocol filters.  Two filters
 * share each of registers 0x68-0x6b (low/high byte selected by the
 * filter number's parity); each filter's 2-bit action lives in
 * register 0x95.  For _GET the protocol and action are written back
 * into *uPROTOCOLFILTER; for _DEL they are cleared before being
 * programmed.  Always returns 0.
 */
int adm_process_protocol_filter_request (unsigned int cmd, PPROTOCOLFILTER uPROTOCOLFILTER)
{
	unsigned int val; //6996i
	if(cmd==ADM_SW_IOCTL_FILTER_DEL) { //delete filter
		uPROTOCOLFILTER->ip_p=00; //delet filter
		uPROTOCOLFILTER->action=00; //delete filter
	} //delete filter
	ifx_sw_read(((uPROTOCOLFILTER->protocol_filter_num/2)+0x68), &val);//rx68~rx6b,protocol filter0~7
	if (((uPROTOCOLFILTER->protocol_filter_num)%2)==00){
		if(cmd==ADM_SW_IOCTL_FILTER_GET) uPROTOCOLFILTER->ip_p= val&0x00ff;//get filter ip_p
		else val=(val&0xff00)|(uPROTOCOLFILTER->ip_p);//set filter ip_p
	}
	else {
		if(cmd==ADM_SW_IOCTL_FILTER_GET) uPROTOCOLFILTER->ip_p= (val>>8);//get filter ip_p
		else val=(val&0x00ff)|((uPROTOCOLFILTER->ip_p)<<8);//set filter ip_p
	}
	if(cmd!=ADM_SW_IOCTL_FILTER_GET) ifx_sw_write(((uPROTOCOLFILTER->protocol_filter_num/2)+0x68), val);//write rx68~rx6b,protocol filter0~7
	ifx_sw_read(0x95, &val); //protocol filter action
	if(cmd==ADM_SW_IOCTL_FILTER_GET) {
		uPROTOCOLFILTER->action= ((val>>(uPROTOCOLFILTER->protocol_filter_num*2))&0x3);//get filter action
	}
	else {
		val=(val&(~(0x03<<(uPROTOCOLFILTER->protocol_filter_num*2))))|(((uPROTOCOLFILTER->action)&0x03)<<(uPROTOCOLFILTER->protocol_filter_num*2));
		ifx_sw_write(0x95, val); //write protocol filter action
	}
	return 0;
}
/* Santosh: function for MAC ENTRY ADD/DEL/GET */
/* Santosh: function for MAC ENTRY ADD/DEL/GET */
/*
 * Issue one MAC-table command (add / delete / search-init /
 * search-next) to the ADM6996I: pack the entry into command
 * registers 0x11a-0x11f, poll the busy bit (status5[15], reg
 * 0x125) around the command, then decode the 3-bit result code
 * from status5[14:12].  For searches, the matched entry is read
 * back from status registers 0x120-0x125 into *mMACENTRY.
 * Always returns 0; the outcome is reported in mMACENTRY->result
 * (0 = OK, 1 = table full, 2 = not found, 3 = try next, 5 = command
 * error).
 */
int adm_process_mac_table_request (unsigned int cmd, PMACENTRY mMACENTRY)
{
	unsigned int val; //6996i
	unsigned int control[6] ; //6996i
	unsigned int status[6] ; //6996i
	// printk ("adm_process_mac_table_request: enter\n");
	/* pack MAC address, FID and port map into the command registers */
	control[0]=(mMACENTRY->mac_addr[1]<<8)+mMACENTRY->mac_addr[0] ;
	control[1]=(mMACENTRY->mac_addr[3]<<8)+mMACENTRY->mac_addr[2] ;
	control[2]=(mMACENTRY->mac_addr[5]<<8)+mMACENTRY->mac_addr[4] ;
	control[3]=(mMACENTRY->fid&0xf)+((mMACENTRY->portmap&0x3f)<<4);
	if (((mMACENTRY->info_type)&0x01)) control[4]=(mMACENTRY->ctrl.info_ctrl)+0x1000; //static ,info control
	else control[4]=((mMACENTRY->ctrl.age_timer)&0xff);//not static ,agetimer
	if (cmd==ADM_SW_IOCTL_MACENTRY_GET_INIT) {
		//initial the pointer to the first address
		val=0x8000;//busy ,status5[15]
		while(val&0x8000){ //check busy ?
			ifx_sw_read(0x125, &val);
		}
		control[5]=0x030;//initial the first address
		ifx_sw_write(0x11f,control[5]);
		val=0x8000;//busy ,status5[15]
		while(val&0x8000){ //check busy ?
			ifx_sw_read(0x125, &val);
		}
	} //if (cmd==ADM_SW_IOCTL_MACENTRY_GET_INIT)
	if (cmd==ADM_SW_IOCTL_MACENTRY_ADD) control[5]=0x07;//create a new address
	else if (cmd==ADM_SW_IOCTL_MACENTRY_DEL) control[5]=0x01f;//erased an existed address
	else if ((cmd==ADM_SW_IOCTL_MACENTRY_GET_INIT)||(cmd==ADM_SW_IOCTL_MACENTRY_GET_MORE))
		control[5]=0x02c;//search by the mac address field
	val=0x8000;//busy ,status5[15]
	while(val&0x8000){ //check busy ?
		ifx_sw_read(0x125, &val);
	}
	/* issue the command */
	ifx_sw_write(0x11a,control[0]);
	ifx_sw_write(0x11b,control[1]);
	ifx_sw_write(0x11c,control[2]);
	ifx_sw_write(0x11d,control[3]);
	ifx_sw_write(0x11e,control[4]);
	ifx_sw_write(0x11f,control[5]);
	val=0x8000;//busy ,status5[15]
	while(val&0x8000){ //check busy ?
		ifx_sw_read(0x125, &val);
	}
	val=((val&0x7000)>>12);//result ,status5[14:12]
	mMACENTRY->result=val;
	if (!val) {
		printk(" Command OK!! \n");
		if ((cmd==ADM_SW_IOCTL_MACENTRY_GET_INIT)||(cmd==ADM_SW_IOCTL_MACENTRY_GET_MORE)) {
			/* read the matched entry back from the status registers */
			ifx_sw_read(0x120,&(status[0]));
			ifx_sw_read(0x121,&(status[1]));
			ifx_sw_read(0x122,&(status[2]));
			ifx_sw_read(0x123,&(status[3]));
			ifx_sw_read(0x124,&(status[4]));
			ifx_sw_read(0x125,&(status[5]));
			mMACENTRY->mac_addr[0]=(status[0]&0x00ff) ;
			mMACENTRY->mac_addr[1]=(status[0]&0xff00)>>8 ;
			mMACENTRY->mac_addr[2]=(status[1]&0x00ff) ;
			mMACENTRY->mac_addr[3]=(status[1]&0xff00)>>8 ;
			mMACENTRY->mac_addr[4]=(status[2]&0x00ff) ;
			mMACENTRY->mac_addr[5]=(status[2]&0xff00)>>8 ;
			mMACENTRY->fid=(status[3]&0xf);
			mMACENTRY->portmap=((status[3]>>4)&0x3f);
			if (status[5]&0x2) {//static info_ctrl //status5[1]????
				mMACENTRY->ctrl.info_ctrl=(status[4]&0x00ff);
				mMACENTRY->info_type=1;
			}
			else {//not static age_timer
				mMACENTRY->ctrl.age_timer=(status[4]&0x00ff);
				mMACENTRY->info_type=0;
			}
			//status5[13]???? mMACENTRY->occupy=(status[5]&0x02)>>1;//status5[1]
			mMACENTRY->occupy=(status[5]&0x02000)>>13;//status5[13] ???
			mMACENTRY->bad=(status[5]&0x04)>>2;//status5[2]
		}//if ((cmd==ADM_SW_IOCTL_MACENTRY_GET_INIT)||(cmd==ADM_SW_IOCTL_MACENTRY_GET_MORE))
	}
	else if (val==0x001)
		printk(" All Entry Used!! \n");
	else if (val==0x002)
		printk(" Entry Not Found!! \n");
	else if (val==0x003)
		printk(" Try Next Entry!! \n");
	else if (val==0x005)
		printk(" Command Error!! \n");
	else
		printk(" UnKnown Error!! \n");
	// printk ("adm_process_mac_table_request: Exit\n");
	return 0;
}
/* Santosh: End of function for MAC ENTRY ADD/DEL*/
/*
 * File operations for the adm6996 character device (registered on
 * major 69).  Converted from the GCC-specific obsolete "label:"
 * initializer syntax to standard C99 designated initializers;
 * unlisted handlers remain NULL as before.
 */
struct file_operations adm_ops =
{
	.read = adm_read,
	.write = adm_write,
	.open = adm_open,
	.release = adm_release,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
	.unlocked_ioctl = adm_ioctl
#else
	.ioctl = adm_ioctl
#endif
};
/*
 * /proc read handler: emit a fixed banner line into @buf and flag EOF.
 * The @start/@offset/@count/@data arguments are unused.  Returns the
 * number of bytes written.
 */
int adm_proc(char *buf, char **start, off_t offset, int count, int *eof, void *data)
{
	int written = sprintf(buf, " ************ Registers ************ \n");

	*eof = 1;
	return written;
}
/*
 * Module init: register the character device (major 69), set up the
 * MDIO interface (SMI engine or bit-banged GPIOs depending on the
 * build), probe the chip ID registers to distinguish ADM6996I /
 * ADM6996LC / ADM6996L, and apply a register patch for the AB chip
 * revision (ID 0x1021).  Returns 0.
 */
int __init init_adm6996_module(void)
{
	unsigned int val = 000;
	unsigned int val1 = 000;
	printk("Loading ADM6996 driver... \n");
	/* if running on adm5120 */
	/* set GPIO 0~2 as adm6996 control pins */
	//outl(0x003f3f00, 0x12000028);
	/* enable switch port 5 (MII) as RMII mode (5120MAC <-> 6996MAC) */
	//outl(0x18a, 0x12000030);
	/* group adm5120 port 1 ~ 5 as VLAN0, port 5 & 6(CPU) as VLAN1 */
	//outl(0x417e, 0x12000040);
	/* end adm5120 fixup */
#ifdef ADM6996_MDC_MDIO_MODE //smi mode //000001.joelin
	register_chrdev(69, "adm6996", &adm_ops);
	/* configure the SoC MDIO engine and internal ephy for SMI access */
	AMAZON_SW_REG(AMAZON_SW_MDIO_CFG) = 0x27be;
	AMAZON_SW_REG(AMAZON_SW_EPHY) = 0xfc;
	adm6996_mode=adm6996i;
	/* chip ID is split across registers 0xa0 (low 16) and 0xa1 (high 4) */
	ifx_sw_read(0xa0, &val);
	ifx_sw_read(0xa1, &val1);
	val=((val1&0x0f)<<16)|val;
	printk ("\nADM6996 SMI Mode-");
	printk ("Chip ID:%5x \n ", val);
#else //000001.joelin
	AMAZON_SW_REG(AMAZON_SW_MDIO_CFG) = 0x2c50;
	AMAZON_SW_REG(AMAZON_SW_EPHY) = 0xff;
	/* claim the MDIO/MDCS/MDC pins as open-drain GPIOs */
	AMAZON_SW_REG(AMAZON_GPIO_P1_ALTSEL0) &= ~(GPIO_MDIO|GPIO_MDCS|GPIO_MDC);
	AMAZON_SW_REG(AMAZON_GPIO_P1_ALTSEL1) &= ~(GPIO_MDIO|GPIO_MDCS|GPIO_MDC);
	AMAZON_SW_REG(AMAZON_GPIO_P1_OD) |= (GPIO_MDIO|GPIO_MDCS|GPIO_MDC);
	ifx_gpio_init();
	register_chrdev(69, "adm6996", &adm_ops);
	mdelay(100);
	/* create proc entries */
	// create_proc_read_entry("admide", 0, NULL, admide_proc, NULL);
	//joelin adm6996i support start
	/* probe with both register layouts to identify the chip variant */
	adm6996_mode=adm6996i;
	ifx_sw_read(0xa0, &val);
	adm6996_mode=adm6996l;
	ifx_sw_read(0x200, &val1);
	// printk ("\n %0x \n",val1);
	if ((val&0xfff0)==0x1020) {
		printk ("\n ADM6996I .. \n");
		adm6996_mode=adm6996i;
	}
	else if ((val1&0xffffff00)==0x71000) {//71010 or 71020
		printk ("\n ADM6996LC .. \n");
		adm6996_mode=adm6996lc;
	}
	else {
		printk ("\n ADM6996L .. \n");
		adm6996_mode=adm6996l;
	}
#endif //ADM6996_MDC_MDIO_MODE //smi mode //000001.joelin
	if ((adm6996_mode==adm6996lc)||(adm6996_mode==adm6996i)){
#if 0 /* removed by MarsLin */
		ifx_sw_write(0x29,0xc000);
		ifx_sw_write(0x30,0x0985);
#else
		ifx_sw_read(0xa0, &val);
		if (val == 0x1021) // for both 6996LC and 6996I, only AB version need the patch
			ifx_sw_write(0x29, 0x9000);
		ifx_sw_write(0x30,0x0985);
#endif
	}
	//joelin adm6996i support end
	return 0;
}
/* Module exit: release the character device registered in init. */
void __exit cleanup_adm6996_module(void)
{
	printk("Free ADM device driver... \n");
	unregister_chrdev(69, "adm6996");
	/* remove proc entries */
	// remove_proc_entry("admide", NULL);
}
/* MarsLin, add start */
#if defined(CONFIG_IFX_NFEXT_AMAZON_SWITCH_PHYPORT) || defined(CONFIG_IFX_NFEXT_AMAZON_SWITCH_PHYPORT_MODULE)
#define SET_BIT(reg, mask) reg |= (mask)
#define CLEAR_BIT(reg, mask) reg &= (~mask)
/*
 * Hard-reset the switch via GPIO P0 bit 13 (mask 0x2000): configure
 * the pin as an open-drain output, hold it low for 500 ms, release
 * it, then re-run the whole driver init sequence.  Returns the
 * result of init_adm6996_module().
 */
static int ifx_hw_reset(void)
{
	/* set P0.13 to GPIO function, open drain, output */
	CLEAR_BIT((*AMAZON_GPIO_P0_ALTSEL0),0x2000);
	CLEAR_BIT((*AMAZON_GPIO_P0_ALTSEL1),0x2000);
	SET_BIT((*AMAZON_GPIO_P0_OD),0x2000);
	SET_BIT((*AMAZON_GPIO_P0_DIR), 0x2000);
	/* pulse the reset line low */
	CLEAR_BIT((*AMAZON_GPIO_P0_OUT), 0x2000);
	mdelay(500);
	SET_BIT((*AMAZON_GPIO_P0_OUT), 0x2000);
	/* re-probe and re-initialise the chip */
	cleanup_adm6996_module();
	return init_adm6996_module();
}
int (*adm6996_hw_reset)(void) = ifx_hw_reset;
EXPORT_SYMBOL(adm6996_hw_reset);
EXPORT_SYMBOL(adm6996_mode);
int (*adm6996_sw_read)(unsigned int addr, unsigned int *data) = ifx_sw_read;
EXPORT_SYMBOL(adm6996_sw_read);
int (*adm6996_sw_write)(unsigned int addr, unsigned int data) = ifx_sw_write;
EXPORT_SYMBOL(adm6996_sw_write);
#endif
/* MarsLin, add end */
/* Santosh: for IGMP proxy/snooping, Begin */
EXPORT_SYMBOL (adm_process_mac_table_request);
EXPORT_SYMBOL (adm_process_protocol_filter_request);
/* Santosh: for IGMP proxy/snooping, End */
MODULE_DESCRIPTION("ADMtek 6996 Driver");
MODULE_AUTHOR("Joe Lin <joe.lin@infineon.com>");
MODULE_LICENSE("GPL");
module_init(init_adm6996_module);
module_exit(cleanup_adm6996_module);
| gpl-2.0 |
faust93/kernel_f93_a5f | arch/arm/mach-omap2/cpuidle44xx.c | 992 | 5969 | /*
* OMAP4+ CPU idle Routines
*
* Copyright (C) 2011-2013 Texas Instruments, Inc.
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Rajendra Nayak <rnayak@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/clockchips.h>
#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
#include "common.h"
#include "pm.h"
#include "prm.h"
#include "clockdomain.h"
/* Machine specific information */
/*
 * Per-C-state power-domain targets: the CPU power state plus the MPU
 * subsystem power state and its logic state.  Entries are indexed in
 * the same order as omap4_idle_driver.states[] (C1..C3).
 */
struct idle_statedata {
	u32 cpu_state;		/* CPU power-domain target */
	u32 mpu_logic_state;	/* MPU logic target (RET or OFF) */
	u32 mpu_state;		/* MPU power-domain target */
};
static struct idle_statedata omap4_idle_data[] = {
	{
		/* C1: everything stays ON */
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C2: CPU OFF, MPU CSWR (retention with logic retained) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C3: CPU OFF, MPU OSWR (retention with logic off) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};
static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];	/* looked up in omap4_idle_init() */
static struct clockdomain *cpu_clkdm[NR_CPUS];
static atomic_t abort_barrier;		/* coupled-idle parallel barrier */
static bool cpu_done[NR_CPUS];		/* set once a CPU has completed low-power entry */
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
/* Private functions */
/**
* omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
* @dev: cpuidle device
* @drv: cpuidle driver
* @index: the index of state to be entered
*
* Called from the CPUidle framework to program the device to the
* specified low power state selected by the governor.
* Returns the amount of time spent in the low power state.
*/
/* C1 entry: a plain WFI, no power-domain programming required. */
static int omap_enter_idle_simple(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	omap_do_wfi();
	return index;
}
/*
 * Coupled C-state entry (C2/C3): CPU0 waits for CPU1 to actually reach
 * OFF, programs the MPUSS power/logic targets, enters low power, then
 * wakes CPU1 and restores CPU PM (and, for OSWR, cluster PM) context.
 * Broadcast timer notifications bracket the low-power window because
 * the local timers stop in these states.  Returns the entered index.
 */
static int omap_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct idle_statedata *cx = state_ptr + index;
	int cpu_id = smp_processor_id();
	/*
	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
	 * This is necessary to honour hardware recommondation
	 * of triggeing all the possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();
			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode. Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
				goto fail;
		}
	}
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	cpu_pm_enter();
	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 */
		if ((cx->mpu_state == PWRDM_POWER_RET) &&
			(cx->mpu_logic_state == PWRDM_POWER_OFF))
			cpu_cluster_pm_enter();
	}
	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
	cpu_done[dev->cpu] = true;
	/* Wakeup CPU1 only if it is not offlined */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		clkdm_wakeup(cpu_clkdm[1]);
		omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
		clkdm_allow_idle(cpu_clkdm[1]);
	}
	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();
	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if ((cx->mpu_state == PWRDM_POWER_RET) &&
		(cx->mpu_logic_state == PWRDM_POWER_OFF))
		cpu_cluster_pm_exit();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
fail:
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;
	return index;
}
/*
* For each cpu, setup the broadcast timer because local timers
* stops for the states above C1.
*/
/* Runs on every CPU via on_each_cpu(); @arg is unused. */
static void omap_setup_broadcast_timer(void *arg)
{
	int cpu = smp_processor_id();
	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
}
/*
 * OMAP4 cpuidle driver: three states.  C2/C3 are CPUIDLE_FLAG_COUPLED
 * because the MPUSS low-power modes need both CPUs to idle together
 * (see omap_enter_idle_coupled()).  Latencies/residencies are in us.
 */
static struct cpuidle_driver omap4_idle_driver = {
	.name				= "omap4_idle",
	.owner				= THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.flags = CPUIDLE_FLAG_TIME_VALID,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx ON, MPUSS ON"
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C2",
			.desc = "CPUx OFF, MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
	.safe_state_index = 0,
};
/* Public functions */
/**
* omap4_idle_init - Init routine for OMAP4+ idle
*
* Registers the OMAP4+ specific cpuidle driver to the cpuidle
* framework with the valid set of states.
*/
int __init omap4_idle_init(void)
{
	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
	/* all three power domains are required by the C-state table */
	if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
		return -ENODEV;
	cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
	cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
	if (!cpu_clkdm[0] || !cpu_clkdm[1])
		return -ENODEV;
	/* Configure the broadcast timer on each cpu */
	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
	return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
}
| gpl-2.0 |
botioni/aml_linux_kernel | arch/arm/mach-bcmring/clock.c | 1504 | 5119 | /*****************************************************************************
* Copyright 2001 - 2009 Broadcom Corporation. All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
#include <mach/csp/hw_cfg.h>
#include <mach/csp/chipcHw_def.h>
#include <mach/csp/chipcHw_reg.h>
#include <mach/csp/chipcHw_inline.h>
#include <asm/clkdev.h>
#include "clock.h"
#define clk_is_primary(x) ((x)->type & CLK_TYPE_PRIMARY)
#define clk_is_pll1(x) ((x)->type & CLK_TYPE_PLL1)
#define clk_is_pll2(x) ((x)->type & CLK_TYPE_PLL2)
#define clk_is_programmable(x) ((x)->type & CLK_TYPE_PROGRAMMABLE)
#define clk_is_bypassable(x) ((x)->type & CLK_TYPE_BYPASSABLE)
#define clk_is_using_xtal(x) ((x)->mode & CLK_MODE_XTAL)
static DEFINE_SPINLOCK(clk_lock);
/*
 * Recursively enable @clk's parent chain, then @clk itself.  Hardware
 * is only touched on the 0 -> 1 use-count transition; the enable path
 * depends on the clock type (PLL1, PLL2, crystal bypass, or a plain
 * PLL-derived clock).  Caller must hold clk_lock.
 */
static void __clk_enable(struct clk *clk)
{
	if (!clk)
		return;
	/* enable parent clock first */
	if (clk->parent)
		__clk_enable(clk->parent);
	if (clk->use_cnt++ == 0) {
		if (clk_is_pll1(clk)) {	/* PLL1 */
			chipcHw_pll1Enable(clk->rate_hz, 0);
		} else if (clk_is_pll2(clk)) {	/* PLL2 */
			chipcHw_pll2Enable(clk->rate_hz);
		} else if (clk_is_using_xtal(clk)) {	/* source is crystal */
			if (!clk_is_primary(clk))
				chipcHw_bypassClockEnable(clk->csp_id);
		} else {	/* source is PLL */
			chipcHw_setClockEnable(clk->csp_id);
		}
	}
}
/*
 * clk_enable - enable a clock (and, recursively, its parents).
 * Serialised by clk_lock with interrupts disabled.  Returns 0,
 * or -EINVAL for a NULL clock.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	if (!clk)
		return -EINVAL;
	spin_lock_irqsave(&clk_lock, flags);
	__clk_enable(clk);
	spin_unlock_irqrestore(&clk_lock, flags);
	return 0;
}
EXPORT_SYMBOL(clk_enable);
/*
 * Drop one use-count reference on @clk; on the 1 -> 0 transition the
 * hardware is disabled (mirroring __clk_enable()'s type-specific
 * paths), then the parent chain is released.  BUGs on an unbalanced
 * disable.  Caller must hold clk_lock.
 */
static void __clk_disable(struct clk *clk)
{
	if (!clk)
		return;
	BUG_ON(clk->use_cnt == 0);
	if (--clk->use_cnt == 0) {
		if (clk_is_pll1(clk)) {	/* PLL1 */
			chipcHw_pll1Disable();
		} else if (clk_is_pll2(clk)) {	/* PLL2 */
			chipcHw_pll2Disable();
		} else if (clk_is_using_xtal(clk)) {	/* source is crystal */
			if (!clk_is_primary(clk))
				chipcHw_bypassClockDisable(clk->csp_id);
		} else {	/* source is PLL */
			chipcHw_setClockDisable(clk->csp_id);
		}
	}
	if (clk->parent)
		__clk_disable(clk->parent);
}
/*
 * clk_disable - disable a clock (and release its parent chain).
 * Serialised by clk_lock with interrupts disabled; a NULL clock is
 * silently ignored.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;
	if (!clk)
		return;
	spin_lock_irqsave(&clk_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clk_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
/*
 * clk_get_rate - return the cached rate of @clk in Hz.
 * A NULL clock reads as 0 Hz.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	return clk ? clk->rate_hz : 0;
}
EXPORT_SYMBOL(clk_get_rate);
/*
 * clk_round_rate - report the rate the hardware would actually use for
 * @rate: the requested rate clamped to the parent's rate.  Fails with
 * -EINVAL for a NULL or non-programmable clock and -EBUSY while the
 * clock is in use.  Does not change any hardware state.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	unsigned long actual;
	unsigned long rate_hz;
	if (!clk)
		return -EINVAL;
	if (!clk_is_programmable(clk))
		return -EINVAL;
	if (clk->use_cnt)
		return -EBUSY;
	spin_lock_irqsave(&clk_lock, flags);
	actual = clk->parent->rate_hz;
	rate_hz = min(actual, rate);
	spin_unlock_irqrestore(&clk_lock, flags);
	return rate_hz;
}
EXPORT_SYMBOL(clk_round_rate);
/*
 * clk_set_rate - program @clk to @rate (clamped to the parent's rate),
 * then cache the rate the hardware actually granted.  Fails with
 * -EINVAL for a NULL or non-programmable clock and -EBUSY while the
 * clock is in use.  Returns 0 on success.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	unsigned long actual;
	unsigned long rate_hz;
	if (!clk)
		return -EINVAL;
	if (!clk_is_programmable(clk))
		return -EINVAL;
	if (clk->use_cnt)
		return -EBUSY;
	spin_lock_irqsave(&clk_lock, flags);
	actual = clk->parent->rate_hz;
	rate_hz = min(actual, rate);
	/* the hardware returns the frequency it actually achieved */
	rate_hz = chipcHw_setClockFrequency(clk->csp_id, rate_hz);
	clk->rate_hz = rate_hz;
	spin_unlock_irqrestore(&clk_lock, flags);
	return 0;
}
EXPORT_SYMBOL(clk_set_rate);
/*
 * clk_get_parent - return @clk's parent clock, or NULL when @clk is
 * NULL or has no parent.
 */
struct clk *clk_get_parent(struct clk *clk)
{
	return clk ? clk->parent : NULL;
}
EXPORT_SYMBOL(clk_get_parent);
/*
 * clk_set_parent - reparent @clk onto @parent.
 *
 * Only a bypassable clock may be reparented, and only onto a primary
 * clock; with more than one user the operation is refused (-EBUSY).
 * The CLK_MODE_XTAL flag is copied from the new parent, and if the
 * clock is currently enabled it is re-enabled under the new parent
 * before the old parent's reference is dropped, so it never glitches
 * to a fully-disabled state.  Returns 0 on success.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	struct clk *old_parent;
	if (!clk || !parent)
		return -EINVAL;
	if (!clk_is_primary(parent) || !clk_is_bypassable(clk))
		return -EINVAL;
	/* if more than one user, parent is not allowed */
	if (clk->use_cnt > 1)
		return -EBUSY;
	if (clk->parent == parent)
		return 0;
	spin_lock_irqsave(&clk_lock, flags);
	old_parent = clk->parent;
	clk->parent = parent;
	if (clk_is_using_xtal(parent))
		clk->mode |= CLK_MODE_XTAL;
	else
		clk->mode &= (~CLK_MODE_XTAL);
	/* if clock is active */
	if (clk->use_cnt != 0) {
		clk->use_cnt--;
		/* enable clock with the new parent */
		__clk_enable(clk);
		/* disable the old parent */
		__clk_disable(old_parent);
	}
	spin_unlock_irqrestore(&clk_lock, flags);
	return 0;
}
EXPORT_SYMBOL(clk_set_parent);
| gpl-2.0 |
alberto-fc/gg | net/ipv6/mip6.c | 1760 | 13638 | /*
* Copyright (C)2003-2006 Helsinki University of Technology
* Copyright (C)2003-2006 USAGI/WIDE Project
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* Authors:
* Noriaki TAKAMIYA @USAGI
* Masahide NAKAMURA @USAGI
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/rawv6.h>
#include <net/xfrm.h>
#include <net/mip6.h>
/*
 * Number of pad bytes needed in front of an option so that, after @len
 * bytes already emitted, the next option starts on an 8n+@n boundary
 * (IPv6 TLV alignment rules).  Result is always in [0, 7].
 */
static inline unsigned int calc_padlen(unsigned int len, unsigned int n)
{
	return (16 + n - len) % 8;
}
/*
 * Write @padlen bytes of IPv6 option padding at @data: a single Pad1
 * byte for one byte of padding, otherwise a PadN TLV whose payload is
 * zero-filled.  Returns the address just past the padding, or NULL if
 * @data is NULL.
 */
static inline void *mip6_padn(__u8 *data, __u8 padlen)
{
	if (!data)
		return NULL;

	switch (padlen) {
	case 0:
		break;
	case 1:
		data[0] = IPV6_TLV_PAD0;
		break;
	default:
		data[0] = IPV6_TLV_PADN;
		data[1] = padlen - 2;
		if (padlen > 2)
			memset(data + 2, 0, padlen - 2);
		break;
	}
	return data + padlen;
}
/* Send an ICMPv6 Parameter Problem back to the sender of @skb; @pos is
 * the offset of the offending byte from the start of the packet. */
static inline void mip6_param_prob(struct sk_buff *skb, u8 code, int pos)
{
	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
}
/*
 * Minimum value of the ip6mh_hdrlen field for a Mobility Header of the
 * given @type; 0 for BRR and for any type this code does not know.
 */
static int mip6_mh_len(int type)
{
	switch (type) {
	case IP6_MH_TYPE_HOTI:
	case IP6_MH_TYPE_COTI:
	case IP6_MH_TYPE_BU:
	case IP6_MH_TYPE_BACK:
		return 1;
	case IP6_MH_TYPE_HOT:
	case IP6_MH_TYPE_COT:
	case IP6_MH_TYPE_BERROR:
		return 2;
	case IP6_MH_TYPE_BRR:
	default:
		return 0;
	}
}
/*
 * Sanity-check a Mobility Header before handing it to a raw socket.
 * Returns 0 when the MH may be delivered, -1 when it is malformed (an
 * ICMPv6 Parameter Problem has then already been generated).
 */
static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
{
	struct ip6_mh *mh;

	/* Pull the fixed 8-byte MH head, then the whole header; the
	 * length byte at transport offset 1 counts 8-octet units
	 * excluding the first 8 bytes. */
	if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3))))
		return -1;

	mh = (struct ip6_mh *)skb_transport_header(skb);

	/* Header length must be at least the minimum for the MH type. */
	if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
		LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
			       mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type));
		mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) -
					 skb_network_header(skb)));
		return -1;
	}

	/* The payload proto of a Mobility Header must be IPPROTO_NONE. */
	if (mh->ip6mh_proto != IPPROTO_NONE) {
		LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
			       mh->ip6mh_proto);
		mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) -
					 skb_network_header(skb)));
		return -1;
	}

	return 0;
}
/*
 * State for rate-limiting binding-error reports to the mobility daemon:
 * a report is suppressed when it exactly matches the last allowed
 * (timestamp, iif, src, dst) tuple.  All fields are protected by @lock.
 */
struct mip6_report_rate_limiter {
	spinlock_t lock;
	struct timeval stamp;	/* timestamp of the last allowed report */
	int iif;		/* input interface of that report */
	struct in6_addr src;
	struct in6_addr dst;
};

static struct mip6_report_rate_limiter mip6_report_rl = {
	.lock = __SPIN_LOCK_UNLOCKED(mip6_report_rl.lock)
};
/*
 * Input side of the destination-options (HAO) xfrm state: verify that
 * the packet's outer source address matches the state's care-of
 * address.  Returns the next-header value on success, -ENOENT when the
 * address does not match.
 */
static int mip6_destopt_input(struct xfrm_state *x, struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct ipv6_destopt_hdr *destopt = (struct ipv6_destopt_hdr *)skb->data;
	int err = destopt->nexthdr;

	spin_lock(&x->lock);
	/* An all-zero care-of address in the state acts as a wildcard. */
	if (!ipv6_addr_equal(&iph->saddr, (struct in6_addr *)x->coaddr) &&
	    !ipv6_addr_any((struct in6_addr *)x->coaddr))
		err = -ENOENT;
	spin_unlock(&x->lock);

	return err;
}
/* Destination Option Header is inserted.
 * IP Header's src address is replaced with Home Address Option in
 * Destination Option Header.
 */
static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *iph;
	struct ipv6_destopt_hdr *dstopt;
	struct ipv6_destopt_hao *hao;
	u8 nexthdr;
	int len;

	/* Expose the header_len bytes reserved by init_state for the
	 * new destination-options header. */
	skb_push(skb, -skb_network_offset(skb));
	iph = ipv6_hdr(skb);

	/* Splice the new header into the next-header chain. */
	nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_DSTOPTS;

	dstopt = (struct ipv6_destopt_hdr *)skb_transport_header(skb);
	dstopt->nexthdr = nexthdr;

	/* Pad so the 16-byte home address lands on an 8n+6 boundary,
	 * then build the Home Address Option behind the padding. */
	hao = mip6_padn((char *)(dstopt + 1),
			calc_padlen(sizeof(*dstopt), 6));

	hao->type = IPV6_TLV_HAO;
	BUILD_BUG_ON(sizeof(*hao) != 18);
	hao->length = sizeof(*hao) - 2;

	len = ((char *)hao - (char *)dstopt) + sizeof(*hao);

	/* Swap addresses: the home address goes into the option, the
	 * care-of address becomes the packet's source address. */
	memcpy(&hao->addr, &iph->saddr, sizeof(hao->addr));
	spin_lock_bh(&x->lock);
	memcpy(&iph->saddr, x->coaddr, sizeof(iph->saddr));
	spin_unlock_bh(&x->lock);

	WARN_ON(len != x->props.header_len);
	/* hdrlen is in 8-octet units, not counting the first 8. */
	dstopt->hdrlen = (x->props.header_len >> 3) - 1;

	return 0;
}
/*
 * Rate-limit gate for binding-error reports: returns 1 when a report
 * for this (timestamp, dst, src, iif) tuple may be sent and records
 * the tuple, 0 when it duplicates the previously allowed one.
 */
static inline int mip6_report_rl_allow(struct timeval *stamp,
				       const struct in6_addr *dst,
				       const struct in6_addr *src, int iif)
{
	int allow = 0;

	spin_lock_bh(&mip6_report_rl.lock);
	if (mip6_report_rl.stamp.tv_sec != stamp->tv_sec ||
	    mip6_report_rl.stamp.tv_usec != stamp->tv_usec ||
	    mip6_report_rl.iif != iif ||
	    !ipv6_addr_equal(&mip6_report_rl.src, src) ||
	    !ipv6_addr_equal(&mip6_report_rl.dst, dst)) {
		/* New tuple: remember it and let the report through. */
		mip6_report_rl.stamp.tv_sec = stamp->tv_sec;
		mip6_report_rl.stamp.tv_usec = stamp->tv_usec;
		mip6_report_rl.iif = iif;
		ipv6_addr_copy(&mip6_report_rl.src, src);
		ipv6_addr_copy(&mip6_report_rl.dst, dst);
		allow = 1;
	}
	spin_unlock_bh(&mip6_report_rl.lock);
	return allow;
}
/*
 * Called when a packet matched this state's policy but could not be
 * transformed (no binding established): notify the mobility daemon via
 * km_report() so it can act on the missing binding.  Reports are
 * rate-limited per (timestamp, addresses, iif) tuple.
 */
static int mip6_destopt_reject(struct xfrm_state *x, struct sk_buff *skb,
			       const struct flowi *fl)
{
	struct net *net = xs_net(x);
	struct inet6_skb_parm *opt = (struct inet6_skb_parm *)skb->cb;
	const struct flowi6 *fl6 = &fl->u.ip6;
	struct ipv6_destopt_hao *hao = NULL;
	struct xfrm_selector sel;
	int offset;
	struct timeval stamp;
	int err = 0;

	/* Mobility Header signalling itself is never reported. */
	if (unlikely(fl6->flowi6_proto == IPPROTO_MH &&
		     fl6->fl6_mh_type <= IP6_MH_TYPE_MAX))
		goto out;

	/* If the packet carried a Home Address option, report the home
	 * address instead of the (care-of) source address. */
	if (likely(opt->dsthao)) {
		offset = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
		if (likely(offset >= 0))
			hao = (struct ipv6_destopt_hao *)
					(skb_network_header(skb) + offset);
	}

	skb_get_timestamp(skb, &stamp);

	if (!mip6_report_rl_allow(&stamp, &ipv6_hdr(skb)->daddr,
				  hao ? &hao->addr : &ipv6_hdr(skb)->saddr,
				  opt->iif))
		goto out;

	/* Build a fully-specified selector describing this flow. */
	memset(&sel, 0, sizeof(sel));
	memcpy(&sel.daddr, (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
	       sizeof(sel.daddr));
	sel.prefixlen_d = 128;
	memcpy(&sel.saddr, (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
	       sizeof(sel.saddr));
	sel.prefixlen_s = 128;
	sel.family = AF_INET6;
	sel.proto = fl6->flowi6_proto;
	sel.dport = xfrm_flowi_dport(fl, &fl6->uli);
	if (sel.dport)
		sel.dport_mask = htons(~0);
	sel.sport = xfrm_flowi_sport(fl, &fl6->uli);
	if (sel.sport)
		sel.sport_mask = htons(~0);
	sel.ifindex = fl6->flowi6_oif;

	err = km_report(net, IPPROTO_DSTOPTS, &sel,
			(hao ? (xfrm_address_t *)&hao->addr : NULL));

out:
	return err;
}
/*
 * Compute the byte offset at which the MIPv6 destination-options header
 * must be inserted: after any hop-by-hop and routing headers, before
 * everything else.  On return, *nexthdr points at the next-header byte
 * that has to be rewritten when splicing the header in.
 */
static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb,
			       u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr =
				   (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
	const unsigned char *nh = skb_network_header(skb);
	unsigned int packet_len = skb->tail - skb->network_header;
	int found_rhdr = 0;

	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	/* Walk the extension-header chain while it stays in bounds. */
	while (offset + 1 <= packet_len) {
		switch (**nexthdr) {
		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
			/*
			 * HAO MUST NOT appear more than once.
			 * XXX: It is better to try to find by the end of
			 * XXX: packet if HAO exists.
			 */
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
				LIMIT_NETDEBUG(KERN_WARNING "mip6: hao exists already, override\n");
				return offset;
			}
			/* A destopt after a routing header is where the
			 * HAO destopt belongs. */
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(nh + offset);
	}

	return offset;
}
/*
 * Validate and initialize a destination-options xfrm state: MIPv6
 * states carry no SPI and only make sense in route-optimization mode.
 * Reserves header space for destopt header + padding + HAO.
 */
static int mip6_destopt_init_state(struct xfrm_state *x)
{
	if (x->id.spi) {
		printk(KERN_INFO "%s: spi is not 0: %u\n", __func__,
		       x->id.spi);
		return -EINVAL;
	}

	if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
		printk(KERN_INFO "%s: state's mode is not %u: %u\n",
		       __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
		return -EINVAL;
	}

	/* Room for the header itself, 8n+6 alignment padding for the
	 * home address, and the Home Address Option. */
	x->props.header_len = sizeof(struct ipv6_destopt_hdr) +
			      calc_padlen(sizeof(struct ipv6_destopt_hdr), 6) +
			      sizeof(struct ipv6_destopt_hao);
	WARN_ON(x->props.header_len != 24);

	return 0;
}
/*
 * Do nothing about destroying since it has no specific operation for
 * destination options header unlike IPsec protocols.
 */
static void mip6_destopt_destroy(struct xfrm_state *x)
{
}

/* xfrm type for the MIPv6 destination-options (home address) header.
 * XFRM_TYPE_LOCAL_COADDR: the care-of address belongs to this node. */
static const struct xfrm_type mip6_destopt_type =
{
	.description	= "MIP6DESTOPT",
	.owner		= THIS_MODULE,
	.proto	     	= IPPROTO_DSTOPTS,
	.flags		= XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_LOCAL_COADDR,
	.init_state	= mip6_destopt_init_state,
	.destructor	= mip6_destopt_destroy,
	.input		= mip6_destopt_input,
	.output		= mip6_destopt_output,
	.reject		= mip6_destopt_reject,
	.hdr_offset	= mip6_destopt_offset,
};
/*
 * Input side of the type-2 routing-header xfrm state: verify the
 * packet's destination address against the state's care-of address.
 * Returns the next-header value on success, -ENOENT on mismatch.
 */
static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
	int err = rt2->rt_hdr.nexthdr;

	spin_lock(&x->lock);
	/* An all-zero care-of address in the state acts as a wildcard. */
	if (!ipv6_addr_equal(&iph->daddr, (struct in6_addr *)x->coaddr) &&
	    !ipv6_addr_any((struct in6_addr *)x->coaddr))
		err = -ENOENT;
	spin_unlock(&x->lock);

	return err;
}
/* Routing Header type 2 is inserted.
 * IP Header's dst address is replaced with Routing Header's Home Address.
 */
static int mip6_rthdr_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *iph;
	struct rt2_hdr *rt2;
	u8 nexthdr;

	/* Expose the header_len bytes reserved for the RT2 header. */
	skb_push(skb, -skb_network_offset(skb));
	iph = ipv6_hdr(skb);

	/* Splice the new header into the next-header chain. */
	nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ROUTING;

	rt2 = (struct rt2_hdr *)skb_transport_header(skb);
	rt2->rt_hdr.nexthdr = nexthdr;
	/* hdrlen is in 8-octet units, not counting the first 8. */
	rt2->rt_hdr.hdrlen = (x->props.header_len >> 3) - 1;
	rt2->rt_hdr.type = IPV6_SRCRT_TYPE_2;
	rt2->rt_hdr.segments_left = 1;
	memset(&rt2->reserved, 0, sizeof(rt2->reserved));

	WARN_ON(rt2->rt_hdr.hdrlen != 2);

	/* Swap addresses: the home address goes into the routing header,
	 * the care-of address becomes the packet's destination. */
	memcpy(&rt2->addr, &iph->daddr, sizeof(rt2->addr));
	spin_lock_bh(&x->lock);
	memcpy(&iph->daddr, x->coaddr, sizeof(iph->daddr));
	spin_unlock_bh(&x->lock);

	return 0;
}
/*
 * Compute the byte offset at which the type-2 routing header must be
 * inserted: after hop-by-hop and any type-0 routing header, but before
 * a destination-options header carrying a HAO.  On return, *nexthdr
 * points at the next-header byte to rewrite during insertion.
 */
static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb,
			     u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr =
				   (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
	const unsigned char *nh = skb_network_header(skb);
	unsigned int packet_len = skb->tail - skb->network_header;
	int found_rhdr = 0;

	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	/* Walk the extension-header chain while it stays in bounds. */
	while (offset + 1 <= packet_len) {
		switch (**nexthdr) {
		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			/* Insert before any non-type-0 routing header. */
			if (offset + 3 <= packet_len) {
				struct ipv6_rt_hdr *rt;
				rt = (struct ipv6_rt_hdr *)(nh + offset);
				if (rt->type != 0)
					return offset;
			}
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
			/* A destopt with a HAO marks the insertion point. */
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				return offset;
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(nh + offset);
	}

	return offset;
}
/*
 * Validate and initialize a type-2 routing-header xfrm state: MIPv6
 * states carry no SPI and only make sense in route-optimization mode.
 * Reserves header space for one rt2_hdr.
 */
static int mip6_rthdr_init_state(struct xfrm_state *x)
{
	if (x->id.spi) {
		printk(KERN_INFO "%s: spi is not 0: %u\n", __func__,
		       x->id.spi);
		return -EINVAL;
	}

	if (x->props.mode != XFRM_MODE_ROUTEOPTIMIZATION) {
		printk(KERN_INFO "%s: state's mode is not %u: %u\n",
		       __func__, XFRM_MODE_ROUTEOPTIMIZATION, x->props.mode);
		return -EINVAL;
	}

	x->props.header_len = sizeof(struct rt2_hdr);

	return 0;
}
/*
 * Do nothing about destroying since it has no specific operation for routing
 * header type 2 unlike IPsec protocols.
 */
static void mip6_rthdr_destroy(struct xfrm_state *x)
{
}

/* xfrm type for the MIPv6 type-2 routing header.
 * XFRM_TYPE_REMOTE_COADDR: the care-of address belongs to the peer. */
static const struct xfrm_type mip6_rthdr_type =
{
	.description	= "MIP6RT",
	.owner		= THIS_MODULE,
	.proto	     	= IPPROTO_ROUTING,
	.flags		= XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_REMOTE_COADDR,
	.init_state	= mip6_rthdr_init_state,
	.destructor	= mip6_rthdr_destroy,
	.input		= mip6_rthdr_input,
	.output		= mip6_rthdr_output,
	.hdr_offset	= mip6_rthdr_offset,
};
/*
 * Module init: register both xfrm types, then hook the raw-socket
 * mobility-header filter; on any failure, unwind in reverse order.
 */
static int __init mip6_init(void)
{
	printk(KERN_INFO "Mobile IPv6\n");

	if (xfrm_register_type(&mip6_destopt_type, AF_INET6) < 0) {
		printk(KERN_INFO "%s: can't add xfrm type(destopt)\n", __func__);
		goto err_destopt;
	}
	if (xfrm_register_type(&mip6_rthdr_type, AF_INET6) < 0) {
		printk(KERN_INFO "%s: can't add xfrm type(rthdr)\n", __func__);
		goto err_rthdr;
	}
	if (rawv6_mh_filter_register(mip6_mh_filter) < 0) {
		printk(KERN_INFO "%s: can't add rawv6 mh filter\n", __func__);
		goto err_mh_filter;
	}
	return 0;

err_mh_filter:
	xfrm_unregister_type(&mip6_rthdr_type, AF_INET6);
err_rthdr:
	xfrm_unregister_type(&mip6_destopt_type, AF_INET6);
err_destopt:
	return -EAGAIN;
}
/* Module unload: tear everything down in the reverse order of
 * mip6_init(); failures are only logged. */
static void __exit mip6_fini(void)
{
	if (rawv6_mh_filter_unregister(mip6_mh_filter) < 0)
		printk(KERN_INFO "%s: can't remove rawv6 mh filter\n", __func__);
	if (xfrm_unregister_type(&mip6_rthdr_type, AF_INET6) < 0)
		printk(KERN_INFO "%s: can't remove xfrm type(rthdr)\n", __func__);
	if (xfrm_unregister_type(&mip6_destopt_type, AF_INET6) < 0)
		printk(KERN_INFO "%s: can't remove xfrm type(destopt)\n", __func__);
}
/* Module plumbing; the xfrm aliases let the xfrm core autoload this
 * module by protocol number. */
module_init(mip6_init);
module_exit(mip6_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_DSTOPTS);
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ROUTING);
| gpl-2.0 |
alexax66/KitKat_kernel_fortunave3g | drivers/net/ethernet/amd/lance.c | 2528 | 41377 | /* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
/*
Written/copyright 1993-1998 by Donald Becker.
Copyright 1993 United States Government as represented by the
Director, National Security Agency.
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
Annapolis MD 21403
Andrey V. Savochkin:
- alignment problem with 1.3.* kernel and some minor changes.
Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
- added support for Linux/Alpha, but removed most of it, because
it worked only for the PCI chip.
- added hook for the 32bit lance driver
- added PCnetPCI II (79C970A) to chip table
Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
- hopefully fix above so Linux/Alpha can use ISA cards too.
8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
v1.12 10/27/97 Module support -djb
v1.14 2/3/98 Module support modified, made PCI support optional -djb
v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
before unregister_netdev() which caused NULL pointer
reference later in the chain (in rtnetlink_fill_ifinfo())
-- Mika Kuoppala <miku@iki.fi>
Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
the 2.1 version of the old driver - Alan Cox
Get rid of check_region, check kmalloc return in lance_probe1
Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
Reworked detection, added support for Racal InterLan EtherBlaster cards
Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
*/
static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
/* I/O base addresses to probe, terminated by 0. */
static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
static int __init do_lance_probe(struct net_device *dev);
/*
 * Known board signatures: bytes read from I/O offsets 14 and 15 of the
 * station-address PROM window, used by do_lance_probe() to recognize a
 * card before touching the chip.
 */
static struct card {
	char id_offset14;
	char id_offset15;
} cards[] = {
	{	/* "normal" */
		.id_offset14 = 0x57,
		.id_offset15 = 0x57,
	},
	{	/* NI6510EB */
		.id_offset14 = 0x52,
		.id_offset15 = 0x44,
	},
	{	/* Racal InterLan EtherBlaster */
		.id_offset14 = 0x52,
		.id_offset15 = 0x49,
	},
};
/* Derive the count from the table itself so a new entry can never fall
 * out of sync with a hand-maintained constant (was hard-coded to 3). */
#define NUM_CARDS ((int)(sizeof(cards) / sizeof(cards[0])))
/* Message verbosity: compile-time default via LANCE_DEBUG, overridable
 * at module load time through the lance_debug parameter. */
#ifdef LANCE_DEBUG
static int lance_debug = LANCE_DEBUG;
#else
static int lance_debug = 1;
#endif
/*
Theory of Operation
I. Board Compatibility
This device driver is designed for the AMD 79C960, the "PCnet-ISA
single-chip ethernet controller for ISA". This chip is used in a wide
variety of boards from vendors such as Allied Telesis, HP, Kingston,
and Boca. This driver is also intended to work with older AMD 7990
designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
I use the name LANCE to refer to all of the AMD chips, even though it properly
refers only to the original 7990.
II. Board-specific settings
The driver is designed to work the boards that use the faster
bus-master mode, rather than in shared memory mode. (Only older designs
have on-board buffer memory needed to support the slower shared memory mode.)
Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
channel. This driver probes the likely base addresses:
{0x300, 0x320, 0x340, 0x360}.
After the board is found it generates a DMA-timeout interrupt and uses
autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
probed for by enabling each free DMA channel in turn and checking if
initialization succeeds.
The HP-J2405A board is an exception: with this board it is easy to read the
EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
_know_ the base address -- that field is for writing the EEPROM.)
III. Driver operation
IIIa. Ring buffers
The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
the base and length of the data buffer, along with status bits. The length
of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
the buffer length (rather than being directly the buffer length) for
implementation ease. The current values are 2 (Tx) and 4 (Rx), which leads to
ring sizes of 4 (Tx) and 16 (Rx). Increasing the number of ring entries
needlessly uses extra space and reduces the chance that an upper layer will
be able to reorder queued Tx packets based on priority. Decreasing the number
of entries makes it more difficult to achieve back-to-back packet transmission
and increases the chance that Rx ring will overflow. (Consider the worst case
of receiving back-to-back minimum-sized packets.)
The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
avoid the administrative overhead. For the Rx side this avoids dynamically
allocating full-sized buffers "just in case", at the expense of a
memory-to-memory data copy for each packet received. For most systems this
is a good tradeoff: the Rx buffer will always be in low memory, the copy
is inexpensive, and it primes the cache for later packet processing. For Tx
the buffers are only used when needed as low-memory bounce buffers.
IIIB. 16M memory limitations.
For the ISA bus master mode all structures used directly by the LANCE,
the initialization block, Rx and Tx rings, and data buffers, must be
accessible from the ISA bus, i.e. in the lower 16M of real memory.
This is a problem for current Linux kernels on >16M machines. The network
devices are initialized after memory initialization, and the kernel doles out
memory from the top of memory downward. The current solution is to have a
special network initialization routine that's called before memory
initialization; this will eventually be generalized for all network devices.
As mentioned before, low-memory "bounce-buffers" are used when needed.
IIIC. Synchronization
The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and other software.
The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished otherwise it sets
the 'lp->tx_full' flag.
The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
we can't avoid the interrupt overhead by having the Tx routine reap the Tx
stats.) After reaping the stats, it marks the queue entry as empty by setting
the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
tx_full and tbusy flags.
*/
/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
Reasonable default values are 16 Tx buffers, and 16 Rx buffers.
That translates to 4 and 4 (16 == 2^^4).
This is a compile-time option for efficiency.
*/
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif
#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
#define PKT_BUF_SZ 1544
/* Offsets from base I/O address. */
#define LANCE_DATA 0x10
#define LANCE_ADDR 0x12
#define LANCE_RESET 0x14
#define LANCE_BUS_IF 0x16
#define LANCE_TOTAL_SIZE 0x18
#define TX_TIMEOUT (HZ/5)
/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
	s32 base;		/* Buffer bus address plus status bits. */
	s16 buf_length;		/* This length is 2s complement (negative)! */
	s16 msg_length;		/* This length is "normal". */
};

struct lance_tx_head {
	s32 base;		/* Buffer bus address plus status bits. */
	s16 length;		/* Length is 2s complement (negative)! */
	s16 misc;		/* Error and TDR bits after transmission. */
};

/* The LANCE initialization block, described in databook. */
struct lance_init_block {
	u16 mode;		/* Pre-set mode (reg. 15) */
	u8  phys_addr[6];	/* Physical ethernet address */
	u32 filter[2];		/* Multicast filter (unused). */
	/* Receive and transmit ring base, along with extra bits. */
	u32 rx_ring;		/* Tx and Rx ring base pointers */
	u32 tx_ring;
};

/* Per-device state; allocated from ISA DMA-able memory (GFP_DMA)
 * because the chip reads the rings and init block directly. */
struct lance_private {
	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
	struct lance_rx_head rx_ring[RX_RING_SIZE];
	struct lance_tx_head tx_ring[TX_RING_SIZE];
	struct lance_init_block init_block;
	const char *name;
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	unsigned long rx_buffs;		/* Address of Rx and Tx buffers. */
	/* Tx low-memory "bounce buffer" address. */
	char (*tx_bounce_buffs)[PKT_BUF_SZ];
	int cur_rx, cur_tx;		/* The next free ring entry */
	int dirty_rx, dirty_tx;		/* The ring entries to be free()ed. */
	int dma;			/* ISA DMA channel (4 = none needed). */
	unsigned char chip_version;	/* See lance_chip_type. */
	spinlock_t devlock;		/* Serializes xmit vs. interrupt. */
};
/* Per-chip quirk flags used in lance_chip_type.flags. */
#define LANCE_MUST_PAD          0x00000001	/* Pad short frames by hand. */
#define LANCE_ENABLE_AUTOSELECT 0x00000002	/* Has media autoselect. */
#define LANCE_MUST_REINIT_RING  0x00000004	/* Reinit ring after stop. */
#define LANCE_MUST_UNRESET      0x00000008	/* Needs explicit un-reset. */
#define LANCE_HAS_MISSED_FRAME  0x00000010	/* Has missed-frame counter. */

/* A mapping from the chip ID number to the part number and features.
   These are from the datasheets -- in real life the '970 version
   reportedly has the same ID as the '965. */
static struct lance_chip_type {
	int id_number;
	const char *name;
	int flags;
} chip_table[] = {
	{0x0000, "LANCE 7990",				/* Ancient lance chip.  */
		LANCE_MUST_PAD + LANCE_MUST_UNRESET},
	{0x0003, "PCnet/ISA 79C960",		/* 79C960 PCnet/ISA.  */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2260, "PCnet/ISA+ 79C961",		/* 79C961 PCnet/ISA+, Plug-n-Play.  */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2420, "PCnet/PCI 79C970",		/* 79C970 or 79C974 PCnet-SCSI, PCI. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	/* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
		it the PCnet32. */
	{0x2430, "PCnet32",					/* 79C965 PCnet for VL bus. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
        {0x2621, "PCnet/PCI-II 79C970A",        /* 79C970A PCInetPCI II. */
                LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
                        LANCE_HAS_MISSED_FRAME},
	{0x0, 	 "PCnet (unknown)",
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
};

/* Indices into chip_table[] for the entries above. */
enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
Assume yes until we know the memory size. */
static unsigned char lance_need_isa_bounce_buffers = 1;
static int lance_open(struct net_device *dev);
static void lance_init_ring(struct net_device *dev, gfp_t mode);
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int lance_rx(struct net_device *dev);
static irqreturn_t lance_interrupt(int irq, void *dev_id);
static int lance_close(struct net_device *dev);
static struct net_device_stats *lance_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void lance_tx_timeout (struct net_device *dev);
#ifdef MODULE
#define MAX_CARDS		8	/* Max number of interfaces (cards) per module */

/* Per-card module parameters; io[] is mandatory when loaded as a module. */
static struct net_device *dev_lance[MAX_CARDS];
static int io[MAX_CARDS];
static int dma[MAX_CARDS];
static int irq[MAX_CARDS];

module_param_array(io, int, NULL, 0);
module_param_array(dma, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es),required");
MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
/*
 * Module entry point: probe one card per io[] slot.  Autoprobing is
 * refused (an explicit io= is required); probing stops at the first
 * empty slot or failed probe.  Returns 0 if at least one card was
 * found, -EPERM with no io= given, -ENXIO otherwise.
 */
int __init init_module(void)
{
	int slot, num_found = 0;

	for (slot = 0; slot < MAX_CARDS; slot++) {
		struct net_device *dev;

		if (io[slot] == 0) {
			if (slot != 0) /* only complain once */
				break;
			printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
			return -EPERM;
		}

		dev = alloc_etherdev(0);
		if (!dev)
			break;

		dev->irq = irq[slot];
		dev->base_addr = io[slot];
		dev->dma = dma[slot];

		if (do_lance_probe(dev) != 0) {
			free_netdev(dev);
			break;
		}
		dev_lance[num_found++] = dev;
	}

	return num_found ? 0 : -ENXIO;
}
/* Release every resource acquired by lance_probe1() for @dev. */
static void cleanup_card(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	/* dma == 4 means "bus master, no ISA DMA channel was claimed". */
	if (dev->dma != 4)
		free_dma(dev->dma);
	release_region(dev->base_addr, LANCE_TOTAL_SIZE);
	kfree(lp->tx_bounce_buffs);
	kfree((void*)lp->rx_buffs);
	kfree(lp);
}
/* Module exit: unregister and free every probed card. */
void __exit cleanup_module(void)
{
	int i;

	for (i = 0; i < MAX_CARDS; i++) {
		struct net_device *dev = dev_lance[i];

		if (!dev)
			continue;
		unregister_netdev(dev);
		cleanup_card(dev);
		free_netdev(dev);
	}
}
#endif /* MODULE */
MODULE_LICENSE("GPL");
/* Starting in v2.1.*, the LANCE/PCnet probe is now similar to the other
board probes now that kmalloc() can allocate ISA DMA-able regions.
This also allows the LANCE driver to be used as a module.
*/
/*
 * Scan lance_portlist[] for a board whose signature bytes (I/O offsets
 * 14/15) match a known card, then hand it to lance_probe1().  Returns 0
 * on success (the I/O region stays claimed and is renamed after the
 * detected chip), -ENODEV when nothing is found.
 */
static int __init do_lance_probe(struct net_device *dev)
{
	unsigned int *port;
	int result;

	/* Bounce buffers are only needed when RAM above 16MB exists,
	 * i.e. when buffers could land outside ISA DMA reach. */
	if (high_memory <= phys_to_virt(16*1024*1024))
		lance_need_isa_bounce_buffers = 0;

	for (port = lance_portlist; *port; port++) {
		int ioaddr = *port;
		struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
							"lance-probe");

		if (r) {
			/* Detect the card with minimal I/O reads */
			char offset14 = inb(ioaddr + 14);
			int card;
			for (card = 0; card < NUM_CARDS; ++card)
				if (cards[card].id_offset14 == offset14)
					break;
			if (card < NUM_CARDS) {/*yes, the first byte matches*/
				char offset15 = inb(ioaddr + 15);
				for (card = 0; card < NUM_CARDS; ++card)
					if ((cards[card].id_offset14 == offset14) &&
						(cards[card].id_offset15 == offset15))
						break;
			}
			if (card < NUM_CARDS) { /*Signature OK*/
				result = lance_probe1(dev, ioaddr, 0, 0);
				if (!result) {
					struct lance_private *lp = dev->ml_priv;
					int ver = lp->chip_version;

					r->name = chip_table[ver].name;
					return 0;
				}
			}
			release_region(ioaddr, LANCE_TOTAL_SIZE);
		}
	}
	return -ENODEV;
}
#ifndef MODULE
/*
 * Built-in (non-module) probe entry: allocate an ethernet device named
 * eth<unit>, apply boot-time settings and run the port scan.  Returns
 * the device on success, an ERR_PTR on failure.
 */
struct net_device * __init lance_probe(int unit)
{
	int err;
	struct net_device *dev = alloc_etherdev(0);

	if (!dev)
		return ERR_PTR(-ENODEV);

	sprintf(dev->name, "eth%d", unit);
	netdev_boot_setup_check(dev);

	err = do_lance_probe(dev);
	if (!err)
		return dev;

	free_netdev(dev);
	return ERR_PTR(err);
}
#endif
/* net_device callbacks; generic eth_* helpers cover MTU/MAC handling. */
static const struct net_device_ops lance_netdev_ops = {
	.ndo_open 		= lance_open,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_stop		= lance_close,
	.ndo_get_stats		= lance_get_stats,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
{
struct lance_private *lp;
unsigned long dma_channels; /* Mark spuriously-busy DMA channels */
int i, reset_val, lance_version;
const char *chipname;
/* Flags for specific chips or boards. */
unsigned char hpJ2405A = 0; /* HP ISA adaptor */
int hp_builtin = 0; /* HP on-board ethernet. */
static int did_version; /* Already printed version info. */
unsigned long flags;
int err = -ENOMEM;
void __iomem *bios;
/* First we look for special cases.
Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
There are two HP versions, check the BIOS for the configuration port.
This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
*/
bios = ioremap(0xf00f0, 0x14);
if (!bios)
return -ENOMEM;
if (readw(bios + 0x12) == 0x5048) {
static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
int hp_port = (readl(bios + 1) & 1) ? 0x499 : 0x99;
/* We can have boards other than the built-in! Verify this is on-board. */
if ((inb(hp_port) & 0xc0) == 0x80 &&
ioaddr_table[inb(hp_port) & 3] == ioaddr)
hp_builtin = hp_port;
}
iounmap(bios);
/* We also recognize the HP Vectra on-board here, but check below. */
hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
inb(ioaddr+2) == 0x09);
/* Reset the LANCE. */
reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
/* The Un-Reset needed is only needed for the real NE2100, and will
confuse the HP board. */
if (!hpJ2405A)
outw(reset_val, ioaddr+LANCE_RESET);
outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
if (inw(ioaddr+LANCE_DATA) != 0x0004)
return -ENODEV;
/* Get the version of the chip. */
outw(88, ioaddr+LANCE_ADDR);
if (inw(ioaddr+LANCE_ADDR) != 88) {
lance_version = 0;
} else { /* Good, it's a newer chip. */
int chip_version = inw(ioaddr+LANCE_DATA);
outw(89, ioaddr+LANCE_ADDR);
chip_version |= inw(ioaddr+LANCE_DATA) << 16;
if (lance_debug > 2)
printk(" LANCE chip version is %#x.\n", chip_version);
if ((chip_version & 0xfff) != 0x003)
return -ENODEV;
chip_version = (chip_version >> 12) & 0xffff;
for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
if (chip_table[lance_version].id_number == chip_version)
break;
}
}
/* We can't allocate private data from alloc_etherdev() because it must
a ISA DMA-able region. */
chipname = chip_table[lance_version].name;
printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);
/* There is a 16 byte station address PROM at the base address.
The first six bytes are the station address. */
for (i = 0; i < 6; i++)
dev->dev_addr[i] = inb(ioaddr + i);
printk("%pM", dev->dev_addr);
dev->base_addr = ioaddr;
/* Make certain the data structures used by the LANCE are aligned and DMAble. */
lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
if(lp==NULL)
return -ENODEV;
if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
dev->ml_priv = lp;
lp->name = chipname;
lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
GFP_DMA | GFP_KERNEL);
if (!lp->rx_buffs)
goto out_lp;
if (lance_need_isa_bounce_buffers) {
lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
GFP_DMA | GFP_KERNEL);
if (!lp->tx_bounce_buffs)
goto out_rx;
} else
lp->tx_bounce_buffs = NULL;
lp->chip_version = lance_version;
spin_lock_init(&lp->devlock);
lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
for (i = 0; i < 6; i++)
lp->init_block.phys_addr[i] = dev->dev_addr[i];
lp->init_block.filter[0] = 0x00000000;
lp->init_block.filter[1] = 0x00000000;
lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
outw(0x0001, ioaddr+LANCE_ADDR);
inw(ioaddr+LANCE_ADDR);
outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
outw(0x0002, ioaddr+LANCE_ADDR);
inw(ioaddr+LANCE_ADDR);
outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
outw(0x0000, ioaddr+LANCE_ADDR);
inw(ioaddr+LANCE_ADDR);
if (irq) { /* Set iff PCI card. */
dev->dma = 4; /* Native bus-master, no DMA channel needed. */
dev->irq = irq;
} else if (hp_builtin) {
static const char dma_tbl[4] = {3, 5, 6, 0};
static const char irq_tbl[4] = {3, 4, 5, 9};
unsigned char port_val = inb(hp_builtin);
dev->dma = dma_tbl[(port_val >> 4) & 3];
dev->irq = irq_tbl[(port_val >> 2) & 3];
printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
} else if (hpJ2405A) {
static const char dma_tbl[4] = {3, 5, 6, 7};
static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
short reset_val = inw(ioaddr+LANCE_RESET);
dev->dma = dma_tbl[(reset_val >> 2) & 3];
dev->irq = irq_tbl[(reset_val >> 4) & 7];
printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
} else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
short bus_info;
outw(8, ioaddr+LANCE_ADDR);
bus_info = inw(ioaddr+LANCE_BUS_IF);
dev->dma = bus_info & 0x07;
dev->irq = (bus_info >> 4) & 0x0F;
} else {
/* The DMA channel may be passed in PARAM1. */
if (dev->mem_start & 0x07)
dev->dma = dev->mem_start & 0x07;
}
if (dev->dma == 0) {
/* Read the DMA channel status register, so that we can avoid
stuck DMA channels in the DMA detection below. */
dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
(inb(DMA2_STAT_REG) & 0xf0);
}
err = -ENODEV;
if (dev->irq >= 2)
printk(" assigned IRQ %d", dev->irq);
else if (lance_version != 0) { /* 7990 boards need DMA detection first. */
unsigned long irq_mask;
/* To auto-IRQ we enable the initialization-done and DMA error
interrupts. For ISA boards we get a DMA error, but VLB and PCI
boards will work. */
irq_mask = probe_irq_on();
/* Trigger an initialization just for the interrupt. */
outw(0x0041, ioaddr+LANCE_DATA);
mdelay(20);
dev->irq = probe_irq_off(irq_mask);
if (dev->irq)
printk(", probed IRQ %d", dev->irq);
else {
printk(", failed to detect IRQ line.\n");
goto out_tx;
}
/* Check for the initialization done bit, 0x0100, which means
that we don't need a DMA channel. */
if (inw(ioaddr+LANCE_DATA) & 0x0100)
dev->dma = 4;
}
if (dev->dma == 4) {
printk(", no DMA needed.\n");
} else if (dev->dma) {
if (request_dma(dev->dma, chipname)) {
printk("DMA %d allocation failed.\n", dev->dma);
goto out_tx;
} else
printk(", assigned DMA %d.\n", dev->dma);
} else { /* OK, we have to auto-DMA. */
for (i = 0; i < 4; i++) {
static const char dmas[] = { 5, 6, 7, 3 };
int dma = dmas[i];
int boguscnt;
/* Don't enable a permanently busy DMA channel, or the machine
will hang. */
if (test_bit(dma, &dma_channels))
continue;
outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
if (request_dma(dma, chipname))
continue;
flags=claim_dma_lock();
set_dma_mode(dma, DMA_MODE_CASCADE);
enable_dma(dma);
release_dma_lock(flags);
/* Trigger an initialization. */
outw(0x0001, ioaddr+LANCE_DATA);
for (boguscnt = 100; boguscnt > 0; --boguscnt)
if (inw(ioaddr+LANCE_DATA) & 0x0900)
break;
if (inw(ioaddr+LANCE_DATA) & 0x0100) {
dev->dma = dma;
printk(", DMA %d.\n", dev->dma);
break;
} else {
flags=claim_dma_lock();
disable_dma(dma);
release_dma_lock(flags);
free_dma(dma);
}
}
if (i == 4) { /* Failure: bail. */
printk("DMA detection failed.\n");
goto out_tx;
}
}
if (lance_version == 0 && dev->irq == 0) {
/* We may auto-IRQ now that we have a DMA channel. */
/* Trigger an initialization just for the interrupt. */
unsigned long irq_mask;
irq_mask = probe_irq_on();
outw(0x0041, ioaddr+LANCE_DATA);
mdelay(40);
dev->irq = probe_irq_off(irq_mask);
if (dev->irq == 0) {
printk(" Failed to detect the 7990 IRQ line.\n");
goto out_dma;
}
printk(" Auto-IRQ detected IRQ%d.\n", dev->irq);
}
if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
/* Turn on auto-select of media (10baseT or BNC) so that the user
can watch the LEDs even if the board isn't opened. */
outw(0x0002, ioaddr+LANCE_ADDR);
/* Don't touch 10base2 power bit. */
outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
}
if (lance_debug > 0 && did_version++ == 0)
printk(version);
/* The LANCE-specific entries in the device structure. */
dev->netdev_ops = &lance_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
err = register_netdev(dev);
if (err)
goto out_dma;
return 0;
out_dma:
if (dev->dma != 4)
free_dma(dev->dma);
out_tx:
kfree(lp->tx_bounce_buffs);
out_rx:
kfree((void*)lp->rx_buffs);
out_lp:
kfree(lp);
return err;
}
/*
 * Bring the interface up: claim the IRQ, put the (permanently
 * allocated) DMA channel in cascade mode, reset and re-initialize the
 * LANCE, then start the transmit queue.
 *
 * Returns 0 on success, -EAGAIN when the interrupt line cannot be
 * acquired.
 */
static int
lance_open(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int i;

	if (dev->irq == 0 ||
		request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) {
		return -EAGAIN;
	}

	/* We used to allocate DMA here, but that was silly.
	   DMA lines can't be shared!  We now permanently allocate them. */

	/* Reset the LANCE */
	inw(ioaddr+LANCE_RESET);

	/* The DMA controller is used as a no-operation slave, "cascade mode". */
	if (dev->dma != 4) {
		unsigned long flags=claim_dma_lock();
		enable_dma(dev->dma);
		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
		release_dma_lock(flags);
	}

	/* Un-Reset the LANCE, needed only for the NE2100. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
		outw(0, ioaddr+LANCE_RESET);

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
		outw(0x0002, ioaddr+LANCE_ADDR);
		/* Only touch autoselect bit. */
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 1)
		printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
			   dev->name, dev->irq, dev->dma,
			   (u32) isa_virt_to_bus(lp->tx_ring),
			   (u32) isa_virt_to_bus(lp->rx_ring),
			   (u32) isa_virt_to_bus(&lp->init_block));

	lance_init_ring(dev, GFP_KERNEL);
	/* Re-initialize the LANCE, and start it when done.
	   The low/high halves of the init block bus address go into the
	   registers selected by 1 and 2. */
	outw(0x0001, ioaddr+LANCE_ADDR);
	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);

	outw(0x0004, ioaddr+LANCE_ADDR);
	outw(0x0915, ioaddr+LANCE_DATA);

	/* Select CSR0 and request initialization (INIT bit). */
	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0001, ioaddr+LANCE_DATA);

	netif_start_queue (dev);

	i = 0;
	/* Poll briefly for the init-done bit (0x0100) in CSR0. */
	while (i++ < 100)
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			break;
	/*
	 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
	 * reports that doing so triggers a bug in the '974.
	 */
	/* Enable interrupts and start the chip (0x0042). */
	outw(0x0042, ioaddr+LANCE_DATA);

	if (lance_debug > 2)
		printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
			   dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));

	return 0;	/* Always succeed */
}
/* The LANCE has been halted for one reason or another (busmaster memory
arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
etc.). Modern LANCE variants always reload their ring-buffer
configuration when restarted, so we must reinitialize our ring
context before restarting. As part of this reinitialization,
find all packets still on the Tx ring and pretend that they had been
sent (in effect, drop the packets on the floor) - the higher-level
protocols will time out and retransmit. It'd be better to shuffle
these skbs to a temp list and then actually re-Tx them after
restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
*/
static void
lance_purge_ring(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int idx;

	/* Release every skbuff still attached to the Rx ring and give
	   each descriptor back to the host (zero base = not chip-owned). */
	for (idx = 0; idx < RX_RING_SIZE; idx++) {
		struct sk_buff *pending = lp->rx_skbuff[idx];

		lp->rx_skbuff[idx] = NULL;
		lp->rx_ring[idx].base = 0;	/* Not owned by LANCE chip. */
		if (pending)
			dev_kfree_skb_any(pending);
	}

	/* Drop any packets that were still queued for transmission. */
	for (idx = 0; idx < TX_RING_SIZE; idx++) {
		struct sk_buff *pending = lp->tx_skbuff[idx];

		if (!pending)
			continue;
		lp->tx_skbuff[idx] = NULL;
		dev_kfree_skb_any(pending);
	}
}
/* Initialize the LANCE Rx and Tx rings. */
/*
 * (Re)build the Rx and Tx descriptor rings and the chip init block.
 * Each Rx slot gets a fresh PKT_BUF_SZ DMA-able buffer (skb-backed
 * when possible) and is handed to the LANCE; Tx slots are cleared.
 * Takes a GFP mask because it runs both at open time (GFP_KERNEL)
 * and from the restart path (GFP_ATOMIC).
 */
static void
lance_init_ring(struct net_device *dev, gfp_t gfp)
{
	struct lance_private *lp = dev->ml_priv;
	int i;

	lp->cur_rx = lp->cur_tx = 0;
	lp->dirty_rx = lp->dirty_tx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		void *rx_buff;

		skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
		lp->rx_skbuff[i] = skb;
		if (skb)
			rx_buff = skb->data;
		else
			/* Fall back to a bare DMA buffer when skb allocation fails. */
			rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
		if (rx_buff == NULL)
			lp->rx_ring[i].base = 0;	/* leave descriptor unowned */
		else
			/* Bit 31 is the ownership bit: give the buffer to the chip. */
			lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
		/* The chip expects the buffer length as a negative value. */
		lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
	}
	/* The Tx buffer address is filled in as needed, but we do need to clear
	   the upper ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		lp->tx_skbuff[i] = NULL;
		lp->tx_ring[i].base = 0;
	}

	/* Init block: normal Rx/Tx mode, station address, empty multicast
	   filter, and the 24-bit bus addresses of both rings plus their
	   length encodings. */
	lp->init_block.mode = 0x0000;
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
}
static void
lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int reinit_ring = must_reinit;

	/* Some chip revisions forget their ring layout across a restart
	   and must always have the rings rebuilt. */
	if (!reinit_ring &&
	    (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING))
		reinit_ring = 1;

	if (reinit_ring) {
		lance_purge_ring(dev);
		lance_init_ring(dev, GFP_ATOMIC);
	}

	/* Select CSR0 and write the requested restart bits. */
	outw(0x0000, ioaddr + LANCE_ADDR);
	outw(csr0_bits, ioaddr + LANCE_DATA);
}
/*
 * Transmit watchdog: the Tx queue has stalled.  Log CSR0, stop the
 * chip, count the error, optionally dump both rings when debugging,
 * then restart the LANCE (rebuilding the rings) and wake the queue.
 */
static void lance_tx_timeout (struct net_device *dev)
{
	struct lance_private *lp = (struct lance_private *) dev->ml_priv;
	int ioaddr = dev->base_addr;

	outw (0, ioaddr + LANCE_ADDR);
	printk ("%s: transmit timed out, status %4.4x, resetting.\n",
		dev->name, inw (ioaddr + LANCE_DATA));
	/* Stop the chip (0x0004 in CSR0) before reconfiguring it. */
	outw (0x0004, ioaddr + LANCE_DATA);
	dev->stats.tx_errors++;
#ifndef final_version
	if (lance_debug > 3) {
		int i;
		printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
			lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
			lp->cur_rx);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
				lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
				lp->rx_ring[i].msg_length);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
				lp->tx_ring[i].base, -lp->tx_ring[i].length,
				lp->tx_ring[i].misc);
		printk ("\n");
	}
#endif
	/* Full restart with forced ring re-init: the rings may be
	   inconsistent after the stall. */
	lance_restart (dev, 0x0043, 1);

	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue (dev);
}
/*
 * Queue one packet on the Tx ring.  Buffers that cross the 16MB ISA
 * DMA limit are copied into the pre-allocated bounce buffer for this
 * ring slot.  Always returns NETDEV_TX_OK: the packet is either
 * queued or already consumed.
 */
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int entry;
	unsigned long flags;

	spin_lock_irqsave(&lp->devlock, flags);

	if (lance_debug > 3) {
		outw(0x0000, ioaddr+LANCE_ADDR);
		printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
			   inw(ioaddr+LANCE_DATA));
		outw(0x0000, ioaddr+LANCE_DATA);
	}

	/* Fill in a Tx ring entry */

	/* Mask to ring buffer boundary. */
	entry = lp->cur_tx & TX_RING_MOD_MASK;

	/* Caution: the write order is important here, set the base address
	   with the "ownership" bits last. */

	/* The old LANCE chips doesn't automatically pad buffers to min. size. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
		if (skb->len < ETH_ZLEN) {
			/* NOTE: skb_padto() consumes the skb on failure,
			   so a plain goto out is safe here. */
			if (skb_padto(skb, ETH_ZLEN))
				goto out;
			lp->tx_ring[entry].length = -ETH_ZLEN;
		}
		else
			lp->tx_ring[entry].length = -skb->len;
	} else
		/* The chip expects the length as a negative value. */
		lp->tx_ring[entry].length = -skb->len;

	lp->tx_ring[entry].misc = 0x0000;

	dev->stats.tx_bytes += skb->len;

	/* If any part of this buffer is >16M we must copy it to a low-memory
	   buffer. */
	if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
		if (lance_debug > 5)
			printk("%s: bouncing a high-memory packet (%#x).\n",
				   dev->name, (u32)isa_virt_to_bus(skb->data));
		skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
		lp->tx_ring[entry].base =
			((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
		/* Data was copied into the bounce buffer; drop the skb now. */
		dev_kfree_skb(skb);
	} else {
		/* Keep the skb until the Tx-done handler frees it. */
		lp->tx_skbuff[entry] = skb;
		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
	}
	lp->cur_tx++;

	/* Trigger an immediate send poll. */
	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0048, ioaddr+LANCE_DATA);

	/* Stop the queue when the ring is full. */
	if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
		netif_stop_queue(dev);

out:
	spin_unlock_irqrestore(&lp->devlock, flags);
	return NETDEV_TX_OK;
}
/* The LANCE interrupt handler. */
static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp;
	int csr0, ioaddr, boguscnt=10;
	int must_restart;

	ioaddr = dev->base_addr;
	lp = dev->ml_priv;

	spin_lock (&lp->devlock);

	/* Select CSR0 and loop while any interrupt cause bit (0x8600) is
	   raised, bounded by boguscnt to guard against a stuck line. */
	outw(0x00, dev->base_addr + LANCE_ADDR);
	while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
	       --boguscnt >= 0) {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);

		must_restart = 0;

		if (lance_debug > 5)
			printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
				   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));

		if (csr0 & 0x0400)	/* Rx interrupt */
			lance_rx(dev);

		if (csr0 & 0x0200) {	/* Tx-done interrupt */
			int dirty_tx = lp->dirty_tx;

			/* Reap every descriptor the chip has finished with. */
			while (dirty_tx < lp->cur_tx) {
				int entry = dirty_tx & TX_RING_MOD_MASK;
				int status = lp->tx_ring[entry].base;

				/* Negative base means the chip still owns it. */
				if (status < 0)
					break;	/* It still hasn't been Txed */

				lp->tx_ring[entry].base = 0;

				if (status & 0x40000000) {
					/* There was a major error, log it. */
					int err_status = lp->tx_ring[entry].misc;
					dev->stats.tx_errors++;
					if (err_status & 0x0400)
						dev->stats.tx_aborted_errors++;
					if (err_status & 0x0800)
						dev->stats.tx_carrier_errors++;
					if (err_status & 0x1000)
						dev->stats.tx_window_errors++;
					if (err_status & 0x4000) {
						/* Ackk!  On FIFO errors the Tx unit is turned off! */
						dev->stats.tx_fifo_errors++;
						/* Remove this verbosity later! */
						printk("%s: Tx FIFO error! Status %4.4x.\n",
							   dev->name, csr0);
						/* Restart the chip. */
						must_restart = 1;
					}
				} else {
					if (status & 0x18000000)
						dev->stats.collisions++;
					dev->stats.tx_packets++;
				}

				/* We must free the original skb if it's not a data-only copy
				   in the bounce buffer. */
				if (lp->tx_skbuff[entry]) {
					dev_kfree_skb_irq(lp->tx_skbuff[entry]);
					lp->tx_skbuff[entry] = NULL;
				}
				dirty_tx++;
			}

#ifndef final_version
			if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
				printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
					   dirty_tx, lp->cur_tx,
					   netif_queue_stopped(dev) ? "yes" : "no");
				dirty_tx += TX_RING_SIZE;
			}
#endif

			/* if the ring is no longer full, accept more packets */
			if (netif_queue_stopped(dev) &&
			    dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
				netif_wake_queue (dev);

			lp->dirty_tx = dirty_tx;
		}

		/* Log misc errors. */
		if (csr0 & 0x4000)
			dev->stats.tx_errors++; /* Tx babble. */
		if (csr0 & 0x1000)
			dev->stats.rx_errors++; /* Missed a Rx frame. */
		if (csr0 & 0x0800) {
			printk("%s: Bus master arbitration failure, status %4.4x.\n",
				   dev->name, csr0);
			/* Restart the chip. */
			must_restart = 1;
		}
		if (must_restart) {
			/* stop the chip to clear the error condition, then restart */
			outw(0x0000, dev->base_addr + LANCE_ADDR);
			outw(0x0004, dev->base_addr + LANCE_DATA);
			lance_restart(dev, 0x0002, 0);
		}
	}

	/* Clear any other interrupt, and set interrupt enable. */
	outw(0x0000, dev->base_addr + LANCE_ADDR);
	outw(0x7940, dev->base_addr + LANCE_DATA);

	if (lance_debug > 4)
		printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
			   dev->name, inw(ioaddr + LANCE_ADDR),
			   inw(dev->base_addr + LANCE_DATA));

	spin_unlock (&lp->devlock);
	return IRQ_HANDLED;
}
/*
 * Service the Rx ring: copy each completed descriptor's frame into a
 * fresh skb and pass it up the stack, account errors, then hand the
 * descriptor back to the LANCE.  Always returns 0.
 */
static int
lance_rx(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int entry = lp->cur_rx & RX_RING_MOD_MASK;
	int i;

	/* If we own the next entry, it's a new packet. Send it up. */
	while (lp->rx_ring[entry].base >= 0) {
		int status = lp->rx_ring[entry].base >> 24;

		if (status != 0x03) {			/* There was an error. */
			/* There is a tricky error noted by John Murphy,
			   <murf@perftech.com> to Russ Nelson: Even with full-sized
			   buffers it's possible for a jabber packet to use two
			   buffers, with only the last correctly noting the error. */
			if (status & 0x01)	/* Only count a general error at the */
				dev->stats.rx_errors++; /* end of a packet.*/
			if (status & 0x20)
				dev->stats.rx_frame_errors++;
			if (status & 0x10)
				dev->stats.rx_over_errors++;
			if (status & 0x08)
				dev->stats.rx_crc_errors++;
			if (status & 0x04)
				dev->stats.rx_fifo_errors++;
			/* Clear the error/status bits, keep the buffer address. */
			lp->rx_ring[entry].base &= 0x03ffffff;
		}
		else
		{
			/* Malloc up new buffer, compatible with net3. */
			/* msg_length includes the 4-byte FCS, hence the -4. */
			short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
			struct sk_buff *skb;

			if(pkt_len<60)
			{
				printk("%s: Runt packet!\n",dev->name);
				dev->stats.rx_errors++;
			}
			else
			{
				skb = dev_alloc_skb(pkt_len+2);
				if (skb == NULL)
				{
					printk("%s: Memory squeeze, deferring packet.\n", dev->name);
					/* Count consecutive host-owned descriptors
					   ahead of us; if nearly the whole ring is
					   host-owned, drop this frame and recycle its
					   descriptor rather than starving the chip. */
					for (i=0; i < RX_RING_SIZE; i++)
						if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
							break;

					if (i > RX_RING_SIZE -2)
					{
						dev->stats.rx_dropped++;
						lp->rx_ring[entry].base |= 0x80000000;
						lp->cur_rx++;
					}
					break;
				}
				skb_reserve(skb,2);	/* 16 byte align */
				skb_put(skb,pkt_len);	/* Make room */
				skb_copy_to_linear_data(skb,
					(unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
					pkt_len);
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		}
		/* The docs say that the buffer length isn't touched, but Andrew Boyd
		   of QNX reports that some revs of the 79C965 clear it. */
		lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
		/* Return descriptor ownership to the chip. */
		lp->rx_ring[entry].base |= 0x80000000;
		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
	}

	/* We should check that at least two ring entries are free. If not,
	   we should free one and mark stats->rx_dropped++. */

	return 0;
}
/*
 * Bring the interface down: stop the queue, snapshot the missed-frame
 * counter where the chip has one, halt the chip, release the DMA
 * channel and IRQ, and free all ring buffers.
 */
static int
lance_close(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct lance_private *lp = dev->ml_priv;

	netif_stop_queue (dev);

	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
		/* Register 112 holds the missed-frame count. */
		outw(112, ioaddr+LANCE_ADDR);
		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
	}
	outw(0, ioaddr+LANCE_ADDR);

	if (lance_debug > 1)
		printk("%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, inw(ioaddr+LANCE_DATA));

	/* We stop the LANCE here -- it occasionally polls
	   memory if we don't. */
	outw(0x0004, ioaddr+LANCE_DATA);

	if (dev->dma != 4)
	{
		unsigned long flags=claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(flags);
	}
	free_irq(dev->irq, dev);

	lance_purge_ring(dev);

	return 0;
}
static struct net_device_stats *lance_get_stats(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	short ioaddr = dev->base_addr;
	short prev_csr;
	unsigned long flags;

	/* Chips without a missed-frame counter have nothing to refresh. */
	if (!(chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME))
		return &dev->stats;

	/* Register 112 holds the missed-frame count.  Preserve the chip's
	   register pointer around the access so concurrent users are not
	   disturbed. */
	spin_lock_irqsave(&lp->devlock, flags);
	prev_csr = inw(ioaddr + LANCE_ADDR);
	outw(112, ioaddr + LANCE_ADDR);
	dev->stats.rx_missed_errors = inw(ioaddr + LANCE_DATA);
	outw(prev_csr, ioaddr + LANCE_ADDR);
	spin_unlock_irqrestore(&lp->devlock, flags);

	return &dev->stats;
}
/* Set or clear the multicast filter for this adaptor.
*/
/* Program the receive filter: promiscuous, accept-all-multicast, or
   accept-none, then resume normal operation. */
static void set_multicast_list(struct net_device *dev)
{
	short ioaddr = dev->base_addr;

	/* Halt the chip before touching the filter registers. */
	outw(0, ioaddr + LANCE_ADDR);
	outw(0x0004, ioaddr + LANCE_DATA);	/* Temporarily stop the lance. */

	if (dev->flags & IFF_PROMISC) {
		/* Register 15, bit 15: promiscuous reception. */
		outw(15, ioaddr + LANCE_ADDR);
		outw(0x8000, ioaddr + LANCE_DATA);	/* Set promiscuous mode */
	} else {
		short filter[4];
		int fill = netdev_mc_count(dev);
		int word;

		if (dev->flags & IFF_ALLMULTI)
			fill = 1;
		/* All-ones accepts every multicast frame, all-zeroes accepts
		   none; fine-grained hash filtering is left to upper layers. */
		memset(filter, fill ? -1 : 0, sizeof(filter));
		for (word = 0; word < 4; word++) {
			outw(8 + word, ioaddr + LANCE_ADDR);
			outw(filter[word], ioaddr + LANCE_DATA);
		}
		/* Make sure promiscuous mode is switched off. */
		outw(15, ioaddr + LANCE_ADDR);
		outw(0x0000, ioaddr + LANCE_DATA);	/* Unset promiscuous mode */
	}

	lance_restart(dev, 0x0142, 0); /* Resume normal operation */
}
| gpl-2.0 |
schuhumi/i9100-proper-linux-kernel | sound/soc/ep93xx/ep93xx-ac97.c | 2528 | 11766 | /*
* ASoC driver for Cirrus Logic EP93xx AC97 controller.
*
* Copyright (c) 2010 Mika Westerberg
*
* Based on s3c-ac97 ASoC driver by Jaswinder Singh.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/ac97_codec.h>
#include <sound/soc.h>
#include <mach/dma.h>
#include "ep93xx-pcm.h"
/*
* Per channel (1-4) registers.
*/
#define AC97CH(n) (((n) - 1) * 0x20)
#define AC97DR(n) (AC97CH(n) + 0x0000)
#define AC97RXCR(n) (AC97CH(n) + 0x0004)
#define AC97RXCR_REN BIT(0)
#define AC97RXCR_RX3 BIT(3)
#define AC97RXCR_RX4 BIT(4)
#define AC97RXCR_CM BIT(15)
#define AC97TXCR(n) (AC97CH(n) + 0x0008)
#define AC97TXCR_TEN BIT(0)
#define AC97TXCR_TX3 BIT(3)
#define AC97TXCR_TX4 BIT(4)
#define AC97TXCR_CM BIT(15)
#define AC97SR(n) (AC97CH(n) + 0x000c)
#define AC97SR_TXFE BIT(1)
#define AC97SR_TXUE BIT(6)
#define AC97RISR(n) (AC97CH(n) + 0x0010)
#define AC97ISR(n) (AC97CH(n) + 0x0014)
#define AC97IE(n) (AC97CH(n) + 0x0018)
/*
* Global AC97 controller registers.
*/
#define AC97S1DATA 0x0080
#define AC97S2DATA 0x0084
#define AC97S12DATA 0x0088
#define AC97RGIS 0x008c
#define AC97GIS 0x0090
#define AC97IM 0x0094
/*
* Common bits for RGIS, GIS and IM registers.
*/
#define AC97_SLOT2RXVALID BIT(1)
#define AC97_CODECREADY BIT(5)
#define AC97_SLOT2TXCOMPLETE BIT(6)
#define AC97EOI 0x0098
#define AC97EOI_WINT BIT(0)
#define AC97EOI_CODECREADY BIT(1)
#define AC97GCR 0x009c
#define AC97GCR_AC97IFE BIT(0)
#define AC97RESET 0x00a0
#define AC97RESET_TIMEDRESET BIT(0)
#define AC97SYNC 0x00a4
#define AC97SYNC_TIMEDSYNC BIT(0)
#define AC97_TIMEOUT msecs_to_jiffies(5)
/**
* struct ep93xx_ac97_info - EP93xx AC97 controller info structure
* @lock: mutex serializing access to the bus (slot 1 & 2 ops)
* @dev: pointer to the platform device dev structure
* @mem: physical memory resource for the registers
* @regs: mapped AC97 controller registers
* @irq: AC97 interrupt number
* @done: bus ops wait here for an interrupt
*/
struct ep93xx_ac97_info {
struct mutex lock;
struct device *dev;
struct resource *mem;
void __iomem *regs;
int irq;
struct completion done;
};
/* currently ALSA only supports a single AC97 device */
static struct ep93xx_ac97_info *ep93xx_ac97_info;
/* DMA routing for the two PCM directions; both use the AAC1 M2P port. */
static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_out = {
	.name		= "ac97-pcm-out",
	.dma_port	= EP93XX_DMA_M2P_PORT_AAC1,
};

static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_in = {
	.name		= "ac97-pcm-in",
	.dma_port	= EP93XX_DMA_M2P_PORT_AAC1,
};
/* Read a 32-bit AC97 controller register at byte offset @reg. */
static inline unsigned ep93xx_ac97_read_reg(struct ep93xx_ac97_info *info,
					    unsigned reg)
{
	return __raw_readl(info->regs + reg);
}
/* Write @val to the 32-bit AC97 controller register at byte offset @reg. */
static inline void ep93xx_ac97_write_reg(struct ep93xx_ac97_info *info,
					 unsigned reg, unsigned val)
{
	__raw_writel(val, info->regs + reg);
}
/*
 * Read a codec register over the AC97 link.  The register index is
 * written to slot 1; AC97_SLOT2RXVALID fires when the codec's reply
 * has landed in the slot 2 data register.
 *
 * NOTE(review): on timeout this returns -ETIMEDOUT through an
 * unsigned short, so callers actually observe the truncated value
 * 0xff92 rather than a negative errno — confirm callers treat any
 * such value as a failed read.
 */
static unsigned short ep93xx_ac97_read(struct snd_ac97 *ac97,
				       unsigned short reg)
{
	struct ep93xx_ac97_info *info = ep93xx_ac97_info;
	unsigned short val;

	mutex_lock(&info->lock);

	ep93xx_ac97_write_reg(info, AC97S1DATA, reg);
	/* Unmask the slot-2-valid interrupt; the ISR masks it again and
	   completes info->done. */
	ep93xx_ac97_write_reg(info, AC97IM, AC97_SLOT2RXVALID);
	if (!wait_for_completion_timeout(&info->done, AC97_TIMEOUT)) {
		dev_warn(info->dev, "timeout reading register %x\n", reg);
		mutex_unlock(&info->lock);
		return -ETIMEDOUT;
	}
	val = (unsigned short)ep93xx_ac97_read_reg(info, AC97S2DATA);

	mutex_unlock(&info->lock);

	return val;
}
/*
 * Write @val to codec register @reg over the AC97 link.
 * AC97_SLOT2TXCOMPLETE signals that the frame went out; a timeout is
 * only warned about, not propagated.
 */
static void ep93xx_ac97_write(struct snd_ac97 *ac97,
			      unsigned short reg,
			      unsigned short val)
{
	struct ep93xx_ac97_info *info = ep93xx_ac97_info;

	mutex_lock(&info->lock);

	/*
	 * Writes to the codec need to be done so that slot 2 is filled in
	 * before slot 1.
	 */
	ep93xx_ac97_write_reg(info, AC97S2DATA, val);
	ep93xx_ac97_write_reg(info, AC97S1DATA, reg);

	ep93xx_ac97_write_reg(info, AC97IM, AC97_SLOT2TXCOMPLETE);
	if (!wait_for_completion_timeout(&info->done, AC97_TIMEOUT))
		dev_warn(info->dev, "timeout writing register %x\n", reg);

	mutex_unlock(&info->lock);
}
/*
 * AC97 warm reset: pulse SYNC via the controller's timed-sync feature
 * and wait for the codec-ready interrupt.
 */
static void ep93xx_ac97_warm_reset(struct snd_ac97 *ac97)
{
	struct ep93xx_ac97_info *info = ep93xx_ac97_info;

	mutex_lock(&info->lock);

	/*
	 * We are assuming that before this functions gets called, the codec
	 * BIT_CLK is stopped by forcing the codec into powerdown mode. We can
	 * control the SYNC signal directly via AC97SYNC register. Using
	 * TIMEDSYNC the controller will keep the SYNC high > 1us.
	 */
	ep93xx_ac97_write_reg(info, AC97SYNC, AC97SYNC_TIMEDSYNC);
	ep93xx_ac97_write_reg(info, AC97IM, AC97_CODECREADY);
	if (!wait_for_completion_timeout(&info->done, AC97_TIMEOUT))
		dev_warn(info->dev, "codec warm reset timeout\n");

	mutex_unlock(&info->lock);
}
/*
 * AC97 cold reset: cycle the controller interface, assert the timed
 * reset line, wait for codec-ready, then give the codec time to
 * settle before the first register access.
 */
static void ep93xx_ac97_cold_reset(struct snd_ac97 *ac97)
{
	struct ep93xx_ac97_info *info = ep93xx_ac97_info;

	mutex_lock(&info->lock);

	/*
	 * For doing cold reset, we disable the AC97 controller interface, clear
	 * WINT and CODECREADY bits, and finally enable the interface again.
	 */
	ep93xx_ac97_write_reg(info, AC97GCR, 0);
	ep93xx_ac97_write_reg(info, AC97EOI, AC97EOI_CODECREADY | AC97EOI_WINT);
	ep93xx_ac97_write_reg(info, AC97GCR, AC97GCR_AC97IFE);

	/*
	 * Now, assert the reset and wait for the codec to become ready.
	 */
	ep93xx_ac97_write_reg(info, AC97RESET, AC97RESET_TIMEDRESET);
	ep93xx_ac97_write_reg(info, AC97IM, AC97_CODECREADY);
	if (!wait_for_completion_timeout(&info->done, AC97_TIMEOUT))
		dev_warn(info->dev, "codec cold reset timeout\n");

	/*
	 * Give the codec some time to come fully out from the reset. This way
	 * we ensure that the subsequent reads/writes will work.
	 */
	usleep_range(15000, 20000);

	mutex_unlock(&info->lock);
}
static irqreturn_t ep93xx_ac97_interrupt(int irq, void *dev_id)
{
	struct ep93xx_ac97_info *info = dev_id;
	unsigned pending, enabled;

	/*
	 * The raised sources are simply masked off here; the thread that
	 * is blocked on info->done clears the underlying condition by
	 * reading/writing the slot 1 & 2 registers itself.
	 */
	pending = ep93xx_ac97_read_reg(info, AC97GIS);
	enabled = ep93xx_ac97_read_reg(info, AC97IM);
	ep93xx_ac97_write_reg(info, AC97IM, enabled & ~pending);

	/* Wake whoever is waiting for the bus operation to finish. */
	complete(&info->done);

	return IRQ_HANDLED;
}
/* Bus operations handed to the AC97 core; the ASoC core expects a
   globally visible soc_ac97_ops symbol, hence the export. */
struct snd_ac97_bus_ops soc_ac97_ops = {
	.read		= ep93xx_ac97_read,
	.write		= ep93xx_ac97_write,
	.reset		= ep93xx_ac97_cold_reset,
	.warm_reset	= ep93xx_ac97_warm_reset,
};
EXPORT_SYMBOL_GPL(soc_ac97_ops);
/*
 * PCM trigger callback: enable or disable the channel 1 TX/RX FIFO in
 * compact mode on slots 3 & 4.  Stopping playback first drains the TX
 * FIFO to work around a Cirrus EP93xx erratum.
 *
 * Returns 0 on success, -EINVAL for an unknown trigger command.
 */
static int ep93xx_ac97_trigger(struct snd_pcm_substream *substream,
			       int cmd, struct snd_soc_dai *dai)
{
	struct ep93xx_ac97_info *info = snd_soc_dai_get_drvdata(dai);
	unsigned v = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			/*
			 * Enable compact mode, TX slots 3 & 4, and the TX FIFO
			 * itself.
			 */
			v |= AC97TXCR_CM;
			v |= AC97TXCR_TX3 | AC97TXCR_TX4;
			v |= AC97TXCR_TEN;
			ep93xx_ac97_write_reg(info, AC97TXCR(1), v);
		} else {
			/*
			 * Enable compact mode, RX slots 3 & 4, and the RX FIFO
			 * itself.
			 */
			v |= AC97RXCR_CM;
			v |= AC97RXCR_RX3 | AC97RXCR_RX4;
			v |= AC97RXCR_REN;
			ep93xx_ac97_write_reg(info, AC97RXCR(1), v);
		}
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
			/*
			 * As per Cirrus EP93xx errata described below:
			 *
			 * http://www.cirrus.com/en/pubs/errata/ER667E2B.pdf
			 *
			 * we will wait for the TX FIFO to be empty before
			 * clearing the TEN bit.
			 */
			unsigned long timeout = jiffies + AC97_TIMEOUT;

			do {
				v = ep93xx_ac97_read_reg(info, AC97SR(1));
				if (time_after(jiffies, timeout)) {
					dev_warn(info->dev, "TX timeout\n");
					break;
				}
			} while (!(v & (AC97SR_TXFE | AC97SR_TXUE)));

			/* disable the TX FIFO */
			ep93xx_ac97_write_reg(info, AC97TXCR(1), 0);
		} else {
			/* disable the RX FIFO */
			ep93xx_ac97_write_reg(info, AC97RXCR(1), 0);
		}
		break;

	default:
		dev_warn(info->dev, "unknown command %d\n", cmd);
		return -EINVAL;
	}

	return 0;
}
static int ep93xx_ac97_startup(struct snd_pcm_substream *substream,
			       struct snd_soc_dai *dai)
{
	struct ep93xx_pcm_dma_params *dma_data;

	/* Hand the PCM layer the DMA channel matching the stream direction. */
	dma_data = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		? &ep93xx_ac97_pcm_out
		: &ep93xx_ac97_pcm_in;
	snd_soc_dai_set_dma_data(dai, substream, dma_data);

	return 0;
}
/* DAI callbacks: startup selects the DMA channel, trigger gates the FIFOs. */
static struct snd_soc_dai_ops ep93xx_ac97_dai_ops = {
	.startup	= ep93xx_ac97_startup,
	.trigger	= ep93xx_ac97_trigger,
};
/* AC97 DAI description: stereo 16-bit LE playback and capture,
   8-48 kHz, registered with the ASoC core at probe time. */
struct snd_soc_dai_driver ep93xx_ac97_dai = {
	.name		= "ep93xx-ac97",
	.id		= 0,
	.ac97_control	= 1,
	.playback	= {
		.stream_name	= "AC97 Playback",
		.channels_min	= 2,
		.channels_max	= 2,
		.rates		= SNDRV_PCM_RATE_8000_48000,
		.formats	= SNDRV_PCM_FMTBIT_S16_LE,
	},
	.capture	= {
		.stream_name	= "AC97 Capture",
		.channels_min	= 2,
		.channels_max	= 2,
		.rates		= SNDRV_PCM_RATE_8000_48000,
		.formats	= SNDRV_PCM_FMTBIT_S16_LE,
	},
	.ops		= &ep93xx_ac97_dai_ops,
};
/*
 * Probe: map the controller registers, hook up the interrupt and
 * register the AC97 DAI with the ASoC core.  All failure paths unwind
 * via goto labels in reverse acquisition order.
 */
static int __devinit ep93xx_ac97_probe(struct platform_device *pdev)
{
	struct ep93xx_ac97_info *info;
	int ret;

	info = kzalloc(sizeof(struct ep93xx_ac97_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	dev_set_drvdata(&pdev->dev, info);

	mutex_init(&info->lock);
	init_completion(&info->done);
	info->dev = &pdev->dev;

	info->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!info->mem) {
		ret = -ENXIO;
		goto fail_free_info;
	}

	/*
	 * platform_get_irq() reports failure as a negative errno (0 also
	 * means "no IRQ"); the old "!info->irq" test let negative error
	 * values slip through to request_irq().
	 */
	info->irq = platform_get_irq(pdev, 0);
	if (info->irq <= 0) {
		ret = info->irq < 0 ? info->irq : -ENXIO;
		goto fail_free_info;
	}

	if (!request_mem_region(info->mem->start, resource_size(info->mem),
				pdev->name)) {
		ret = -EBUSY;
		goto fail_free_info;
	}

	info->regs = ioremap(info->mem->start, resource_size(info->mem));
	if (!info->regs) {
		ret = -ENOMEM;
		goto fail_release_mem;
	}

	ret = request_irq(info->irq, ep93xx_ac97_interrupt, IRQF_TRIGGER_HIGH,
			  pdev->name, info);
	if (ret)
		goto fail_unmap_mem;

	/* Publish the (single) controller for the bus ops and the DAI. */
	ep93xx_ac97_info = info;
	platform_set_drvdata(pdev, info);

	ret = snd_soc_register_dai(&pdev->dev, &ep93xx_ac97_dai);
	if (ret)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	platform_set_drvdata(pdev, NULL);
	free_irq(info->irq, info);
fail_unmap_mem:
	iounmap(info->regs);
fail_release_mem:
	release_mem_region(info->mem->start, resource_size(info->mem));
fail_free_info:
	kfree(info);

	return ret;
}
/*
 * Tear down everything acquired in probe: unregister the DAI, disable
 * the controller interface, then release IRQ, MMIO mapping, memory
 * region and the info structure.
 */
static int __devexit ep93xx_ac97_remove(struct platform_device *pdev)
{
	struct ep93xx_ac97_info	*info = platform_get_drvdata(pdev);

	snd_soc_unregister_dai(&pdev->dev);

	/* disable the AC97 controller */
	ep93xx_ac97_write_reg(info, AC97GCR, 0);

	free_irq(info->irq, info);
	iounmap(info->regs);
	release_mem_region(info->mem->start, resource_size(info->mem));
	platform_set_drvdata(pdev, NULL);
	kfree(info);

	return 0;
}
/* Platform driver glue; binds to devices named "ep93xx-ac97". */
static struct platform_driver ep93xx_ac97_driver = {
	.probe	= ep93xx_ac97_probe,
	.remove	= __devexit_p(ep93xx_ac97_remove),
	.driver = {
		.name	= "ep93xx-ac97",
		.owner	= THIS_MODULE,
	},
};
/* Register the platform driver at module load. */
static int __init ep93xx_ac97_init(void)
{
	return platform_driver_register(&ep93xx_ac97_driver);
}
module_init(ep93xx_ac97_init);
/* Unregister the platform driver at module unload. */
static void __exit ep93xx_ac97_exit(void)
{
	platform_driver_unregister(&ep93xx_ac97_driver);
}
module_exit(ep93xx_ac97_exit);
MODULE_DESCRIPTION("EP93xx AC97 ASoC Driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-ac97");
| gpl-2.0 |
yinquan529/pandaboard | fs/cramfs/inode.c | 2784 | 14823 | /*
* Compressed rom filesystem for Linux.
*
* Copyright (C) 1999 Linus Torvalds.
*
* This file is released under the GPL.
*/
/*
* These are the VFS interfaces to the compressed rom filesystem.
* The actual compression is based on zlib, see the other files.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/blkdev.h>
#include <linux/cramfs_fs.h>
#include <linux/slab.h>
#include <linux/cramfs_fs_sb.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
static const struct super_operations cramfs_ops;
static const struct inode_operations cramfs_dir_inode_operations;
static const struct file_operations cramfs_directory_operations;
static const struct address_space_operations cramfs_aops;
static DEFINE_MUTEX(read_mutex);
/* These macros may change in future, to provide better st_ino semantics. */
#define OFFSET(x) ((x)->i_ino)
/* Derive the VFS inode number for a cramfs on-disk inode.  Inodes with
   real data use their (shifted) data offset; everything else gets a
   synthetic number from its directory position. */
static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
{
	unsigned int fmt = cino->mode & S_IFMT;

	/* No offset or no data: the offset field cannot serve as a
	   stable inode number, so synthesize one. */
	if (!cino->offset || !cino->size)
		return offset + 1;

	/*
	 * The file mode test fixes buggy mkcramfs implementations where
	 * cramfs_inode->offset is set to a non zero value for entries
	 * which did not contain data, like devices node and fifos.
	 */
	if (fmt == S_IFREG || fmt == S_IFDIR || fmt == S_IFLNK)
		return cino->offset << 2;

	return offset + 1;
}
/*
 * Look up (or create) the VFS inode for a cramfs on-disk inode.
 * A newly allocated inode is filled in from the compact cramfs_inode
 * layout; an already-cached one is returned untouched.  Returns an
 * ERR_PTR on allocation failure.
 */
static struct inode *get_cramfs_inode(struct super_block *sb,
	const struct cramfs_inode *cramfs_inode, unsigned int offset)
{
	struct inode *inode;
	static struct timespec zerotime;

	inode = iget_locked(sb, cramino(cramfs_inode, offset));
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;	/* cache hit: already initialized */

	switch (cramfs_inode->mode & S_IFMT) {
	case S_IFREG:
		inode->i_fop = &generic_ro_fops;
		inode->i_data.a_ops = &cramfs_aops;
		break;
	case S_IFDIR:
		inode->i_op = &cramfs_dir_inode_operations;
		inode->i_fop = &cramfs_directory_operations;
		break;
	case S_IFLNK:
		inode->i_op = &page_symlink_inode_operations;
		inode->i_data.a_ops = &cramfs_aops;
		break;
	default:
		/* Special files encode the device number in "size". */
		init_special_inode(inode, cramfs_inode->mode,
				old_decode_dev(cramfs_inode->size));
	}

	inode->i_mode = cramfs_inode->mode;
	inode->i_uid = cramfs_inode->uid;
	inode->i_gid = cramfs_inode->gid;

	/* if the lower 2 bits are zero, the inode contains data */
	if (!(inode->i_ino & 3)) {
		inode->i_size = cramfs_inode->size;
		inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
	}

	/* Struct copy intentional */
	inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
	/* inode->i_nlink is left 1 - arguably wrong for directories,
	   but it's the best we can do without reading the directory
	   contents.  1 yields the right result in GNU find, even
	   without -noleaf option. */

	unlock_new_inode(inode);
	return inode;
}
/*
* We have our own block cache: don't fill up the buffer cache
* with the rom-image, because the way the filesystem is set
* up the accesses should be fairly regular and cached in the
* page cache and dentry tree anyway..
*
* This also acts as a way to guarantee contiguous areas of up to
* BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to
* worry about end-of-buffer issues even when decompressing a full
* page cache.
*/
#define READ_BUFFERS (2)
/* NEXT_BUFFER(): Loop over [0..(READ_BUFFERS-1)]. */
#define NEXT_BUFFER(_ix) ((_ix) ^ 1)

/*
 * BLKS_PER_BUF_SHIFT should be at least 2 to allow for "compressed"
 * data that takes up more space than the original and with unlucky
 * alignment.
 */
#define BLKS_PER_BUF_SHIFT (2)
#define BLKS_PER_BUF (1 << BLKS_PER_BUF_SHIFT)
#define BUFFER_SIZE (BLKS_PER_BUF*PAGE_CACHE_SIZE)

/* Private block cache backing cramfs_read(); all access under read_mutex */
static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
/* First device block held in each buffer (set to -1 at mount time) */
static unsigned buffer_blocknr[READ_BUFFERS];
/* Superblock each buffer's contents belong to */
static struct super_block * buffer_dev[READ_BUFFERS];
/* Round-robin index of the next buffer to recycle */
static int next_buffer;
/*
* Returns a pointer to a buffer containing at least LEN bytes of
* filesystem starting at byte offset OFFSET into the filesystem.
*/
/*
 * Return a pointer to at least @len bytes of filesystem data starting at
 * byte @offset, reading through the private two-buffer cache above.
 * Returns NULL only for len == 0.  Every caller in this file holds
 * read_mutex across the call and any use of the returned pointer, since
 * the buffer may be recycled by the next call.
 */
static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned int len)
{
	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *pages[BLKS_PER_BUF];
	unsigned i, blocknr, buffer;
	unsigned long devsize;
	char *data;

	if (!len)
		return NULL;
	/* Split into a device page number and an intra-page offset */
	blocknr = offset >> PAGE_CACHE_SHIFT;
	offset &= PAGE_CACHE_SIZE - 1;

	/* Check if an existing buffer already has the data.. */
	for (i = 0; i < READ_BUFFERS; i++) {
		unsigned int blk_offset;

		if (buffer_dev[i] != sb)
			continue;
		if (blocknr < buffer_blocknr[i])
			continue;
		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
		blk_offset += offset;
		/* requested span must lie entirely inside the buffer */
		if (blk_offset + len > BUFFER_SIZE)
			continue;
		return read_buffers[i] + blk_offset;
	}

	devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT;

	/* Ok, read in BLKS_PER_BUF pages completely first. */
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = NULL;

		if (blocknr + i < devsize) {
			page = read_mapping_page_async(mapping, blocknr + i,
					NULL);
			/* synchronous error? */
			if (IS_ERR(page))
				page = NULL;
		}
		pages[i] = page;
	}

	/* Wait for the async reads; discard pages that failed to come up */
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];

		if (page) {
			wait_on_page_locked(page);
			if (!PageUptodate(page)) {
				/* asynchronous error */
				page_cache_release(page);
				pages[i] = NULL;
			}
		}
	}

	/* Recycle the next buffer slot (simple round-robin replacement) */
	buffer = next_buffer;
	next_buffer = NEXT_BUFFER(buffer);
	buffer_blocknr[buffer] = blocknr;
	buffer_dev[buffer] = sb;

	/* Copy the page contents in; unreadable pages become zeroes */
	data = read_buffers[buffer];
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];

		if (page) {
			memcpy(data, kmap(page), PAGE_CACHE_SIZE);
			kunmap(page);
			page_cache_release(page);
		} else
			memset(data, 0, PAGE_CACHE_SIZE);
		data += PAGE_CACHE_SIZE;
	}
	return read_buffers[buffer] + offset;
}
/* Tear-down at unmount: release the per-superblock info from mount time. */
static void cramfs_put_super(struct super_block *sb)
{
	struct cramfs_sb_info *sbi = sb->s_fs_info;

	sb->s_fs_info = NULL;
	kfree(sbi);
}
/* cramfs is read-only by construction; any remount stays read-only. */
static int cramfs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_RDONLY;
	return 0;
}
/*
 * Mount-time superblock setup: validate the on-disk cramfs superblock
 * (checking both offset 0 and the 512-byte boot-block variant), populate
 * the in-memory cramfs_sb_info, and instantiate the root dentry.
 * Returns 0 on success, -ENOMEM or -EINVAL on failure.
 */
static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int i;
	struct cramfs_super super;
	unsigned long root_offset;
	struct cramfs_sb_info *sbi;
	struct inode *root;

	sb->s_flags |= MS_RDONLY;

	sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;

	/* Invalidate the read buffers on mount: think disk change.. */
	mutex_lock(&read_mutex);
	for (i = 0; i < READ_BUFFERS; i++)
		buffer_blocknr[i] = -1;

	/* Read the first block and get the superblock from it */
	memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super));
	mutex_unlock(&read_mutex);

	/* Do sanity checks on the superblock */
	if (super.magic != CRAMFS_MAGIC) {
		/* check for wrong endianess */
		if (super.magic == CRAMFS_MAGIC_WEND) {
			if (!silent)
				printk(KERN_ERR "cramfs: wrong endianess\n");
			goto out;
		}

		/* check at 512 byte offset (image behind a boot block) */
		mutex_lock(&read_mutex);
		memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super));
		mutex_unlock(&read_mutex);
		if (super.magic != CRAMFS_MAGIC) {
			if (super.magic == CRAMFS_MAGIC_WEND && !silent)
				printk(KERN_ERR "cramfs: wrong endianess\n");
			else if (!silent)
				printk(KERN_ERR "cramfs: wrong magic\n");
			goto out;
		}
	}

	/* get feature flags first */
	if (super.flags & ~CRAMFS_SUPPORTED_FLAGS) {
		printk(KERN_ERR "cramfs: unsupported filesystem features\n");
		goto out;
	}

	/* Check that the root inode is in a sane state */
	if (!S_ISDIR(super.root.mode)) {
		printk(KERN_ERR "cramfs: root is not a directory\n");
		goto out;
	}
	/* correct strange, hard-coded permissions of mkcramfs */
	super.root.mode |= (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);

	root_offset = super.root.offset << 2;
	if (super.flags & CRAMFS_FLAG_FSID_VERSION_2) {
		sbi->size=super.size;
		sbi->blocks=super.fsid.blocks;
		sbi->files=super.fsid.files;
	} else {
		/* v1 images carry no fsid block; 1<<28 is the format maximum */
		sbi->size=1<<28;
		sbi->blocks=0;
		sbi->files=0;
	}
	sbi->magic=super.magic;
	sbi->flags=super.flags;
	if (root_offset == 0)
		printk(KERN_INFO "cramfs: empty filesystem");
	else if (!(super.flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
		 ((root_offset != sizeof(struct cramfs_super)) &&
		  (root_offset != 512 + sizeof(struct cramfs_super))))
	{
		printk(KERN_ERR "cramfs: bad root offset %lu\n", root_offset);
		goto out;
	}

	/* Set it all up.. */
	sb->s_op = &cramfs_ops;
	root = get_cramfs_inode(sb, &super.root, 0);
	if (IS_ERR(root))
		goto out;
	sb->s_root = d_alloc_root(root);
	if (!sb->s_root) {
		iput(root);
		goto out;
	}
	return 0;
out:
	kfree(sbi);
	sb->s_fs_info = NULL;
	return -EINVAL;
}
static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
buf->f_type = CRAMFS_MAGIC;
buf->f_bsize = PAGE_CACHE_SIZE;
buf->f_blocks = CRAMFS_SB(sb)->blocks;
buf->f_bfree = 0;
buf->f_bavail = 0;
buf->f_files = CRAMFS_SB(sb)->files;
buf->f_ffree = 0;
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
buf->f_namelen = CRAMFS_MAXPATHLEN;
return 0;
}
/*
* Read a cramfs directory entry.
*/
/*
 * Iterate directory entries starting at filp->f_pos, emitting each one
 * through @filldir.  Names are copied into a scratch buffer while
 * read_mutex is held, since the cramfs_read() window may be recycled.
 */
static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	char *buf;
	unsigned int offset;
	int copied;

	/* Offset within the thing. */
	offset = filp->f_pos;
	if (offset >= inode->i_size)
		return 0;
	/* Directory entries are always 4-byte aligned */
	if (offset & 3)
		return -EINVAL;

	buf = kmalloc(CRAMFS_MAXPATHLEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	while (offset < inode->i_size) {
		struct cramfs_inode *de;
		unsigned long nextoffset;
		char *name;
		ino_t ino;
		mode_t mode;
		int namelen, error;

		mutex_lock(&read_mutex);
		de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
		name = (char *)(de+1);

		/*
		 * Namelengths on disk are shifted by two
		 * and the name padded out to 4-byte boundaries
		 * with zeroes.
		 */
		namelen = de->namelen << 2;
		memcpy(buf, name, namelen);
		ino = cramino(de, OFFSET(inode) + offset);
		mode = de->mode;
		mutex_unlock(&read_mutex);
		nextoffset = offset + sizeof(*de) + namelen;
		/* Strip trailing NUL padding; an all-zero name means corruption */
		for (;;) {
			if (!namelen) {
				kfree(buf);
				return -EIO;
			}
			if (buf[namelen-1])
				break;
			namelen--;
		}
		error = filldir(dirent, buf, namelen, offset, ino, mode >> 12);
		if (error)
			break;

		offset = nextoffset;
		filp->f_pos = offset;
		copied++;
	}
	kfree(buf);
	return 0;
}
/*
* Lookup and fill in the inode data..
*/
/*
 * Scan the directory for @dentry's name and attach the matching inode
 * (or a negative entry when not found).  Directories written with
 * CRAMFS_FLAG_SORTED_DIRS allow an early exit once the scan passes the
 * target's first character.
 */
static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	unsigned int offset = 0;
	struct inode *inode = NULL;
	int sorted;

	mutex_lock(&read_mutex);
	sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
	while (offset < dir->i_size) {
		struct cramfs_inode *de;
		char *name;
		int namelen, retval;
		int dir_off = OFFSET(dir) + offset;

		de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN);
		name = (char *)(de+1);

		/* Try to take advantage of sorted directories */
		if (sorted && (dentry->d_name.name[0] < name[0]))
			break;

		namelen = de->namelen << 2;
		offset += sizeof(*de) + namelen;

		/* Quick check that the name is roughly the right length */
		if (((dentry->d_name.len + 3) & ~3) != namelen)
			continue;

		/* Strip trailing NUL padding; all-zero name is corruption */
		for (;;) {
			if (!namelen) {
				inode = ERR_PTR(-EIO);
				goto out;
			}
			if (name[namelen-1])
				break;
			namelen--;
		}
		if (namelen != dentry->d_name.len)
			continue;
		retval = memcmp(dentry->d_name.name, name, namelen);
		if (retval > 0)
			continue;
		if (!retval) {
			inode = get_cramfs_inode(dir->i_sb, de, dir_off);
			break;
		}
		/* else (retval < 0) */
		if (sorted)
			break;
	}
out:
	mutex_unlock(&read_mutex);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	d_add(dentry, inode);
	return NULL;
}
/*
 * Read one page of a regular file or symlink: locate the compressed
 * block via the per-inode block-pointer table, decompress it into the
 * page, and zero-fill any remainder.  Holes (zero-length compressed
 * blocks) decompress to all zeroes.
 */
static int cramfs_readpage(struct file *file, struct page * page)
{
	struct inode *inode = page->mapping->host;
	u32 maxblock;
	int bytes_filled;
	void *pgdata;

	maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	bytes_filled = 0;
	pgdata = kmap(page);

	if (page->index < maxblock) {
		struct super_block *sb = inode->i_sb;
		u32 blkptr_offset = OFFSET(inode) + page->index*4;
		u32 start_offset, compr_len;

		/* Data begins right after the maxblock-entry pointer table */
		start_offset = OFFSET(inode) + maxblock*4;
		mutex_lock(&read_mutex);
		/* Pages after the first start where the previous block ended */
		if (page->index)
			start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4,
				4);
		compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) -
			start_offset);
		mutex_unlock(&read_mutex);

		if (compr_len == 0)
			; /* hole */
		else if (unlikely(compr_len > (PAGE_CACHE_SIZE << 1))) {
			/* corrupt image: reject implausible block sizes */
			pr_err("cramfs: bad compressed blocksize %u\n",
				compr_len);
			goto err;
		} else {
			mutex_lock(&read_mutex);
			bytes_filled = cramfs_uncompress_block(pgdata,
				 PAGE_CACHE_SIZE,
				 cramfs_read(sb, start_offset, compr_len),
				 compr_len);
			mutex_unlock(&read_mutex);
			if (unlikely(bytes_filled < 0))
				goto err;
		}
	}

	memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled);
	flush_dcache_page(page);
	kunmap(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;

err:
	kunmap(page);
	ClearPageUptodate(page);
	SetPageError(page);
	unlock_page(page);
	return 0;
}
/* Page reads decompress on demand; the filesystem is never written. */
static const struct address_space_operations cramfs_aops = {
	.readpage = cramfs_readpage
};
/*
* Our operations:
*/
/*
* A directory can only readdir
*/
/* Directory file ops: read-only iteration, no writes. */
static const struct file_operations cramfs_directory_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= cramfs_readdir,
};

/* Directory inode ops: lookup is the only namespace operation. */
static const struct inode_operations cramfs_dir_inode_operations = {
	.lookup		= cramfs_lookup,
};

/* Superblock ops: teardown, forced-read-only remount, and statfs. */
static const struct super_operations cramfs_ops = {
	.put_super	= cramfs_put_super,
	.remount_fs	= cramfs_remount,
	.statfs		= cramfs_statfs,
};
/* Mount entry point: delegate to the generic block-device mount helper. */
static struct dentry *cramfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, cramfs_fill_super);
}
/* Filesystem registration record; FS_REQUIRES_DEV: always backed by a bdev. */
static struct file_system_type cramfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "cramfs",
	.mount		= cramfs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
/* Module init: set up the decompressor, then register the filesystem. */
static int __init init_cramfs_fs(void)
{
	int err;

	err = cramfs_uncompress_init();
	if (err < 0)
		return err;

	err = register_filesystem(&cramfs_fs_type);
	if (err < 0)
		cramfs_uncompress_exit();	/* undo decompressor setup */
	return err;
}
/* Module exit: release the decompressor and drop the fs registration. */
static void __exit exit_cramfs_fs(void)
{
	cramfs_uncompress_exit();
	unregister_filesystem(&cramfs_fs_type);
}
module_init(init_cramfs_fs)
module_exit(exit_cramfs_fs)
MODULE_LICENSE("GPL");
| gpl-2.0 |
StarKissed/recovery-cwm-ekgc100 | drivers/video/via/global.c | 2784 | 1665 | /*
* Copyright 1998-2008 VIA Technologies, Inc. All Rights Reserved.
* Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved.
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation;
* either version 2, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even
* the implied warranty of MERCHANTABILITY or FITNESS FOR
* A PARTICULAR PURPOSE.See the GNU General Public License
* for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "global.h"
/* Hardware layout / wiring configuration */
int viafb_platform_epia_dvi = STATE_OFF;
int viafb_device_lcd_dualedge = STATE_OFF;
int viafb_bus_width = 12;
int viafb_display_hardware_layout = HW_LAYOUT_LCD_DVI;
int viafb_DeviceStatus = CRT_Device;
int viafb_hotplug;

/* Refresh rates for the primary and secondary heads */
int viafb_refresh = 60;
int viafb_refresh1 = 60;

/* LCD panel handling */
int viafb_lcd_dsp_method = LCD_EXPANDSION;
int viafb_lcd_mode = LCD_OPENLDI;

/* Output enables: only CRT is on by default */
int viafb_CRT_ON = 1;
int viafb_DVI_ON;
int viafb_LCD_ON;
int viafb_LCD2_ON;
int viafb_SAMM_ON;
int viafb_dual_fb;

/* Mode used when a hotplugged display is detected */
int viafb_hotplug_Xres = 640;
int viafb_hotplug_Yres = 480;
int viafb_hotplug_bpp = 32;
int viafb_hotplug_refresh = 60;
int viafb_primary_dev = None_Device;

/* Sentinel: beyond the last valid panel id means "not probed yet" */
int viafb_lcd_panel_id = LCD_PANEL_ID_MAXIMUM + 1;

/* Framebuffer and driver-private state for both heads */
struct fb_info *viafbinfo;
struct fb_info *viafbinfo1;
struct viafb_par *viaparinfo;
struct viafb_par *viaparinfo1;
| gpl-2.0 |
nikitines/zte-kernel-roamer2 | fs/ioctl.c | 3040 | 16060 | /*
* linux/fs/ioctl.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/falloc.h>
#include <asm/ioctls.h>
/* So that the fiemap access checks can't overflow on 32 bit machines. */
#define FIEMAP_MAX_EXTENTS (UINT_MAX / sizeof(struct fiemap_extent))
/**
* vfs_ioctl - call filesystem specific ioctl methods
* @filp: open file to invoke ioctl method on
* @cmd: ioctl command to execute
* @arg: command-specific argument for ioctl
*
* Invokes filesystem specific ->unlocked_ioctl, if one exists; otherwise
* returns -ENOTTY.
*
* Returns 0 on success, -errno on error.
*/
/**
 * vfs_ioctl - call filesystem specific ioctl methods
 * @filp:	open file to invoke ioctl method on
 * @cmd:	ioctl command to execute
 * @arg:	command-specific argument for ioctl
 *
 * Invokes filesystem specific ->unlocked_ioctl, if one exists; otherwise
 * returns -ENOTTY.
 *
 * Returns 0 on success, -errno on error.
 */
static long vfs_ioctl(struct file *filp, unsigned int cmd,
		      unsigned long arg)
{
	long error;

	if (!filp->f_op || !filp->f_op->unlocked_ioctl)
		return -ENOTTY;

	error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
	/* "not my ioctl" from the fs becomes a plain invalid-argument error */
	if (error == -ENOIOCTLCMD)
		error = -EINVAL;
	return error;
}
/*
 * FIBMAP: map a file block number to a device block number through the
 * filesystem's ->bmap.  Requires CAP_SYS_RAWIO since it exposes raw
 * on-device locations.
 */
static int ioctl_fibmap(struct file *filp, int __user *p)
{
	struct address_space *mapping = filp->f_mapping;
	int err, block;

	/* do we support this mess? */
	if (!mapping->a_ops->bmap)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	err = get_user(block, p);
	if (err)
		return err;

	block = mapping->a_ops->bmap(mapping, block);
	return put_user(block, p);
}
/**
* fiemap_fill_next_extent - Fiemap helper function
* @fieinfo: Fiemap context passed into ->fiemap
* @logical: Extent logical start offset, in bytes
* @phys: Extent physical start offset, in bytes
* @len: Extent length, in bytes
* @flags: FIEMAP_EXTENT flags that describe this extent
*
* Called from file system ->fiemap callback. Will populate extent
* info as passed in via arguments and copy to user memory. On
* success, extent count on fieinfo is incremented.
*
* Returns 0 on success, -errno on error, 1 if this was the last
* extent that will fit in user array.
*/
#define SET_UNKNOWN_FLAGS (FIEMAP_EXTENT_DELALLOC)
#define SET_NO_UNMOUNTED_IO_FLAGS (FIEMAP_EXTENT_DATA_ENCRYPTED)
#define SET_NOT_ALIGNED_FLAGS (FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)
int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
			    u64 phys, u64 len, u32 flags)
{
	struct fiemap_extent extent;
	struct fiemap_extent __user *dest = fieinfo->fi_extents_start;

	/* only count the extents (caller passed fm_extent_count == 0) */
	if (fieinfo->fi_extents_max == 0) {
		fieinfo->fi_extents_mapped++;
		return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
	}

	/* user array already full: tell the fs to stop */
	if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
		return 1;

	/* Some flag combinations imply additional user-visible flags */
	if (flags & SET_UNKNOWN_FLAGS)
		flags |= FIEMAP_EXTENT_UNKNOWN;
	if (flags & SET_NO_UNMOUNTED_IO_FLAGS)
		flags |= FIEMAP_EXTENT_ENCODED;
	if (flags & SET_NOT_ALIGNED_FLAGS)
		flags |= FIEMAP_EXTENT_NOT_ALIGNED;

	memset(&extent, 0, sizeof(extent));
	extent.fe_logical = logical;
	extent.fe_physical = phys;
	extent.fe_length = len;
	extent.fe_flags = flags;

	dest += fieinfo->fi_extents_mapped;
	if (copy_to_user(dest, &extent, sizeof(extent)))
		return -EFAULT;

	/* count only after the copy succeeded */
	fieinfo->fi_extents_mapped++;
	if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
		return 1;
	return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
}
EXPORT_SYMBOL(fiemap_fill_next_extent);
/**
* fiemap_check_flags - check validity of requested flags for fiemap
* @fieinfo: Fiemap context passed into ->fiemap
* @fs_flags: Set of fiemap flags that the file system understands
*
* Called from file system ->fiemap callback. This will compute the
* intersection of valid fiemap flags and those that the fs supports. That
* value is then compared against the user supplied flags. In case of bad user
* flags, the invalid values will be written into the fieinfo structure, and
* -EBADR is returned, which tells ioctl_fiemap() to return those values to
* userspace. For this reason, a return code of -EBADR should be preserved.
*
* Returns 0 on success, -EBADR on bad flags.
*/
int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags)
{
	/* Flags the caller asked for that the fs does not understand */
	u32 bad_flags = fieinfo->fi_flags & ~(FIEMAP_FLAGS_COMPAT & fs_flags);

	if (!bad_flags)
		return 0;

	/* Report the offending flags back through fieinfo; see -EBADR note
	 * in the kernel-doc above. */
	fieinfo->fi_flags = bad_flags;
	return -EBADR;
}
EXPORT_SYMBOL(fiemap_check_flags);
/*
 * Validate a fiemap (start, len) request against the filesystem's maximum
 * file size and write the (possibly clamped) length into *new_len.
 */
static int fiemap_check_ranges(struct super_block *sb,
			       u64 start, u64 len, u64 *new_len)
{
	u64 maxbytes = (u64) sb->s_maxbytes;

	if (!len) {
		*new_len = len;
		return -EINVAL;
	}
	if (start > maxbytes) {
		*new_len = len;
		return -EFBIG;
	}

	/* Shrink request scope to what the fs can actually handle. */
	*new_len = (len > maxbytes || maxbytes - len < start)
			? maxbytes - start : len;
	return 0;
}
/*
 * FS_IOC_FIEMAP handler: copy the request in, validate range and extent
 * count, delegate to the filesystem's ->fiemap, and copy the updated
 * header (flags + mapped-extent count) back to userspace.
 */
static int ioctl_fiemap(struct file *filp, unsigned long arg)
{
	struct fiemap fiemap;
	struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
	struct fiemap_extent_info fieinfo = { 0, };
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	u64 len;
	int error;

	if (!inode->i_op->fiemap)
		return -EOPNOTSUPP;

	if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
		return -EFAULT;

	/* bound chosen so the access_ok size below cannot overflow on 32 bit */
	if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
		return -EINVAL;

	error = fiemap_check_ranges(sb, fiemap.fm_start, fiemap.fm_length,
					&len);
	if (error)
		return error;

	fieinfo.fi_flags = fiemap.fm_flags;
	fieinfo.fi_extents_max = fiemap.fm_extent_count;
	fieinfo.fi_extents_start = ufiemap->fm_extents;

	/* Pre-validate the whole user extent array in one go */
	if (fiemap.fm_extent_count != 0 &&
	    !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
		       fieinfo.fi_extents_max * sizeof(struct fiemap_extent)))
		return -EFAULT;

	if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
		filemap_write_and_wait(inode->i_mapping);

	error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
	/* fs may have cleared FIEMAP_FLAG_SYNC etc.; report back to user */
	fiemap.fm_flags = fieinfo.fi_flags;
	fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
	if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
		error = -EFAULT;

	return error;
}
#ifdef CONFIG_BLOCK
/* Convert a byte offset in @inode to a block number. */
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return offset >> inode->i_blkbits;
}
/* Convert a block number in @inode back to a byte offset. */
static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return blk << inode->i_blkbits;
}
/**
* __generic_block_fiemap - FIEMAP for block based inodes (no locking)
* @inode: the inode to map
* @fieinfo: the fiemap info struct that will be passed back to userspace
* @start: where to start mapping in the inode
* @len: how much space to map
* @get_block: the fs's get_block function
*
* This does FIEMAP for block based inodes. Basically it will just loop
* through get_block until we hit the number of extents we want to map, or we
* go past the end of the file and hit a hole.
*
* If it is possible to have data blocks beyond a hole past @inode->i_size, then
* please do not use this function, it will stop at the first unmapped block
* beyond i_size.
*
* If you use this function directly, you need to do your own locking. Use
* generic_block_fiemap if you want the locking done for you.
*/
int __generic_block_fiemap(struct inode *inode,
			   struct fiemap_extent_info *fieinfo, loff_t start,
			   loff_t len, get_block_t *get_block)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	loff_t isize = i_size_read(inode);
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = FIEMAP_EXTENT_MERGED;
	bool past_eof = false, whole_file = false;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	/*
	 * Either the i_mutex or other appropriate locking needs to be held
	 * since we expect isize to not change at all through the duration of
	 * this call.
	 */
	if (len >= isize) {
		whole_file = true;
		len = isize;
	}

	/*
	 * Some filesystems can't deal with being asked to map less than
	 * blocksize, so make sure our len is at least block length.
	 */
	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

	do {
		/*
		 * we set b_size to the total size we want so it will map as
		 * many contiguous blocks as possible at once
		 */
		memset(&map_bh, 0, sizeof(struct buffer_head));
		map_bh.b_size = len;

		ret = get_block(inode, start_blk, &map_bh, 0);
		if (ret)
			break;

		/* HOLE */
		if (!buffer_mapped(&map_bh)) {
			start_blk++;

			/*
			 * We want to handle the case where there is an
			 * allocated block at the front of the file, and then
			 * nothing but holes up to the end of the file properly,
			 * to make sure that extent at the front gets properly
			 * marked with FIEMAP_EXTENT_LAST
			 */
			if (!past_eof &&
			    blk_to_logical(inode, start_blk) >= isize)
				past_eof = true;	/* was '= 1'; keep type-consistent with bool */

			/*
			 * First hole after going past the EOF, this is our
			 * last extent
			 */
			if (past_eof && size) {
				flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
			} else if (size) {
				/* flush the pending extent before the hole */
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size, flags);
				size = 0;
			}

			/* if we have holes up to/past EOF then we're done */
			if (start_blk > last_blk || past_eof || ret)
				break;
		} else {
			/*
			 * We have gone over the length of what we wanted to
			 * map, and it wasn't the entire file, so add the extent
			 * we got last time and exit.
			 *
			 * This is for the case where say we want to map all the
			 * way up to the second to the last block in a file, but
			 * the last block is a hole, making the second to last
			 * block FIEMAP_EXTENT_LAST.  In this case we want to
			 * see if there is a hole after the second to last block
			 * so we can mark it properly.  If we found data after
			 * we exceeded the length we were requesting, then we
			 * are good to go, just add the extent to the fieinfo
			 * and break
			 */
			if (start_blk > last_blk && !whole_file) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
				break;
			}

			/*
			 * if size != 0 then we know we already have an extent
			 * to add, so add it.
			 */
			if (size) {
				ret = fiemap_fill_next_extent(fieinfo, logical,
							      phys, size,
							      flags);
				if (ret)
					break;
			}

			/* start a new pending extent at the mapped range */
			logical = blk_to_logical(inode, start_blk);
			phys = blk_to_logical(inode, map_bh.b_blocknr);
			size = map_bh.b_size;
			flags = FIEMAP_EXTENT_MERGED;
			start_blk += logical_to_blk(inode, size);

			/*
			 * If we are past the EOF, then we need to make sure as
			 * soon as we find a hole that the last extent we found
			 * is marked with FIEMAP_EXTENT_LAST
			 */
			if (!past_eof && logical + size >= isize)
				past_eof = true;
		}
		cond_resched();
	} while (1);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL(__generic_block_fiemap);
/**
* generic_block_fiemap - FIEMAP for block based inodes
* @inode: The inode to map
* @fieinfo: The mapping information
* @start: The initial block to map
* @len: The length of the extect to attempt to map
* @get_block: The block mapping function for the fs
*
* Calls __generic_block_fiemap to map the inode, after taking
* the inode's mutex lock.
*/
int generic_block_fiemap(struct inode *inode,
			 struct fiemap_extent_info *fieinfo, u64 start,
			 u64 len, get_block_t *get_block)
{
	int err;

	/* Hold i_mutex so i_size stays stable for the duration of the walk. */
	mutex_lock(&inode->i_mutex);
	err = __generic_block_fiemap(inode, fieinfo, start, len, get_block);
	mutex_unlock(&inode->i_mutex);

	return err;
}
EXPORT_SYMBOL(generic_block_fiemap);
#endif /* CONFIG_BLOCK */
/*
* This provides compatibility with legacy XFS pre-allocation ioctls
* which predate the fallocate syscall.
*
* Only the l_start, l_len and l_whence fields of the 'struct space_resv'
* are used here, rest are ignored.
*/
/*
 * This provides compatibility with legacy XFS pre-allocation ioctls
 * which predate the fallocate syscall.  Only l_start, l_len and l_whence
 * of the user's struct space_resv are used; the rest are ignored.
 */
int ioctl_preallocate(struct file *filp, void __user *argp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct space_resv sr;
	loff_t base;

	if (copy_from_user(&sr, argp, sizeof(sr)))
		return -EFAULT;

	/* Resolve l_whence into an absolute base offset */
	switch (sr.l_whence) {
	case SEEK_SET:
		base = 0;
		break;
	case SEEK_CUR:
		base = filp->f_pos;
		break;
	case SEEK_END:
		base = i_size_read(inode);
		break;
	default:
		return -EINVAL;
	}

	return do_fallocate(filp, FALLOC_FL_KEEP_SIZE, sr.l_start + base,
			    sr.l_len);
}
/*
 * ioctls that only make sense on regular files; everything else falls
 * through to the filesystem's own handler via vfs_ioctl().
 */
static int file_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int __user *p = (int __user *)arg;

	switch (cmd) {
	case FIBMAP:
		return ioctl_fibmap(filp, p);
	case FIONREAD:
		/* bytes remaining between f_pos and EOF */
		return put_user(i_size_read(inode) - filp->f_pos, p);
	case FS_IOC_RESVSP:
	case FS_IOC_RESVSP64:
		return ioctl_preallocate(filp, p);
	default:
		return vfs_ioctl(filp, cmd, arg);
	}
}
/* FIONBIO: set or clear O_NONBLOCK on the open file description. */
static int ioctl_fionbio(struct file *filp, int __user *argp)
{
	unsigned int bits;
	int enable, err;

	err = get_user(enable, argp);
	if (err)
		return err;

	bits = O_NONBLOCK;
#ifdef __sparc__
	/* SunOS compatibility item. */
	if (O_NONBLOCK != O_NDELAY)
		bits |= O_NDELAY;
#endif

	/* f_lock serializes f_flags updates against concurrent fcntl/ioctl */
	spin_lock(&filp->f_lock);
	if (enable)
		filp->f_flags |= bits;
	else
		filp->f_flags &= ~bits;
	spin_unlock(&filp->f_lock);

	return err;
}
/* FIOASYNC: enable or disable SIGIO delivery (FASYNC) for this file. */
static int ioctl_fioasync(unsigned int fd, struct file *filp,
			  int __user *argp)
{
	unsigned int wanted;
	int enable, err;

	err = get_user(enable, argp);
	if (err)
		return err;

	wanted = enable ? FASYNC : 0;

	/* Only call into the file op when the FASYNC state actually changes */
	if ((wanted ^ filp->f_flags) & FASYNC) {
		if (filp->f_op && filp->f_op->fasync)
			/* fasync() adjusts filp->f_flags */
			err = filp->f_op->fasync(fd, filp, enable);
		else
			err = -ENOTTY;
	}
	return err < 0 ? err : 0;
}
/* FIFREEZE: suspend writes to the filesystem backing this file. */
static int ioctl_fsfreeze(struct file *filp)
{
	struct super_block *sb = filp->f_path.dentry->d_inode->i_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* Filesystems without freeze support don't provide the callback */
	if (!sb->s_op->freeze_fs)
		return -EOPNOTSUPP;

	return freeze_super(sb);
}
/* FITHAW: resume writes on a previously frozen filesystem. */
static int ioctl_fsthaw(struct file *filp)
{
	struct super_block *sb = filp->f_path.dentry->d_inode->i_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return thaw_super(sb);
}
/*
* When you add any new common ioctls to the switches above and below
* please update compat_sys_ioctl() too.
*
* do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d.
* It's just a simple helper for sys_ioctl and compat_sys_ioctl.
*/
/*
 * Common ioctl dispatcher for sys_ioctl and compat_sys_ioctl.
 * Generic file-descriptor ioctls are handled here; anything unrecognized
 * is handed to file_ioctl() (regular files) or vfs_ioctl() (everything
 * else).  Note: FS_IOC_FIEMAP and FIGETBSZ return directly, bypassing
 * the common 'error' return path.
 */
int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
	     unsigned long arg)
{
	int error = 0;
	int __user *argp = (int __user *)arg;
	struct inode *inode = filp->f_path.dentry->d_inode;

	switch (cmd) {
	case FIOCLEX:
		set_close_on_exec(fd, 1);
		break;

	case FIONCLEX:
		set_close_on_exec(fd, 0);
		break;

	case FIONBIO:
		error = ioctl_fionbio(filp, argp);
		break;

	case FIOASYNC:
		error = ioctl_fioasync(fd, filp, argp);
		break;

	case FIOQSIZE:
		/* report space used, only meaningful for file-like inodes */
		if (S_ISDIR(inode->i_mode) || S_ISREG(inode->i_mode) ||
		    S_ISLNK(inode->i_mode)) {
			loff_t res = inode_get_bytes(inode);
			error = copy_to_user(argp, &res, sizeof(res)) ?
					-EFAULT : 0;
		} else
			error = -ENOTTY;
		break;

	case FIFREEZE:
		error = ioctl_fsfreeze(filp);
		break;

	case FITHAW:
		error = ioctl_fsthaw(filp);
		break;

	case FS_IOC_FIEMAP:
		return ioctl_fiemap(filp, arg);

	case FIGETBSZ:
		return put_user(inode->i_sb->s_blocksize, argp);

	default:
		if (S_ISREG(inode->i_mode))
			error = file_ioctl(filp, cmd, arg);
		else
			error = vfs_ioctl(filp, cmd, arg);
		break;
	}
	return error;
}
/*
 * ioctl(2) syscall entry: resolve the fd, run the LSM security hook,
 * then dispatch through do_vfs_ioctl().
 */
SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	struct file *filp;
	int error = -EBADF;
	int fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (!filp)
		goto out;

	error = security_file_ioctl(filp, cmd, arg);
	if (error)
		goto out_fput;

	error = do_vfs_ioctl(filp, fd, cmd, arg);
 out_fput:
	fput_light(filp, fput_needed);
 out:
	return error;
}
| gpl-2.0 |
Ca1ne/Enoch-Sense-Kernel-old | drivers/acpi/acpica/utxface.c | 3040 | 18356 | /******************************************************************************
*
* Module Name: utxface - External interfaces for "global" ACPI functions
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
#include "acdebug.h"
#include "actables.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utxface")
#ifndef ACPI_ASL_COMPILER
/*******************************************************************************
*
* FUNCTION: acpi_initialize_subsystem
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Initializes all global variables. This is the first function
* called, so any early initialization belongs here.
*
******************************************************************************/
acpi_status __init acpi_initialize_subsystem(void)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_initialize_subsystem);

	/* Mark the subsystem state; enable stack tracing in debug builds only */
	acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE;
	ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace());

	/* Initialize the OS-Dependent layer */
	status = acpi_os_initialize();
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "During OSL initialization"));
		return_ACPI_STATUS(status);
	}

	/* Initialize all globals used by the subsystem */
	status = acpi_ut_init_globals();
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"During initialization of globals"));
		return_ACPI_STATUS(status);
	}

	/* Create the default mutex objects */
	status = acpi_ut_mutex_initialize();
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"During Global Mutex creation"));
		return_ACPI_STATUS(status);
	}

	/*
	 * Initialize the namespace manager and
	 * the root of the namespace tree
	 */
	status = acpi_ns_root_initialize();
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"During Namespace initialization"));
		return_ACPI_STATUS(status);
	}

	/* Initialize the global OSI interfaces list with the static names */
	status = acpi_ut_initialize_interfaces();
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"During OSI interfaces initialization"));
		return_ACPI_STATUS(status);
	}

	/* If configured, initialize the AML debugger (no-op otherwise) */
	ACPI_DEBUGGER_EXEC(status = acpi_db_initialize());
	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_enable_subsystem
*
* PARAMETERS: Flags - Init/enable Options
*
* RETURN: Status
*
* DESCRIPTION: Completes the subsystem initialization including hardware.
* Puts system into ACPI mode if it isn't already.
*
******************************************************************************/
acpi_status acpi_enable_subsystem(u32 flags)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(acpi_enable_subsystem);

	/* Enable ACPI mode (unless caller passed ACPI_NO_ACPI_ENABLE) */
	if (!(flags & ACPI_NO_ACPI_ENABLE)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "[Init] Going into ACPI mode\n"));

		/* Remember the pre-transition mode so it can be queried later */
		acpi_gbl_original_mode = acpi_hw_get_mode();

		status = acpi_enable();
		if (ACPI_FAILURE(status)) {
			ACPI_WARNING((AE_INFO, "AcpiEnable failed"));
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * Obtain a permanent mapping for the FACS. This is required for the
	 * Global Lock and the Firmware Waking Vector
	 */
	status = acpi_tb_initialize_facs();
	if (ACPI_FAILURE(status)) {
		ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
		return_ACPI_STATUS(status);
	}

	/*
	 * Install the default op_region handlers. These are installed unless
	 * other handlers have already been installed via the
	 * install_address_space_handler interface.
	 */
	if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "[Init] Installing default address space handlers\n"));

		status = acpi_ev_install_region_handlers();
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * Initialize ACPI Event handling (Fixed and General Purpose)
	 *
	 * Note1: We must have the hardware and events initialized before we can
	 * execute any control methods safely. Any control method can require
	 * ACPI hardware support, so the hardware must be fully initialized before
	 * any method execution!
	 *
	 * Note2: Fixed events are initialized and enabled here. GPEs are
	 * initialized, but cannot be enabled until after the hardware is
	 * completely initialized (SCI and global_lock activated)
	 */
	if (!(flags & ACPI_NO_EVENT_INIT)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "[Init] Initializing ACPI events\n"));

		status = acpi_ev_initialize_events();
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * Install the SCI handler and Global Lock handler. This completes the
	 * hardware initialization.
	 */
	if (!(flags & ACPI_NO_HANDLER_INIT)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "[Init] Installing SCI/GL handlers\n"));

		status = acpi_ev_install_xrupt_handlers();
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_enable_subsystem)
/*******************************************************************************
*
* FUNCTION: acpi_initialize_objects
*
* PARAMETERS: Flags - Init/enable Options
*
* RETURN: Status
*
* DESCRIPTION: Completes namespace initialization by initializing device
* objects and executing AML code for Regions, buffers, etc.
*
******************************************************************************/
acpi_status acpi_initialize_objects(u32 flags)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(acpi_initialize_objects);

	/*
	 * Run all _REG methods
	 *
	 * Note: Any objects accessed by the _REG methods will be automatically
	 * initialized, even if they contain executable AML (see the call to
	 * acpi_ns_initialize_objects below).
	 */
	if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "[Init] Executing _REG OpRegion methods\n"));

		status = acpi_ev_initialize_op_regions();
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * Execute any module-level code that was detected during the table load
	 * phase. Although illegal since ACPI 2.0, there are many machines that
	 * contain this type of code. Each block of detected executable AML code
	 * outside of any control method is wrapped with a temporary control
	 * method object and placed on a global list. The methods on this list
	 * are executed below.
	 */
	acpi_ns_exec_module_code_list();

	/*
	 * Initialize the objects that remain uninitialized. This runs the
	 * executable AML that may be part of the declaration of these objects:
	 * operation_regions, buffer_fields, Buffers, and Packages.
	 */
	if (!(flags & ACPI_NO_OBJECT_INIT)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "[Init] Completing Initialization of ACPI Objects\n"));

		status = acpi_ns_initialize_objects();
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * Initialize all device objects in the namespace. This runs the device
	 * _STA and _INI methods.
	 */
	if (!(flags & ACPI_NO_DEVICE_INIT)) {
		ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
				  "[Init] Initializing ACPI Devices\n"));

		status = acpi_ns_initialize_devices();
		if (ACPI_FAILURE(status)) {
			return_ACPI_STATUS(status);
		}
	}

	/*
	 * Empty the caches (delete the cached objects) on the assumption that
	 * the table load filled them up more than they will be at runtime --
	 * thus wasting non-paged memory.
	 */
	status = acpi_purge_cached_objects();

	/* Only now is the subsystem considered fully initialized */
	acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK;
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
#endif
/*******************************************************************************
*
* FUNCTION: acpi_terminate
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Shutdown the ACPICA subsystem and release all resources.
*
******************************************************************************/
acpi_status acpi_terminate(void)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_terminate);

	/* Just exit if subsystem is already shutdown */
	if (acpi_gbl_shutdown) {
		ACPI_ERROR((AE_INFO, "ACPI Subsystem is already terminated"));
		return_ACPI_STATUS(AE_OK);
	}

	/* Subsystem appears active, go ahead and shut it down */
	acpi_gbl_shutdown = TRUE;
	acpi_gbl_startup_flags = 0;
	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Shutting down ACPI Subsystem\n"));

	/* Terminate the AML Debugger threads if the debugger is present */
	ACPI_DEBUGGER_EXEC(acpi_gbl_db_terminate_threads = TRUE);

	/* Shutdown and free all resources */
	acpi_ut_subsystem_shutdown();

	/* Free the mutex objects (after all users are shut down) */
	acpi_ut_mutex_terminate();

#ifdef ACPI_DEBUGGER

	/* Shut down the debugger */

	acpi_db_terminate();
#endif

	/* Now we can shutdown the OS-dependent layer */
	status = acpi_os_terminate();
	return_ACPI_STATUS(status);
}

ACPI_EXPORT_SYMBOL(acpi_terminate)
#ifndef ACPI_ASL_COMPILER
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
* FUNCTION: acpi_subsystem_status
*
* PARAMETERS: None
*
* RETURN: Status of the ACPI subsystem
*
* DESCRIPTION: Other drivers that use the ACPI subsystem should call this
* before making any other calls, to ensure the subsystem
* initialized successfully.
*
******************************************************************************/
acpi_status acpi_subsystem_status(void)
{
	/* AE_OK only after acpi_initialize_objects() has set the flag */
	return ((acpi_gbl_startup_flags & ACPI_INITIALIZED_OK) ?
		AE_OK : AE_ERROR);
}

ACPI_EXPORT_SYMBOL(acpi_subsystem_status)
/*******************************************************************************
*
* FUNCTION: acpi_get_system_info
*
* PARAMETERS: out_buffer - A buffer to receive the resources for the
* device
*
* RETURN: Status - the status of the call
*
* DESCRIPTION: This function is called to get information about the current
* state of the ACPI subsystem. It will return system information
* in the out_buffer.
*
* If the function fails an appropriate status will be returned
* and the value of out_buffer is undefined.
*
******************************************************************************/
acpi_status acpi_get_system_info(struct acpi_buffer * out_buffer)
{
	struct acpi_system_info *info_ptr;
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_get_system_info);

	/* Parameter validation */
	status = acpi_ut_validate_buffer(out_buffer);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Validate/Allocate/Clear caller buffer */
	status =
	    acpi_ut_initialize_buffer(out_buffer,
				      sizeof(struct acpi_system_info));
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Populate the return buffer
	 */
	info_ptr = (struct acpi_system_info *)out_buffer->pointer;

	info_ptr->acpi_ca_version = ACPI_CA_VERSION;

	/* System flags (ACPI capabilities) */

	info_ptr->flags = ACPI_SYS_MODE_ACPI;

	/* Timer resolution - 24 or 32 bits */

	/*
	 * NOTE(review): reporting 24 when the 32BIT_TIMER flag is set looks
	 * inverted, but matches long-standing ACPICA behavior -- confirm
	 * against the ACPICA reference before changing.
	 */
	if (acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER) {
		info_ptr->timer_resolution = 24;
	} else {
		info_ptr->timer_resolution = 32;
	}

	/* Clear the reserved fields */

	info_ptr->reserved1 = 0;
	info_ptr->reserved2 = 0;

	/* Current debug levels */

	info_ptr->debug_layer = acpi_dbg_layer;
	info_ptr->debug_level = acpi_dbg_level;

	return_ACPI_STATUS(AE_OK);
}

ACPI_EXPORT_SYMBOL(acpi_get_system_info)
/*****************************************************************************
*
* FUNCTION: acpi_install_initialization_handler
*
* PARAMETERS: Handler - Callback procedure
* Function - Not (currently) used, see below
*
* RETURN: Status
*
* DESCRIPTION: Install an initialization handler
*
* TBD: When a second function is added, must save the Function also.
*
****************************************************************************/
/*
 * Install a single global init handler. Returns AE_BAD_PARAMETER for a
 * NULL handler and AE_ALREADY_EXISTS if one is already installed. The
 * 'function' argument is currently unused (see TBD in the banner above).
 */
acpi_status
acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
{

	if (!handler) {
		return (AE_BAD_PARAMETER);
	}

	if (acpi_gbl_init_handler) {
		return (AE_ALREADY_EXISTS);
	}

	acpi_gbl_init_handler = handler;

	/* Parenthesized return for consistency with ACPICA coding style */
	return (AE_OK);
}

ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
#endif /* ACPI_FUTURE_USAGE */
/*****************************************************************************
*
* FUNCTION: acpi_purge_cached_objects
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Empty all caches (delete the cached objects)
*
****************************************************************************/
acpi_status acpi_purge_cached_objects(void)
{
	ACPI_FUNCTION_TRACE(acpi_purge_cached_objects);

	/* Drop cached objects from all four internal object caches;
	 * per-cache purge failures are intentionally ignored.
	 */
	(void)acpi_os_purge_cache(acpi_gbl_state_cache);
	(void)acpi_os_purge_cache(acpi_gbl_operand_cache);
	(void)acpi_os_purge_cache(acpi_gbl_ps_node_cache);
	(void)acpi_os_purge_cache(acpi_gbl_ps_node_ext_cache);
	return_ACPI_STATUS(AE_OK);
}

ACPI_EXPORT_SYMBOL(acpi_purge_cached_objects)
/*****************************************************************************
*
* FUNCTION: acpi_install_interface
*
* PARAMETERS: interface_name - The interface to install
*
* RETURN: Status
*
* DESCRIPTION: Install an _OSI interface to the global list
*
****************************************************************************/
acpi_status acpi_install_interface(acpi_string interface_name)
{
	struct acpi_interface_info *existing;
	acpi_status status;

	/* Reject NULL or empty interface names */

	if (!interface_name || (ACPI_STRLEN(interface_name) == 0)) {
		return (AE_BAD_PARAMETER);
	}

	(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);

	existing = acpi_ut_get_interface(interface_name);
	if (!existing) {

		/* New interface name, install into the global list */

		status = acpi_ut_install_interface(interface_name);
	} else if (existing->flags & ACPI_OSI_INVALID) {

		/* Present but marked invalid: revalidate by clearing the bit */

		existing->flags &= ~ACPI_OSI_INVALID;
		status = AE_OK;
	} else {

		/* Present and valid: nothing to do */

		status = AE_ALREADY_EXISTS;
	}

	acpi_os_release_mutex(acpi_gbl_osi_mutex);
	return (status);
}

ACPI_EXPORT_SYMBOL(acpi_install_interface)
/*****************************************************************************
*
* FUNCTION: acpi_remove_interface
*
* PARAMETERS: interface_name - The interface to remove
*
* RETURN: Status
*
* DESCRIPTION: Remove an _OSI interface from the global list
*
****************************************************************************/
acpi_status acpi_remove_interface(acpi_string interface_name)
{
	acpi_status status = AE_BAD_PARAMETER;

	/* Only operate on a non-NULL, non-empty interface name */

	if (interface_name && (ACPI_STRLEN(interface_name) != 0)) {
		(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex,
					    ACPI_WAIT_FOREVER);
		status = acpi_ut_remove_interface(interface_name);
		acpi_os_release_mutex(acpi_gbl_osi_mutex);
	}

	return (status);
}

ACPI_EXPORT_SYMBOL(acpi_remove_interface)
/*****************************************************************************
*
* FUNCTION: acpi_install_interface_handler
*
* PARAMETERS: Handler - The _OSI interface handler to install
* NULL means "remove existing handler"
*
* RETURN: Status
*
* DESCRIPTION: Install a handler for the predefined _OSI ACPI method.
* invoked during execution of the internal implementation of
* _OSI. A NULL handler simply removes any existing handler.
*
****************************************************************************/
acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
{
	acpi_status status;

	(void)acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);

	/* A non-NULL handler may only be installed when none is present;
	 * a NULL handler always removes the current one.
	 */
	if (handler && acpi_gbl_interface_handler) {
		status = AE_ALREADY_EXISTS;
	} else {
		acpi_gbl_interface_handler = handler;
		status = AE_OK;
	}

	acpi_os_release_mutex(acpi_gbl_osi_mutex);
	return (status);
}

ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
#endif /* !ACPI_ASL_COMPILER */
| gpl-2.0 |
fronti90/kernel_lge_geefhd | fs/exofs/inode.c | 4832 | 37240 | /*
* Copyright (C) 2005, 2006
* Avishay Traeger (avishay@gmail.com)
* Copyright (C) 2008, 2009
* Boaz Harrosh <bharrosh@panasas.com>
*
* Copyrights for code taken from ext2:
* Copyright (C) 1992, 1993, 1994, 1995
* Remy Card (card@masi.ibp.fr)
* Laboratoire MASI - Institut Blaise Pascal
* Universite Pierre et Marie Curie (Paris VI)
* from
* linux/fs/minix/inode.c
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file is part of exofs.
*
* exofs is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation. Since it is based on ext2, and the only
* valid version of GPL for the Linux kernel is version 2, the only valid
* version of GPL for exofs is version 2.
*
* exofs is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with exofs; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/slab.h>
#include "exofs.h"
#define EXOFS_DBGMSG2(M...) do {} while (0)
enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), };
unsigned exofs_max_io_pages(struct ore_layout *layout,
unsigned expected_pages)
{
unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC);
/* TODO: easily support bio chaining */
pages = min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE);
return pages;
}
/* Accumulates contiguous pages for a single read or write IO. */
struct page_collect {
	struct exofs_sb_info *sbi;	/* superblock info of the inode */
	struct inode *inode;		/* inode the pages belong to */
	unsigned expected_pages;	/* pages still expected from caller */
	struct ore_io_state *ios;	/* ORE IO state, NULL until created */

	struct page **pages;		/* collected page array */
	unsigned alloc_pages;		/* capacity of @pages */
	unsigned nr_pages;		/* pages currently in @pages */
	unsigned long length;		/* total bytes collected */
	loff_t pg_first;	/* keep 64bit also in 32-arches */
	bool read_4_write; /* This means two things: that the read is sync
			    * And the pages should not be unlocked.
			    */
	struct page *that_locked_page;	/* page locked by caller, not released */
};
static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
struct inode *inode)
{
struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
pcol->sbi = sbi;
pcol->inode = inode;
pcol->expected_pages = expected_pages;
pcol->ios = NULL;
pcol->pages = NULL;
pcol->alloc_pages = 0;
pcol->nr_pages = 0;
pcol->length = 0;
pcol->pg_first = -1;
pcol->read_4_write = false;
pcol->that_locked_page = NULL;
}
/* Reset the collection state after an IO was handed off, keeping the
 * inode/sbi association and the remaining page expectation.
 */
static void _pcol_reset(struct page_collect *pcol)
{
	/* Account for pages just submitted before the counters are cleared */
	pcol->expected_pages -= min(pcol->nr_pages, pcol->expected_pages);

	pcol->ios = NULL;
	pcol->pages = NULL;
	pcol->alloc_pages = 0;
	pcol->nr_pages = 0;
	pcol->length = 0;
	pcol->pg_first = -1;
	pcol->that_locked_page = NULL;

	/* this is probably the end of the loop but in writes
	 * it might not end here. don't be left with nothing
	 */
	if (!pcol->expected_pages)
		pcol->expected_pages = MAX_PAGES_KMALLOC;
}
/* Allocate the page-pointer array, halving the request until kmalloc
 * succeeds. Returns 0 on success, -ENOMEM when even one slot fails.
 */
static int pcol_try_alloc(struct page_collect *pcol)
{
	/* TODO: easily support bio chaining */
	unsigned pages = exofs_max_io_pages(&pcol->sbi->layout,
					    pcol->expected_pages);

	while (pages) {
		pcol->pages = kmalloc(pages * sizeof(*pcol->pages),
				      GFP_KERNEL);
		if (likely(pcol->pages)) {
			pcol->alloc_pages = pages;
			return 0;
		}
		pages >>= 1;
	}

	EXOFS_ERR("Failed to kmalloc expected_pages=%u\n",
		  pcol->expected_pages);
	return -ENOMEM;
}
/* Release the page array and, if present, the ORE IO state. */
static void pcol_free(struct page_collect *pcol)
{
	if (pcol->ios) {
		ore_put_io_state(pcol->ios);
		pcol->ios = NULL;
	}

	kfree(pcol->pages);
	pcol->pages = NULL;
}
/* Append @page (@len payload bytes) to the collection.
 * Returns -ENOMEM when the array is full so the caller splits the IO.
 */
static int pcol_add_page(struct page_collect *pcol, struct page *page,
			 unsigned len)
{
	if (likely(pcol->nr_pages < pcol->alloc_pages)) {
		pcol->pages[pcol->nr_pages++] = page;
		pcol->length += len;
		return 0;
	}

	return -ENOMEM;
}
enum {PAGE_WAS_NOT_IN_IO = 17};
/* Update a page's flags after a read completed with status @ret.
 * Returns 0 for recovered/OK cases, otherwise the original error.
 */
static int update_read_page(struct page *page, int ret)
{
	switch (ret) {
	case 0:
		/* Everything is OK */
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		break;
	case -EFAULT:
		/* In this case we were trying to read something that wasn't on
		 * disk yet - return a page full of zeroes.  This should be OK,
		 * because the object should be empty (if there was a write
		 * before this read, the read would be waiting with the page
		 * locked */
		clear_highpage(page);

		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		EXOFS_DBGMSG("recovered read error\n");
		/* fall through */
	case PAGE_WAS_NOT_IN_IO:
		ret = 0; /* recovered error */
		break;
	default:
		SetPageError(page);
	}
	return ret;
}
/* Update a page's flags after a write completed with status @ret. */
static void update_write_page(struct page *page, int ret)
{
	if (likely(ret != PAGE_WAS_NOT_IN_IO)) {
		if (ret) {
			mapping_set_error(page->mapping, ret);
			SetPageError(page);
		}
		end_page_writeback(page);
	}
	/* else: don't pass start don't collect $200 */
}
/* Called at the end of reads, to optionally unlock pages and update their
* status.
*/
/* Called at the end of reads, to optionally unlock pages and update their
 * status.
 */
static int __readpages_done(struct page_collect *pcol)
{
	int i;
	u64 good_bytes;
	u64 length = 0;
	int ret = ore_check_io(pcol->ios, NULL);

	/* On success every collected byte is good; PAGE_WAS_NOT_IN_IO is
	 * then returned for pages past good_bytes (recovered by
	 * update_read_page). On failure no byte is trusted.
	 */
	if (likely(!ret)) {
		good_bytes = pcol->length;
		ret = PAGE_WAS_NOT_IN_IO;
	} else {
		good_bytes = 0;
	}

	EXOFS_DBGMSG2("readpages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages at end */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		EXOFS_DBGMSG2("    readpages_done(0x%lx, 0x%lx) %s\n",
			  inode->i_ino, page->index,
			  page_stat ? "bad_bytes" : "good_bytes");

		ret = update_read_page(page, page_stat);
		/* sync read-for-write keeps pages locked for the caller */
		if (!pcol->read_4_write)
			unlock_page(page);
		length += PAGE_SIZE;
	}

	pcol_free(pcol);
	EXOFS_DBGMSG2("readpages_done END\n");
	return ret;
}
/* callback of async reads */
/* callback of async reads */
static void readpages_done(struct ore_io_state *ios, void *p)
{
	struct page_collect *pcol = p;

	/* Finish pages first, then drop the pending count, then free the
	 * heap-allocated copy made by read_exec().
	 */
	__readpages_done(pcol);
	atomic_dec(&pcol->sbi->s_curr_pending);
	kfree(pcol);
}
/* Error path: propagate @ret to every collected page and unlock it. */
static void _unlock_pcol_pages(struct page_collect *pcol, int ret, int rw)
{
	int i;

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];

		if (rw == READ)
			update_read_page(page, ret);
		else
			update_write_page(page, ret);

		unlock_page(page);
	}
}
/* If ORE trimmed the IO (took fewer pages than collected), move the
 * leftover pages from @pcol_src into the fresh @pcol so the next
 * read_exec/write_exec submits them. Returns 0 or -ENOMEM.
 */
static int _maybe_not_all_in_one_io(struct ore_io_state *ios,
	struct page_collect *pcol_src, struct page_collect *pcol)
{
	/* length was wrong or offset was not page aligned */
	BUG_ON(pcol_src->nr_pages < ios->nr_pages);

	if (pcol_src->nr_pages > ios->nr_pages) {
		struct page **src_page;
		unsigned pages_less = pcol_src->nr_pages - ios->nr_pages;
		unsigned long len_less = pcol_src->length - ios->length;
		unsigned i;
		int ret;

		/* This IO was trimmed */
		pcol_src->nr_pages = ios->nr_pages;
		pcol_src->length = ios->length;

		/* Left over pages are passed to the next io */
		pcol->expected_pages += pages_less;
		pcol->nr_pages = pages_less;
		pcol->length = len_less;
		src_page = pcol_src->pages + pcol_src->nr_pages;
		pcol->pg_first = (*src_page)->index;

		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			return ret;

		for (i = 0; i < pages_less; ++i)
			pcol->pages[i] = *src_page++;

		EXOFS_DBGMSG("Length was adjusted nr_pages=0x%x "
			"pages_less=0x%x expected_pages=0x%x "
			"next_offset=0x%llx next_len=0x%lx\n",
			pcol_src->nr_pages, pages_less, pcol->expected_pages,
			pcol->pg_first * PAGE_SIZE, pcol->length);
	}
	return 0;
}
/* Submit the currently collected pages as one read IO. In the sync
 * read_4_write case completion is handled inline; otherwise the IO is
 * asynchronous and completion runs in readpages_done().
 */
static int read_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->pages)
		return 0;

	if (!pcol->ios) {
		int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true,
					     pcol->pg_first << PAGE_CACHE_SHIFT,
					     pcol->length, &pcol->ios);

		if (ret)
			return ret;
	}

	ios = pcol->ios;
	ios->pages = pcol->pages;

	if (pcol->read_4_write) {
		/* Synchronous path; NOTE(review): ore_read()'s return value
		 * is ignored here -- __readpages_done() re-checks the IO via
		 * ore_check_io(). Confirm before changing.
		 */
		ore_read(pcol->ios);
		return __readpages_done(pcol);
	}

	/* Async path: completion callback owns a heap copy of pcol */
	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;
	ios->done = readpages_done;
	ios->private = pcol_copy;

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);

	/* ORE may have trimmed the IO; move leftover pages into pcol */
	ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
	if (unlikely(ret))
		goto err;

	EXOFS_DBGMSG2("read_exec(0x%lx) offset=0x%llx length=0x%llx\n",
		pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

	ret = ore_read(ios);
	if (unlikely(ret))
		goto err;

	atomic_inc(&pcol->sbi->s_curr_pending);

	return 0;

err:
	if (!pcol->read_4_write)
		_unlock_pcol_pages(pcol, ret, READ);

	pcol_free(pcol);

	kfree(pcol_copy);
	return ret;
}
/* readpage_strip is called either directly from readpage() or by the VFS from
* within read_cache_pages(), to add one more page to be read. It will try to
* collect as many contiguous pages as posible. If a discontinuity is
* encountered, or it runs out of resources, it will submit the previous segment
* and will start a new collection. Eventually caller must submit the last
* segment if present.
*/
static int readpage_strip(void *data, struct page *page)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	/* FIXME: Just for debugging, will be removed */
	if (PageUptodate(page))
		EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino,
			  page->index);

	pcol->that_locked_page = page;

	/* Bytes of this page that lie within i_size */
	if (page->index < end_index)
		len = PAGE_CACHE_SIZE;
	else if (page->index == end_index)
		len = i_size & ~PAGE_CACHE_MASK;
	else
		len = 0;

	if (!len || !obj_created(oi)) {
		/* this will be out of bounds, or doesn't exist yet.
		 * Current page is cleared and the request is split
		 */
		clear_highpage(page);

		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);

		if (!pcol->read_4_write)
			unlock_page(page);
		EXOFS_DBGMSG("readpage_strip(0x%lx) empty page len=%zx "
			     "read_4_write=%d index=0x%lx end_index=0x%lx "
			     "splitting\n", inode->i_ino, len,
			     pcol->read_4_write, page->index, end_index);

		/* Flush whatever was collected before this hole */
		return read_exec(pcol);
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = read_exec(pcol);
		if (unlikely(ret))
			goto fail;
		goto try_again;
	}

	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	/* Zero the tail of a partial last page */
	if (len != PAGE_CACHE_SIZE)
		zero_user(page, len, PAGE_CACHE_SIZE - len);

	EXOFS_DBGMSG2("    readpage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		     inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (ret) {
		EXOFS_DBGMSG2("Failed pcol_add_page pages[i]=%p "
			  "this_len=0x%zx nr_pages=%u length=0x%lx\n",
			  page, len, pcol->nr_pages, pcol->length);

		/* split the request, and start again with current page */
		ret = read_exec(pcol);
		if (unlikely(ret))
			goto fail;

		goto try_again;
	}

	return 0;

fail:
	/* SetPageError(page); ??? */
	unlock_page(page);
	return ret;
}
/* address_space_operations ->readpages: collect and submit all pages. */
static int exofs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, nr_pages, mapping->host);

	ret = read_cache_pages(mapping, pages, readpage_strip, &pcol);
	if (ret) {
		EXOFS_ERR("read_cache_pages => %d\n", ret);
		return ret;
	}

	ret = read_exec(&pcol);
	if (unlikely(ret))
		return ret;

	/* Second call is intentional: _maybe_not_all_in_one_io() may have
	 * moved trimmed-off pages back into pcol; submit that remainder.
	 */
	return read_exec(&pcol);
}
/* Read a single page; @read_4_write selects the sync keep-locked mode. */
static int _readpage(struct page *page, bool read_4_write)
{
	struct page_collect pcol;
	int ret;

	_pcol_init(&pcol, 1, page->mapping->host);
	pcol.read_4_write = read_4_write;

	ret = readpage_strip(&pcol, page);
	if (!ret)
		return read_exec(&pcol);

	EXOFS_ERR("_readpage => %d\n", ret);
	return ret;
}
/*
* We don't need the file
*/
/* address_space_operations ->readpage: async, page unlocked on completion */
static int exofs_readpage(struct file *file, struct page *page)
{
	return _readpage(page, false);
}
/* Callback for osd_write. All writes are asynchronous */
/* Callback for osd_write. All writes are asynchronous */
static void writepages_done(struct ore_io_state *ios, void *p)
{
	struct page_collect *pcol = p;
	int i;
	u64 good_bytes;
	u64 length = 0;
	int ret = ore_check_io(ios, NULL);

	atomic_dec(&pcol->sbi->s_curr_pending);

	/* On success all bytes are good; PAGE_WAS_NOT_IN_IO marks pages
	 * past good_bytes (ignored by update_write_page).
	 */
	if (likely(!ret)) {
		good_bytes = pcol->length;
		ret = PAGE_WAS_NOT_IN_IO;
	} else {
		good_bytes = 0;
	}

	EXOFS_DBGMSG2("writepages_done(0x%lx) good_bytes=0x%llx"
		     " length=0x%lx nr_pages=%u\n",
		     pcol->inode->i_ino, _LLU(good_bytes), pcol->length,
		     pcol->nr_pages);

	for (i = 0; i < pcol->nr_pages; i++) {
		struct page *page = pcol->pages[i];
		struct inode *inode = page->mapping->host;
		int page_stat;

		if (inode != pcol->inode)
			continue; /* osd might add more pages to a bio */

		if (likely(length < good_bytes))
			page_stat = 0;
		else
			page_stat = ret;

		update_write_page(page, page_stat);
		unlock_page(page);
		EXOFS_DBGMSG2("    writepages_done(0x%lx, 0x%lx) status=%d\n",
			     inode->i_ino, page->index, page_stat);

		length += PAGE_SIZE;
	}

	pcol_free(pcol);
	kfree(pcol);
	EXOFS_DBGMSG2("writepages_done END\n");
}
/* ORE read-for-write hook: hand ORE a page for @offset, reporting via
 * @uptodate whether its contents can be trusted. Returns NULL on OOM.
 */
static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
{
	struct page_collect *pcol = priv;
	pgoff_t index = offset / PAGE_SIZE;

	if (!pcol->that_locked_page ||
	    (pcol->that_locked_page->index != index)) {
		struct page *page = find_get_page(pcol->inode->i_mapping, index);

		if (!page) {
			/* Not cached: create (unlocked) so ORE can fill it */
			page = find_or_create_page(pcol->inode->i_mapping,
						   index, GFP_NOFS);
			if (unlikely(!page)) {
				EXOFS_DBGMSG("grab_cache_page Failed "
					"index=0x%llx\n", _LLU(index));
				return NULL;
			}
			unlock_page(page);
		}
		/* Dirty/writeback pages are newer than disk, so "uptodate" */
		if (PageDirty(page) || PageWriteback(page))
			*uptodate = true;
		else
			*uptodate = PageUptodate(page);
		EXOFS_DBGMSG("index=0x%lx uptodate=%d\n", index, *uptodate);
		return page;
	} else {
		EXOFS_DBGMSG("YES that_locked_page index=0x%lx\n",
			     pcol->that_locked_page->index);
		*uptodate = true;
		return pcol->that_locked_page;
	}
}
/* ORE read-for-write hook: release a page handed out by __r4w_get_page.
 * The caller-locked page is not ours to release.
 */
static void __r4w_put_page(void *priv, struct page *page)
{
	struct page_collect *pcol = priv;

	if (pcol->that_locked_page == page) {
		EXOFS_DBGMSG("that_locked_page index=0x%lx\n", page->index);
		return;
	}

	EXOFS_DBGMSG("index=0x%lx\n", page->index);
	page_cache_release(page);
}
/* Read-for-write page provider handed to ORE via ios->r4w in write_exec() */
static const struct _ore_r4w_op _r4w_op = {
	.get_page = &__r4w_get_page,
	.put_page = &__r4w_put_page,
};
/* Submit the currently collected pages as one asynchronous write IO;
 * completion runs in writepages_done(). Returns 0 or a negative errno.
 */
static int write_exec(struct page_collect *pcol)
{
	struct exofs_i_info *oi = exofs_i(pcol->inode);
	struct ore_io_state *ios;
	struct page_collect *pcol_copy = NULL;
	int ret;

	if (!pcol->pages)
		return 0;

	BUG_ON(pcol->ios);
	ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false,
				 pcol->pg_first << PAGE_CACHE_SHIFT,
				 pcol->length, &pcol->ios);
	if (unlikely(ret))
		goto err;

	/* Completion callback owns a heap copy of pcol */
	pcol_copy = kmalloc(sizeof(*pcol_copy), GFP_KERNEL);
	if (!pcol_copy) {
		EXOFS_ERR("write_exec: Failed to kmalloc(pcol)\n");
		ret = -ENOMEM;
		goto err;
	}

	*pcol_copy = *pcol;

	ios = pcol->ios;
	ios->pages = pcol_copy->pages;
	ios->done = writepages_done;
	ios->r4w = &_r4w_op;
	ios->private = pcol_copy;

	/* pages ownership was passed to pcol_copy */
	_pcol_reset(pcol);

	/* ORE may have trimmed the IO; move leftover pages into pcol */
	ret = _maybe_not_all_in_one_io(ios, pcol_copy, pcol);
	if (unlikely(ret))
		goto err;

	EXOFS_DBGMSG2("write_exec(0x%lx) offset=0x%llx length=0x%llx\n",
		pcol->inode->i_ino, _LLU(ios->offset), _LLU(ios->length));

	ret = ore_write(ios);
	if (unlikely(ret)) {
		EXOFS_ERR("write_exec: ore_write() Failed\n");
		goto err;
	}

	atomic_inc(&pcol->sbi->s_curr_pending);
	return 0;

err:
	_unlock_pcol_pages(pcol, ret, WRITE);
	pcol_free(pcol);
	kfree(pcol_copy);

	return ret;
}
/* writepage_strip is called either directly from writepage() or by the VFS from
* within write_cache_pages(), to add one more page to be written to storage.
* It will try to collect as many contiguous pages as possible. If a
* discontinuity is encountered or it runs out of resources it will submit the
* previous segment and will start a new collection.
* Eventually caller must submit the last segment if present.
*/
static int writepage_strip(struct page *page,
			   struct writeback_control *wbc_unused, void *data)
{
	struct page_collect *pcol = data;
	struct inode *inode = pcol->inode;
	struct exofs_i_info *oi = exofs_i(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
	size_t len;
	int ret;

	BUG_ON(!PageLocked(page));

	/* The on-OSD object must exist before it can be written to */
	ret = wait_obj_created(oi);
	if (unlikely(ret))
		goto fail;

	if (page->index < end_index)
		/* in this case, the page is within the limits of the file */
		len = PAGE_CACHE_SIZE;
	else {
		len = i_size & ~PAGE_CACHE_MASK;

		if (page->index > end_index || !len) {
			/* in this case, the page is outside the limits
			 * (truncate in progress)
			 */
			ret = write_exec(pcol);
			if (unlikely(ret))
				goto fail;
			if (PageError(page))
				ClearPageError(page);
			unlock_page(page);
			EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) "
				     "outside the limits\n",
				     inode->i_ino, page->index);
			return 0;
		}
	}

try_again:

	if (unlikely(pcol->pg_first == -1)) {
		pcol->pg_first = page->index;
	} else if (unlikely((pcol->pg_first + pcol->nr_pages) !=
		   page->index)) {
		/* Discontinuity detected, split the request */
		ret = write_exec(pcol);
		if (unlikely(ret))
			goto fail;

		EXOFS_DBGMSG("writepage_strip(0x%lx, 0x%lx) Discontinuity\n",
			     inode->i_ino, page->index);
		goto try_again;
	}

	if (!pcol->pages) {
		ret = pcol_try_alloc(pcol);
		if (unlikely(ret))
			goto fail;
	}

	EXOFS_DBGMSG2("    writepage_strip(0x%lx, 0x%lx) len=0x%zx\n",
		     inode->i_ino, page->index, len);

	ret = pcol_add_page(pcol, page, len);
	if (unlikely(ret)) {
		EXOFS_DBGMSG2("Failed pcol_add_page "
			     "nr_pages=%u total_length=0x%lx\n",
			     pcol->nr_pages, pcol->length);

		/* split the request, next loop will start again */
		ret = write_exec(pcol);
		if (unlikely(ret)) {
			EXOFS_DBGMSG("write_exec failed => %d", ret);
			goto fail;
		}

		goto try_again;
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);

	return 0;

fail:
	EXOFS_DBGMSG("Error: writepage_strip(0x%lx, 0x%lx)=>%d\n",
		     inode->i_ino, page->index, ret);
	/* Flag the whole mapping so fsync/close reports the IO error */
	set_bit(AS_EIO, &page->mapping->flags);
	unlock_page(page);
	return ret;
}
/*
 * ->writepages: walk the dirty pages of @mapping via write_cache_pages(),
 * collecting contiguous runs into a page_collect and submitting them with
 * write_exec(). For WB_SYNC_ALL the trailing partial collection is also
 * flushed; otherwise its pages are re-dirtied to join the next writeout.
 */
static int exofs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct page_collect pcol;
	long start, end, expected_pages;
	int ret;

	start = wbc->range_start >> PAGE_CACHE_SHIFT;
	end = (wbc->range_end == LLONG_MAX) ?
			start + mapping->nrpages :
			wbc->range_end >> PAGE_CACHE_SHIFT;

	if (start || end)
		expected_pages = end - start + 1;
	else
		expected_pages = mapping->nrpages;

	/* Size hint for the page array; never allocate fewer than 32 slots. */
	if (expected_pages < 32L)
		expected_pages = 32L;

	EXOFS_DBGMSG2("inode(0x%lx) wbc->start=0x%llx wbc->end=0x%llx "
		      "nrpages=%lu start=0x%lx end=0x%lx expected_pages=%ld\n",
		      mapping->host->i_ino, wbc->range_start, wbc->range_end,
		      mapping->nrpages, start, end, expected_pages);

	_pcol_init(&pcol, expected_pages, mapping->host);

	ret = write_cache_pages(mapping, wbc, writepage_strip, &pcol);
	if (unlikely(ret)) {
		EXOFS_ERR("write_cache_pages => %d\n", ret);
		return ret;
	}

	ret = write_exec(&pcol);
	if (unlikely(ret))
		return ret;

	if (wbc->sync_mode == WB_SYNC_ALL) {
		return write_exec(&pcol); /* pump the last reminder */
	} else if (pcol.nr_pages) {
		/* not SYNC let the reminder join the next writeout */
		unsigned i;

		for (i = 0; i < pcol.nr_pages; i++) {
			struct page *page = pcol.pages[i];

			end_page_writeback(page);
			set_page_dirty(page);
			unlock_page(page);
		}
	}
	return 0;
}
/*
static int exofs_writepage(struct page *page, struct writeback_control *wbc)
{
struct page_collect pcol;
int ret;
_pcol_init(&pcol, 1, page->mapping->host);
ret = writepage_strip(page, NULL, &pcol);
if (ret) {
EXOFS_ERR("exofs_writepage => %d\n", ret);
return ret;
}
return write_exec(&pcol);
}
*/
/*
 * Undo a failed write: if the attempted write extended past the current
 * i_size, drop the page-cache pages beyond it. Caller holds i_mutex, so
 * reading inode->i_size directly is safe.
 */
static void _write_failed(struct inode *inode, loff_t to)
{
	loff_t isize = inode->i_size;

	if (to <= isize)
		return;

	truncate_pagecache(inode, to, isize);
}
/*
 * ->write_begin worker. If *pagep is NULL, let simple_write_begin() grab
 * and lock the page. For a partial-page write of a not-uptodate page, do
 * read-modify-write: read the existing on-OSD data first, or just zero the
 * page when it lies entirely beyond i_size. On error, roll back any size
 * extension via _write_failed().
 */
int exofs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	int ret = 0;
	struct page *page;

	page = *pagep;
	if (page == NULL) {
		ret = simple_write_begin(file, mapping, pos, len, flags, pagep,
					 fsdata);
		if (ret) {
			EXOFS_DBGMSG("simple_write_begin failed\n");
			goto out;
		}

		page = *pagep;
	}

	 /* read modify write */
	if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) {
		loff_t i_size = i_size_read(mapping->host);
		pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
		size_t rlen;

		/* How many bytes of this page hold valid file data? */
		if (page->index < end_index)
			rlen = PAGE_CACHE_SIZE;
		else if (page->index == end_index)
			rlen = i_size & ~PAGE_CACHE_MASK;
		else
			rlen = 0;

		if (!rlen) {
			/* Page is fully past EOF: nothing to read, zero it. */
			clear_highpage(page);
			SetPageUptodate(page);
			goto out;
		}

		ret = _readpage(page, true);
		if (ret) {
			/*SetPageError was done by _readpage. Is it ok?*/
			unlock_page(page);
			EXOFS_DBGMSG("__readpage failed\n");
		}
	}
out:
	if (unlikely(ret))
		_write_failed(mapping->host, pos + len);

	return ret;
}
/*
 * a_ops->write_begin entry point: clear *pagep so exofs_write_begin()
 * allocates and locks the page itself.
 */
static int exofs_write_begin_export(struct file *file,
		struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	*pagep = NULL;

	return exofs_write_begin(file, mapping, pos, len, flags, pagep,
				 fsdata);
}
/*
 * ->write_end: delegate to simple_write_end(), rolling back a failed size
 * extension, and mark the inode dirty ourselves when i_size changed
 * (simple_write_end does not do that for us).
 */
static int exofs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	/* i_mutex is held here, per the comment in simple_write_end */
	loff_t old_i_size = inode->i_size;
	int ret = simple_write_end(file, mapping, pos, len, copied, page,
				   fsdata);

	if (unlikely(ret))
		_write_failed(inode, pos + len);

	/* TODO: drop once simple_write_end marks the inode dirty itself */
	if (old_i_size != inode->i_size)
		mark_inode_dirty(inode);

	return ret;
}
/*
 * ->releasepage: exofs never attaches private data to pages, so this should
 * not be reached; WARN and refuse the release (return 0).
 */
static int exofs_releasepage(struct page *page, gfp_t gfp)
{
	EXOFS_DBGMSG("page 0x%lx\n", page->index);
	WARN_ON(1);
	return 0;
}

/*
 * ->invalidatepage: same story — nothing per-page to tear down, so a call
 * here is unexpected; WARN for visibility.
 */
static void exofs_invalidatepage(struct page *page, unsigned long offset)
{
	EXOFS_DBGMSG("page 0x%lx offset 0x%lx\n", page->index, offset);
	WARN_ON(1);
}
/* Address-space operations for all exofs inodes (regular, dir, symlink). */
const struct address_space_operations exofs_aops = {
	.readpage	= exofs_readpage,
	.readpages	= exofs_readpages,
	.writepage	= NULL,		/* single-page writeout unused; see exofs_writepages */
	.writepages	= exofs_writepages,
	.write_begin	= exofs_write_begin_export,
	.write_end	= exofs_write_end,
	.releasepage	= exofs_releasepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.invalidatepage = exofs_invalidatepage,

	/* Not implemented Yet */
	.bmap		= NULL, /* TODO: use osd's OSD_ACT_READ_MAP */
	.direct_IO	= NULL, /* TODO: Should be trivial to do */

	/* With these NULL has special meaning or default is not exported */
	.get_xip_mem	= NULL,
	.migratepage	= NULL,
	.launder_page	= NULL,
	.is_partially_uptodate = NULL,
	.error_remove_page = NULL,
};
/******************************************************************************
* INODE OPERATIONS
*****************************************************************************/
/*
 * Test whether an inode is a fast symlink, i.e. a symlink whose target is
 * stored inline in i_data rather than in page-cache data.
 */
static inline int exofs_inode_is_fast_symlink(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);

	if (!S_ISLNK(inode->i_mode))
		return 0;

	/* Non-zero first i_data word means the target is inline. */
	return oi->i_data[0] != 0;
}
/*
 * Truncate the backing OSD object to @newsize, then (only on success)
 * shrink the in-core inode and page cache to match. Also bumps
 * mtime/ctime. Returns the ore_truncate() result.
 */
static int _do_truncate(struct inode *inode, loff_t newsize)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	ret = ore_truncate(&sbi->layout, &oi->oc, (u64)newsize);
	if (likely(!ret))
		truncate_setsize(inode, newsize);

	EXOFS_DBGMSG("(0x%lx) size=0x%llx ret=>%d\n",
		     inode->i_ino, newsize, ret);
	return ret;
}
/*
 * Set inode attributes - update size attribute on OSD if needed,
 * otherwise just call generic functions.
 */
int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	/* if we are about to modify an object, and it hasn't been
	 * created yet, wait
	 */
	error = wait_obj_created(exofs_i(inode));
	if (unlikely(error))
		return error;

	/* Standard permission/validity checks for the requested change. */
	error = inode_change_ok(inode, iattr);
	if (unlikely(error))
		return error;

	/* Size changes go to the OSD object before the in-core inode. */
	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		error = _do_truncate(inode, iattr->ia_size);
		if (unlikely(error))
			return error;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);
	return 0;
}
/* OSD attribute templates (len filled in at read time) for the optional
 * per-inode file-data and meta-data layout descriptors.
 */
static const struct osd_attr g_attr_inode_file_layout = ATTR_DEF(
	EXOFS_APAGE_FS_DATA,
	EXOFS_ATTR_INODE_FILE_LAYOUT,
	0);
static const struct osd_attr g_attr_inode_dir_layout = ATTR_DEF(
	EXOFS_APAGE_FS_DATA,
	EXOFS_ATTR_INODE_DIR_LAYOUT,
	0);
/*
* Read the Linux inode info from the OSD, and return it as is. In exofs the
* inode info is in an application specific page/attribute of the osd-object.
*/
static int exofs_get_inode(struct super_block *sb, struct exofs_i_info *oi,
struct exofs_fcb *inode)
{
struct exofs_sb_info *sbi = sb->s_fs_info;
struct osd_attr attrs[] = {
[0] = g_attr_inode_data,
[1] = g_attr_inode_file_layout,
[2] = g_attr_inode_dir_layout,
};
struct ore_io_state *ios;
struct exofs_on_disk_inode_layout *layout;
int ret;
ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
if (unlikely(ret)) {
EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
return ret;
}
attrs[1].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);
attrs[2].len = exofs_on_disk_inode_layout_size(sbi->oc.numdevs);
ios->in_attr = attrs;
ios->in_attr_len = ARRAY_SIZE(attrs);
ret = ore_read(ios);
if (unlikely(ret)) {
EXOFS_ERR("object(0x%llx) corrupted, return empty file=>%d\n",
_LLU(oi->one_comp.obj.id), ret);
memset(inode, 0, sizeof(*inode));
inode->i_mode = 0040000 | (0777 & ~022);
/* If object is lost on target we might as well enable it's
* delete.
*/
if ((ret == -ENOENT) || (ret == -EINVAL))
ret = 0;
goto out;
}
ret = extract_attr_from_ios(ios, &attrs[0]);
if (ret) {
EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
goto out;
}
WARN_ON(attrs[0].len != EXOFS_INO_ATTR_SIZE);
memcpy(inode, attrs[0].val_ptr, EXOFS_INO_ATTR_SIZE);
ret = extract_attr_from_ios(ios, &attrs[1]);
if (ret) {
EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
goto out;
}
if (attrs[1].len) {
layout = attrs[1].val_ptr;
if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
EXOFS_ERR("%s: unsupported files layout %d\n",
__func__, layout->gen_func);
ret = -ENOTSUPP;
goto out;
}
}
ret = extract_attr_from_ios(ios, &attrs[2]);
if (ret) {
EXOFS_ERR("%s: extract_attr of inode_data failed\n", __func__);
goto out;
}
if (attrs[2].len) {
layout = attrs[2].val_ptr;
if (layout->gen_func != cpu_to_le16(LAYOUT_MOVING_WINDOW)) {
EXOFS_ERR("%s: unsupported meta-data layout %d\n",
__func__, layout->gen_func);
ret = -ENOTSUPP;
goto out;
}
}
out:
ore_put_io_state(ios);
return ret;
}
/* Initialize the in-memory-only parts of a fresh exofs_i_info. */
static void __oi_init(struct exofs_i_info *oi)
{
	oi->i_flags = 0;
	init_waitqueue_head(&oi->i_wq);
}
/*
 * Fill in an inode read from the OSD and set it up for use
 */
struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
{
	struct exofs_i_info *oi;
	struct exofs_fcb fcb;
	struct inode *inode;
	int ret;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		/* Already in cache and fully set up. */
		return inode;
	oi = exofs_i(inode);
	__oi_init(oi);
	exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
			 exofs_oi_objno(oi));

	/* read the inode from the osd */
	ret = exofs_get_inode(sb, oi, &fcb);
	if (ret)
		goto bad_inode;

	/* Object exists on the OSD, no pending async-create to wait for. */
	set_obj_created(oi);

	/* copy stuff from on-disk struct to in-memory struct */
	inode->i_mode = le16_to_cpu(fcb.i_mode);
	inode->i_uid = le32_to_cpu(fcb.i_uid);
	inode->i_gid = le32_to_cpu(fcb.i_gid);
	set_nlink(inode, le16_to_cpu(fcb.i_links_count));
	inode->i_ctime.tv_sec = (signed)le32_to_cpu(fcb.i_ctime);
	inode->i_atime.tv_sec = (signed)le32_to_cpu(fcb.i_atime);
	inode->i_mtime.tv_sec = (signed)le32_to_cpu(fcb.i_mtime);
	inode->i_ctime.tv_nsec =
		inode->i_atime.tv_nsec = inode->i_mtime.tv_nsec = 0;
	oi->i_commit_size = le64_to_cpu(fcb.i_size);
	i_size_write(inode, oi->i_commit_size);
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_generation = le32_to_cpu(fcb.i_generation);

	oi->i_dir_start_lookup = 0;

	/* Zero mode and link count => deleted on disk; refuse to revive. */
	if ((inode->i_nlink == 0) && (inode->i_mode == 0)) {
		ret = -ESTALE;
		goto bad_inode;
	}

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* Device numbers: old encoding in i_data[0], else new in [1]. */
		if (fcb.i_data[0])
			inode->i_rdev =
				old_decode_dev(le32_to_cpu(fcb.i_data[0]));
		else
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(fcb.i_data[1]));
	} else {
		memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
	}

	inode->i_mapping->backing_dev_info = sb->s_bdi;
	/* Wire up ops based on file type. */
	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &exofs_file_inode_operations;
		inode->i_fop = &exofs_file_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &exofs_dir_inode_operations;
		inode->i_fop = &exofs_dir_operations;
		inode->i_mapping->a_ops = &exofs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		if (exofs_inode_is_fast_symlink(inode))
			inode->i_op = &exofs_fast_symlink_inode_operations;
		else {
			inode->i_op = &exofs_symlink_inode_operations;
			inode->i_mapping->a_ops = &exofs_aops;
		}
	} else {
		inode->i_op = &exofs_special_inode_operations;
		if (fcb.i_data[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(fcb.i_data[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(fcb.i_data[1])));
	}

	unlock_new_inode(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	return ERR_PTR(ret);
}
/*
 * Sleep until the async object creation for @oi completes (create_done()
 * sets the flag and wakes i_wq). Returns -EIO if the inode went bad while
 * we waited, 0 otherwise.
 */
int __exofs_wait_obj_created(struct exofs_i_info *oi)
{
	if (!obj_created(oi)) {
		EXOFS_DBGMSG("!obj_created\n");
		/* If it's not created yet, a create must be in flight. */
		BUG_ON(!obj_2bcreated(oi));
		wait_event(oi->i_wq, obj_created(oi));
		EXOFS_DBGMSG("wait_event done\n");
	}
	return unlikely(is_bad_inode(&oi->vfs_inode)) ? -EIO : 0;
}
/*
 * Callback function from exofs_new_inode(). The important thing is that we
 * set the obj_created flag so that other methods know that the object exists on
 * the OSD.
 */
static void create_done(struct ore_io_state *ios, void *p)
{
	struct inode *inode = p;
	struct exofs_i_info *oi = exofs_i(inode);
	struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
	int ret;

	ret = ore_check_io(ios, NULL);
	ore_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);

	if (unlikely(ret)) {
		EXOFS_ERR("object=0x%llx creation failed in pid=0x%llx",
			  _LLU(exofs_oi_objno(oi)),
			  _LLU(oi->one_comp.obj.partition));
		/*TODO: When FS is corrupted creation can fail, object already
		 * exist. Get rid of this asynchronous creation, if exist
		 * increment the obj counter and try the next object. Until we
		 * succeed. All these dangling objects will be made into lost
		 * files by chkfs.exofs
		 */
	}

	/* Flag is set even on failure so waiters don't hang forever. */
	set_obj_created(oi);

	wake_up(&oi->i_wq);
}
/*
 * Set up a new inode and create an object for it on the OSD
 */
struct inode *exofs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct inode *inode;
	struct exofs_i_info *oi;
	struct ore_io_state *ios;
	int ret;

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	oi = exofs_i(inode);
	__oi_init(oi);

	/* Mark creation in-flight; create_done() flips it to "created". */
	set_obj_2bcreated(oi);

	inode->i_mapping->backing_dev_info = sb->s_bdi;
	inode_init_owner(inode, dir, mode);
	inode->i_ino = sbi->s_nextid++;
	inode->i_blkbits = EXOFS_BLKSHIFT;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
	oi->i_commit_size = inode->i_size = 0;
	spin_lock(&sbi->s_next_gen_lock);
	inode->i_generation = sbi->s_next_generation++;
	spin_unlock(&sbi->s_next_gen_lock);
	insert_inode_hash(inode);

	exofs_init_comps(&oi->oc, &oi->one_comp, sb->s_fs_info,
			 exofs_oi_objno(oi));
	exofs_sbi_write_stats(sbi); /* Make sure new sbi->s_nextid is on disk */

	mark_inode_dirty(inode);

	/* NOTE(review): the two error paths below return ERR_PTR without
	 * iput() on the already-hashed inode — looks like a leak; confirm
	 * against upstream exofs history before changing.
	 */
	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("exofs_new_inode: ore_get_io_state failed\n");
		return ERR_PTR(ret);
	}

	ios->done = create_done;
	ios->private = inode;

	/* Fire off the asynchronous OSD object creation. */
	ret = ore_create(ios);
	if (ret) {
		ore_put_io_state(ios);
		return ERR_PTR(ret);
	}
	atomic_inc(&sbi->s_curr_pending);

	return inode;
}
/*
 * struct to pass two arguments to update_inode's callback
 */
struct updatei_args {
	struct exofs_sb_info	*sbi;	/* for the s_curr_pending accounting */
	struct exofs_fcb	fcb;	/* on-disk inode image being written */
};
/*
 * Callback function from exofs_update_inode().
 * Releases the io-state, drops the pending-I/O count and frees the
 * heap-allocated argument bundle (see exofs_update_inode's async path).
 */
static void updatei_done(struct ore_io_state *ios, void *p)
{
	struct updatei_args *args = p;

	ore_put_io_state(ios);

	atomic_dec(&args->sbi->s_curr_pending);

	kfree(args);
}
/*
 * Write the inode to the OSD. Just fill up the struct, and set the attribute
 * synchronously or asynchronously depending on the do_sync flag.
 */
static int exofs_update_inode(struct inode *inode, int do_sync)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	struct osd_attr attr;
	struct exofs_fcb *fcb;
	struct updatei_args *args;
	int ret;

	/* fcb lives in args so it survives until updatei_done() in the
	 * asynchronous case.
	 */
	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args) {
		EXOFS_DBGMSG("Failed kzalloc of args\n");
		return -ENOMEM;
	}

	fcb = &args->fcb;

	/* Serialize the in-core inode into the on-disk (little-endian) form. */
	fcb->i_mode = cpu_to_le16(inode->i_mode);
	fcb->i_uid = cpu_to_le32(inode->i_uid);
	fcb->i_gid = cpu_to_le32(inode->i_gid);
	fcb->i_links_count = cpu_to_le16(inode->i_nlink);
	fcb->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec);
	fcb->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
	fcb->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec);
	oi->i_commit_size = i_size_read(inode);
	fcb->i_size = cpu_to_le64(oi->i_commit_size);
	fcb->i_generation = cpu_to_le32(inode->i_generation);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		/* Device number: old encoding in i_data[0], new in i_data[1]. */
		if (old_valid_dev(inode->i_rdev)) {
			fcb->i_data[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			fcb->i_data[1] = 0;
		} else {
			fcb->i_data[0] = 0;
			fcb->i_data[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			fcb->i_data[2] = 0;
		}
	} else
		memcpy(fcb->i_data, oi->i_data, sizeof(fcb->i_data));

	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed.\n", __func__);
		goto free_args;
	}

	attr = g_attr_inode_data;
	attr.val_ptr = fcb;
	ios->out_attr_len = 1;
	ios->out_attr = &attr;

	/* NOTE(review): return value ignored — a failed/bad-inode wait falls
	 * through to the write anyway; confirm this is intentional.
	 */
	wait_obj_created(oi);

	if (!do_sync) {
		args->sbi = sbi;
		ios->done = updatei_done;
		ios->private = args;
	}

	ret = ore_write(ios);
	if (!do_sync && !ret) {
		atomic_inc(&sbi->s_curr_pending);
		goto out; /* deallocation in updatei_done */
	}

	ore_put_io_state(ios);

free_args:
	kfree(args);
out:
	EXOFS_DBGMSG("(0x%lx) do_sync=%d ret=>%d\n",
		     inode->i_ino, do_sync, ret);
	return ret;
}
/* ->write_inode: currently always synchronous regardless of @wbc. */
int exofs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	/* FIXME: fix fsync and use wbc->sync_mode == WB_SYNC_ALL */
	return exofs_update_inode(inode, 1);
}
/*
 * Callback function from exofs_delete_inode() - don't have much cleaning up to
 * do.
 */
static void delete_done(struct ore_io_state *ios, void *p)
{
	struct exofs_sb_info *sbi = p;

	ore_put_io_state(ios);

	atomic_dec(&sbi->s_curr_pending);
}
/*
 * Called when the refcount of an inode reaches zero. We remove the object
 * from the OSD here. We make sure the object was created before we try and
 * delete it.
 */
void exofs_evict_inode(struct inode *inode)
{
	struct exofs_i_info *oi = exofs_i(inode);
	struct super_block *sb = inode->i_sb;
	struct exofs_sb_info *sbi = sb->s_fs_info;
	struct ore_io_state *ios;
	int ret;

	truncate_inode_pages(&inode->i_data, 0);

	/* TODO: should do better here */
	if (inode->i_nlink || is_bad_inode(inode))
		/* Still linked (or bad): just drop the in-core inode. */
		goto no_delete;

	inode->i_size = 0;
	end_writeback(inode);

	/* if we are deleting an obj that hasn't been created yet, wait.
	 * This also makes sure that create_done cannot be called with an
	 * already evicted inode.
	 */
	wait_obj_created(oi);
	/* ignore the error, attempt a remove anyway */

	/* Now Remove the OSD objects */
	ret = ore_get_io_state(&sbi->layout, &oi->oc, &ios);
	if (unlikely(ret)) {
		EXOFS_ERR("%s: ore_get_io_state failed\n", __func__);
		return;
	}

	ios->done = delete_done;
	ios->private = sbi;

	/* Asynchronous removal; delete_done() drops the pending count. */
	ret = ore_remove(ios);
	if (ret) {
		EXOFS_ERR("%s: ore_remove failed\n", __func__);
		ore_put_io_state(ios);
		return;
	}
	atomic_inc(&sbi->s_curr_pending);

	return;

no_delete:
	end_writeback(inode);
}
| gpl-2.0 |
OESF/Linaro-Android_LinaroSprint2011Q1 | arch/arm/mach-clps711x/edb7211-mm.c | 4832 | 2679 | /*
* linux/arch/arm/mach-clps711x/mm.c
*
* Extra MM routines for the EDB7211 board
*
* Copyright (C) 2000, 2001 Blue Mug, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <mach/hardware.h>
#include <asm/page.h>
#include <asm/sizes.h>
#include <asm/mach/map.h>
extern void clps711x_map_io(void);
/*
* The on-chip registers are given a size of 1MB so that a section can
* be used to map them; this saves a page table. This is the place to
* add mappings for ROM, expansion memory, PCMCIA, etc. (if static
* mappings are chosen for those areas).
*
* Here is a physical memory map (to be fleshed out later):
*
* Physical Address Size Description
* ----------------- ----- ---------------------------------
* c0000000-c001ffff 128KB reserved for video RAM [1]
* c0020000-c0023fff 16KB parameters (see Documentation/arm/Setup)
* c0024000-c0027fff 16KB swapper_pg_dir (task 0 page directory)
* c0028000-... kernel image (TEXTADDR)
*
* [1] Unused pages should be given back to the VM; they are not yet.
* The parameter block should also be released (not sure if this
* happens).
*/
/* Static virtual->physical I/O mappings for the EDB7211 board, installed
 * once at boot by edb7211_map_io(). Each entry maps one peripheral or
 * flash window as strongly-ordered device memory.
 */
static struct map_desc edb7211_io_desc[] __initdata = {
	{	/* memory-mapped extra keyboard row */
		.virtual 	= EP7211_VIRT_EXTKBD,
		.pfn		= __phys_to_pfn(EP7211_PHYS_EXTKBD),
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	}, {	/* and CS8900A Ethernet chip */
		.virtual	= EP7211_VIRT_CS8900A,
		.pfn		= __phys_to_pfn(EP7211_PHYS_CS8900A),
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	}, {	/* flash banks */
		.virtual	= EP7211_VIRT_FLASH1,
		.pfn		= __phys_to_pfn(EP7211_PHYS_FLASH1),
		.length		= SZ_8M,
		.type		= MT_DEVICE,
	}, {
		.virtual	= EP7211_VIRT_FLASH2,
		.pfn		= __phys_to_pfn(EP7211_PHYS_FLASH2),
		.length		= SZ_8M,
		.type		= MT_DEVICE,
	}
};
/* Board map_io hook: install the common CLPS711x mappings first, then the
 * EDB7211-specific windows above.
 */
void __init edb7211_map_io(void)
{
	clps711x_map_io();
	iotable_init(edb7211_io_desc, ARRAY_SIZE(edb7211_io_desc));
}
| gpl-2.0 |
zparallax/amplitude_kernel_aosp | sound/pci/ctxfi/ctmixer.c | 5600 | 30717 | /**
* Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
*
* This source file is released under GPL v2 license (no other versions).
* See the COPYING file included in the main directory of this source
* distribution for the license terms and conditions.
*
* @File ctmixer.c
*
* @Brief
* This file contains the implementation of alsa mixer device functions.
*
* @Author Liu Chun
* @Date May 28 2008
*
*/
#include "ctmixer.h"
#include "ctamixer.h"
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/asoundef.h>
#include <sound/pcm.h>
#include <sound/tlv.h>
/* Hardware summation (SUM) resources, one per output channel group. */
enum CT_SUM_CTL {
	SUM_IN_F,
	SUM_IN_R,
	SUM_IN_C,
	SUM_IN_S,
	SUM_IN_F_C,

	NUM_CT_SUMS
};

/* Hardware AMIXER resources; each ALSA mixer control maps onto one of
 * these (see get_amixer_index()).
 */
enum CT_AMIXER_CTL {
	/* volume control mixers */
	AMIXER_MASTER_F,
	AMIXER_MASTER_R,
	AMIXER_MASTER_C,
	AMIXER_MASTER_S,
	AMIXER_PCM_F,
	AMIXER_PCM_R,
	AMIXER_PCM_C,
	AMIXER_PCM_S,
	AMIXER_SPDIFI,
	AMIXER_LINEIN,
	AMIXER_MIC,
	AMIXER_SPDIFO,
	AMIXER_WAVE_F,
	AMIXER_WAVE_R,
	AMIXER_WAVE_C,
	AMIXER_WAVE_S,
	AMIXER_MASTER_F_C,
	AMIXER_PCM_F_C,
	AMIXER_SPDIFI_C,
	AMIXER_LINEIN_C,
	AMIXER_MIC_C,

	/* this should always be the last one */
	NUM_CT_AMIXERS
};

/* ALSA-visible mixer controls exposed by this driver. */
enum CTALSA_MIXER_CTL {
	/* volume control mixers */
	MIXER_MASTER_P,
	MIXER_PCM_P,
	MIXER_LINEIN_P,
	MIXER_MIC_P,
	MIXER_SPDIFI_P,
	MIXER_SPDIFO_P,
	MIXER_WAVEF_P,
	MIXER_WAVER_P,
	MIXER_WAVEC_P,
	MIXER_WAVES_P,
	MIXER_MASTER_C,
	MIXER_PCM_C,
	MIXER_LINEIN_C,
	MIXER_MIC_C,
	MIXER_SPDIFI_C,

	/* switch control mixers */
	MIXER_PCM_C_S,
	MIXER_LINEIN_C_S,
	MIXER_MIC_C_S,
	MIXER_SPDIFI_C_S,
	MIXER_SPDIFO_P_S,
	MIXER_WAVEF_P_S,
	MIXER_WAVER_P_S,
	MIXER_WAVEC_P_S,
	MIXER_WAVES_P_S,
	MIXER_DIGITAL_IO_S,
	MIXER_IEC958_MASK,
	MIXER_IEC958_DEFAULT,
	MIXER_IEC958_STREAM,

	/* this should always be the last one */
	NUM_CTALSA_MIXERS
};

/* Sub-ranges of CTALSA_MIXER_CTL used for iteration and for the
 * switch_state bit numbering (bit = type - SWH_MIXER_START).
 */
#define VOL_MIXER_START		MIXER_MASTER_P
#define VOL_MIXER_END		MIXER_SPDIFI_C
#define VOL_MIXER_NUM		(VOL_MIXER_END - VOL_MIXER_START + 1)
#define SWH_MIXER_START		MIXER_PCM_C_S
#define SWH_MIXER_END		MIXER_DIGITAL_IO_S
#define SWH_CAPTURE_START	MIXER_PCM_C_S
#define SWH_CAPTURE_END		MIXER_SPDIFI_C_S

/* Stereo: every AMIXER resource exists once per channel. */
#define CHN_NUM		2

/* Per-control init info: whether to create it (ctl) and its ALSA name. */
struct ct_kcontrol_init {
	unsigned char ctl;
	char *name;
};
/* Table describing every ALSA control this driver can create; entries with
 * .ctl == 0 (or left unlisted, e.g. the IEC958 ones) are created elsewhere
 * or conditionally.
 */
static struct ct_kcontrol_init
ct_kcontrol_init_table[NUM_CTALSA_MIXERS] = {
	[MIXER_MASTER_P] = {
		.ctl = 1,
		.name = "Master Playback Volume",
	},
	[MIXER_MASTER_C] = {
		.ctl = 1,
		.name = "Master Capture Volume",
	},
	[MIXER_PCM_P] = {
		.ctl = 1,
		.name = "PCM Playback Volume",
	},
	[MIXER_PCM_C] = {
		.ctl = 1,
		.name = "PCM Capture Volume",
	},
	[MIXER_LINEIN_P] = {
		.ctl = 1,
		.name = "Line Playback Volume",
	},
	[MIXER_LINEIN_C] = {
		.ctl = 1,
		.name = "Line Capture Volume",
	},
	[MIXER_MIC_P] = {
		.ctl = 1,
		.name = "Mic Playback Volume",
	},
	[MIXER_MIC_C] = {
		.ctl = 1,
		.name = "Mic Capture Volume",
	},
	[MIXER_SPDIFI_P] = {
		.ctl = 1,
		.name = "IEC958 Playback Volume",
	},
	[MIXER_SPDIFI_C] = {
		.ctl = 1,
		.name = "IEC958 Capture Volume",
	},
	[MIXER_SPDIFO_P] = {
		.ctl = 1,
		.name = "Digital Playback Volume",
	},
	[MIXER_WAVEF_P] = {
		.ctl = 1,
		.name = "Front Playback Volume",
	},
	[MIXER_WAVES_P] = {
		.ctl = 1,
		.name = "Side Playback Volume",
	},
	[MIXER_WAVEC_P] = {
		.ctl = 1,
		.name = "Center/LFE Playback Volume",
	},
	[MIXER_WAVER_P] = {
		.ctl = 1,
		.name = "Surround Playback Volume",
	},
	[MIXER_PCM_C_S] = {
		.ctl = 1,
		.name = "PCM Capture Switch",
	},
	[MIXER_LINEIN_C_S] = {
		.ctl = 1,
		.name = "Line Capture Switch",
	},
	[MIXER_MIC_C_S] = {
		.ctl = 1,
		.name = "Mic Capture Switch",
	},
	[MIXER_SPDIFI_C_S] = {
		.ctl = 1,
		.name = "IEC958 Capture Switch",
	},
	[MIXER_SPDIFO_P_S] = {
		.ctl = 1,
		.name = "Digital Playback Switch",
	},
	[MIXER_WAVEF_P_S] = {
		.ctl = 1,
		.name = "Front Playback Switch",
	},
	[MIXER_WAVES_P_S] = {
		.ctl = 1,
		.name = "Side Playback Switch",
	},
	[MIXER_WAVEC_P_S] = {
		.ctl = 1,
		.name = "Center/LFE Playback Switch",
	},
	[MIXER_WAVER_P_S] = {
		.ctl = 1,
		.name = "Surround Playback Switch",
	},
	[MIXER_DIGITAL_IO_S] = {
		.ctl = 0,
		.name = "Digit-IO Playback Switch",
	},
};

static void
ct_mixer_recording_select(struct ct_mixer *mixer, enum CT_AMIXER_CTL type);

static void
ct_mixer_recording_unselect(struct ct_mixer *mixer, enum CT_AMIXER_CTL type);

/* FIXME: this static looks like it would fail if more than one card was */
/* installed. */
/* [0] = Line capture switch kcontrol, [1] = Mic capture switch kcontrol;
 * cross-notified by do_line_mic_switch().
 */
static struct snd_kcontrol *kctls[2] = {NULL};
/* Map an ALSA control id to the hardware AMIXER it drives. Capture volume
 * and capture switch of the same source share one AMIXER. Controls with no
 * AMIXER (IEC958 status, digital-IO) map to NUM_CT_AMIXERS.
 */
static enum CT_AMIXER_CTL get_amixer_index(enum CTALSA_MIXER_CTL alsa_index)
{
	switch (alsa_index) {
	case MIXER_MASTER_P:	return AMIXER_MASTER_F;
	case MIXER_MASTER_C:	return AMIXER_MASTER_F_C;
	case MIXER_PCM_P:	return AMIXER_PCM_F;
	case MIXER_PCM_C:
	case MIXER_PCM_C_S:	return AMIXER_PCM_F_C;
	case MIXER_LINEIN_P:	return AMIXER_LINEIN;
	case MIXER_LINEIN_C:
	case MIXER_LINEIN_C_S:	return AMIXER_LINEIN_C;
	case MIXER_MIC_P:	return AMIXER_MIC;
	case MIXER_MIC_C:
	case MIXER_MIC_C_S:	return AMIXER_MIC_C;
	case MIXER_SPDIFI_P:	return AMIXER_SPDIFI;
	case MIXER_SPDIFI_C:
	case MIXER_SPDIFI_C_S:	return AMIXER_SPDIFI_C;
	case MIXER_SPDIFO_P:	return AMIXER_SPDIFO;
	case MIXER_WAVEF_P:	return AMIXER_WAVE_F;
	case MIXER_WAVES_P:	return AMIXER_WAVE_S;
	case MIXER_WAVEC_P:	return AMIXER_WAVE_C;
	case MIXER_WAVER_P:	return AMIXER_WAVE_R;
	default:		return NUM_CT_AMIXERS;
	}
}
/* For a playback/source AMIXER, return its recording (capture) counterpart,
 * or NUM_CT_AMIXERS when it has none.
 */
static enum CT_AMIXER_CTL get_recording_amixer(enum CT_AMIXER_CTL index)
{
	if (index == AMIXER_MASTER_F)
		return AMIXER_MASTER_F_C;
	if (index == AMIXER_PCM_F)
		return AMIXER_PCM_F_C;
	if (index == AMIXER_SPDIFI)
		return AMIXER_SPDIFI_C;
	if (index == AMIXER_LINEIN)
		return AMIXER_LINEIN_C;
	if (index == AMIXER_MIC)
		return AMIXER_MIC_C;
	return NUM_CT_AMIXERS;
}
/* Read the cached on/off state of a switch control from the bitfield. */
static unsigned char
get_switch_state(struct ct_mixer *mixer, enum CTALSA_MIXER_CTL type)
{
	unsigned int bit = 0x1 << (type - SWH_MIXER_START);

	return !!(mixer->switch_state & bit);
}
/* Record the on/off state of a switch control in the cached bitfield. */
static void
set_switch_state(struct ct_mixer *mixer,
		 enum CTALSA_MIXER_CTL type, unsigned char state)
{
	unsigned int bit = 0x1 << (type - SWH_MIXER_START);

	if (state)
		mixer->switch_state |= bit;
	else
		mixer->switch_state &= ~bit;
}
#if 0 /* not used */
/* Map integer value ranging from 0 to 65535 to 14-bit float value ranging
* from 2^-6 to (1+1023/1024) */
static unsigned int uint16_to_float14(unsigned int x)
{
unsigned int i;
if (x < 17)
return 0;
x *= 2031;
x /= 65535;
x += 16;
/* i <= 6 */
for (i = 0; !(x & 0x400); i++)
x <<= 1;
x = (((7 - i) & 0x7) << 10) | (x & 0x3ff);
return x;
}
static unsigned int float14_to_uint16(unsigned int x)
{
unsigned int e;
if (!x)
return x;
e = (x >> 10) & 0x7;
x &= 0x3ff;
x += 1024;
x >>= (7 - e);
x -= 16;
x *= 65535;
x /= 2031;
return x;
}
#endif /* not used */
/* ALSA value 0..VOL_MAX maps to -64.00dB..0dB in 0.25dB steps;
 * VOL_SCALE converts one ALSA step to the hardware scale unit.
 */
#define VOL_SCALE	0x1c
#define VOL_MAX		0x100
static const DECLARE_TLV_DB_SCALE(ct_vol_db_scale, -6400, 25, 1);
/* .info for volume controls: stereo integer, range 0..VOL_MAX. */
static int ct_alsa_mix_volume_info(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = VOL_MAX;

	return 0;
}
/* .get for volume controls: read the hardware scale of both channels'
 * AMIXERs, convert to the 0..VOL_MAX ALSA range (clamped).
 */
static int ct_alsa_mix_volume_get(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);
	/* private_value holds the CTALSA_MIXER_CTL id. */
	enum CT_AMIXER_CTL type = get_amixer_index(kcontrol->private_value);
	struct amixer *amixer;
	int i, val;

	for (i = 0; i < 2; i++) {
		amixer = ((struct ct_mixer *)atc->mixer)->
						amixers[type*CHN_NUM+i];
		val = amixer->ops->get_scale(amixer) / VOL_SCALE;
		if (val < 0)
			val = 0;
		else if (val > VOL_MAX)
			val = VOL_MAX;
		ucontrol->value.integer.value[i] = val;
	}

	return 0;
}
/* .put for volume controls: clamp and convert each channel's new value to
 * the hardware scale, write it if changed, and mirror Master/PCM front
 * volume to the R/C/S AMIXERs so all channel groups track it.
 * Returns 1 if anything changed, 0 otherwise.
 */
static int ct_alsa_mix_volume_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);
	struct ct_mixer *mixer = atc->mixer;
	enum CT_AMIXER_CTL type = get_amixer_index(kcontrol->private_value);
	struct amixer *amixer;
	int i, j, val, oval, change = 0;

	for (i = 0; i < 2; i++) {
		val = ucontrol->value.integer.value[i];
		if (val < 0)
			val = 0;
		else if (val > VOL_MAX)
			val = VOL_MAX;
		val *= VOL_SCALE;
		amixer = mixer->amixers[type*CHN_NUM+i];
		oval = amixer->ops->get_scale(amixer);
		if (val != oval) {
			amixer->ops->set_scale(amixer, val);
			amixer->ops->commit_write(amixer);
			change = 1;
			/* Synchronize Master/PCM playback AMIXERs. */
			if (AMIXER_MASTER_F == type || AMIXER_PCM_F == type) {
				/* F/R/C/S are consecutive enum values. */
				for (j = 1; j < 4; j++) {
					amixer = mixer->
						amixers[(type+j)*CHN_NUM+i];
					amixer->ops->set_scale(amixer, val);
					amixer->ops->commit_write(amixer);
				}
			}
		}
	}

	return change;
}
/* Template for all volume kcontrols; .name and .private_value are filled
 * in per control from ct_kcontrol_init_table at registration time.
 */
static struct snd_kcontrol_new vol_ctl = {
	.access		= SNDRV_CTL_ELEM_ACCESS_READWRITE |
			  SNDRV_CTL_ELEM_ACCESS_TLV_READ,
	.iface		= SNDRV_CTL_ELEM_IFACE_MIXER,
	.info		= ct_alsa_mix_volume_info,
	.get		= ct_alsa_mix_volume_get,
	.put		= ct_alsa_mix_volume_put,
	.tlv		= { .p =  ct_vol_db_scale },
};
/* Enumerated analog-output selector: routing is delegated to the chip layer
 * via atc->output_switch_get/put.
 */
static int output_switch_info(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_info *info)
{
	static const char *const names[3] = {
	  "FP Headphones", "Headphones", "Speakers"
	};

	return snd_ctl_enum_info(info, 1, 3, names);
}

static int output_switch_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);

	ucontrol->value.enumerated.item[0] = atc->output_switch_get(atc);
	return 0;
}

static int output_switch_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);

	/* Only items 0..2 are defined. */
	if (ucontrol->value.enumerated.item[0] > 2)
		return -EINVAL;

	return atc->output_switch_put(atc, ucontrol->value.enumerated.item[0]);
}

static struct snd_kcontrol_new output_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Analog Output Playback Enum",
	.info = output_switch_info,
	.get = output_switch_get,
	.put = output_switch_put,
};
/* Enumerated capture-source selector (cards with a dedicated mic input);
 * routing is delegated via atc->mic_source_switch_get/put.
 */
static int mic_source_switch_info(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_info *info)
{
	static const char *const names[3] = {
	  "Mic", "FP Mic", "Aux"
	};

	return snd_ctl_enum_info(info, 1, 3, names);
}

static int mic_source_switch_get(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);

	ucontrol->value.enumerated.item[0] = atc->mic_source_switch_get(atc);
	return 0;
}

static int mic_source_switch_put(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);

	/* Only items 0..2 are defined. */
	if (ucontrol->value.enumerated.item[0] > 2)
		return -EINVAL;

	return atc->mic_source_switch_put(atc,
					ucontrol->value.enumerated.item[0]);
}

static struct snd_kcontrol_new mic_source_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "Mic Source Capture Enum",
	.info = mic_source_switch_info,
	.get = mic_source_switch_get,
	.put = mic_source_switch_put,
};
/* Line-in and mic share one input path on cards without a dedicated mic:
 * enabling one routes the input and force-clears the other switch, then
 * notifies user space about the changed sibling control via kctls[].
 */
static void
do_line_mic_switch(struct ct_atc *atc, enum CTALSA_MIXER_CTL type)
{

	if (MIXER_LINEIN_C_S == type) {
		atc->select_line_in(atc);
		set_switch_state(atc->mixer, MIXER_MIC_C_S, 0);
		snd_ctl_notify(atc->card, SNDRV_CTL_EVENT_MASK_VALUE,
							&kctls[1]->id);
	} else if (MIXER_MIC_C_S == type) {
		atc->select_mic_in(atc);
		set_switch_state(atc->mixer, MIXER_LINEIN_C_S, 0);
		snd_ctl_notify(atc->card, SNDRV_CTL_EVENT_MASK_VALUE,
							&kctls[0]->id);
	}
}
/* Toggle the dedicated digital-I/O mode: when on, route digital I/O and
 * mute analog line-in; when off, restore whichever analog source (line or
 * mic) is switched on and mute the S/PDIF paths.
 */
static void
do_digit_io_switch(struct ct_atc *atc, int state)
{
	struct ct_mixer *mixer = atc->mixer;

	if (state) {
		atc->select_digit_io(atc);
		atc->spdif_out_unmute(atc,
				get_switch_state(mixer, MIXER_SPDIFO_P_S));
		atc->spdif_in_unmute(atc, 1);
		atc->line_in_unmute(atc, 0);
		return;
	}

	if (get_switch_state(mixer, MIXER_LINEIN_C_S))
		atc->select_line_in(atc);
	else if (get_switch_state(mixer, MIXER_MIC_C_S))
		atc->select_mic_in(atc);

	atc->spdif_out_unmute(atc, 0);
	atc->spdif_in_unmute(atc, 0);
	atc->line_in_unmute(atc, 1);
	return;
}
/* Apply a switch-control change: update the capture-source selection inside
 * the mixer graph (for capture switches), then drive the matching
 * mute/route hook outside the mixer. Behavior for line/mic depends on
 * whether the card has a dedicated mic input.
 */
static void do_switch(struct ct_atc *atc, enum CTALSA_MIXER_CTL type, int state)
{
	struct ct_mixer *mixer = atc->mixer;
	struct capabilities cap = atc->capabilities(atc);

	/* Do changes in mixer. */
	if ((SWH_CAPTURE_START <= type) && (SWH_CAPTURE_END >= type)) {
		if (state) {
			ct_mixer_recording_select(mixer,
						  get_amixer_index(type));
		} else {
			ct_mixer_recording_unselect(mixer,
						    get_amixer_index(type));
		}
	}
	/* Do changes out of mixer. */
	if (!cap.dedicated_mic &&
	    (MIXER_LINEIN_C_S == type || MIXER_MIC_C_S == type)) {
		/* Shared input path: switching one source displaces the other. */
		if (state)
			do_line_mic_switch(atc, type);
		atc->line_in_unmute(atc, state);
	} else if (cap.dedicated_mic && (MIXER_LINEIN_C_S == type))
		atc->line_in_unmute(atc, state);
	else if (cap.dedicated_mic && (MIXER_MIC_C_S == type))
		atc->mic_unmute(atc, state);
	else if (MIXER_SPDIFI_C_S == type)
		atc->spdif_in_unmute(atc, state);
	else if (MIXER_WAVEF_P_S == type)
		atc->line_front_unmute(atc, state);
	else if (MIXER_WAVES_P_S == type)
		atc->line_surround_unmute(atc, state);
	else if (MIXER_WAVEC_P_S == type)
		atc->line_clfe_unmute(atc, state);
	else if (MIXER_WAVER_P_S == type)
		atc->line_rear_unmute(atc, state);
	else if (MIXER_SPDIFO_P_S == type)
		atc->spdif_out_unmute(atc, state);
	else if (MIXER_DIGITAL_IO_S == type)
		do_digit_io_switch(atc, state);

	return;
}
/* Info callback shared by all switch controls: a mono boolean (0/1). */
static int ct_alsa_mix_switch_info(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	uinfo->count = 1;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 1;
	uinfo->value.integer.step = 1;
	return 0;
}
/* Report the cached on/off state for the switch named in private_value. */
static int ct_alsa_mix_switch_get(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);
	enum CTALSA_MIXER_CTL type = kcontrol->private_value;

	ucontrol->value.integer.value[0] = get_switch_state(atc->mixer, type);
	return 0;
}
/*
 * Update a switch control.  Returns 1 if the value changed, 0 if it was
 * already in the requested state.
 */
static int ct_alsa_mix_switch_put(struct snd_kcontrol *kcontrol,
				  struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);
	struct ct_mixer *mixer = atc->mixer;
	enum CTALSA_MIXER_CTL type = kcontrol->private_value;
	int state;

	/*
	 * Normalize to 0/1: this is a boolean control, but userspace can
	 * hand us any long.  Without !!, a value such as 2 would be stored
	 * and then compared inconsistently against the 0/1 state returned
	 * by get_switch_state().
	 */
	state = !!ucontrol->value.integer.value[0];
	if (get_switch_state(mixer, type) == state)
		return 0;

	set_switch_state(mixer, type, state);
	/* Propagate the change into the hardware routing. */
	do_switch(atc, type, state);
	return 1;
}
/*
 * Template for all on/off switch controls; .name and .private_value are
 * filled in per-control by ct_mixer_kcontrols_create() before registration.
 */
static struct snd_kcontrol_new swh_ctl = {
	.access	= SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.iface	= SNDRV_CTL_ELEM_IFACE_MIXER,
	.info	= ct_alsa_mix_switch_info,
	.get	= ct_alsa_mix_switch_get,
	.put	= ct_alsa_mix_switch_put
};
/* Info callback for the IEC958 (S/PDIF channel status) controls. */
static int ct_spdif_info(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
	uinfo->count = 1;
	return 0;
}
/* Mask control: all 32 channel-status bits in the first 4 bytes are writable. */
static int ct_spdif_get_mask(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol)
{
	int i;

	for (i = 0; i < 4; i++)
		ucontrol->value.iec958.status[i] = 0xff;
	return 0;
}
/* Read the S/PDIF output status register, little-endian byte by byte. */
static int ct_spdif_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);
	unsigned int status;
	int i;

	atc->spdif_out_get_status(atc, &status);
	/* An all-zero register means never programmed: report the
	 * consumer-mode default instead. */
	if (status == 0)
		status = SNDRV_PCM_DEFAULT_CON_SPDIF;
	for (i = 0; i < 4; i++)
		ucontrol->value.iec958.status[i] = (status >> (8 * i)) & 0xff;
	return 0;
}
/*
 * Program a new S/PDIF channel-status word.  Returns 1 if the register
 * actually changed, 0 otherwise.
 */
static int ct_spdif_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct ct_atc *atc = snd_kcontrol_chip(kcontrol);
	unsigned int status = 0, old_status;
	int i;

	/* Reassemble the 32-bit word: status[0] is the least significant byte. */
	for (i = 3; i >= 0; i--)
		status = (status << 8) | ucontrol->value.iec958.status[i];

	atc->spdif_out_get_status(atc, &old_status);
	if (old_status == status)
		return 0;
	atc->spdif_out_set_status(atc, status);
	return 1;
}
/* Read-only mask: reports which channel-status bits userspace may set. */
static struct snd_kcontrol_new iec958_mask_ctl = {
	.access		= SNDRV_CTL_ELEM_ACCESS_READ,
	.iface		= SNDRV_CTL_ELEM_IFACE_PCM,
	.name		= SNDRV_CTL_NAME_IEC958("", PLAYBACK, MASK),
	.count		= 1,
	.info		= ct_spdif_info,
	.get		= ct_spdif_get_mask,
	.private_value	= MIXER_IEC958_MASK
};
/* Default (persistent) channel-status setting. */
static struct snd_kcontrol_new iec958_default_ctl = {
	.iface		= SNDRV_CTL_ELEM_IFACE_PCM,
	.name		= SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
	.count		= 1,
	.info		= ct_spdif_info,
	.get		= ct_spdif_get,
	.put		= ct_spdif_put,
	.private_value	= MIXER_IEC958_DEFAULT
};
/* Per-stream channel status; shares get/put handlers with the default. */
static struct snd_kcontrol_new iec958_ctl = {
	.access		= SNDRV_CTL_ELEM_ACCESS_READWRITE,
	.iface		= SNDRV_CTL_ELEM_IFACE_PCM,
	.name		= SNDRV_CTL_NAME_IEC958("", PLAYBACK, PCM_STREAM),
	.count		= 1,
	.info		= ct_spdif_info,
	.get		= ct_spdif_get,
	.put		= ct_spdif_put,
	.private_value	= MIXER_IEC958_STREAM
};
/* Number of IEC958 controls registered above. */
#define NUM_IEC958_CTL 3
/*
 * Instantiate one kcontrol from a template and register it with the card.
 * IEC958 controls are moved onto the dedicated IEC958 device number, and
 * the line-in/mic capture switches are remembered in kctls[] so that
 * do_line_mic_switch() can emit change notifications for them.
 */
static int
ct_mixer_kcontrol_new(struct ct_mixer *mixer, struct snd_kcontrol_new *new)
{
	struct snd_kcontrol *kctl;
	int err;

	kctl = snd_ctl_new1(new, mixer->atc);
	if (!kctl)
		return -ENOMEM;

	if (kctl->id.iface == SNDRV_CTL_ELEM_IFACE_PCM)
		kctl->id.device = IEC958;

	err = snd_ctl_add(mixer->atc->card, kctl);
	if (err)
		return err;

	if (new->private_value == MIXER_LINEIN_C_S)
		kctls[0] = kctl;
	else if (new->private_value == MIXER_MIC_C_S)
		kctls[1] = kctl;

	return 0;
}
/*
 * Register every ALSA kcontrol for this card and drive the hardware into
 * a known initial state (front output live, everything else muted).
 * vol_ctl/swh_ctl are file-scope templates patched with each control's
 * name/private_value before registration.
 */
static int ct_mixer_kcontrols_create(struct ct_mixer *mixer)
{
	enum CTALSA_MIXER_CTL type;
	struct ct_atc *atc = mixer->atc;
	struct capabilities cap = atc->capabilities(atc);
	int err;

	/* Create snd kcontrol instances on demand */
	/* Volume controls. */
	for (type = VOL_MIXER_START; type <= VOL_MIXER_END; type++) {
		if (ct_kcontrol_init_table[type].ctl) {
			vol_ctl.name = ct_kcontrol_init_table[type].name;
			vol_ctl.private_value = (unsigned long)type;
			err = ct_mixer_kcontrol_new(mixer, &vol_ctl);
			if (err)
				return err;
		}
	}
	/* Expose the digital-I/O switch only if the chip supports it. */
	ct_kcontrol_init_table[MIXER_DIGITAL_IO_S].ctl = cap.digit_io_switch;
	/* On/off switch controls. */
	for (type = SWH_MIXER_START; type <= SWH_MIXER_END; type++) {
		if (ct_kcontrol_init_table[type].ctl) {
			swh_ctl.name = ct_kcontrol_init_table[type].name;
			swh_ctl.private_value = (unsigned long)type;
			err = ct_mixer_kcontrol_new(mixer, &swh_ctl);
			if (err)
				return err;
		}
	}
	/* IEC958 (S/PDIF) controls. */
	err = ct_mixer_kcontrol_new(mixer, &iec958_mask_ctl);
	if (err)
		return err;
	err = ct_mixer_kcontrol_new(mixer, &iec958_default_ctl);
	if (err)
		return err;
	err = ct_mixer_kcontrol_new(mixer, &iec958_ctl);
	if (err)
		return err;
	/* Optional controls, gated on chip capabilities. */
	if (cap.output_switch) {
		err = ct_mixer_kcontrol_new(mixer, &output_ctl);
		if (err)
			return err;
	}
	if (cap.mic_source_switch) {
		err = ct_mixer_kcontrol_new(mixer, &mic_source_ctl);
		if (err)
			return err;
	}
	/* Initial hardware state: only the front output is unmuted. */
	atc->line_front_unmute(atc, 1);
	set_switch_state(mixer, MIXER_WAVEF_P_S, 1);
	atc->line_surround_unmute(atc, 0);
	set_switch_state(mixer, MIXER_WAVES_P_S, 0);
	atc->line_clfe_unmute(atc, 0);
	set_switch_state(mixer, MIXER_WAVEC_P_S, 0);
	atc->line_rear_unmute(atc, 0);
	set_switch_state(mixer, MIXER_WAVER_P_S, 0);
	atc->spdif_out_unmute(atc, 0);
	set_switch_state(mixer, MIXER_SPDIFO_P_S, 0);
	atc->line_in_unmute(atc, 0);
	if (cap.dedicated_mic)
		atc->mic_unmute(atc, 0);
	atc->spdif_in_unmute(atc, 0);
	set_switch_state(mixer, MIXER_PCM_C_S, 0);
	set_switch_state(mixer, MIXER_LINEIN_C_S, 0);
	set_switch_state(mixer, MIXER_SPDIFI_C_S, 0);
	return 0;
}
/* Route both channels of a capture source into the recording sum. */
static void
ct_mixer_recording_select(struct ct_mixer *mixer, enum CT_AMIXER_CTL type)
{
	int ch;

	for (ch = 0; ch < 2; ch++) {
		struct amixer *amix = mixer->amixers[type * CHN_NUM + ch];
		struct sum *rec_sum = mixer->sums[SUM_IN_F_C * CHN_NUM + ch];

		amix->ops->set_sum(amix, rec_sum);
		amix->ops->commit_write(amix);
	}
}
/* Detach both channels of a capture source from the recording sum. */
static void
ct_mixer_recording_unselect(struct ct_mixer *mixer, enum CT_AMIXER_CTL type)
{
	int ch;

	for (ch = 0; ch < 2; ch++) {
		struct amixer *amix = mixer->amixers[type * CHN_NUM + ch];

		amix->ops->set_sum(amix, NULL);
		amix->ops->commit_write(amix);
	}
}
/*
 * Allocate the hardware sum and amixer resources backing the mixer.
 * On failure, everything acquired so far is released (the released slots
 * are reset to NULL) and a negative error code is returned.
 */
static int ct_mixer_get_resources(struct ct_mixer *mixer)
{
	struct sum_mgr *sum_mgr;
	struct sum *sum;
	struct sum_desc sum_desc = {0};
	struct amixer_mgr *amixer_mgr;
	struct amixer *amixer;
	struct amixer_desc am_desc = {0};
	int err;
	int i;

	/* Allocate sum resources for mixer obj */
	sum_mgr = (struct sum_mgr *)mixer->atc->rsc_mgrs[SUM];
	sum_desc.msr = mixer->atc->msr;
	for (i = 0; i < (NUM_CT_SUMS * CHN_NUM); i++) {
		err = sum_mgr->get_sum(sum_mgr, &sum_desc, &sum);
		if (err) {
			/* pr_err() preferred over raw printk(KERN_ERR);
			 * keep user-visible strings unsplit for grep. */
			pr_err("ctxfi:Failed to get sum resources for front output!\n");
			break;
		}
		mixer->sums[i] = sum;
	}
	if (err)
		goto error1;

	/* Allocate amixer resources for mixer obj */
	amixer_mgr = (struct amixer_mgr *)mixer->atc->rsc_mgrs[AMIXER];
	am_desc.msr = mixer->atc->msr;
	for (i = 0; i < (NUM_CT_AMIXERS * CHN_NUM); i++) {
		err = amixer_mgr->get_amixer(amixer_mgr, &am_desc, &amixer);
		if (err) {
			pr_err("ctxfi:Failed to get amixer resources for mixer obj!\n");
			break;
		}
		mixer->amixers[i] = amixer;
	}
	if (err)
		goto error2;

	return 0;

error2:
	for (i = 0; i < (NUM_CT_AMIXERS * CHN_NUM); i++) {
		if (mixer->amixers[i]) {
			amixer = mixer->amixers[i];
			amixer_mgr->put_amixer(amixer_mgr, amixer);
			mixer->amixers[i] = NULL;
		}
	}
error1:
	for (i = 0; i < (NUM_CT_SUMS * CHN_NUM); i++) {
		if (mixer->sums[i]) {
			sum_mgr->put_sum(sum_mgr, mixer->sums[i]);
			mixer->sums[i] = NULL;
		}
	}
	return err;
}
/*
 * Allocate the mixer object and its two (zeroed) pointer arrays.
 * On success *rmixer is set and 0 is returned; on failure everything
 * allocated here is freed and -ENOMEM is returned.
 */
static int ct_mixer_get_mem(struct ct_mixer **rmixer)
{
	struct ct_mixer *mixer;
	int err;

	*rmixer = NULL;
	/* Allocate mem for mixer obj */
	mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
	if (!mixer)
		return -ENOMEM;

	/* kcalloc() checks the count*size multiplication for overflow,
	 * unlike open-coded kzalloc(n * size). */
	mixer->amixers = kcalloc(NUM_CT_AMIXERS * CHN_NUM, sizeof(void *),
				 GFP_KERNEL);
	if (!mixer->amixers) {
		err = -ENOMEM;
		goto error1;
	}
	mixer->sums = kcalloc(NUM_CT_SUMS * CHN_NUM, sizeof(void *),
			      GFP_KERNEL);
	if (!mixer->sums) {
		err = -ENOMEM;
		goto error2;
	}

	*rmixer = mixer;
	return 0;

error2:
	kfree(mixer->amixers);
error1:
	kfree(mixer);
	return err;
}
/*
 * Topology helpers: every connection below is made for a stereo pair,
 * left channel at index*CHN_NUM and right channel at index*CHN_NUM + 1,
 * exactly as the original open-coded sequences did.
 */

/* Drive an amixer pair from a sum pair (sum output -> amixer input). */
static void ct_mixer_link_sum_to_amix(struct ct_mixer *mixer, int amix, int sum_idx)
{
	int ch;

	for (ch = 0; ch < 2; ch++) {
		struct amixer *amix_d = mixer->amixers[amix * CHN_NUM + ch];
		struct sum *sum = mixer->sums[sum_idx * CHN_NUM + ch];

		amix_d->ops->setup(amix_d, &sum->rsc, INIT_VOL, NULL);
	}
}

/* Drive an amixer pair from another amixer pair. */
static void ct_mixer_link_amix_to_amix(struct ct_mixer *mixer, int dst, int src)
{
	int ch;

	for (ch = 0; ch < 2; ch++) {
		struct amixer *amix_d = mixer->amixers[dst * CHN_NUM + ch];
		struct amixer *amix_s = mixer->amixers[src * CHN_NUM + ch];

		amix_d->ops->setup(amix_d, &amix_s->rsc, INIT_VOL, NULL);
	}
}

/* Feed an amixer pair's output into a sum pair. */
static void ct_mixer_feed_amix_to_sum(struct ct_mixer *mixer, int amix, int sum_idx)
{
	int ch;

	for (ch = 0; ch < 2; ch++) {
		struct amixer *amix_d = mixer->amixers[amix * CHN_NUM + ch];
		struct sum *sum = mixer->sums[sum_idx * CHN_NUM + ch];

		amix_d->ops->setup(amix_d, NULL, INIT_VOL, sum);
	}
}

/* Wire up the static mixer graph.  Always succeeds (returns 0). */
static int ct_mixer_topology_build(struct ct_mixer *mixer)
{
	enum CT_AMIXER_CTL i, j;

	/* Build topology from destination to source */
	/* Set up Master mixer: masters are driven by the input sums */
	for (i = AMIXER_MASTER_F, j = SUM_IN_F;
	     i <= AMIXER_MASTER_S; i++, j++)
		ct_mixer_link_sum_to_amix(mixer, i, j);
	/* Set up Wave-out mixer: wave outputs driven by the masters */
	for (i = AMIXER_WAVE_F, j = AMIXER_MASTER_F;
	     i <= AMIXER_WAVE_S; i++, j++)
		ct_mixer_link_amix_to_amix(mixer, i, j);
	/* Set up S/PDIF-out mixer, fed from the front master */
	ct_mixer_link_amix_to_amix(mixer, AMIXER_SPDIFO, AMIXER_MASTER_F);
	/* Set up PCM-in mixers, feeding the input sums */
	for (i = AMIXER_PCM_F, j = SUM_IN_F; i <= AMIXER_PCM_S; i++, j++)
		ct_mixer_feed_amix_to_sum(mixer, i, j);
	/* Line-in, Mic-in and S/PDIF-in all feed the front input sum */
	ct_mixer_feed_amix_to_sum(mixer, AMIXER_LINEIN, SUM_IN_F);
	ct_mixer_feed_amix_to_sum(mixer, AMIXER_MIC, SUM_IN_F);
	ct_mixer_feed_amix_to_sum(mixer, AMIXER_SPDIFI, SUM_IN_F);
	/* Master recording mixer, driven by the recording sum */
	ct_mixer_link_sum_to_amix(mixer, AMIXER_MASTER_F_C, SUM_IN_F_C);
	/* PCM/Line/Mic/S-PDIF recording mixers feed the recording sum */
	ct_mixer_feed_amix_to_sum(mixer, AMIXER_PCM_F_C, SUM_IN_F_C);
	ct_mixer_feed_amix_to_sum(mixer, AMIXER_LINEIN_C, SUM_IN_F_C);
	ct_mixer_feed_amix_to_sum(mixer, AMIXER_MIC_C, SUM_IN_F_C);
	ct_mixer_feed_amix_to_sum(mixer, AMIXER_SPDIFI_C, SUM_IN_F_C);
	return 0;
}
/* Latch a new input resource on one amixer and push it to the hardware. */
static int mixer_set_input_port(struct amixer *amixer, struct rsc *rsc)
{
	amixer->ops->set_input(amixer, rsc);
	amixer->ops->commit_write(amixer);
	return 0;
}
/* Map an external mixer port id to the amixer that implements it. */
static enum CT_AMIXER_CTL port_to_amixer(enum MIXER_PORT_T type)
{
	switch (type) {
	case MIX_WAVE_FRONT:	return AMIXER_WAVE_F;
	case MIX_WAVE_SURROUND:	return AMIXER_WAVE_S;
	case MIX_WAVE_CENTLFE:	return AMIXER_WAVE_C;
	case MIX_WAVE_REAR:	return AMIXER_WAVE_R;
	case MIX_PCMO_FRONT:	return AMIXER_MASTER_F_C;
	case MIX_SPDIF_OUT:	return AMIXER_SPDIFO;
	case MIX_LINE_IN:	return AMIXER_LINEIN;
	case MIX_MIC_IN:	return AMIXER_MIC;
	case MIX_SPDIF_IN:	return AMIXER_SPDIFI;
	case MIX_PCMI_FRONT:	return AMIXER_PCM_F;
	case MIX_PCMI_SURROUND:	return AMIXER_PCM_S;
	case MIX_PCMI_CENTLFE:	return AMIXER_PCM_C;
	case MIX_PCMI_REAR:	return AMIXER_PCM_R;
	/* unknown ports fall back to amixer 0 */
	default:		return 0;
	}
}
static int mixer_get_output_ports(struct ct_mixer *mixer,
enum MIXER_PORT_T type,
struct rsc **rleft, struct rsc **rright)
{
enum CT_AMIXER_CTL amix = port_to_amixer(type);
if (NULL != rleft)
*rleft = &((struct amixer *)mixer->amixers[amix*CHN_NUM])->rsc;
if (NULL != rright)
*rright =
&((struct amixer *)mixer->amixers[amix*CHN_NUM+1])->rsc;
return 0;
}
/*
 * Wire @rsc into the left channel of a port's playback amixer and, when
 * the port has a recording counterpart, mirror it there as well.
 */
static int mixer_set_input_left(struct ct_mixer *mixer,
				enum MIXER_PORT_T type, struct rsc *rsc)
{
	enum CT_AMIXER_CTL amix = port_to_amixer(type);

	mixer_set_input_port(mixer->amixers[amix*CHN_NUM], rsc);
	amix = get_recording_amixer(amix);
	if (amix < NUM_CT_AMIXERS)
		mixer_set_input_port(mixer->amixers[amix*CHN_NUM], rsc);
	return 0;
}
/*
 * Wire @rsc into the right channel of a port's playback amixer and, when
 * the port has a recording counterpart, mirror it there as well.
 */
static int
mixer_set_input_right(struct ct_mixer *mixer,
		      enum MIXER_PORT_T type, struct rsc *rsc)
{
	enum CT_AMIXER_CTL amix = port_to_amixer(type);

	mixer_set_input_port(mixer->amixers[amix*CHN_NUM+1], rsc);
	amix = get_recording_amixer(amix);
	if (amix < NUM_CT_AMIXERS)
		mixer_set_input_port(mixer->amixers[amix*CHN_NUM+1], rsc);
	return 0;
}
#ifdef CONFIG_PM
/* Re-program the hardware after resume from the cached mixer state. */
static int mixer_resume(struct ct_mixer *mixer)
{
	int i, state;
	struct amixer *amixer;

	/* resume topology and volume gain. */
	for (i = 0; i < NUM_CT_AMIXERS*CHN_NUM; i++) {
		amixer = mixer->amixers[i];
		amixer->ops->commit_write(amixer);
	}
	/* resume switch state. */
	for (i = SWH_MIXER_START; i <= SWH_MIXER_END; i++) {
		state = get_switch_state(mixer, i);
		do_switch(mixer->atc, i, state);
	}
	return 0;
}
#endif
/*
 * Tear down a mixer object: return every sum/amixer to its resource
 * manager, then free the object's memory.  Safe on partially-populated
 * arrays (NULL entries are skipped).
 */
int ct_mixer_destroy(struct ct_mixer *mixer)
{
	struct sum_mgr *sum_mgr = (struct sum_mgr *)mixer->atc->rsc_mgrs[SUM];
	struct amixer_mgr *amixer_mgr =
		(struct amixer_mgr *)mixer->atc->rsc_mgrs[AMIXER];
	struct amixer *amixer;
	int i;

	/* Release amixer resources */
	for (i = 0; i < (NUM_CT_AMIXERS * CHN_NUM); i++) {
		amixer = mixer->amixers[i];
		if (amixer)
			amixer_mgr->put_amixer(amixer_mgr, amixer);
	}
	/* Release sum resources */
	for (i = 0; i < (NUM_CT_SUMS * CHN_NUM); i++) {
		if (mixer->sums[i])
			sum_mgr->put_sum(sum_mgr, (struct sum *)mixer->sums[i]);
	}
	/* Release mem assigned to mixer object */
	kfree(mixer->sums);
	kfree(mixer->amixers);
	kfree(mixer);
	return 0;
}
/*
 * Allocate and fully initialize a mixer object: memory, ops, chip
 * resources and internal topology.  On success *rmixer is set and 0 is
 * returned; on failure a negative error code is returned and nothing
 * is leaked (ct_mixer_destroy copes with the partially-built object,
 * since ct_mixer_get_resources NULLs released slots on its error path).
 */
int ct_mixer_create(struct ct_atc *atc, struct ct_mixer **rmixer)
{
	struct ct_mixer *mixer;
	int err;

	*rmixer = NULL;
	/* Allocate mem for mixer obj */
	err = ct_mixer_get_mem(&mixer);
	if (err)
		return err;

	mixer->switch_state = 0;
	mixer->atc = atc;
	/* Set operations */
	mixer->get_output_ports = mixer_get_output_ports;
	mixer->set_input_left = mixer_set_input_left;
	mixer->set_input_right = mixer_set_input_right;
#ifdef CONFIG_PM
	mixer->resume = mixer_resume;
#endif
	/* Allocate chip resources for mixer obj */
	err = ct_mixer_get_resources(mixer);
	if (err)
		goto error;

	/* Build internal mixer topology */
	ct_mixer_topology_build(mixer);

	*rmixer = mixer;
	return 0;

error:
	ct_mixer_destroy(mixer);
	return err;
}
/*
 * Register all mixer kcontrols for the card and set its mixer name.
 * Returns 0 on success or a negative error from control creation.
 */
int ct_alsa_mix_create(struct ct_atc *atc,
		       enum CTALSADEVS device,
		       const char *device_name)
{
	int err;

	/* Create snd kcontrol instances on demand */
	/* vol_ctl.device = swh_ctl.device = device; */ /* better w/ device 0 */
	err = ct_mixer_kcontrols_create((struct ct_mixer *)atc->mixer);
	if (err)
		return err;

	/* mixername is a fixed-size array: bound the copy instead of
	 * using an unbounded strcpy(). */
	snprintf(atc->card->mixername, sizeof(atc->card->mixername),
		 "%s", device_name);
	return 0;
}
| gpl-2.0 |
arrrghhh/android_kernel_samsung_mondrianwifi | drivers/net/wireless/zd1211rw/zd_rf.c | 12512 | 4050 | /* ZD1211 USB-WLAN driver for Linux
*
* Copyright (C) 2005-2007 Ulrich Kunitz <kune@deine-taler.de>
* Copyright (C) 2006-2007 Daniel Drake <dsd@gentoo.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/errno.h>
#include <linux/string.h>
#include "zd_def.h"
#include "zd_rf.h"
#include "zd_mac.h"
#include "zd_chip.h"
/*
 * RF type id -> printable name.  Ids are 4-bit, so the table has 16
 * entries; ids 0 and 1 have no known hardware.
 */
static const char * const rfs[] = {
	[0]		= "unknown RF0",
	[1]		= "unknown RF1",
	[UW2451_RF]	= "UW2451_RF",
	[UCHIP_RF]	= "UCHIP_RF",
	[AL2230_RF]	= "AL2230_RF",
	[AL7230B_RF]	= "AL7230B_RF",
	[THETA_RF]	= "THETA_RF",
	[AL2210_RF]	= "AL2210_RF",
	[MAXIM_NEW_RF]	= "MAXIM_NEW_RF",
	[UW2453_RF]	= "UW2453_RF",
	[AL2230S_RF]	= "AL2230S_RF",
	[RALINK_RF]	= "RALINK_RF",
	[INTERSIL_RF]	= "INTERSIL_RF",
	[RF2959_RF]	= "RF2959_RF",
	[MAXIM_NEW2_RF]	= "MAXIM_NEW2_RF",
	[PHILIPS_RF]	= "PHILIPS_RF",
};
/* Printable name for an RF type id; ids outside the 4-bit table map to
 * the "unknown RF0" entry. */
const char *zd_rf_name(u8 type)
{
	return rfs[(type & 0xf0) ? 0 : type];
}
/* Reset the RF descriptor to a clean default state. */
void zd_rf_init(struct zd_rf *rf)
{
	memset(rf, 0, sizeof(*rf));

	/* default to update channel integration, as almost all RF's do want
	 * this */
	rf->update_channel_int = 1;
}
/* Release RF-private state (via the optional clear hook) and wipe the
 * descriptor so stale function pointers cannot be reused. */
void zd_rf_clear(struct zd_rf *rf)
{
	if (rf->clear)
		rf->clear(rf);
	ZD_MEMCLEAR(rf, sizeof(*rf));
}
/*
 * Bind the RF-type-specific ops and initialize the RF hardware.
 * Must be called with the chip mutex held.  Returns 0 on success,
 * -ENODEV for unsupported RF types, or a chip/RF error code.
 */
int zd_rf_init_hw(struct zd_rf *rf, u8 type)
{
	int r = 0;
	int t;
	struct zd_chip *chip = zd_rf_to_chip(rf);

	ZD_ASSERT(mutex_is_locked(&chip->mutex));
	switch (type) {
	case RF2959_RF:
		r = zd_rf_init_rf2959(rf);
		break;
	case AL2230_RF:
	case AL2230S_RF:
		r = zd_rf_init_al2230(rf);
		break;
	case AL7230B_RF:
		r = zd_rf_init_al7230b(rf);
		break;
	case MAXIM_NEW_RF:
	case UW2453_RF:
		r = zd_rf_init_uw2453(rf);
		break;
	default:
		dev_err(zd_chip_dev(chip),
			"RF %s %#x is not supported\n", zd_rf_name(type), type);
		rf->type = 0;
		return -ENODEV;
	}

	if (r)
		return r;

	rf->type = type;

	/* init_hw must run with the PHY registers locked. */
	r = zd_chip_lock_phy_regs(chip);
	if (r)
		return r;
	t = rf->init_hw(rf);
	r = zd_chip_unlock_phy_regs(chip);
	if (t)
		r = t;	/* an init_hw failure takes precedence */
	return r;
}
/* Print the RF's name into @buffer; returns the number of chars written. */
int zd_rf_scnprint_id(struct zd_rf *rf, char *buffer, size_t size)
{
	return scnprintf(buffer, size, "%s", zd_rf_name(rf->type));
}
/*
 * Tune the RF to a 2.4 GHz channel.  The requested channel is cached in
 * rf->channel only when the RF callback reports success.  Must be called
 * with the chip mutex held.
 */
int zd_rf_set_channel(struct zd_rf *rf, u8 channel)
{
	int r;

	ZD_ASSERT(mutex_is_locked(&zd_rf_to_chip(rf)->mutex));
	if (channel < MIN_CHANNEL24 || channel > MAX_CHANNEL24)
		return -EINVAL;
	dev_dbg_f(zd_chip_dev(zd_rf_to_chip(rf)), "channel: %d\n", channel);
	r = rf->set_channel(rf, channel);
	if (r >= 0)
		rf->channel = channel;
	return r;
}
/* Enable the radio under the PHY register lock.  Must be called with the
 * chip mutex held. */
int zd_switch_radio_on(struct zd_rf *rf)
{
	int r, t;
	struct zd_chip *chip = zd_rf_to_chip(rf);

	ZD_ASSERT(mutex_is_locked(&chip->mutex));
	r = zd_chip_lock_phy_regs(chip);
	if (r)
		return r;
	t = rf->switch_radio_on(rf);
	r = zd_chip_unlock_phy_regs(chip);
	/* report the RF callback's error even when unlock succeeded */
	if (t)
		r = t;
	return r;
}
/* Disable the radio under the PHY register lock.  Must be called with the
 * chip mutex held. */
int zd_switch_radio_off(struct zd_rf *rf)
{
	int r, t;
	struct zd_chip *chip = zd_rf_to_chip(rf);

	/* TODO: move phy regs handling to zd_chip */
	ZD_ASSERT(mutex_is_locked(&chip->mutex));
	r = zd_chip_lock_phy_regs(chip);
	if (r)
		return r;
	t = rf->switch_radio_off(rf);
	r = zd_chip_unlock_phy_regs(chip);
	/* report the RF callback's error even when unlock succeeded */
	if (t)
		r = t;
	return r;
}
/* Apply the RF-specific 6M band-edge patch if the RF provides one;
 * RFs without a patch hook need no fixup. */
int zd_rf_patch_6m_band_edge(struct zd_rf *rf, u8 channel)
{
	return rf->patch_6m_band_edge ?
		rf->patch_6m_band_edge(rf, channel) : 0;
}
/* Generic 6M band-edge patch, used by RFs without a custom handler. */
int zd_rf_generic_patch_6m(struct zd_rf *rf, u8 channel)
{
	return zd_chip_generic_patch_6m_band(zd_rf_to_chip(rf), channel);
}
| gpl-2.0 |
NicholasPace/android_kernel_asus_Z00A | arch/ia64/lib/checksum.c | 13536 | 2604 | /*
* Network checksum routines
*
* Copyright (C) 1999, 2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*
* Most of the code coming from arch/alpha/lib/checksum.c
*
* This file contains network checksum routines that are better done
* in an architecture-specific manner due to speed..
*/
#include <linux/module.h>
#include <linux/string.h>
#include <asm/byteorder.h>
/*
 * Fold a 64-bit one's-complement sum down to 16 bits.  The first step
 * folds the high 32 bits onto the low 32; subsequent steps fold the
 * high halfword (plus any carry) until the value fits in 16 bits.
 */
static inline unsigned short
from64to16 (unsigned long x)
{
	x = (x & 0xffffffff) + (x >> 32);
	while (x >> 16)
		x = (x & 0xffff) + (x >> 16);
	return x;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented.
*/
/*
 * Folded, complemented 16-bit checksum of the TCP/UDP pseudo-header
 * (addresses, length, protocol) plus the partial payload checksum @sum.
 * The 64-bit accumulation cannot overflow, so one final fold suffices.
 */
__sum16
csum_tcpudp_magic (__be32 saddr, __be32 daddr, unsigned short len,
		   unsigned short proto, __wsum sum)
{
	return (__force __sum16)~from64to16(
		(__force u64)saddr + (__force u64)daddr +
		(__force u64)sum + ((len + proto) << 8));
}

EXPORT_SYMBOL(csum_tcpudp_magic);
/*
 * Like csum_tcpudp_magic() but returns the 32-bit partial sum without
 * the final 16-bit fold and complement.
 */
__wsum
csum_tcpudp_nofold (__be32 saddr, __be32 daddr, unsigned short len,
		    unsigned short proto, __wsum sum)
{
	unsigned long result;

	result = (__force u64)saddr + (__force u64)daddr +
		 (__force u64)sum + ((len + proto) << 8);

	/* Fold down to 32-bits so we don't lose in the typedef-less network stack. */
	/* 64 to 33 */
	result = (result & 0xffffffff) + (result >> 32);
	/* 33 to 32 */
	result = (result & 0xffffffff) + (result >> 32);
	return (__force __wsum)result;
}

EXPORT_SYMBOL(csum_tcpudp_nofold);
extern unsigned long do_csum (const unsigned char *, long);
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
/* Add the checksum of @buff/@len (via the assembly do_csum helper) into
 * the running 32-bit sum @sum; see the block comment above for the
 * length/alignment requirements. */
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
	u64 result = do_csum(buff, len);

	/* add in old sum, and carry.. */
	result += (__force u32)sum;
	/* 32+c bits -> 32 bits */
	result = (result & 0xffffffff) + (result >> 32);
	return (__force __wsum)result;
}

EXPORT_SYMBOL(csum_partial);
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
/* Complemented 16-bit checksum of an arbitrary buffer (ICMP & friends). */
__sum16 ip_compute_csum (const void *buff, int len)
{
	return (__force __sum16)~do_csum(buff,len);
}

EXPORT_SYMBOL(ip_compute_csum);
| gpl-2.0 |
yetu/linux-pfla02 | arch/arm/mach-omap2/cm33xx.c | 481 | 10444 | /*
* AM33XX CM functions
*
* Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/
* Vaibhav Hiremath <hvaibhav@ti.com>
*
* Reference taken from from OMAP4 cminst44xx.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include "clockdomain.h"
#include "cm.h"
#include "cm33xx.h"
#include "cm-regbits-34xx.h"
#include "cm-regbits-33xx.h"
#include "prm33xx.h"
/*
* CLKCTRL_IDLEST_*: possible values for the CM_*_CLKCTRL.IDLEST bitfield:
*
* 0x0 func: Module is fully functional, including OCP
* 0x1 trans: Module is performing transition: wakeup, or sleep, or sleep
* abortion
* 0x2 idle: Module is in Idle mode (only OCP part). It is functional if
* using separate functional clock
* 0x3 disabled: Module is disabled and cannot be accessed
*
*/
#define CLKCTRL_IDLEST_FUNCTIONAL 0x0
#define CLKCTRL_IDLEST_INTRANSITION 0x1
#define CLKCTRL_IDLEST_INTERFACE_IDLE 0x2
#define CLKCTRL_IDLEST_DISABLED 0x3
/* Private functions */
/* Read a register in a CM instance */
/* Read a register in a CM instance (offset @idx within instance @inst). */
static inline u32 am33xx_cm_read_reg(u16 inst, u16 idx)
{
	return readl_relaxed(cm_base + inst + idx);
}
/* Write into a register in a CM */
/* Write @val into a register in a CM instance. */
static inline void am33xx_cm_write_reg(u32 val, u16 inst, u16 idx)
{
	writel_relaxed(val, cm_base + inst + idx);
}
/* Read-modify-write a register in CM */
/* Read-modify-write a CM register: clear @mask, set @bits, return the
 * value written back. */
static inline u32 am33xx_cm_rmw_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx)
{
	u32 v = am33xx_cm_read_reg(inst, idx);

	v = (v & ~mask) | bits;
	am33xx_cm_write_reg(v, inst, idx);
	return v;
}
/* Set @bits in a CM register; returns the value written back. */
static inline u32 am33xx_cm_set_reg_bits(u32 bits, s16 inst, s16 idx)
{
	return am33xx_cm_rmw_reg_bits(bits, bits, inst, idx);
}
/* Clear @bits in a CM register; returns the value written back. */
static inline u32 am33xx_cm_clear_reg_bits(u32 bits, s16 inst, s16 idx)
{
	return am33xx_cm_rmw_reg_bits(bits, 0x0, inst, idx);
}
/* Read a CM register field selected by @mask, shifted down to bit 0. */
static inline u32 am33xx_cm_read_reg_bits(u16 inst, s16 idx, u32 mask)
{
	return (am33xx_cm_read_reg(inst, idx) & mask) >> __ffs(mask);
}
/**
* _clkctrl_idlest - read a CM_*_CLKCTRL register; mask & shift IDLEST bitfield
* @inst: CM instance register offset (*_INST macro)
* @cdoffs: Clockdomain register offset (*_CDOFFS macro)
* @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
*
* Return the IDLEST bitfield of a CM_*_CLKCTRL register, shifted down to
* bit 0.
*/
static u32 _clkctrl_idlest(u16 inst, s16 cdoffs, u16 clkctrl_offs)
{
	/* @cdoffs is unused but kept for interface symmetry with callers. */
	u32 v = am33xx_cm_read_reg(inst, clkctrl_offs);

	return (v & AM33XX_IDLEST_MASK) >> AM33XX_IDLEST_SHIFT;
}
/**
* _is_module_ready - can module registers be accessed without causing an abort?
* @inst: CM instance register offset (*_INST macro)
* @cdoffs: Clockdomain register offset (*_CDOFFS macro)
* @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
*
* Returns true if the module's CM_*_CLKCTRL.IDLEST bitfield is either
* *FUNCTIONAL or *INTERFACE_IDLE; false otherwise.
*/
static bool _is_module_ready(u16 inst, s16 cdoffs, u16 clkctrl_offs)
{
	u32 v = _clkctrl_idlest(inst, cdoffs, clkctrl_offs);

	/* The comparison already yields a bool; `? true : false` was
	 * redundant. */
	return v == CLKCTRL_IDLEST_FUNCTIONAL ||
	       v == CLKCTRL_IDLEST_INTERFACE_IDLE;
}
/**
* _clktrctrl_write - write @c to a CM_CLKSTCTRL.CLKTRCTRL register bitfield
* @c: CLKTRCTRL register bitfield (LSB = bit 0, i.e., unshifted)
* @inst: CM instance register offset (*_INST macro)
* @cdoffs: Clockdomain register offset (*_CDOFFS macro)
*
* @c must be the unshifted value for CLKTRCTRL - i.e., this function
* will handle the shift itself.
*/
static void _clktrctrl_write(u8 c, u16 inst, u16 cdoffs)
{
	u32 v = am33xx_cm_read_reg(inst, cdoffs);

	/* Replace the CLKTRCTRL field with the (unshifted) mode @c. */
	v = (v & ~AM33XX_CLKTRCTRL_MASK) | (c << AM33XX_CLKTRCTRL_SHIFT);
	am33xx_cm_write_reg(v, inst, cdoffs);
}
/* Public functions */
/**
* am33xx_cm_is_clkdm_in_hwsup - is a clockdomain in hwsup idle mode?
* @inst: CM instance register offset (*_INST macro)
* @cdoffs: Clockdomain register offset (*_CDOFFS macro)
*
* Returns true if the clockdomain referred to by (@inst, @cdoffs)
* is in hardware-supervised idle mode, or 0 otherwise.
*/
bool am33xx_cm_is_clkdm_in_hwsup(u16 inst, u16 cdoffs)
{
	u32 v;

	v = am33xx_cm_read_reg(inst, cdoffs);
	v &= AM33XX_CLKTRCTRL_MASK;
	v >>= AM33XX_CLKTRCTRL_SHIFT;

	/* The comparison already yields a bool; `? true : false` was
	 * redundant. */
	return v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO;
}
/**
* am33xx_cm_clkdm_enable_hwsup - put a clockdomain in hwsup-idle mode
* @inst: CM instance register offset (*_INST macro)
* @cdoffs: Clockdomain register offset (*_CDOFFS macro)
*
* Put a clockdomain referred to by (@inst, @cdoffs) into
* hardware-supervised idle mode. No return value.
*/
void am33xx_cm_clkdm_enable_hwsup(u16 inst, u16 cdoffs)
{
	/* see the kernel-doc above: switch CLKTRCTRL to hardware-supervised */
	_clktrctrl_write(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, inst, cdoffs);
}
/**
* am33xx_cm_clkdm_disable_hwsup - put a clockdomain in swsup-idle mode
* @inst: CM instance register offset (*_INST macro)
* @cdoffs: Clockdomain register offset (*_CDOFFS macro)
*
* Put a clockdomain referred to by (@inst, @cdoffs) into
* software-supervised idle mode, i.e., controlled manually by the
* Linux OMAP clockdomain code. No return value.
*/
void am33xx_cm_clkdm_disable_hwsup(u16 inst, u16 cdoffs)
{
	/* see the kernel-doc above: switch CLKTRCTRL to software-supervised */
	_clktrctrl_write(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, inst, cdoffs);
}
/**
* am33xx_cm_clkdm_force_sleep - try to put a clockdomain into idle
* @inst: CM instance register offset (*_INST macro)
* @cdoffs: Clockdomain register offset (*_CDOFFS macro)
*
* Put a clockdomain referred to by (@inst, @cdoffs) into idle
* No return value.
*/
void am33xx_cm_clkdm_force_sleep(u16 inst, u16 cdoffs)
{
	/* see the kernel-doc above: force the clockdomain into idle */
	_clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, inst, cdoffs);
}
/**
* am33xx_cm_clkdm_force_wakeup - try to take a clockdomain out of idle
* @inst: CM instance register offset (*_INST macro)
* @cdoffs: Clockdomain register offset (*_CDOFFS macro)
*
* Take a clockdomain referred to by (@inst, @cdoffs) out of idle,
* waking it up. No return value.
*/
void am33xx_cm_clkdm_force_wakeup(u16 inst, u16 cdoffs)
{
	/* see the kernel-doc above: force the clockdomain out of idle */
	_clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, inst, cdoffs);
}
/*
*
*/
/**
* am33xx_cm_wait_module_ready - wait for a module to be in 'func' state
* @inst: CM instance register offset (*_INST macro)
* @cdoffs: Clockdomain register offset (*_CDOFFS macro)
* @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
*
* Wait for the module IDLEST to be functional. If the idle state is in any
* the non functional state (trans, idle or disabled), module and thus the
* sysconfig cannot be accessed and will probably lead to an "imprecise
* external abort"
*/
int am33xx_cm_wait_module_ready(u16 inst, s16 cdoffs, u16 clkctrl_offs)
{
	int i = 0;

	/*
	 * Modules without a CLKCTRL register have no IDLEST to poll;
	 * bail out early instead of reading a bogus offset.  This matches
	 * the guard already present in am33xx_cm_wait_module_idle().
	 */
	if (!clkctrl_offs)
		return 0;

	omap_test_timeout(_is_module_ready(inst, cdoffs, clkctrl_offs),
			  MAX_MODULE_READY_TIME, i);

	return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
}
/**
 * am33xx_cm_wait_module_idle - wait for a module to be in 'disabled'
 * state
 * @inst: CM instance register offset (*_INST macro)
 * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
 * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
 *
 * Wait for the module IDLEST to become disabled. Some PRCM transitions,
 * like reset assertion or parent clock de-activation, must wait for the
 * module to be fully disabled first.
 *
 * Returns 0 on success (or when there is no CLKCTRL register to poll),
 * -EBUSY on timeout.
 */
int am33xx_cm_wait_module_idle(u16 inst, s16 cdoffs, u16 clkctrl_offs)
{
        int i = 0;

        /* Modules without a CLKCTRL register have nothing to wait for. */
        if (!clkctrl_offs)
                return 0;

        omap_test_timeout((_clkctrl_idlest(inst, cdoffs, clkctrl_offs) ==
                                CLKCTRL_IDLEST_DISABLED),
                          MAX_MODULE_READY_TIME, i);

        return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
}
/**
 * am33xx_cm_module_enable - Enable the modulemode inside CLKCTRL
 * @mode: Module mode (SW or HW)
 * @inst: CM instance register offset (*_INST macro)
 * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
 * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
 *
 * Read-modify-write the MODULEMODE field of the module's CLKCTRL
 * register, leaving all other bits untouched. No return value.
 */
void am33xx_cm_module_enable(u8 mode, u16 inst, s16 cdoffs, u16 clkctrl_offs)
{
        u32 val;

        val = am33xx_cm_read_reg(inst, clkctrl_offs);
        val = (val & ~AM33XX_MODULEMODE_MASK) |
              (mode << AM33XX_MODULEMODE_SHIFT);
        am33xx_cm_write_reg(val, inst, clkctrl_offs);
}
/**
 * am33xx_cm_module_disable - Disable the module inside CLKCTRL
 * @inst: CM instance register offset (*_INST macro)
 * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
 * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
 *
 * Clear the MODULEMODE field of the module's CLKCTRL register,
 * leaving all other bits untouched. No return value.
 */
void am33xx_cm_module_disable(u16 inst, s16 cdoffs, u16 clkctrl_offs)
{
        u32 val;

        val = am33xx_cm_read_reg(inst, clkctrl_offs);
        val &= ~AM33XX_MODULEMODE_MASK;
        am33xx_cm_write_reg(val, inst, clkctrl_offs);
}
/*
* Clockdomain low-level functions
*/
/* Force the given clockdomain to sleep via its CLKSTCTRL register. */
static int am33xx_clkdm_sleep(struct clockdomain *clkdm)
{
        am33xx_cm_clkdm_force_sleep(clkdm->cm_inst, clkdm->clkdm_offs);
        return 0;
}
/* Force the given clockdomain out of idle via its CLKSTCTRL register. */
static int am33xx_clkdm_wakeup(struct clockdomain *clkdm)
{
        am33xx_cm_clkdm_force_wakeup(clkdm->cm_inst, clkdm->clkdm_offs);
        return 0;
}
/* Put the clockdomain under hardware-supervised (automatic) idle control. */
static void am33xx_clkdm_allow_idle(struct clockdomain *clkdm)
{
        am33xx_cm_clkdm_enable_hwsup(clkdm->cm_inst, clkdm->clkdm_offs);
}
/* Take the clockdomain out of hardware-supervised idle control. */
static void am33xx_clkdm_deny_idle(struct clockdomain *clkdm)
{
        am33xx_cm_clkdm_disable_hwsup(clkdm->cm_inst, clkdm->clkdm_offs);
}
/*
 * Called when a clock inside the domain is enabled: wake the domain up
 * if it supports forced wakeup, otherwise nothing needs to be done.
 */
static int am33xx_clkdm_clk_enable(struct clockdomain *clkdm)
{
        if (!(clkdm->flags & CLKDM_CAN_FORCE_WAKEUP))
                return 0;

        return am33xx_clkdm_wakeup(clkdm);
}
/*
 * Called when the last active clock inside the domain is disabled.
 * If the domain is under hardware supervision, leave it alone (the HW
 * will idle it); otherwise force it to sleep when the domain supports
 * forced sleep. Always returns 0.
 */
static int am33xx_clkdm_clk_disable(struct clockdomain *clkdm)
{
        /* No redundant pre-initialization: the value is always assigned. */
        bool hwsup = am33xx_cm_is_clkdm_in_hwsup(clkdm->cm_inst,
                                                 clkdm->clkdm_offs);

        if (!hwsup && (clkdm->flags & CLKDM_CAN_FORCE_SLEEP))
                am33xx_clkdm_sleep(clkdm);

        return 0;
}
/* Clockdomain operations used by all AM33xx-family clockdomains. */
struct clkdm_ops am33xx_clkdm_operations = {
        .clkdm_sleep            = am33xx_clkdm_sleep,
        .clkdm_wakeup           = am33xx_clkdm_wakeup,
        .clkdm_allow_idle       = am33xx_clkdm_allow_idle,
        .clkdm_deny_idle        = am33xx_clkdm_deny_idle,
        .clkdm_clk_enable       = am33xx_clkdm_clk_enable,
        .clkdm_clk_disable      = am33xx_clkdm_clk_disable,
};
| gpl-2.0 |
codeaurora-unoffical/linux-msm | drivers/usb/musb/musb_cppi41.c | 481 | 21930 | // SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include "cppi_dma.h"
#include "musb_core.h"
#include "musb_trace.h"
#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))
#define EP_MODE_AUTOREQ_NONE 0
#define EP_MODE_AUTOREQ_ALL_NEOP 1
#define EP_MODE_AUTOREQ_ALWAYS 3
#define EP_MODE_DMA_TRANSPARENT 0
#define EP_MODE_DMA_RNDIS 1
#define EP_MODE_DMA_GEN_RNDIS 3
#define USB_CTRL_TX_MODE 0x70
#define USB_CTRL_RX_MODE 0x74
#define USB_CTRL_AUTOREQ 0xd0
#define USB_TDOWN 0xd8
#define MUSB_DMA_NUM_CHANNELS 15
#define DA8XX_USB_MODE 0x10
#define DA8XX_USB_AUTOREQ 0x14
#define DA8XX_USB_TEARDOWN 0x1c
#define DA8XX_DMA_NUM_CHANNELS 4
/*
 * Per-instance state of the CPPI 4.1 MUSB DMA glue.  The generic
 * struct dma_controller is embedded so container_of() can recover
 * this structure from the pointer handed to the MUSB core.
 */
struct cppi41_dma_controller {
        struct dma_controller controller;       /* generic MUSB DMA interface */
        struct cppi41_dma_channel *rx_channel;  /* array[num_channels] */
        struct cppi41_dma_channel *tx_channel;  /* array[num_channels] */
        struct hrtimer early_tx;                /* early-TX-irq workaround timer */
        struct list_head early_tx_list;         /* channels waiting for FIFO drain */
        u32 rx_mode;                            /* shadow of the RX mode register */
        u32 tx_mode;                            /* shadow of the TX mode register */
        u32 auto_req;                           /* shadow of the autoreq register */
        u32 tdown_reg;                          /* teardown register offset */
        u32 autoreq_reg;                        /* autoreq register offset */
        /* platform-specific hook to program the per-EP DMA mode */
        void (*set_dma_mode)(struct cppi41_dma_channel *cppi41_channel,
                             unsigned int mode);
        u8 num_channels;                        /* channels per direction */
};
/*
 * Snapshot the host-side RX data toggle before a transfer starts, so
 * update_rx_toggle() can later detect a spuriously reset toggle
 * (AM335x Advisory 1.0.13).  No-op for TX channels and in device mode.
 */
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
        u16 rxcsr;

        if (cppi41_channel->is_tx)
                return;
        if (!is_host_active(cppi41_channel->controller->controller.musb))
                return;

        rxcsr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
        cppi41_channel->usb_toggle = (rxcsr & MUSB_RXCSR_H_DATATOGGLE) ? 1 : 0;
}
/*
 * Re-check the host-side RX data toggle after a transfer completed and
 * restore DATA1 if the hardware spuriously reset it (see the erratum
 * note below).  No-op for TX channels and in device mode.
 */
static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct musb *musb = hw_ep->musb;
        u16 csr;
        u8 toggle;

        if (cppi41_channel->is_tx)
                return;
        if (!is_host_active(musb))
                return;

        musb_ep_select(musb->mregs, hw_ep->epnum);
        csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
        toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

        /*
         * AM335x Advisory 1.0.13: Due to internal synchronisation error the
         * data toggle may reset from DATA1 to DATA0 during receiving data from
         * more than one endpoint.
         *
         * Toggle is now 0 and was 0 before the transfer as well: that can
         * only happen if it was reset mid-transfer, so force DATA1 back.
         */
        if (!toggle && toggle == cppi41_channel->usb_toggle) {
                csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
                musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
                musb_dbg(musb, "Restoring DATA1 toggle.");
        }

        cppi41_channel->usb_toggle = toggle;
}
/* Return true when the TX FIFO of @hw_ep has been fully drained. */
static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
        struct musb *musb = hw_ep->musb;
        u8 epnum = hw_ep->epnum;
        void __iomem *epio = musb->endpoints[epnum].regs;
        u16 txcsr;

        musb_ep_select(musb->mregs, hw_ep->epnum);
        txcsr = musb_readw(epio, MUSB_TXCSR);

        /* TXPKTRDY set means a packet is still sitting in the FIFO. */
        return !(txcsr & MUSB_TXCSR_TXPKTRDY);
}
static void cppi41_dma_callback(void *private_data,
const struct dmaengine_result *result);
/*
 * Finish the current DMA segment: either complete the whole transfer
 * towards the MUSB core, or queue the next <= packet_sz chunk.  The
 * one-packet-at-a-time reload path exists because of AM335x Advisory
 * 1.0.13 (see cppi41_configure_channel()).  Called with musb->lock held.
 */
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct musb *musb = hw_ep->musb;
        void __iomem *epio = hw_ep->regs;
        u16 csr;

        if (!cppi41_channel->prog_len ||
            (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

                /* done, complete */
                cppi41_channel->channel.actual_len =
                        cppi41_channel->transferred;
                cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
                cppi41_channel->channel.rx_packet_done = true;

                /*
                 * transmit ZLP using PIO mode for transfers which size is
                 * multiple of EP packet size.
                 */
                if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
                                        cppi41_channel->packet_sz) == 0) {
                        musb_ep_select(musb->mregs, hw_ep->epnum);
                        csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
                        musb_writew(epio, MUSB_TXCSR, csr);
                }

                trace_musb_cppi41_done(cppi41_channel);
                musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
        } else {
                /* next iteration, reload: program the next chunk */
                struct dma_chan *dc = cppi41_channel->dc;
                struct dma_async_tx_descriptor *dma_desc;
                enum dma_transfer_direction direction;
                u32 remain_bytes;

                cppi41_channel->buf_addr += cppi41_channel->packet_sz;

                /* Never queue more than one max-packet at a time. */
                remain_bytes = cppi41_channel->total_len;
                remain_bytes -= cppi41_channel->transferred;
                remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
                cppi41_channel->prog_len = remain_bytes;

                direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
                        : DMA_DEV_TO_MEM;
                dma_desc = dmaengine_prep_slave_single(dc,
                                cppi41_channel->buf_addr,
                                remain_bytes,
                                direction,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (WARN_ON(!dma_desc))
                        return;

                dma_desc->callback_result = cppi41_dma_callback;
                dma_desc->callback_param = &cppi41_channel->channel;
                cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
                trace_musb_cppi41_cont(cppi41_channel);
                dma_async_issue_pending(dc);

                /* In host RX, explicitly request the next packet. */
                if (!cppi41_channel->is_tx) {
                        musb_ep_select(musb->mregs, hw_ep->epnum);
                        csr = musb_readw(epio, MUSB_RXCSR);
                        csr |= MUSB_RXCSR_H_REQPKT;
                        musb_writew(epio, MUSB_RXCSR, csr);
                }
        }
}
/*
 * hrtimer callback for the early-TX-interrupt workaround: re-check each
 * pending TX channel's FIFO and complete those that have drained.  The
 * timer re-arms itself while any channel is still waiting.
 */
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
        struct cppi41_dma_controller *controller;
        struct cppi41_dma_channel *cppi41_channel, *n;
        struct musb *musb;
        unsigned long flags;
        enum hrtimer_restart ret = HRTIMER_NORESTART;

        controller = container_of(timer, struct cppi41_dma_controller,
                        early_tx);
        musb = controller->controller.musb;

        spin_lock_irqsave(&musb->lock, flags);
        list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
                        tx_check) {
                bool empty;
                struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

                empty = musb_is_tx_fifo_empty(hw_ep);
                if (empty) {
                        /* FIFO drained: this channel's transfer is done. */
                        list_del_init(&cppi41_channel->tx_check);
                        cppi41_trans_done(cppi41_channel);
                }
        }

        /* Still channels waiting: poll again in 20us. */
        if (!list_empty(&controller->early_tx_list) &&
            !hrtimer_is_queued(&controller->early_tx)) {
                ret = HRTIMER_RESTART;
                hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
        }

        spin_unlock_irqrestore(&musb->lock, flags);
        return ret;
}
/*
 * dmaengine completion callback.  Accounts the bytes moved, handles the
 * AM335x "TX interrupt fires before the FIFO is empty" quirk (spin
 * briefly on high speed, fall back to an hrtimer on full speed), and
 * then hands completion to cppi41_trans_done().
 */
static void cppi41_dma_callback(void *private_data,
                                const struct dmaengine_result *result)
{
        struct dma_channel *channel = private_data;
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct cppi41_dma_controller *controller;
        struct musb *musb = hw_ep->musb;
        unsigned long flags;
        struct dma_tx_state txstate;
        u32 transferred;
        int is_hs = 0;
        bool empty;

        controller = cppi41_channel->controller;
        if (controller->controller.dma_callback)
                controller->controller.dma_callback(&controller->controller);

        /* A torn-down transfer must not be accounted or completed. */
        if (result->result == DMA_TRANS_ABORTED)
                return;

        spin_lock_irqsave(&musb->lock, flags);

        /* residue tells how much of the programmed length was not moved */
        dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
                        &txstate);
        transferred = cppi41_channel->prog_len - txstate.residue;
        cppi41_channel->transferred += transferred;

        trace_musb_cppi41_gb(cppi41_channel);
        update_rx_toggle(cppi41_channel);

        /* Done when everything moved, or on a short packet. */
        if (cppi41_channel->transferred == cppi41_channel->total_len ||
                        transferred < cppi41_channel->packet_sz)
                cppi41_channel->prog_len = 0;

        if (cppi41_channel->is_tx) {
                u8 type;

                if (is_host_active(musb))
                        type = hw_ep->out_qh->type;
                else
                        type = hw_ep->ep_in.type;

                if (type == USB_ENDPOINT_XFER_ISOC)
                        /*
                         * Don't use the early-TX-interrupt workaround below
                         * for Isoch transfter. Since Isoch are periodic
                         * transfer, by the time the next transfer is
                         * scheduled, the current one should be done already.
                         *
                         * This avoids audio playback underrun issue.
                         */
                        empty = true;
                else
                        empty = musb_is_tx_fifo_empty(hw_ep);
        }

        /* RX, or TX whose FIFO already drained: complete right away. */
        if (!cppi41_channel->is_tx || empty) {
                cppi41_trans_done(cppi41_channel);
                goto out;
        }

        /*
         * On AM335x it has been observed that the TX interrupt fires
         * too early that means the TXFIFO is not yet empty but the DMA
         * engine says that it is done with the transfer. We don't
         * receive a FIFO empty interrupt so the only thing we can do is
         * to poll for the bit. On HS it usually takes 2us, on FS around
         * 110us - 150us depending on the transfer size.
         * We spin on HS (no longer than 25us) and setup a timer on
         * FS to check for the bit and complete the transfer.
         */
        if (is_host_active(musb)) {
                if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
                        is_hs = 1;
        } else {
                if (musb->g.speed == USB_SPEED_HIGH)
                        is_hs = 1;
        }
        if (is_hs) {
                unsigned wait = 25;

                do {
                        empty = musb_is_tx_fifo_empty(hw_ep);
                        if (empty) {
                                cppi41_trans_done(cppi41_channel);
                                goto out;
                        }
                        wait--;
                        if (!wait)
                                break;
                        cpu_relax();
                } while (1);
        }

        /* FS (or HS spin timed out): defer completion to the hrtimer. */
        list_add_tail(&cppi41_channel->tx_check,
                        &controller->early_tx_list);
        if (!hrtimer_is_queued(&controller->early_tx)) {
                unsigned long usecs = cppi41_channel->total_len / 10;

                hrtimer_start_range_ns(&controller->early_tx,
                                       usecs * NSEC_PER_USEC,
                                       20 * NSEC_PER_USEC,
                                       HRTIMER_MODE_REL);
        }

out:
        spin_unlock_irqrestore(&musb->lock, flags);
}
/*
 * Insert the 2-bit @mode for endpoint @ep into the packed mode word
 * @old and return the result.  Endpoint 1 occupies bits [1:0],
 * endpoint 2 bits [3:2], and so on.
 */
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
        unsigned bit = (ep - 1) * 2;

        return (old & ~(3 << bit)) | (mode << bit);
}
/*
 * Program the per-endpoint DMA mode on AM335x-style glue, where TX and
 * RX modes live in two separate registers.  The register content is
 * shadowed in the controller so redundant MMIO writes are skipped.
 */
static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
                unsigned mode)
{
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct musb *musb = controller->controller.musb;
        u32 old_mode, new_mode;

        old_mode = cppi41_channel->is_tx ? controller->tx_mode :
                                           controller->rx_mode;
        new_mode = update_ep_mode(cppi41_channel->port_num, mode, old_mode);

        if (new_mode == old_mode)
                return;

        if (cppi41_channel->is_tx) {
                controller->tx_mode = new_mode;
                musb_writel(musb->ctrl_base, USB_CTRL_TX_MODE, new_mode);
        } else {
                controller->rx_mode = new_mode;
                musb_writel(musb->ctrl_base, USB_CTRL_RX_MODE, new_mode);
        }
}
/*
 * Program the per-endpoint DMA mode on DA8xx glue.  Unlike AM335x, a
 * single register carries 4 bits per port, with RX modes shifted into
 * the upper half-word; both halves are shadowed in controller->tx_mode.
 */
static void da8xx_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
                unsigned int mode)
{
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct musb *musb = controller->controller.musb;
        unsigned int shift;
        u32 old_mode, new_mode;

        shift = (cppi41_channel->port_num - 1) * 4;
        if (!cppi41_channel->is_tx)
                shift += 16;

        old_mode = controller->tx_mode;
        new_mode = (old_mode & ~(3 << shift)) | (mode << shift);

        /* Skip the MMIO write when the mode is already in effect. */
        if (new_mode == old_mode)
                return;

        controller->tx_mode = new_mode;
        musb_writel(musb->ctrl_base, DA8XX_USB_MODE, new_mode);
}
/*
 * Program the host RX auto-request mode for this channel's port.  The
 * register content is shadowed in controller->auto_req so redundant
 * MMIO writes are avoided.
 */
static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
                unsigned mode)
{
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        u32 cur, next;

        cur = controller->auto_req;
        next = update_ep_mode(cppi41_channel->port_num, mode, cur);
        if (next == cur)
                return;

        controller->auto_req = next;
        musb_writel(controller->controller.musb->ctrl_base,
                    controller->autoreq_reg, next);
}
/*
 * Program one DMA transfer on @channel: pick the endpoint DMA/autoreq
 * mode, prepare and submit a dmaengine descriptor, and kick it off.
 * Returns true on success, false if no descriptor could be prepared.
 */
static bool cppi41_configure_channel(struct dma_channel *channel,
                                u16 packet_sz, u8 mode,
                                dma_addr_t dma_addr, u32 len)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct dma_chan *dc = cppi41_channel->dc;
        struct dma_async_tx_descriptor *dma_desc;
        enum dma_transfer_direction direction;
        struct musb *musb = cppi41_channel->controller->controller.musb;
        unsigned use_gen_rndis = 0;

        cppi41_channel->buf_addr = dma_addr;
        cppi41_channel->total_len = len;
        cppi41_channel->transferred = 0;
        cppi41_channel->packet_sz = packet_sz;
        /* mode != 0 on TX means the caller wants a trailing ZLP. */
        cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

        /*
         * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
         * than max packet size at a time.
         */
        if (cppi41_channel->is_tx)
                use_gen_rndis = 1;

        if (use_gen_rndis) {
                /* RNDIS mode */
                if (len > packet_sz) {
                        musb_writel(musb->ctrl_base,
                                RNDIS_REG(cppi41_channel->port_num), len);
                        /* gen rndis */
                        controller->set_dma_mode(cppi41_channel,
                                        EP_MODE_DMA_GEN_RNDIS);

                        /* auto req */
                        cppi41_set_autoreq_mode(cppi41_channel,
                                        EP_MODE_AUTOREQ_ALL_NEOP);
                } else {
                        musb_writel(musb->ctrl_base,
                                        RNDIS_REG(cppi41_channel->port_num), 0);
                        controller->set_dma_mode(cppi41_channel,
                                        EP_MODE_DMA_TRANSPARENT);
                        cppi41_set_autoreq_mode(cppi41_channel,
                                        EP_MODE_AUTOREQ_NONE);
                }
        } else {
                /* fallback mode: one packet per descriptor (see advisory) */
                controller->set_dma_mode(cppi41_channel,
                                EP_MODE_DMA_TRANSPARENT);
                cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
                len = min_t(u32, packet_sz, len);
        }
        cppi41_channel->prog_len = len;
        direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
        dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!dma_desc)
                return false;

        dma_desc->callback_result = cppi41_dma_callback;
        dma_desc->callback_param = channel;
        cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
        cppi41_channel->channel.rx_packet_done = false;

        trace_musb_cppi41_config(cppi41_channel);

        /* Remember the RX toggle so completion can detect erratum resets. */
        save_rx_toggle(cppi41_channel);
        dma_async_issue_pending(dc);
        return true;
}
/*
 * Hand out the pre-created DMA channel matching @hw_ep and direction,
 * or NULL when the endpoint is out of range, the dmaengine channel was
 * never acquired, or the channel is already in use.
 */
static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
                                struct musb_hw_ep *hw_ep, u8 is_tx)
{
        struct cppi41_dma_controller *controller = container_of(c,
                        struct cppi41_dma_controller, controller);
        struct cppi41_dma_channel *cppi41_channel;
        u8 ch_num = hw_ep->epnum - 1;

        if (ch_num >= controller->num_channels)
                return NULL;

        cppi41_channel = is_tx ? &controller->tx_channel[ch_num] :
                                 &controller->rx_channel[ch_num];

        if (!cppi41_channel->dc || cppi41_channel->is_allocated)
                return NULL;

        cppi41_channel->hw_ep = hw_ep;
        cppi41_channel->is_allocated = 1;

        trace_musb_cppi41_alloc(cppi41_channel);
        return &cppi41_channel->channel;
}
/* Return a previously allocated channel to the free state. */
static void cppi41_dma_channel_release(struct dma_channel *channel)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;

        trace_musb_cppi41_free(cppi41_channel);

        if (!cppi41_channel->is_allocated)
                return;

        cppi41_channel->is_allocated = 0;
        channel->status = MUSB_DMA_STATUS_FREE;
        channel->actual_len = 0;
}
/*
 * MUSB core entry point: queue a transfer on an allocated channel.
 * Returns non-zero on success, zero if the channel could not be
 * configured (in which case the caller falls back to PIO).
 */
static int cppi41_dma_channel_program(struct dma_channel *channel,
                                u16 packet_sz, u8 mode,
                                dma_addr_t dma_addr, u32 len)
{
        int ret;
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        int hb_mult = 0;

        BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
                channel->status == MUSB_DMA_STATUS_BUSY);

        if (is_host_active(cppi41_channel->controller->controller.musb)) {
                if (cppi41_channel->is_tx)
                        hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
                else
                        hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
        }

        channel->status = MUSB_DMA_STATUS_BUSY;
        channel->actual_len = 0;

        /* High-bandwidth endpoints: scale up the effective packet size. */
        if (hb_mult)
                packet_sz = hb_mult * (packet_sz & 0x7FF);

        ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
        if (!ret)
                channel->status = MUSB_DMA_STATUS_FREE;

        return ret;
}
/*
 * Decide whether a gadget-mode request may use DMA.  Only bulk TX is
 * eligible: AM335x Advisory 1.0.13 has no workaround for device RX.
 * Host mode is not expected to reach this hook (hence the WARN).
 */
static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
                void *buf, u32 length)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct musb *musb = controller->controller.musb;

        if (is_host_active(musb)) {
                WARN_ON(1);
                return 1;
        }

        if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
                return 0;

        /* AM335x Advisory 1.0.13. No workaround for device RX mode */
        return cppi41_channel->is_tx ? 1 : 0;
}
/*
 * Abort an in-flight transfer: stop the endpoint from generating new
 * traffic, tear the dmaengine channel down, and flush any stale FIFO
 * contents.  The delays and write ordering below follow hardware
 * errata and must not be reordered.  Returns 0.
 */
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct musb *musb = controller->controller.musb;
        void __iomem *epio = cppi41_channel->hw_ep->regs;
        int tdbit;
        int ret;
        unsigned is_tx;
        u16 csr;

        is_tx = cppi41_channel->is_tx;
        trace_musb_cppi41_abort(cppi41_channel);

        if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
                return 0;

        /* Drop out of the early-TX workaround list, if queued there. */
        list_del_init(&cppi41_channel->tx_check);
        if (is_tx) {
                csr = musb_readw(epio, MUSB_TXCSR);
                csr &= ~MUSB_TXCSR_DMAENAB;
                musb_writew(epio, MUSB_TXCSR, csr);
        } else {
                cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

                /* delay to drain to cppi dma pipeline for isoch */
                udelay(250);

                csr = musb_readw(epio, MUSB_RXCSR);
                csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
                musb_writew(epio, MUSB_RXCSR, csr);

                /* wait to drain cppi dma pipe line */
                udelay(50);

                csr = musb_readw(epio, MUSB_RXCSR);
                if (csr & MUSB_RXCSR_RXPKTRDY) {
                        csr |= MUSB_RXCSR_FLUSHFIFO;
                        /* double write: double-buffered FIFOs need two flushes */
                        musb_writew(epio, MUSB_RXCSR, csr);
                        musb_writew(epio, MUSB_RXCSR, csr);
                }
        }

        /* DA8xx Advisory 2.3.27: wait 250 ms before to start the teardown */
        if (musb->ops->quirks & MUSB_DA8XX)
                mdelay(250);

        tdbit = 1 << cppi41_channel->port_num;
        if (is_tx)
                tdbit <<= 16;

        /* Keep requesting teardown until the dmaengine accepts it. */
        do {
                if (is_tx)
                        musb_writel(musb->ctrl_base, controller->tdown_reg,
                                    tdbit);
                ret = dmaengine_terminate_all(cppi41_channel->dc);
        } while (ret == -EAGAIN);

        if (is_tx) {
                musb_writel(musb->ctrl_base, controller->tdown_reg, tdbit);

                csr = musb_readw(epio, MUSB_TXCSR);
                if (csr & MUSB_TXCSR_TXPKTRDY) {
                        csr |= MUSB_TXCSR_FLUSHFIFO;
                        musb_writew(epio, MUSB_TXCSR, csr);
                }
        }

        cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
        return 0;
}
/* Release every dmaengine channel acquired for either direction. */
static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
        int i;

        for (i = 0; i < ctrl->num_channels; i++) {
                if (ctrl->tx_channel[i].dc)
                        dma_release_channel(ctrl->tx_channel[i].dc);
                if (ctrl->rx_channel[i].dc)
                        dma_release_channel(ctrl->rx_channel[i].dc);
        }
}
/* Counterpart of cppi41_dma_controller_start(): drop all channels. */
static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
        cppi41_release_all_dma_chans(controller);
}
/*
 * Parse the parent device's "dma-names" DT property (entries of the
 * form "tx<N>"/"rx<N>") and request a dmaengine channel for each one,
 * wiring it into the matching tx_channel[]/rx_channel[] slot.
 * Returns 0 on success or a negative errno; on failure every channel
 * acquired so far is released again.
 */
static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
        struct musb *musb = controller->controller.musb;
        struct device *dev = musb->controller;
        struct device_node *np = dev->parent->of_node;
        struct cppi41_dma_channel *cppi41_channel;
        int count;
        int i;
        int ret;

        count = of_property_count_strings(np, "dma-names");
        if (count < 0)
                return count;

        for (i = 0; i < count; i++) {
                struct dma_chan *dc;
                struct dma_channel *musb_dma;
                const char *str;
                unsigned is_tx;
                unsigned int port;

                ret = of_property_read_string_index(np, "dma-names", i, &str);
                if (ret)
                        goto err;
                /* Direction from the prefix, port number from the suffix. */
                if (strstarts(str, "tx"))
                        is_tx = 1;
                else if (strstarts(str, "rx"))
                        is_tx = 0;
                else {
                        dev_err(dev, "Wrong dmatype %s\n", str);
                        goto err;
                }
                ret = kstrtouint(str + 2, 0, &port);
                if (ret)
                        goto err;

                ret = -EINVAL;
                /* Ports are 1-based and must fit the channel arrays. */
                if (port > controller->num_channels || !port)
                        goto err;
                if (is_tx)
                        cppi41_channel = &controller->tx_channel[port - 1];
                else
                        cppi41_channel = &controller->rx_channel[port - 1];

                cppi41_channel->controller = controller;
                cppi41_channel->port_num = port;
                cppi41_channel->is_tx = is_tx;
                INIT_LIST_HEAD(&cppi41_channel->tx_check);

                musb_dma = &cppi41_channel->channel;
                musb_dma->private_data = cppi41_channel;
                musb_dma->status = MUSB_DMA_STATUS_FREE;
                musb_dma->max_len = SZ_4M;

                dc = dma_request_chan(dev->parent, str);
                if (IS_ERR(dc)) {
                        ret = PTR_ERR(dc);
                        /* -EPROBE_DEFER is expected; don't spam the log. */
                        if (ret != -EPROBE_DEFER)
                                dev_err(dev, "Failed to request %s: %d.\n",
                                        str, ret);
                        goto err;
                }

                cppi41_channel->dc = dc;
        }
        return 0;
err:
        cppi41_release_all_dma_chans(controller);
        return ret;
}
/*
 * Tear the controller down.  The hrtimer is cancelled first so its
 * callback cannot touch channels while they are being released.
 */
void cppi41_dma_controller_destroy(struct dma_controller *c)
{
        struct cppi41_dma_controller *controller = container_of(c,
                        struct cppi41_dma_controller, controller);

        hrtimer_cancel(&controller->early_tx);
        cppi41_dma_controller_stop(controller);
        kfree(controller->rx_channel);
        kfree(controller->tx_channel);
        kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);
/*
 * Allocate and initialize the CPPI 4.1 DMA controller for @musb.
 * Requires a device-tree node on the parent device.  Returns the
 * generic controller handle, ERR_PTR(-EPROBE_DEFER) when the dmaengine
 * channels are not available yet, or NULL on any other failure.
 */
struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
        struct cppi41_dma_controller *controller;
        int channel_size;
        int ret = 0;

        if (!musb->controller->parent->of_node) {
                dev_err(musb->controller, "Need DT for the DMA engine.\n");
                return NULL;
        }

        controller = kzalloc(sizeof(*controller), GFP_KERNEL);
        if (!controller)
                goto kzalloc_fail;

        hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        controller->early_tx.function = cppi41_recheck_tx_req;
        INIT_LIST_HEAD(&controller->early_tx_list);

        controller->controller.channel_alloc = cppi41_dma_channel_allocate;
        controller->controller.channel_release = cppi41_dma_channel_release;
        controller->controller.channel_program = cppi41_dma_channel_program;
        controller->controller.channel_abort = cppi41_dma_channel_abort;
        controller->controller.is_compatible = cppi41_is_compatible;
        controller->controller.musb = musb;

        /* Register layout and channel count differ between glue layers. */
        if (musb->ops->quirks & MUSB_DA8XX) {
                controller->tdown_reg = DA8XX_USB_TEARDOWN;
                controller->autoreq_reg = DA8XX_USB_AUTOREQ;
                controller->set_dma_mode = da8xx_set_dma_mode;
                controller->num_channels = DA8XX_DMA_NUM_CHANNELS;
        } else {
                controller->tdown_reg = USB_TDOWN;
                controller->autoreq_reg = USB_CTRL_AUTOREQ;
                controller->set_dma_mode = cppi41_set_dma_mode;
                controller->num_channels = MUSB_DMA_NUM_CHANNELS;
        }
        channel_size = controller->num_channels *
                        sizeof(struct cppi41_dma_channel);
        controller->rx_channel = kzalloc(channel_size, GFP_KERNEL);
        if (!controller->rx_channel)
                goto rx_channel_alloc_fail;
        controller->tx_channel = kzalloc(channel_size, GFP_KERNEL);
        if (!controller->tx_channel)
                goto tx_channel_alloc_fail;

        ret = cppi41_dma_controller_start(controller);
        if (ret)
                goto plat_get_fail;
        return &controller->controller;

/* Error unwind: each label frees exactly what was allocated above it. */
plat_get_fail:
        kfree(controller->tx_channel);
tx_channel_alloc_fail:
        kfree(controller->rx_channel);
rx_channel_alloc_fail:
        kfree(controller);
kzalloc_fail:
        if (ret == -EPROBE_DEFER)
                return ERR_PTR(ret);
        return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);
| gpl-2.0 |
allwinner/linux-2.6.36 | drivers/parisc/eisa_eeprom.c | 993 | 3008 | /*
* EISA "eeprom" support routines
*
* Copyright (C) 2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/eisa_eeprom.h>
#define EISA_EEPROM_MINOR 241
/*
 * Seek within the fixed-size EEPROM image.  Uses the standard SEEK_*
 * constants instead of magic 0/1/2 and rejects unknown whence values
 * instead of silently treating them as SEEK_SET.  Positions outside
 * [0, HPEE_MAX_LENGTH) yield -EINVAL, as before.
 */
static loff_t eisa_eeprom_llseek(struct file *file, loff_t offset, int origin)
{
        switch (origin) {
        case SEEK_SET:
                /* nothing to do */
                break;
        case SEEK_CUR:
                offset += file->f_pos;
                break;
        case SEEK_END:
                offset += HPEE_MAX_LENGTH;
                break;
        default:
                return -EINVAL;
        }
        return (offset >= 0 && offset < HPEE_MAX_LENGTH)
                ? (file->f_pos = offset) : -EINVAL;
}
/*
 * Read up to @count bytes of the EEPROM starting at *@ppos into the
 * user buffer, advancing *@ppos.  Returns the number of bytes read,
 * 0 at/after end of device, or a negative errno.
 */
static ssize_t eisa_eeprom_read(struct file * file,
                              char __user *buf, size_t count, loff_t *ppos )
{
        unsigned char *tmp;
        ssize_t ret;
        int i;

        /* Out-of-range position reads as end-of-file. */
        if (*ppos < 0 || *ppos >= HPEE_MAX_LENGTH)
                return 0;

        /* Clamp the request so it does not run past the EEPROM. */
        count = *ppos + count < HPEE_MAX_LENGTH ? count : HPEE_MAX_LENGTH - *ppos;

        /* Bounce buffer: the EEPROM is MMIO and needs byte-wise readb(). */
        tmp = kmalloc(count, GFP_KERNEL);
        if (tmp) {
                for (i = 0; i < count; i++)
                        tmp[i] = readb(eisa_eeprom_addr+(*ppos)++);

                if (copy_to_user (buf, tmp, count))
                        ret = -EFAULT;
                else
                        ret = count;
                kfree (tmp);
        } else
                ret = -ENOMEM;

        return ret;
}
/* The EEPROM is read-only: refuse any open that requests write access. */
static int eisa_eeprom_open(struct inode *inode, struct file *file)
{
        return (file->f_mode & FMODE_WRITE) ? -EINVAL : 0;
}
/* Nothing to clean up on close. */
static int eisa_eeprom_release(struct inode *inode, struct file *file)
{
        return 0;
}
/*
* The various file operations we support.
*/
/* File operations for the EISA EEPROM misc device (read-only). */
static const struct file_operations eisa_eeprom_fops = {
        .owner =        THIS_MODULE,
        .llseek =       eisa_eeprom_llseek,
        .read =         eisa_eeprom_read,
        .open =         eisa_eeprom_open,
        .release =      eisa_eeprom_release,
};
static struct miscdevice eisa_eeprom_dev = {
EISA_EEPROM_MINOR,
"eisa_eeprom",
&eisa_eeprom_fops
};
/*
 * Register the misc device, but only if firmware/platform code has
 * published an EEPROM base address.  Returns 0 on success or the
 * misc_register() error.
 */
static int __init eisa_eeprom_init(void)
{
        int retval;

        /* No mapped EEPROM on this machine: nothing to expose. */
        if (!eisa_eeprom_addr)
                return -ENODEV;

        retval = misc_register(&eisa_eeprom_dev);
        if (retval < 0) {
                printk(KERN_ERR "EISA EEPROM: cannot register misc device.\n");
                return retval;
        }

        printk(KERN_INFO "EISA EEPROM at 0x%p\n", eisa_eeprom_addr);
        return 0;
}
MODULE_LICENSE("GPL");
module_init(eisa_eeprom_init);
| gpl-2.0 |
david-a-wheeler/linux | sound/core/memory.c | 1249 | 2567 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
* Misc memory accessors
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <sound/core.h>
/**
 * copy_to_user_fromio - copy data from mmio-space to user-space
 * @dst: the destination pointer on user-space
 * @src: the source pointer on mmio
 * @count: the data size to copy in bytes
 *
 * Copies the data from mmio-space to user-space.
 *
 * Return: Zero if successful, or non-zero on failure.
 */
int copy_to_user_fromio(void __user *dst, const volatile void __iomem *src, size_t count)
{
#if defined(__i386__) || defined(CONFIG_SPARC32)
        /* On these arches MMIO can be copied directly in one shot. */
        return copy_to_user(dst, (const void __force*)src, count) ? -EFAULT : 0;
#else
        /* Bounce through a small on-stack buffer, 256 bytes at a time. */
        char buf[256];
        while (count) {
                size_t c = count;
                if (c > sizeof(buf))
                        c = sizeof(buf);
                memcpy_fromio(buf, (void __iomem *)src, c);
                if (copy_to_user(dst, buf, c))
                        return -EFAULT;
                count -= c;
                dst += c;
                src += c;
        }
        return 0;
#endif
}
EXPORT_SYMBOL(copy_to_user_fromio);
/**
 * copy_from_user_toio - copy data from user-space to mmio-space
 * @dst: the destination pointer on mmio-space
 * @src: the source pointer on user-space
 * @count: the data size to copy in bytes
 *
 * Copies the data from user-space to mmio-space.
 *
 * Return: Zero if successful, or non-zero on failure.
 */
int copy_from_user_toio(volatile void __iomem *dst, const void __user *src, size_t count)
{
#if defined(__i386__) || defined(CONFIG_SPARC32)
        /* On these arches MMIO can be copied directly in one shot. */
        return copy_from_user((void __force *)dst, src, count) ? -EFAULT : 0;
#else
        /* Bounce through a small on-stack buffer, 256 bytes at a time. */
        char buf[256];
        while (count) {
                size_t c = count;
                if (c > sizeof(buf))
                        c = sizeof(buf);
                if (copy_from_user(buf, src, c))
                        return -EFAULT;
                memcpy_toio(dst, buf, c);
                count -= c;
                dst += c;
                src += c;
        }
        return 0;
#endif
}
EXPORT_SYMBOL(copy_from_user_toio);
| gpl-2.0 |
Dosis/geeksphone-kernel-zero-2.6.35 | drivers/video/tmiofb.c | 1505 | 28536 | /*
* Frame Buffer Device for Toshiba Mobile IO(TMIO) controller
*
* Copyright(C) 2005-2006 Chris Humbert
* Copyright(C) 2005 Dirk Opfer
* Copyright(C) 2007,2008 Dmitry Baryshkov
*
* Based on:
* drivers/video/w100fb.c
* code written by Sharp/Lineo for 2.4 kernels
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/fb.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
/* Why should fb driver call console functions? because acquire_console_sem() */
#include <linux/console.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/uaccess.h>
/*
* accelerator commands
*/
#define TMIOFB_ACC_CSADR(x) (0x00000000 | ((x) & 0x001ffffe))
#define TMIOFB_ACC_CHPIX(x) (0x01000000 | ((x) & 0x000003ff))
#define TMIOFB_ACC_CVPIX(x) (0x02000000 | ((x) & 0x000003ff))
#define TMIOFB_ACC_PSADR(x) (0x03000000 | ((x) & 0x00fffffe))
#define TMIOFB_ACC_PHPIX(x) (0x04000000 | ((x) & 0x000003ff))
#define TMIOFB_ACC_PVPIX(x) (0x05000000 | ((x) & 0x000003ff))
#define TMIOFB_ACC_PHOFS(x) (0x06000000 | ((x) & 0x000003ff))
#define TMIOFB_ACC_PVOFS(x) (0x07000000 | ((x) & 0x000003ff))
#define TMIOFB_ACC_POADR(x) (0x08000000 | ((x) & 0x00fffffe))
#define TMIOFB_ACC_RSTR(x) (0x09000000 | ((x) & 0x000000ff))
#define TMIOFB_ACC_TCLOR(x) (0x0A000000 | ((x) & 0x0000ffff))
#define TMIOFB_ACC_FILL(x) (0x0B000000 | ((x) & 0x0000ffff))
#define TMIOFB_ACC_DSADR(x) (0x0C000000 | ((x) & 0x00fffffe))
#define TMIOFB_ACC_SSADR(x) (0x0D000000 | ((x) & 0x00fffffe))
#define TMIOFB_ACC_DHPIX(x) (0x0E000000 | ((x) & 0x000003ff))
#define TMIOFB_ACC_DVPIX(x) (0x0F000000 | ((x) & 0x000003ff))
#define TMIOFB_ACC_SHPIX(x) (0x10000000 | ((x) & 0x000003ff))
#define TMIOFB_ACC_SVPIX(x) (0x11000000 | ((x) & 0x000003ff))
#define TMIOFB_ACC_LBINI(x) (0x12000000 | ((x) & 0x0000ffff))
#define TMIOFB_ACC_LBK2(x) (0x13000000 | ((x) & 0x0000ffff))
#define TMIOFB_ACC_SHBINI(x) (0x14000000 | ((x) & 0x0000ffff))
#define TMIOFB_ACC_SHBK2(x) (0x15000000 | ((x) & 0x0000ffff))
#define TMIOFB_ACC_SVBINI(x) (0x16000000 | ((x) & 0x0000ffff))
#define TMIOFB_ACC_SVBK2(x) (0x17000000 | ((x) & 0x0000ffff))
#define TMIOFB_ACC_CMGO 0x20000000
#define TMIOFB_ACC_CMGO_CEND 0x00000001
#define TMIOFB_ACC_CMGO_INT 0x00000002
#define TMIOFB_ACC_CMGO_CMOD 0x00000010
#define TMIOFB_ACC_CMGO_CDVRV 0x00000020
#define TMIOFB_ACC_CMGO_CDHRV 0x00000040
#define TMIOFB_ACC_CMGO_RUND 0x00008000
#define TMIOFB_ACC_SCGO 0x21000000
#define TMIOFB_ACC_SCGO_CEND 0x00000001
#define TMIOFB_ACC_SCGO_INT 0x00000002
#define TMIOFB_ACC_SCGO_ROP3 0x00000004
#define TMIOFB_ACC_SCGO_TRNS 0x00000008
#define TMIOFB_ACC_SCGO_DVRV 0x00000010
#define TMIOFB_ACC_SCGO_DHRV 0x00000020
#define TMIOFB_ACC_SCGO_SVRV 0x00000040
#define TMIOFB_ACC_SCGO_SHRV 0x00000080
#define TMIOFB_ACC_SCGO_DSTXY 0x00008000
#define TMIOFB_ACC_SBGO 0x22000000
#define TMIOFB_ACC_SBGO_CEND 0x00000001
#define TMIOFB_ACC_SBGO_INT 0x00000002
#define TMIOFB_ACC_SBGO_DVRV 0x00000010
#define TMIOFB_ACC_SBGO_DHRV 0x00000020
#define TMIOFB_ACC_SBGO_SVRV 0x00000040
#define TMIOFB_ACC_SBGO_SHRV 0x00000080
#define TMIOFB_ACC_SBGO_SBMD 0x00000100
#define TMIOFB_ACC_FLGO 0x23000000
#define TMIOFB_ACC_FLGO_CEND 0x00000001
#define TMIOFB_ACC_FLGO_INT 0x00000002
#define TMIOFB_ACC_FLGO_ROP3 0x00000004
#define TMIOFB_ACC_LDGO 0x24000000
#define TMIOFB_ACC_LDGO_CEND 0x00000001
#define TMIOFB_ACC_LDGO_INT 0x00000002
#define TMIOFB_ACC_LDGO_ROP3 0x00000004
#define TMIOFB_ACC_LDGO_ENDPX 0x00000008
#define TMIOFB_ACC_LDGO_LVRV 0x00000010
#define TMIOFB_ACC_LDGO_LHRV 0x00000020
#define TMIOFB_ACC_LDGO_LDMOD 0x00000040
/* a FIFO is always allocated, even if acceleration is not used */
#define TMIOFB_FIFO_SIZE 512
/*
* LCD Host Controller Configuration Register
*
* This iomem area supports only 16-bit IO.
*/
#define CCR_CMD 0x04 /* Command */
#define CCR_REVID 0x08 /* Revision ID */
#define CCR_BASEL 0x10 /* LCD Control Reg Base Addr Low */
#define CCR_BASEH 0x12 /* LCD Control Reg Base Addr High */
#define CCR_UGCC 0x40 /* Unified Gated Clock Control */
#define CCR_GCC 0x42 /* Gated Clock Control */
#define CCR_USC 0x50 /* Unified Software Clear */
#define CCR_VRAMRTC 0x60 /* VRAM Timing Control */
/* 0x61 VRAM Refresh Control */
#define CCR_VRAMSAC 0x62 /* VRAM Access Control */
/* 0x63 VRAM Status */
#define CCR_VRAMBC 0x64 /* VRAM Block Control */
/*
* LCD Control Register
*
* This iomem area supports only 16-bit IO.
*/
#define LCR_UIS 0x000 /* Unified Interrupt Status */
#define LCR_VHPN 0x008 /* VRAM Horizontal Pixel Number */
#define LCR_CFSAL 0x00a /* Command FIFO Start Address Low */
#define LCR_CFSAH 0x00c /* Command FIFO Start Address High */
#define LCR_CFS 0x00e /* Command FIFO Size */
#define LCR_CFWS 0x010 /* Command FIFO Writeable Size */
#define LCR_BBIE 0x012 /* BitBLT Interrupt Enable */
#define LCR_BBISC 0x014 /* BitBLT Interrupt Status and Clear */
#define LCR_CCS 0x016 /* Command Count Status */
#define LCR_BBES 0x018 /* BitBLT Execution Status */
#define LCR_CMDL 0x01c /* Command Low */
#define LCR_CMDH 0x01e /* Command High */
#define LCR_CFC 0x022 /* Command FIFO Clear */
#define LCR_CCIFC 0x024 /* CMOS Camera IF Control */
#define LCR_HWT 0x026 /* Hardware Test */
#define LCR_LCDCCRC 0x100 /* LCDC Clock and Reset Control */
#define LCR_LCDCC 0x102 /* LCDC Control */
#define LCR_LCDCOPC 0x104 /* LCDC Output Pin Control */
#define LCR_LCDIS 0x108 /* LCD Interrupt Status */
#define LCR_LCDIM 0x10a /* LCD Interrupt Mask */
#define LCR_LCDIE 0x10c /* LCD Interrupt Enable */
#define LCR_GDSAL 0x122 /* Graphics Display Start Address Low */
#define LCR_GDSAH 0x124 /* Graphics Display Start Address High */
#define LCR_VHPCL 0x12a /* VRAM Horizontal Pixel Count Low */
#define LCR_VHPCH 0x12c /* VRAM Horizontal Pixel Count High */
#define LCR_GM 0x12e /* Graphic Mode(VRAM access enable) */
#define LCR_HT 0x140 /* Horizontal Total */
#define LCR_HDS 0x142 /* Horizontal Display Start */
#define LCR_HSS 0x144 /* H-Sync Start */
#define LCR_HSE 0x146 /* H-Sync End */
#define LCR_HNP 0x14c /* Horizontal Number of Pixels */
#define LCR_VT 0x150 /* Vertical Total */
#define LCR_VDS 0x152 /* Vertical Display Start */
#define LCR_VSS 0x154 /* V-Sync Start */
#define LCR_VSE 0x156 /* V-Sync End */
#define LCR_CDLN 0x160 /* Current Display Line Number */
#define LCR_ILN 0x162 /* Interrupt Line Number */
#define LCR_SP 0x164 /* Sync Polarity */
#define LCR_MISC 0x166 /* MISC(RGB565 mode) */
#define LCR_VIHSS 0x16a /* Video Interface H-Sync Start */
#define LCR_VIVS 0x16c /* Video Interface Vertical Start */
#define LCR_VIVE 0x16e /* Video Interface Vertical End */
#define LCR_VIVSS 0x170 /* Video Interface V-Sync Start */
#define LCR_VCCIS 0x17e /* Video / CMOS Camera Interface Select */
#define LCR_VIDWSAL 0x180 /* VI Data Write Start Address Low */
#define LCR_VIDWSAH 0x182 /* VI Data Write Start Address High */
#define LCR_VIDRSAL 0x184 /* VI Data Read Start Address Low */
#define LCR_VIDRSAH 0x186 /* VI Data Read Start Address High */
#define LCR_VIPDDST 0x188 /* VI Picture Data Display Start Timing */
#define LCR_VIPDDET 0x186 /* VI Picture Data Display End Timing */
#define LCR_VIE 0x18c /* Video Interface Enable */
#define LCR_VCS 0x18e /* Video/Camera Select */
#define LCR_VPHWC 0x194 /* Video Picture Horizontal Wait Count */
#define LCR_VPHS 0x196 /* Video Picture Horizontal Size */
#define LCR_VPVWC 0x198 /* Video Picture Vertical Wait Count */
#define LCR_VPVS 0x19a /* Video Picture Vertical Size */
#define LCR_PLHPIX 0x1a0 /* PLHPIX */
#define LCR_XS 0x1a2 /* XStart */
#define LCR_XCKHW 0x1a4 /* XCK High Width */
#define LCR_STHS 0x1a8 /* STH Start */
#define LCR_VT2 0x1aa /* Vertical Total */
#define LCR_YCKSW 0x1ac /* YCK Start Wait */
#define LCR_YSTS 0x1ae /* YST Start */
#define LCR_PPOLS 0x1b0 /* #PPOL Start */
#define LCR_PRECW 0x1b2 /* PREC Width */
#define LCR_VCLKHW 0x1b4 /* VCLK High Width */
#define LCR_OC 0x1b6 /* Output Control */
/*
 * Video mode string handed to fb_find_mode() in probe.  Never assigned
 * here — tmiofb_setup() is an empty FIXME stub — so it stays NULL.
 */
static char *mode_option __devinitdata;

/* Per-device driver state, hung off fb_info->par. */
struct tmiofb_par {
    u32 pseudo_palette[16];  /* RGB565 values built by tmiofb_setcolreg() */
#ifdef CONFIG_FB_TMIO_ACCELL
    wait_queue_head_t wait_acc;  /* woken by tmiofb_irq() on BitBLT completion */
    bool use_polling;  /* poll LCR_CCS instead of sleeping on wait_acc */
#endif
    void __iomem *ccr;  /* host controller Configuration registers (CCR_*) */
    void __iomem *lcr;  /* LCD Control registers (LCR_*) */
};
/*--------------------------------------------------------------------------*/
/*
* reasons for an interrupt:
* uis bbisc lcdis
* 0100 0001 accelerator command completed
* 2000 0001 vsync start
* 2000 0002 display start
* 2000 0004 line number match(0x1ff mask???)
*/
/*
 * Interrupt handler.  Reads and acknowledges the BitBLT interrupt status
 * and wakes accelerator waiters.  Also invoked by hand with irq == -1
 * from the polling path (tmiofb_acc_wait) and from resume.
 */
static irqreturn_t tmiofb_irq(int irq, void *__info)
{
    struct fb_info *info = __info;
    struct tmiofb_par *par = info->par;
    /* writing the status value back clears it (Status-and-Clear reg) */
    unsigned int bbisc = tmio_ioread16(par->lcr + LCR_BBISC);
    tmio_iowrite16(bbisc, par->lcr + LCR_BBISC);
#ifdef CONFIG_FB_TMIO_ACCELL
    /*
     * We were in polling mode and now we got correct irq.
     * Switch back to IRQ-based sync of command FIFO
     */
    if (unlikely(par->use_polling && irq != -1)) {
        printk(KERN_INFO "tmiofb: switching to waitq\n");
        par->use_polling = false;
    }
    /* bit 0 = accelerator command completed (see comment above) */
    if (bbisc & 1)
        wake_up(&par->wait_acc);
#endif
    return IRQ_HANDLED;
}
/*--------------------------------------------------------------------------*/
/*
* Turns off the LCD controller and LCD host controller.
*/
static int tmiofb_hw_stop(struct platform_device *dev)
{
    struct mfd_cell *cell = dev->dev.platform_data;
    struct tmio_fb_data *data = cell->driver_data;
    struct fb_info *info = platform_get_drvdata(dev);
    struct tmiofb_par *par = info->par;

    tmio_iowrite16(0, par->ccr + CCR_UGCC);  /* gate the unified clocks */
    tmio_iowrite16(0, par->lcr + LCR_GM);    /* disable VRAM access */
    data->lcd_set_power(dev, 0);             /* platform hook: panel off */
    /* LCDC Clock and Reset Control; 0x0010 per datasheet — not decoded here */
    tmio_iowrite16(0x0010, par->lcr + LCR_LCDCCRC);
    return 0;
}
/*
* Initializes the LCD host controller.
*/
static int tmiofb_hw_init(struct platform_device *dev)
{
    struct mfd_cell *cell = dev->dev.platform_data;
    struct fb_info *info = platform_get_drvdata(dev);
    struct tmiofb_par *par = info->par;
    const struct resource *nlcr = &cell->resources[0];  /* LCR iomem */
    const struct resource *vram = &cell->resources[2];  /* VRAM */
    unsigned long base;

    if (nlcr == NULL || vram == NULL)
        return -EINVAL;

    base = nlcr->start;

    /* ungate clocks and pulse the unified software-clear register */
    tmio_iowrite16(0x003a, par->ccr + CCR_UGCC);
    tmio_iowrite16(0x003a, par->ccr + CCR_GCC);
    tmio_iowrite16(0x3f00, par->ccr + CCR_USC);

    msleep(2); /* wait for device to settle */

    tmio_iowrite16(0x0000, par->ccr + CCR_USC);
    /* tell the host controller where its LCD control registers live */
    tmio_iowrite16(base >> 16, par->ccr + CCR_BASEH);
    tmio_iowrite16(base, par->ccr + CCR_BASEL);
    tmio_iowrite16(0x0002, par->ccr + CCR_CMD); /* base address enable */
    tmio_iowrite16(0x40a8, par->ccr + CCR_VRAMRTC); /* VRAMRC, VRAMTC */
    tmio_iowrite16(0x0018, par->ccr + CCR_VRAMSAC); /* VRAMSTS, VRAMAC */
    tmio_iowrite16(0x0002, par->ccr + CCR_VRAMBC);

    msleep(2); /* wait for device to settle */

    tmio_iowrite16(0x000b, par->ccr + CCR_VRAMBC);

    /* the command FIFO sits in VRAM just past the visible screen
     * (screen_size was shortened by 4 * TMIOFB_FIFO_SIZE in probe) */
    base = vram->start + info->screen_size;
    tmio_iowrite16(base >> 16, par->lcr + LCR_CFSAH);
    tmio_iowrite16(base, par->lcr + LCR_CFSAL);
    tmio_iowrite16(TMIOFB_FIFO_SIZE - 1, par->lcr + LCR_CFS);
    tmio_iowrite16(1, par->lcr + LCR_CFC);   /* clear the command FIFO */
    tmio_iowrite16(1, par->lcr + LCR_BBIE);  /* enable BitBLT interrupt */
    tmio_iowrite16(0, par->lcr + LCR_CFWS);

    return 0;
}
/*
* Sets the LCD controller's output resolution and pixel clock
*/
static void tmiofb_hw_mode(struct platform_device *dev)
{
    struct mfd_cell *cell = dev->dev.platform_data;
    struct tmio_fb_data *data = cell->driver_data;
    struct fb_info *info = platform_get_drvdata(dev);
    struct fb_videomode *mode = info->mode;
    struct tmiofb_par *par = info->par;
    unsigned int i;

    /* disable output while reprogramming the controller */
    tmio_iowrite16(0, par->lcr + LCR_GM);
    data->lcd_set_power(dev, 0);
    tmio_iowrite16(0x0010, par->lcr + LCR_LCDCCRC);

    /* let the platform hook set panel-specific timing, then power up */
    data->lcd_mode(dev, mode);
    data->lcd_set_power(dev, 1);

    tmio_iowrite16(info->fix.line_length, par->lcr + LCR_VHPN);
    tmio_iowrite16(0, par->lcr + LCR_GDSAH);
    tmio_iowrite16(0, par->lcr + LCR_GDSAL);
    tmio_iowrite16(info->fix.line_length >> 16, par->lcr + LCR_VHPCH);
    tmio_iowrite16(info->fix.line_length, par->lcr + LCR_VHPCL);

    /*
     * Horizontal timing: 'i' accumulates a running pixel position, so
     * each register receives the cumulative offset of its timing event.
     */
    tmio_iowrite16(i = 0, par->lcr + LCR_HSS);
    tmio_iowrite16(i += mode->hsync_len, par->lcr + LCR_HSE);
    tmio_iowrite16(i += mode->left_margin, par->lcr + LCR_HDS);
    tmio_iowrite16(i += mode->xres + mode->right_margin, par->lcr + LCR_HT);
    tmio_iowrite16(mode->xres, par->lcr + LCR_HNP);

    /* vertical timing: same accumulation, counted in lines */
    tmio_iowrite16(i = 0, par->lcr + LCR_VSS);
    tmio_iowrite16(i += mode->vsync_len, par->lcr + LCR_VSE);
    tmio_iowrite16(i += mode->upper_margin, par->lcr + LCR_VDS);
    tmio_iowrite16(i += mode->yres, par->lcr + LCR_ILN);
    tmio_iowrite16(i += mode->lower_margin, par->lcr + LCR_VT);

    tmio_iowrite16(3, par->lcr + LCR_MISC); /* RGB565 mode */
    tmio_iowrite16(1, par->lcr + LCR_GM); /* VRAM enable */
    tmio_iowrite16(0x4007, par->lcr + LCR_LCDCC);
    tmio_iowrite16(3, par->lcr + LCR_SP); /* sync polarity */

    /* clock/reset release sequence with settle delays */
    tmio_iowrite16(0x0010, par->lcr + LCR_LCDCCRC);
    msleep(5); /* wait for device to settle */
    tmio_iowrite16(0x0014, par->lcr + LCR_LCDCCRC); /* STOP_CKP */
    msleep(5); /* wait for device to settle */
    tmio_iowrite16(0x0015, par->lcr + LCR_LCDCCRC); /* STOP_CKP|SOFT_RESET*/
    tmio_iowrite16(0xfffa, par->lcr + LCR_VCS);
}
/*--------------------------------------------------------------------------*/
#ifdef CONFIG_FB_TMIO_ACCELL
/*
 * Wait until the accelerator's pending command count (LCR_CCS) drops
 * to @ccs or below.  Returns 0 on success, -ETIMEDOUT on timeout.
 */
static int __must_check
tmiofb_acc_wait(struct fb_info *info, unsigned int ccs)
{
    struct tmiofb_par *par = info->par;
    /*
     * This code can be called with interrupts disabled.
     * So instead of relying on irq to trigger the event,
     * poll the state till the necessary command is executed.
     */
    if (irqs_disabled() || par->use_polling) {
        int i = 0;
        while (tmio_ioread16(par->lcr + LCR_CCS) > ccs) {
            udelay(1);
            i++;
            if (i > 10000) {
                pr_err("tmiofb: timeout waiting for %d\n",
                        ccs);
                return -ETIMEDOUT;
            }
            /* ack status and wake any waiters by hand */
            tmiofb_irq(-1, info);
        }
    } else {
        /* IRQ-driven path: tmiofb_irq() wakes us on completion */
        if (!wait_event_interruptible_timeout(par->wait_acc,
                tmio_ioread16(par->lcr + LCR_CCS) <= ccs,
                1000)) {
            pr_err("tmiofb: timeout waiting for %d\n", ccs);
            return -ETIMEDOUT;
        }
    }
    return 0;
}
/*
* Writes an accelerator command to the accelerator's FIFO.
*/
static int
tmiofb_acc_write(struct fb_info *info, const u32 *cmd, unsigned int count)
{
    struct tmiofb_par *par = info->par;
    unsigned int n;
    /* make sure the FIFO has room for all 'count' command words */
    int ret = tmiofb_acc_wait(info, TMIOFB_FIFO_SIZE - count);

    if (ret)
        return ret;

    /* each 32-bit command word is fed as a high/low 16-bit pair */
    for (n = 0; n < count; n++) {
        tmio_iowrite16(cmd[n] >> 16, par->lcr + LCR_CMDH);
        tmio_iowrite16(cmd[n], par->lcr + LCR_CMDL);
    }
    return 0;
}
/*
* Wait for the accelerator to finish its operations before writing
* to the framebuffer for consistent display output.
*/
/*
 * fb_sync hook: drain the accelerator command FIFO, then busy-wait for
 * the blitter itself to go idle.  Returns 0 or -ETIMEDOUT.
 */
static int tmiofb_sync(struct fb_info *fbi)
{
    struct tmiofb_par *par = fbi->par;
    int ret;
    int i = 0;

    /* wait until no commands are left pending in the FIFO */
    ret = tmiofb_acc_wait(fbi, 0);

    while (tmio_ioread16(par->lcr + LCR_BBES) & 2) { /* blit active */
        udelay(1);
        i++;
        if (i > 10000) {
            /* use pr_err with driver prefix, like tmiofb_acc_wait() */
            pr_err("tmiofb: timeout waiting for blit to end!\n");
            return -ETIMEDOUT;
        }
    }
    return ret;
}
/*
 * fb_fillrect hook: hand the rectangle to the accelerator, falling
 * back to the generic software path when acceleration is unavailable.
 */
static void
tmiofb_fillrect(struct fb_info *fbi, const struct fb_fillrect *rect)
{
    if (fbi->state != FBINFO_STATE_RUNNING ||
        fbi->flags & FBINFO_HWACCEL_DISABLED) {
        cfb_fillrect(fbi, rect);
    } else {
        /* destination address is in bytes; 2 bytes per RGB565 pixel */
        const u32 cmd[] = {
            TMIOFB_ACC_DSADR((rect->dy * fbi->mode->xres + rect->dx) * 2),
            TMIOFB_ACC_DHPIX(rect->width - 1),
            TMIOFB_ACC_DVPIX(rect->height - 1),
            TMIOFB_ACC_FILL(rect->color),
            TMIOFB_ACC_FLGO,
        };

        tmiofb_acc_write(fbi, cmd, ARRAY_SIZE(cmd));
    }
}
/*
 * fb_copyarea hook: screen-to-screen copy via the accelerator, with a
 * software fallback when acceleration is unavailable.
 */
static void
tmiofb_copyarea(struct fb_info *fbi, const struct fb_copyarea *area)
{
    if (fbi->state != FBINFO_STATE_RUNNING ||
        fbi->flags & FBINFO_HWACCEL_DISABLED) {
        cfb_copyarea(fbi, area);
    } else {
        /* addresses are byte offsets; 2 bytes per RGB565 pixel */
        const u32 cmd[] = {
            TMIOFB_ACC_DSADR((area->dy * fbi->mode->xres + area->dx) * 2),
            TMIOFB_ACC_DHPIX(area->width - 1),
            TMIOFB_ACC_DVPIX(area->height - 1),
            TMIOFB_ACC_SSADR((area->sy * fbi->mode->xres + area->sx) * 2),
            TMIOFB_ACC_SCGO,
        };

        tmiofb_acc_write(fbi, cmd, ARRAY_SIZE(cmd));
    }
}
#endif
static void tmiofb_clearscreen(struct fb_info *info)
{
const struct fb_fillrect rect = {
.dx = 0,
.dy = 0,
.width = info->mode->xres,
.height = info->mode->yres,
.color = 0,
.rop = ROP_COPY,
};
info->fbops->fb_fillrect(info, &rect);
}
/*
 * Fill in an fb_vblank report from the current display line counter
 * (LCR_CDLN) and the active mode's vertical timing.
 */
static int tmiofb_vblank(struct fb_info *fbi, struct fb_vblank *vblank)
{
    struct tmiofb_par *par = fbi->par;
    struct fb_videomode *mode = fbi->mode;
    unsigned int line = tmio_ioread16(par->lcr + LCR_CDLN);
    unsigned int display_start = mode->vsync_len + mode->upper_margin;
    unsigned int flags = FB_VBLANK_HAVE_VBLANK | FB_VBLANK_HAVE_VCOUNT
            | FB_VBLANK_HAVE_VSYNC;

    /* lines before the sync length are inside vsync */
    if (line < mode->vsync_len)
        flags |= FB_VBLANK_VSYNCING;
    /* anything outside the active display region counts as vblank */
    if (line < display_start || line > display_start + mode->yres)
        flags |= FB_VBLANK_VBLANKING;

    vblank->vcount = line;
    vblank->flags = flags;
    return 0;
}
static int tmiofb_ioctl(struct fb_info *fbi,
unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case FBIOGET_VBLANK: {
struct fb_vblank vblank = {0};
void __user *argp = (void __user *) arg;
tmiofb_vblank(fbi, &vblank);
if (copy_to_user(argp, &vblank, sizeof vblank))
return -EFAULT;
return 0;
}
#ifdef CONFIG_FB_TMIO_ACCELL
case FBIO_TMIO_ACC_SYNC:
tmiofb_sync(fbi);
return 0;
case FBIO_TMIO_ACC_WRITE: {
u32 __user *argp = (void __user *) arg;
u32 len;
u32 acc[16];
if (get_user(len, argp))
return -EFAULT;
if (len > ARRAY_SIZE(acc))
return -EINVAL;
if (copy_from_user(acc, argp + 1, sizeof(u32) * len))
return -EFAULT;
return tmiofb_acc_write(fbi, acc, len);
}
#endif
}
return -ENOTTY;
}
/*--------------------------------------------------------------------------*/
/* Select the smallest mode that allows the desired resolution to be
* displayed. If desired, the x and y parameters can be rounded up to
* match the selected mode.
*/
static struct fb_videomode *
tmiofb_find_mode(struct fb_info *info, struct fb_var_screeninfo *var)
{
    struct mfd_cell *cell =
        info->device->platform_data;
    struct tmio_fb_data *data = cell->driver_data;
    struct fb_videomode *best = NULL;
    int i;

    /* scan the platform mode list for the smallest mode that still fits */
    for (i = 0; i < data->num_modes; i++) {
        struct fb_videomode *m = data->modes + i;

        if (m->xres < var->xres || m->yres < var->yres)
            continue;   /* too small for the requested resolution */
        if (best == NULL ||
            (m->xres < best->xres && m->yres < best->yres))
            best = m;
    }
    return best;
}
static int tmiofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct fb_videomode *mode;
struct mfd_cell *cell =
info->device->platform_data;
struct tmio_fb_data *data = cell->driver_data;
mode = tmiofb_find_mode(info, var);
if (!mode || var->bits_per_pixel > 16)
return -EINVAL;
fb_videomode_to_var(var, mode);
var->xres_virtual = mode->xres;
var->yres_virtual = info->screen_size / (mode->xres * 2);
if (var->yres_virtual < var->yres)
return -EINVAL;
var->xoffset = 0;
var->yoffset = 0;
var->bits_per_pixel = 16;
var->grayscale = 0;
var->red.offset = 11;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 6;
var->blue.offset = 0;
var->blue.length = 5;
var->transp.offset = 0;
var->transp.length = 0;
var->nonstd = 0;
var->height = data->height; /* mm */
var->width = data->width; /* mm */
var->rotate = 0;
return 0;
}
static int tmiofb_set_par(struct fb_info *info)
{
struct fb_var_screeninfo *var = &info->var;
struct fb_videomode *mode;
mode = tmiofb_find_mode(info, var);
if (!mode)
return -EINVAL;
info->mode = mode;
info->fix.line_length = info->mode->xres *
var->bits_per_pixel / 8;
tmiofb_hw_mode(to_platform_device(info->device));
tmiofb_clearscreen(info);
return 0;
}
/*
 * fb_setcolreg hook: pack 16-bit-per-channel color components into an
 * RGB565 pseudo-palette entry for the truecolor console.
 */
static int tmiofb_setcolreg(unsigned regno, unsigned red, unsigned green,
            unsigned blue, unsigned transp,
            struct fb_info *info)
{
    struct tmiofb_par *par = info->par;

    if (regno >= ARRAY_SIZE(par->pseudo_palette))
        return -EINVAL;

    par->pseudo_palette[regno] = (red & 0xf800)
            | ((green & 0xfc00) >> 5)
            | ((blue & 0xf800) >> 11);
    return 0;
}
/* fb_blank hook — intentionally a no-op, see comment below. */
static int tmiofb_blank(int blank, struct fb_info *info)
{
    /*
     * everything is done in lcd/bl drivers.
     * this is purely to make sysfs happy and work.
     */
    return 0;
}
/*
 * Framebuffer operations.  Fill/copy are hardware-accelerated when
 * CONFIG_FB_TMIO_ACCELL is set; image blits always use the generic path.
 */
static struct fb_ops tmiofb_ops = {
    .owner = THIS_MODULE,

    .fb_ioctl = tmiofb_ioctl,
    .fb_check_var = tmiofb_check_var,
    .fb_set_par = tmiofb_set_par,
    .fb_setcolreg = tmiofb_setcolreg,
    .fb_blank = tmiofb_blank,
    .fb_imageblit = cfb_imageblit,
#ifdef CONFIG_FB_TMIO_ACCELL
    .fb_sync = tmiofb_sync,
    .fb_fillrect = tmiofb_fillrect,
    .fb_copyarea = tmiofb_copyarea,
#else
    .fb_fillrect = cfb_fillrect,
    .fb_copyarea = cfb_copyarea,
#endif
};
/*--------------------------------------------------------------------------*/
/*
 * Probe: map the three iomem resources (CCR, LCR, VRAM), register the
 * IRQ handler, pick a video mode and register the framebuffer.  Errors
 * unwind in strict reverse order through the goto chain at the bottom.
 */
static int __devinit tmiofb_probe(struct platform_device *dev)
{
    struct mfd_cell *cell = dev->dev.platform_data;
    struct tmio_fb_data *data = cell->driver_data;
    struct resource *ccr = platform_get_resource(dev, IORESOURCE_MEM, 1);
    struct resource *lcr = platform_get_resource(dev, IORESOURCE_MEM, 0);
    struct resource *vram = platform_get_resource(dev, IORESOURCE_MEM, 2);
    int irq = platform_get_irq(dev, 0);
    struct fb_info *info;
    struct tmiofb_par *par;
    int retval;

    /*
     * This is the only way ATM to disable the fb
     */
    if (data == NULL) {
        dev_err(&dev->dev, "NULL platform data!\n");
        return -EINVAL;
    }

    /* fb_info with a tmiofb_par allocated behind it (info->par) */
    info = framebuffer_alloc(sizeof(struct tmiofb_par), &dev->dev);
    if (!info)
        return -ENOMEM;

    par = info->par;

#ifdef CONFIG_FB_TMIO_ACCELL
    init_waitqueue_head(&par->wait_acc);

    /* start in polling mode; tmiofb_irq switches to waitq on first IRQ */
    par->use_polling = true;

    info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA
            | FBINFO_HWACCEL_FILLRECT;
#else
    info->flags = FBINFO_DEFAULT;
#endif

    info->fbops = &tmiofb_ops;

    strcpy(info->fix.id, "tmio-fb");
    info->fix.smem_start = vram->start;
    info->fix.smem_len = resource_size(vram);
    info->fix.type = FB_TYPE_PACKED_PIXELS;
    info->fix.visual = FB_VISUAL_TRUECOLOR;
    info->fix.mmio_start = lcr->start;
    info->fix.mmio_len = resource_size(lcr);
    info->fix.accel = FB_ACCEL_NONE;
    /* reserve the tail of VRAM for the accelerator command FIFO */
    info->screen_size = info->fix.smem_len - (4 * TMIOFB_FIFO_SIZE);
    info->pseudo_palette = par->pseudo_palette;

    par->ccr = ioremap(ccr->start, resource_size(ccr));
    if (!par->ccr) {
        retval = -ENOMEM;
        goto err_ioremap_ccr;
    }

    par->lcr = ioremap(info->fix.mmio_start, info->fix.mmio_len);
    if (!par->lcr) {
        retval = -ENOMEM;
        goto err_ioremap_lcr;
    }

    info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
    if (!info->screen_base) {
        retval = -ENOMEM;
        goto err_ioremap_vram;
    }

    retval = request_irq(irq, &tmiofb_irq, IRQF_DISABLED,
                    dev_name(&dev->dev), info);

    if (retval)
        goto err_request_irq;

    platform_set_drvdata(dev, info);

    /* mode_option may be NULL; fb_find_mode then picks from data->modes */
    retval = fb_find_mode(&info->var, info, mode_option,
            data->modes, data->num_modes,
            data->modes, 16);
    if (!retval) {
        retval = -EINVAL;
        goto err_find_mode;
    }

    /* power up the MFD cell before touching the hardware */
    if (cell->enable) {
        retval = cell->enable(dev);
        if (retval)
            goto err_enable;
    }

    retval = tmiofb_hw_init(dev);
    if (retval)
        goto err_hw_init;

    fb_videomode_to_modelist(data->modes, data->num_modes,
                 &info->modelist);

    retval = register_framebuffer(info);
    if (retval < 0)
        goto err_register_framebuffer;

    printk(KERN_INFO "fb%d: %s frame buffer device\n",
                info->node, info->fix.id);

    return 0;

err_register_framebuffer:
/*err_set_par:*/
    tmiofb_hw_stop(dev);
err_hw_init:
    if (cell->disable)
        cell->disable(dev);
err_enable:
err_find_mode:
    platform_set_drvdata(dev, NULL);
    free_irq(irq, info);
err_request_irq:
    iounmap(info->screen_base);
err_ioremap_vram:
    iounmap(par->lcr);
err_ioremap_lcr:
    iounmap(par->ccr);
err_ioremap_ccr:
    framebuffer_release(info);
    return retval;
}
/* Remove: tear everything down in the reverse order of probe. */
static int __devexit tmiofb_remove(struct platform_device *dev)
{
    struct mfd_cell *cell = dev->dev.platform_data;
    struct fb_info *info = platform_get_drvdata(dev);
    int irq = platform_get_irq(dev, 0);
    struct tmiofb_par *par;

    /* drvdata may be NULL if probe never completed */
    if (info) {
        par = info->par;
        unregister_framebuffer(info);

        tmiofb_hw_stop(dev);

        if (cell->disable)
            cell->disable(dev);

        platform_set_drvdata(dev, NULL);

        free_irq(irq, info);

        iounmap(info->screen_base);
        iounmap(par->lcr);
        iounmap(par->ccr);

        framebuffer_release(info);
    }

    return 0;
}
#ifdef DEBUG
/*
 * Debug helper: dump every known CCR and LCR register to the kernel
 * log.  The CCR_PR/LCR_PR macros stringify the register name and read
 * its current value.  Compiled only when DEBUG is defined.
 */
static void tmiofb_dump_regs(struct platform_device *dev)
{
    struct fb_info *info = platform_get_drvdata(dev);
    struct tmiofb_par *par = info->par;

    printk(KERN_DEBUG "lhccr:\n");
#define CCR_PR(n) printk(KERN_DEBUG "\t" #n " = \t%04x\n",\
        tmio_ioread16(par->ccr + CCR_ ## n));
    CCR_PR(CMD);
    CCR_PR(REVID);
    CCR_PR(BASEL);
    CCR_PR(BASEH);
    CCR_PR(UGCC);
    CCR_PR(GCC);
    CCR_PR(USC);
    CCR_PR(VRAMRTC);
    CCR_PR(VRAMSAC);
    CCR_PR(VRAMBC);
#undef CCR_PR

    printk(KERN_DEBUG "lcr: \n");
#define LCR_PR(n) printk(KERN_DEBUG "\t" #n " = \t%04x\n",\
        tmio_ioread16(par->lcr + LCR_ ## n));
    LCR_PR(UIS);
    LCR_PR(VHPN);
    LCR_PR(CFSAL);
    LCR_PR(CFSAH);
    LCR_PR(CFS);
    LCR_PR(CFWS);
    LCR_PR(BBIE);
    LCR_PR(BBISC);
    LCR_PR(CCS);
    LCR_PR(BBES);
    LCR_PR(CMDL);
    LCR_PR(CMDH);
    LCR_PR(CFC);
    LCR_PR(CCIFC);
    LCR_PR(HWT);
    LCR_PR(LCDCCRC);
    LCR_PR(LCDCC);
    LCR_PR(LCDCOPC);
    LCR_PR(LCDIS);
    LCR_PR(LCDIM);
    LCR_PR(LCDIE);
    LCR_PR(GDSAL);
    LCR_PR(GDSAH);
    LCR_PR(VHPCL);
    LCR_PR(VHPCH);
    LCR_PR(GM);
    LCR_PR(HT);
    LCR_PR(HDS);
    LCR_PR(HSS);
    LCR_PR(HSE);
    LCR_PR(HNP);
    LCR_PR(VT);
    LCR_PR(VDS);
    LCR_PR(VSS);
    LCR_PR(VSE);
    LCR_PR(CDLN);
    LCR_PR(ILN);
    LCR_PR(SP);
    LCR_PR(MISC);
    LCR_PR(VIHSS);
    LCR_PR(VIVS);
    LCR_PR(VIVE);
    LCR_PR(VIVSS);
    LCR_PR(VCCIS);
    LCR_PR(VIDWSAL);
    LCR_PR(VIDWSAH);
    LCR_PR(VIDRSAL);
    LCR_PR(VIDRSAH);
    LCR_PR(VIPDDST);
    LCR_PR(VIPDDET);
    LCR_PR(VIE);
    LCR_PR(VCS);
    LCR_PR(VPHWC);
    LCR_PR(VPHS);
    LCR_PR(VPVWC);
    LCR_PR(VPVS);
    LCR_PR(PLHPIX);
    LCR_PR(XS);
    LCR_PR(XCKHW);
    LCR_PR(STHS);
    LCR_PR(VT2);
    LCR_PR(YCKSW);
    LCR_PR(YSTS);
    LCR_PR(PPOLS);
    LCR_PR(PRECW);
    LCR_PR(VCLKHW);
    LCR_PR(OC);
#undef LCR_PR
}
#endif
#ifdef CONFIG_PM
/*
 * Suspend: flush pending accelerator work, switch to polling (IRQs go
 * away across suspend), stop the hardware and suspend the MFD cell.
 * Runs under the console semaphore (pre-console_lock() API).
 */
static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
{
    struct fb_info *info = platform_get_drvdata(dev);
#ifdef CONFIG_FB_TMIO_ACCELL
    struct tmiofb_par *par = info->par;
#endif
    struct mfd_cell *cell = dev->dev.platform_data;
    int retval = 0;

    acquire_console_sem();

    fb_set_suspend(info, 1);

    if (info->fbops->fb_sync)
        info->fbops->fb_sync(info);

#ifdef CONFIG_FB_TMIO_ACCELL
    /*
     * The fb should be usable even if interrupts are disabled (and they are
     * during suspend/resume). Switch temporary to forced polling.
     */
    printk(KERN_INFO "tmiofb: switching to polling\n");
    par->use_polling = true;
#endif
    tmiofb_hw_stop(dev);

    if (cell->suspend)
        retval = cell->suspend(dev);

    release_console_sem();

    return retval;
}
/*
 * Resume: power the MFD cell back up, drain stale interrupt status,
 * reinitialize the controller and restore the current video mode.
 */
static int tmiofb_resume(struct platform_device *dev)
{
    struct fb_info *info = platform_get_drvdata(dev);
    struct mfd_cell *cell = dev->dev.platform_data;
    int retval = 0;

    acquire_console_sem();

    if (cell->resume) {
        retval = cell->resume(dev);
        if (retval)
            goto out;
    }

    /* manual call (irq == -1) clears any status left from before suspend */
    tmiofb_irq(-1, info);

    tmiofb_hw_init(dev);

    tmiofb_hw_mode(dev);

    fb_set_suspend(info, 0);
out:
    release_console_sem();
    return retval;
}
#else
#define tmiofb_suspend NULL
#define tmiofb_resume NULL
#endif
/* Platform driver glue; suspend/resume compile to NULL without CONFIG_PM. */
static struct platform_driver tmiofb_driver = {
    .driver.name = "tmio-fb",
    .driver.owner = THIS_MODULE,
    .probe = tmiofb_probe,
    .remove = __devexit_p(tmiofb_remove),
    .suspend = tmiofb_suspend,
    .resume = tmiofb_resume,
};
/*--------------------------------------------------------------------------*/
#ifndef MODULE
/*
 * Parse the comma-separated "video=tmiofb:..." option string.  No
 * options are actually recognized yet (see FIXME) — the loop only
 * consumes tokens.
 */
static void __init tmiofb_setup(char *options)
{
    char *opt;

    if (options == NULL || options[0] == '\0')
        return;

    while ((opt = strsep(&options, ",")) != NULL) {
        if (*opt == '\0')
            continue;
        /*
         * FIXME
         */
    }
}
#endif
/* Module entry: pick up kernel-command-line options, register driver. */
static int __init tmiofb_init(void)
{
#ifndef MODULE
    char *option = NULL;

    /* "video=tmiofb:<options>" from the kernel command line */
    if (fb_get_options("tmiofb", &option))
        return -ENODEV;
    tmiofb_setup(option);
#endif
    return platform_driver_register(&tmiofb_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit tmiofb_cleanup(void)
{
    platform_driver_unregister(&tmiofb_driver);
}
module_init(tmiofb_init);
module_exit(tmiofb_cleanup);
MODULE_DESCRIPTION("TMIO framebuffer driver");
MODULE_AUTHOR("Chris Humbert, Dirk Opfer, Dmitry Baryshkov");
MODULE_LICENSE("GPL");
| gpl-2.0 |
AmeriCanAndroid/aca-evo3d-omega-htc-35 | drivers/staging/otus/80211core/performance.c | 1505 | 13305 | /*
* Copyright (c) 2007-2008 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* */
/* Module Name : performance.c */
/* */
/* Abstract */
/* This module performance evaluation functions. */
/* */
/* NOTES */
/* None */
/* */
/************************************************************************/
#include "cprecomp.h"
#ifdef ZM_ENABLE_PERFORMANCE_EVALUATION
/* Length of the throughput history plotted by zfiPerformanceGraph() */
#define ZM_TP_SIZE 50
/* Counters accumulated over one 100-tick measurement window */
static struct zsSummary zm_summary;
/* Per-tick histograms used to compute per-window variance */
static struct zsVariation zm_var;
/* Ring of recent TX/RX throughput samples (computed assuming 1500-byte frames) */
static struct zsThroughput zm_tp;
/* Reset all statistics counters and arm the 100-tick measurement timer. */
void zfiPerformanceInit(zdev_t* dev)
{
    u16_t i;

    /* macro: declares and initializes the local 'wd' wlan-dev pointer */
    zmw_get_wlan_dev(dev);

    /* the measurement window starts at the current tick */
    zm_summary.tick_base = wd->tick;
    zm_summary.tx_msdu_count = 0;
    zm_summary.tx_mpdu_count = 0;
    zm_summary.rx_msdu_count = 0;
    zm_summary.rx_mpdu_count = 0;
    zm_summary.rx_broken_seq = 0;
    zm_summary.rx_broken_sum = 0;
    zm_summary.rx_seq_base = 0;
    zm_summary.rx_broken_seq_dis = 0;
    zm_summary.rx_duplicate_seq = 0;
    zm_summary.rx_old_seq = 0;
    zm_summary.reset_count = 0;
    zm_summary.reset_sum = 0;
    zm_summary.rx_lost_sum = 0;
    zm_summary.rx_duplicate_error = 0;
    zm_summary.rx_free = 0;
    zm_summary.rx_amsdu_len = 0;
    zm_summary.rx_flush = 0;
    zm_summary.rx_clear = 0;
    zm_summary.rx_reorder = 0;

    /* clear the per-tick histograms for the new window */
    for (i=0; i<100; i++)
    {
        zm_var.tx_msdu_tick[i] = zm_var.tx_mpdu_tick[i] = 0;
        zm_var.rx_msdu_tick[i] = zm_var.rx_mpdu_tick[i] = 0;
    }

    /* zfiPerformanceRefresh() will run after 100 ticks */
    zfTimerSchedule(dev, ZM_EVENT_TIMEOUT_PERFORMANCE, 100);

    /* empty the throughput history ring used by the ASCII graph */
    zm_tp.size = ZM_TP_SIZE;
    zm_tp.head = zm_tp.size - 1;
    zm_tp.tail = 0;
    for (i=0; i<zm_tp.size; i++)
    {
        zm_tp.tx[i]=0;
        zm_tp.rx[i]=0;
    }
}
/*
 * Print an ASCII chart of recent TX/RX throughput to the debug log.
 * Each column is one measurement window; rows are throughput buckets
 * of 10 (units derived from MPDU/MSDU counts assuming 1500-byte
 * frames).  'T' = TX, 'R' = RX, 'X' = both in the same bucket.
 */
void zfiPerformanceGraph(zdev_t* dev)
{
    s16_t i,j;
    u8_t s[ZM_TP_SIZE+5];  /* "NN0|" prefix + one char per sample + NUL */
    zmw_get_wlan_dev(dev);

    /* shift the history left and append this window's sample */
    for (i=0; i<(zm_tp.size-1); i++)
    {
        zm_tp.tx[i] = zm_tp.tx[i+1];
        zm_tp.rx[i] = zm_tp.rx[i+1];
    }
    /* counts * 1500 bytes * 8 bits / 1e6 => megabits per window */
    zm_tp.tx[zm_tp.size-1] = zm_summary.tx_mpdu_count*1500*8/1000000;
    zm_tp.rx[zm_tp.size-1] = zm_summary.rx_msdu_count*1500*8/1000000;

    /* render rows top-down, from bucket 15 down to 1 */
    for (i=15; i>0; i--)
    {
        s[0] = (i/10) + '0';
        s[1] = (i%10) + '0';
        s[2] = '0';
        s[3] = '|';
        for (j=0; j<zm_tp.size; j++)
        {
            if ((zm_tp.tx[j]/10 == i) && (zm_tp.rx[j]/10 == i))
            {
                s[4+j] = 'X';
            }
            else if (zm_tp.tx[j]/10 == i)
            {
                s[4+j] = 'T';
            }
            else if (zm_tp.rx[j]/10 == i)
            {
                s[4+j] = 'R';
            }
            else
            {
                s[4+j] = ' ';
            }
        }
        s[zm_tp.size+4] = '\0';
        DbgPrint("%s",s);
    }
    DbgPrint("000|__________________________________________________");
}
/*
 * Periodic (every 100 ticks) statistics evaluation: computes mean and
 * variance of the per-tick counters, dumps a summary to the debug log,
 * resets the window counters and re-arms the timer.
 */
void zfiPerformanceRefresh(zdev_t* dev)
{
    u16_t i;
    zmw_get_wlan_dev(dev);

    /* kick off an async read of fw register 0x11772c (reset count) */
    zfiDbgReadReg(dev, 0x11772c);

    /* means over the 100-tick window */
    zm_var.tx_msdu_mean = zm_summary.tx_msdu_count / 100;
    zm_var.tx_mpdu_mean = zm_summary.tx_mpdu_count / 100;
    zm_var.rx_msdu_mean = zm_summary.rx_msdu_count / 100;
    zm_var.rx_mpdu_mean = zm_summary.rx_mpdu_count / 100;

    zm_var.tx_msdu_sum = zm_var.tx_mpdu_sum = 0;
    zm_var.rx_msdu_sum = zm_var.rx_mpdu_sum = 0;
    zm_summary.tx_idle_count = zm_summary.rx_idle_count = 0;

    /* accumulate sums of squares and count idle (zero-activity) ticks */
    for (i=0; i<100; i++)
    {
        zm_var.tx_msdu_sum += (zm_var.tx_msdu_tick[i] * zm_var.tx_msdu_tick[i]);
        zm_var.tx_mpdu_sum += (zm_var.tx_mpdu_tick[i] * zm_var.tx_mpdu_tick[i]);
        zm_var.rx_msdu_sum += (zm_var.rx_msdu_tick[i] * zm_var.rx_msdu_tick[i]);
        zm_var.rx_mpdu_sum += (zm_var.rx_mpdu_tick[i] * zm_var.rx_mpdu_tick[i]);

        if (!zm_var.tx_mpdu_tick[i]) zm_summary.tx_idle_count++;
        if (!zm_var.rx_mpdu_tick[i]) zm_summary.rx_idle_count++;
    }

    /* variance = E[x^2] - (E[x])^2 */
    zm_var.tx_msdu_var = (zm_var.tx_msdu_sum / 100) - (zm_var.tx_msdu_mean * zm_var.tx_msdu_mean);
    zm_var.tx_mpdu_var = (zm_var.tx_mpdu_sum / 100) - (zm_var.tx_mpdu_mean * zm_var.tx_mpdu_mean);
    zm_var.rx_msdu_var = (zm_var.rx_msdu_sum / 100) - (zm_var.rx_msdu_mean * zm_var.rx_msdu_mean);
    zm_var.rx_mpdu_var = (zm_var.rx_mpdu_sum / 100) - (zm_var.rx_mpdu_mean * zm_var.rx_mpdu_mean);

    /* new window starts now */
    zm_summary.tick_base = wd->tick;
    zm_summary.rx_broken_sum += zm_summary.rx_broken_seq;
    zm_summary.rx_lost_sum += (zm_summary.rx_broken_seq - zm_summary.rx_duplicate_seq - zm_summary.rx_old_seq);

    zfiPerformanceGraph(dev);

    DbgPrint("******************************************************\n");
    DbgPrint("* TX: MSDU=%5d, VAR=%5d; MPDU=%5d, VAR=%5d\n", zm_summary.tx_msdu_count,
        zm_var.tx_msdu_var, zm_summary.tx_mpdu_count, zm_var.tx_mpdu_var);
    DbgPrint("* TX: idle=%5d,TxRate=%3d, PER=%5d\n", zm_summary.tx_idle_count,
        wd->CurrentTxRateKbps/1000,
        (u16_t)wd->PER[wd->sta.oppositeInfo[0].rcCell.currentRate]);
    DbgPrint("* RX: MSDU=%5d, VAR=%5d; MPDU=%5d, VAR=%5d\n", zm_summary.rx_msdu_count,
        zm_var.rx_msdu_var, zm_summary.rx_mpdu_count, zm_var.rx_mpdu_var);
    DbgPrint("* RX: idle=%5d,RxRate=%3d,AMSDU=%5d\n", zm_summary.rx_idle_count,
        wd->CurrentRxRateKbps/1000, zm_summary.rx_amsdu_len);
    DbgPrint("* RX broken seq=%4d, distances=%4d, duplicates=%4d\n", zm_summary.rx_broken_seq,
        zm_summary.rx_broken_seq_dis, zm_summary.rx_duplicate_seq);
    DbgPrint("* RX old seq=%4d, lost=%4d, broken sum=%4d\n", zm_summary.rx_old_seq,
        (zm_summary.rx_broken_seq - zm_summary.rx_duplicate_seq - zm_summary.rx_old_seq),
        zm_summary.rx_broken_sum);
    DbgPrint("* Rx lost sum=%4d,dup. error=%4d, free count=%4d\n", zm_summary.rx_lost_sum,
        zm_summary.rx_duplicate_error, zm_summary.rx_free);
    DbgPrint("* Rx flush sum=%4d, clear sum=%4d, reorder=%7d\n", zm_summary.rx_flush,
        zm_summary.rx_clear, zm_summary.rx_reorder);
    DbgPrint("* Firmware reset=%3d, reset sum=%4d\n", zm_summary.reset_count,
        zm_summary.reset_sum);
    DbgPrint("******************************************************\n\n");

    //reset count 11772c
    /* clear per-window counters (running sums above are kept) */
    zm_summary.tx_msdu_count = 0;
    zm_summary.tx_mpdu_count = 0;
    zm_summary.rx_msdu_count = 0;
    zm_summary.rx_mpdu_count = 0;
    zm_summary.rx_broken_seq = 0;
    zm_summary.rx_broken_seq_dis = 0;
    zm_summary.rx_duplicate_seq = 0;
    zm_summary.rx_old_seq = 0;
    zm_summary.reset_count = 0;
    zm_summary.rx_amsdu_len = 0;

    for (i=0; i<100; i++)
    {
        zm_var.tx_msdu_tick[i] = zm_var.tx_mpdu_tick[i] = 0;
        zm_var.rx_msdu_tick[i] = zm_var.rx_mpdu_tick[i] = 0;
    }

    /* re-arm for the next 100-tick window */
    zfTimerSchedule(dev, ZM_EVENT_TIMEOUT_PERFORMANCE, 100);
}
/* Account one transmitted MSDU against the current 100-tick window. */
void zfiTxPerformanceMSDU(zdev_t* dev, u32_t tick)
{
    u32_t slot = tick - zm_summary.tick_base;

    zm_summary.tx_msdu_count++;

    /* ticks past the window end go into the total only */
    if (slot < 100)
        zm_var.tx_msdu_tick[slot]++;
}
/* Account one received MSDU against the current 100-tick window. */
void zfiRxPerformanceMSDU(zdev_t* dev, u32_t tick)
{
    u32_t slot = tick - zm_summary.tick_base;

    zm_summary.rx_msdu_count++;

    /* ticks past the window end go into the total only */
    if (slot < 100)
        zm_var.rx_msdu_tick[slot]++;
}
/* Account one transmitted MPDU against the current 100-tick window. */
void zfiTxPerformanceMPDU(zdev_t* dev, u32_t tick)
{
    u32_t slot = tick - zm_summary.tick_base;

    zm_summary.tx_mpdu_count++;

    /* ticks past the window end go into the total only */
    if (slot < 100)
        zm_var.tx_mpdu_tick[slot]++;
}
#ifndef ZM_INT_USE_EP2_HEADER_SIZE
#define ZM_INT_USE_EP2_HEADER_SIZE 12
#endif
/*
 * Account one received MPDU.  Locates the 802.11 frame control field
 * behind an optional PLCP header (presence depends on the aggregation
 * indicator byte stored at the end of the buffer) and counts only data
 * frames.
 */
void zfiRxPerformanceMPDU(zdev_t* dev, zbuf_t* buf)
{
    u32_t index;
    u16_t frameType;
    u16_t frameCtrl;
    u8_t mpduInd;
    u16_t plcpHdrLen;
    u16_t len;

    zmw_get_wlan_dev(dev);

    /* last byte of the buffer carries the MPDU position indicator */
    len = zfwBufGetSize(dev, buf);
    mpduInd = zmw_rx_buf_readb(dev, buf, len-1);
    /* First MPDU or Single MPDU */
    if(((mpduInd & 0x30) == 0x00) || ((mpduInd & 0x30) == 0x20))
    //if ((mpduInd & 0x10) == 0x00)
    {
        plcpHdrLen = 12;        // PLCP header length
    }
    else
    {
        /*
         * Middle/last MPDU: probe for our MAC address at the two
         * possible header offsets to decide whether a PLCP header
         * precedes the 802.11 header.
         */
        if (zmw_rx_buf_readh(dev, buf, 4) == wd->macAddr[0] &&
            zmw_rx_buf_readh(dev, buf, 6) == wd->macAddr[1] &&
            zmw_rx_buf_readh(dev, buf, 8) == wd->macAddr[2]) {
            plcpHdrLen = 0;
        }
        else if (zmw_rx_buf_readh(dev, buf, 16) == wd->macAddr[0] &&
                 zmw_rx_buf_readh(dev, buf, 18) == wd->macAddr[1] &&
                 zmw_rx_buf_readh(dev, buf, 20) == wd->macAddr[2]){
            plcpHdrLen = 12;
        }
        else {
            plcpHdrLen = 0;
        }
    }

    /* only count 802.11 data frames */
    frameCtrl = zmw_rx_buf_readb(dev, buf, plcpHdrLen + 0);
    frameType = frameCtrl & 0xf;
    if (frameType != ZM_WLAN_DATA_FRAME)
    {
        return;
    }

    zm_summary.rx_mpdu_count++;
    index = wd->tick - zm_summary.tick_base;
    if (index < 100)
    {
        zm_var.rx_mpdu_tick[index]++;
    }
    else
    {
        //DbgPrint("wd->tick exceeded tick_base+100!\n");
    }
}
/*
 * Track RX sequence-number continuity.  Sequence numbers are 12-bit
 * (0..4095) and wrap, so heuristics below distinguish a normal wrap from
 * a genuinely broken (skipped or reordered) sequence.
 */
void zfiRxPerformanceSeq(zdev_t* dev, zbuf_t* buf)
{
	u16_t seq_no;
	u16_t offset = 0;
	u16_t old_dis = zm_summary.rx_broken_seq_dis;
	//sys_time = KeQueryPerformanceCounter(&freq);
	/* sequence number is the upper 12 bits of the seq-control field */
	seq_no = zmw_rx_buf_readh(dev, buf, offset+22) >> 4;
	ZM_SEQ_DEBUG("Out %5d\n", seq_no);
	if (seq_no < zm_summary.rx_seq_base)
	{
		if (seq_no == 0)
		{
			/* wrap to 0: only broken if the previous number was not 4095 */
			if (zm_summary.rx_seq_base != 4095)
			{
				zm_summary.rx_broken_seq++;
				ZM_SEQ_DEBUG("Broken seq");
				zm_summary.rx_broken_seq_dis+=(4096 - zm_summary.rx_seq_base);
			}
		}
		else if ((seq_no < 300) && (zm_summary.rx_seq_base > 3800))
		{
			/* small number after a large one: wrap with a gap */
			zm_summary.rx_broken_seq++;
			ZM_SEQ_DEBUG("Broken seq");
			zm_summary.rx_broken_seq_dis+=(4096 - zm_summary.rx_seq_base + seq_no);
		}
		else
		{
			/* smaller without wrap: an old (late/reordered) frame */
			zm_summary.rx_broken_seq++;
			ZM_SEQ_DEBUG("Broken seq");
			zm_summary.rx_broken_seq_dis+=(zm_summary.rx_seq_base - seq_no);
			zm_summary.rx_old_seq++;
		}
	}
	else
	{
		if (seq_no != (zm_summary.rx_seq_base + 1))
		{
			if ((seq_no > 3800) && (zm_summary.rx_seq_base < 300))
			{
				/* large number just after a wrap: old frame from before the wrap */
				zm_summary.rx_broken_seq++;
				ZM_SEQ_DEBUG("Broken seq");
				zm_summary.rx_broken_seq_dis+=(4096 - seq_no + zm_summary.rx_seq_base);
				zm_summary.rx_old_seq++;
			}
			else
			{
				/* forward jump: frames were skipped */
				zm_summary.rx_broken_seq++;
				ZM_SEQ_DEBUG("Broken seq");
				zm_summary.rx_broken_seq_dis+=(seq_no - zm_summary.rx_seq_base);
			}
		}
	}
	if (seq_no == zm_summary.rx_seq_base)
	{
		zm_summary.rx_duplicate_seq++;
	}
	/* flag unusually large jumps for debugging */
	if ((zm_summary.rx_broken_seq_dis - old_dis) > 100)
	{
		DbgPrint("* seq_no=%4d, base_seq=%4d, dis_diff=%4d", seq_no,
				zm_summary.rx_seq_base, zm_summary.rx_broken_seq_dis - old_dis);
	}
	zm_summary.rx_seq_base = seq_no;
}
/* Derive the per-interval firmware reset delta from the cumulative
 * counter reported in 'rsp'.  'reg' is currently unused. */
void zfiRxPerformanceReg(zdev_t* dev, u32_t reg, u32_t rsp)
{
	u16_t total = (u16_t)rsp;

	zm_summary.reset_count = total - zm_summary.reset_sum;
	zm_summary.reset_sum = total;
}
/* Sanity-check: two frames flagged as duplicates must carry the same
 * 12-bit sequence number; otherwise record a duplicate-detection error. */
void zfiRxPerformanceDup(zdev_t* dev, zbuf_t* buf1, zbuf_t* buf2)
{
	u16_t first = zmw_rx_buf_readh(dev, buf1, 22) >> 4;
	u16_t second = zmw_rx_buf_readh(dev, buf2, 22) >> 4;

	if (first != second)
		zm_summary.rx_duplicate_error++;
}
/* Count a receive buffer being released back to the pool. */
void zfiRxPerformanceFree(zdev_t* dev, zbuf_t* buf)
{
	zm_summary.rx_free += 1;
}
/* Remember the largest A-MSDU length observed so far. */
void zfiRxPerformanceAMSDU(zdev_t* dev, zbuf_t* buf, u16_t len)
{
	if (len > zm_summary.rx_amsdu_len)
		zm_summary.rx_amsdu_len = len;
}
/* Count a reorder-buffer flush. */
void zfiRxPerformanceFlush(zdev_t* dev)
{
	zm_summary.rx_flush += 1;
}
/* Count a reorder-buffer clear and trace it. */
void zfiRxPerformanceClear(zdev_t* dev)
{
	zm_summary.rx_clear += 1;
	ZM_SEQ_DEBUG("RxClear");
}
/* Count a frame delivered out of the reorder buffer. */
void zfiRxPerformanceReorder(zdev_t* dev)
{
	zm_summary.rx_reorder += 1;
}
#endif /* end of ZM_ENABLE_PERFORMANCE_EVALUATION */
| gpl-2.0 |
kirilllavrov/android_kernel_huawei_s10101l | arch/arm/mach-omap1/time.c | 2273 | 8347 | /*
* linux/arch/arm/mach-omap1/time.c
*
* OMAP Timers
*
* Copyright (C) 2004 Nokia Corporation
* Partial timer rewrite and additional dynamic tick timer support by
* Tony Lindgen <tony@atomide.com> and
* Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
*
* MPU timer code based on the older MPU timer code for OMAP
* Copyright (C) 2000 RidgeRun, Inc.
* Author: Greg Lonnon <glonnon@ridgerun.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <asm/system.h>
#include <mach/hardware.h>
#include <asm/leds.h>
#include <asm/irq.h>
#include <asm/sched_clock.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <plat/common.h>
#ifdef CONFIG_OMAP_MPU_TIMER
#define OMAP_MPU_TIMER_BASE OMAP_MPU_TIMER1_BASE
#define OMAP_MPU_TIMER_OFFSET 0x100
/* Register layout of one OMAP MPU timer instance. */
typedef struct {
	u32 cntl;			/* CNTL_TIMER, R/W */
	u32 load_tim;		/* LOAD_TIM,   W */
	u32 read_tim;		/* READ_TIM,   R */
} omap_mpu_timer_regs_t;
/* Virtual base address of MPU timer 'n'; timers are 0x100 bytes apart. */
#define omap_mpu_timer_base(n)							\
((omap_mpu_timer_regs_t __iomem *)OMAP1_IO_ADDRESS(OMAP_MPU_TIMER_BASE +	\
				 (n)*OMAP_MPU_TIMER_OFFSET))
static inline unsigned long notrace omap_mpu_timer_read(int nr)
{
omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr);
return readl(&timer->read_tim);
}
static inline void omap_mpu_set_autoreset(int nr)
{
omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr);
writel(readl(&timer->cntl) | MPU_TIMER_AR, &timer->cntl);
}
static inline void omap_mpu_remove_autoreset(int nr)
{
omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr);
writel(readl(&timer->cntl) & ~MPU_TIMER_AR, &timer->cntl);
}
static inline void omap_mpu_timer_start(int nr, unsigned long load_val,
int autoreset)
{
omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr);
unsigned int timerflags = MPU_TIMER_CLOCK_ENABLE | MPU_TIMER_ST;
if (autoreset)
timerflags |= MPU_TIMER_AR;
writel(MPU_TIMER_CLOCK_ENABLE, &timer->cntl);
udelay(1);
writel(load_val, &timer->load_tim);
udelay(1);
writel(timerflags, &timer->cntl);
}
static inline void omap_mpu_timer_stop(int nr)
{
omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(nr);
writel(readl(&timer->cntl) & ~MPU_TIMER_ST, &timer->cntl);
}
/*
* ---------------------------------------------------------------------------
* MPU timer 1 ... count down to zero, interrupt, reload
* ---------------------------------------------------------------------------
*/
/* clockevents callback: arm timer 1 for a one-shot event 'cycles' away. */
static int omap_mpu_set_next_event(unsigned long cycles,
				   struct clock_event_device *evt)
{
	omap_mpu_timer_start(0, cycles, 0);
	return 0;
}
/* clockevents callback: switch timer 1 between periodic and one-shot. */
static void omap_mpu_set_mode(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* auto-reload keeps the timer firing every period */
		omap_mpu_set_autoreset(0);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* stop and disable reload; set_next_event restarts it */
		omap_mpu_timer_stop(0);
		omap_mpu_remove_autoreset(0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}
/* clockevent backed by MPU timer 1; mult/max/min are filled in at init. */
static struct clock_event_device clockevent_mpu_timer1 = {
	.name		= "mpu_timer1",
	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.shift		= 32,
	.set_next_event	= omap_mpu_set_next_event,
	.set_mode	= omap_mpu_set_mode,
};
/* Timer 1 expiry: forward the tick to the clockevents core. */
static irqreturn_t omap_mpu_timer1_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &clockevent_mpu_timer1;
	evt->event_handler(evt);
	return IRQ_HANDLED;
}
static struct irqaction omap_mpu_timer1_irq = {
	.name		= "mpu_timer1",
	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= omap_mpu_timer1_interrupt,
};
/*
 * Register MPU timer 1 as the system clockevent: hook its IRQ, start
 * it periodic at HZ, then compute the conversion factors and register
 * with the clockevents core.
 */
static __init void omap_init_mpu_timer(unsigned long rate)
{
	setup_irq(INT_TIMER1, &omap_mpu_timer1_irq);
	omap_mpu_timer_start(0, (rate / HZ) - 1, 1);
	clockevent_mpu_timer1.mult = div_sc(rate, NSEC_PER_SEC,
					    clockevent_mpu_timer1.shift);
	/* (unsigned long)-1 == longest programmable delta */
	clockevent_mpu_timer1.max_delta_ns =
		clockevent_delta2ns(-1, &clockevent_mpu_timer1);
	clockevent_mpu_timer1.min_delta_ns =
		clockevent_delta2ns(1, &clockevent_mpu_timer1);
	clockevent_mpu_timer1.cpumask = cpumask_of(0);
	clockevents_register_device(&clockevent_mpu_timer1);
}
/*
* ---------------------------------------------------------------------------
* MPU timer 2 ... free running 32-bit clock source and scheduler clock
* ---------------------------------------------------------------------------
*/
static DEFINE_CLOCK_DATA(cd);
/* sched_clock based on MPU timer 2; the timer counts down, so the raw
 * value is inverted to obtain a monotonically increasing cycle count. */
static inline unsigned long long notrace _omap_mpu_sched_clock(void)
{
	u32 cyc = ~omap_mpu_timer_read(1);
	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
}
#ifndef CONFIG_OMAP_32K_TIMER
/* no 32k timer built in: the MPU timer is the only sched_clock source */
unsigned long long notrace sched_clock(void)
{
	return _omap_mpu_sched_clock();
}
#else
/* 32k timer available too: selection happens via preferred_sched_clock */
static unsigned long long notrace omap_mpu_sched_clock(void)
{
	return _omap_mpu_sched_clock();
}
#endif
/* Periodic callback keeping the sched_clock epoch from wrapping. */
static void notrace mpu_update_sched_clock(void)
{
	u32 cyc = ~omap_mpu_timer_read(1);
	update_sched_clock(&cd, cyc, (u32)~0);
}
/*
 * Start MPU timer 2 free-running (auto-reload from ~0) and register it
 * as both the sched_clock source and an mmio clocksource.
 */
static void __init omap_init_clocksource(unsigned long rate)
{
	omap_mpu_timer_regs_t __iomem *timer = omap_mpu_timer_base(1);
	static char err[] __initdata = KERN_ERR
			"%s: can't register clocksource!\n";
	omap_mpu_timer_start(1, ~0, 1);
	init_sched_clock(&cd, mpu_update_sched_clock, 32, rate);
	/* rating 300; readl_down because the hardware counts down */
	if (clocksource_mmio_init(&timer->read_tim, "mpu_timer2", rate,
				  300, 32, clocksource_mmio_readl_down))
		printk(err, "mpu_timer2");
}
/*
 * Bring up both MPU timers: derive the timer rate from ck_ref
 * (halved for PTV = 0), then set up the clockevent and clocksource.
 */
static void __init omap_mpu_timer_init(void)
{
	struct clk	*ck_ref = clk_get(NULL, "ck_ref");
	unsigned long	rate;
	BUG_ON(IS_ERR(ck_ref));
	rate = clk_get_rate(ck_ref);
	clk_put(ck_ref);
	/* PTV = 0 */
	rate /= 2;
	omap_init_mpu_timer(rate);
	omap_init_clocksource(rate);
}
#else
/* CONFIG_OMAP_MPU_TIMER disabled: this path should never be reached. */
static inline void omap_mpu_timer_init(void)
{
	pr_err("Bogus timer, should not happen\n");
}
#endif	/* CONFIG_OMAP_MPU_TIMER */
#if defined(CONFIG_OMAP_MPU_TIMER) && defined(CONFIG_OMAP_32K_TIMER)
/*
 * Both timer backends are compiled in, so the sched_clock source must
 * be chosen at runtime via this function pointer.
 */
static unsigned long long (*preferred_sched_clock)(void);
unsigned long long notrace sched_clock(void)
{
	/* sched_clock can be called before the timer code picks a source */
	if (!preferred_sched_clock)
		return 0;
	return preferred_sched_clock();
}
/* Select the 32 kHz or MPU timer implementation of sched_clock. */
static inline void preferred_sched_clock_init(bool use_32k_sched_clock)
{
	if (use_32k_sched_clock)
		preferred_sched_clock = omap_32k_sched_clock;
	else
		preferred_sched_clock = omap_mpu_sched_clock;
}
#else
/* Only one backend built in: sched_clock is fixed, nothing to select.
 * (Fixed misspelled parameter name "use_32k_sched_clcok".) */
static inline void preferred_sched_clock_init(bool use_32k_sched_clock)
{
}
#endif
/* Nonzero when this SoC has a 32 kHz sync timer and it initialised OK. */
static inline int omap_32k_timer_usable(void)
{
	/* OMAP730 and OMAP15xx have no 32k sync timer */
	if (cpu_is_omap730() || cpu_is_omap15xx())
		return false;
#ifdef CONFIG_OMAP_32K_TIMER
	return omap_32k_timer_init();
#else
	return false;
#endif
}
/*
* ---------------------------------------------------------------------------
* Timer initialization
* ---------------------------------------------------------------------------
*/
/*
 * System timer bring-up: prefer the 32 kHz sync timer when present,
 * otherwise fall back to the MPU timer; select the matching
 * sched_clock implementation either way.
 */
static void __init omap_timer_init(void)
{
	if (omap_32k_timer_usable()) {
		preferred_sched_clock_init(1);
	} else {
		omap_mpu_timer_init();
		preferred_sched_clock_init(0);
	}
}
struct sys_timer omap_timer = {
	.init		= omap_timer_init,
};
| gpl-2.0 |
cooks8/android_kernel_samsung_smdk4x12 | fs/notify/fanotify/fanotify_user.c | 2529 | 21954 | #include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#define FANOTIFY_DEFAULT_MAX_EVENTS 16384
#define FANOTIFY_DEFAULT_MAX_MARKS 8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS 128
extern const struct fsnotify_ops fanotify_fsnotify_ops;
static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;
/* Pending permission event awaiting a userspace FAN_ALLOW/FAN_DENY,
 * queued on group->fanotify_data.access_list and keyed by 'fd'. */
struct fanotify_response_event {
	struct list_head list;	/* entry on the group's access_list */
	__s32 fd;		/* fd handed to userspace for this event */
	struct fsnotify_event *event;	/* the blocked permission event */
};
/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));
	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
	if (fsnotify_notify_queue_is_empty(group))
		return NULL;
	/* every fanotify event is the same fixed size on the wire */
	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);
	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}
/*
 * Open a new file descriptor on the object named by the event so the
 * listener can access it.  Returns the installed fd, or a negative
 * errno (which is still reported to userspace in the metadata).
 */
static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
{
	int client_fd;
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct file *new_file;
	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;
	/* fanotify events always carry a path, never a raw inode */
	if (event->data_type != FSNOTIFY_EVENT_PATH) {
		WARN_ON(1);
		put_unused_fd(client_fd);
		return -EINVAL;
	}
	/*
	 * we need a new file handle for the userspace program so it can read even if it was
	 * originally opened O_WRONLY.
	 */
	dentry = dget(event->path.dentry);
	mnt = mntget(event->path.mnt);
	/* it's possible this event was an overflow event.  in that case dentry and mnt
	 * are NULL;  That's fine, just don't call dentry open */
	if (dentry && mnt)
		new_file = dentry_open(dentry, mnt,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * we still send an event even if we can't open the file.  this
		 * can happen when say tasks are gone and we try to open their
		 * /proc files or we try to open a WRONLY file like in sysfs
		 * we just send the errno to userspace since there isn't much
		 * else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		fd_install(client_fd, new_file);
	}
	return client_fd;
}
/*
 * Build the fixed-size fanotify_event_metadata record for 'event',
 * including a freshly opened fd (or FAN_NOFD for queue overflow).
 * Returns 0, or the negative fd-creation error.
 */
static int fill_event_metadata(struct fsnotify_group *group,
				struct fanotify_event_metadata *metadata,
				struct fsnotify_event *event)
{
	int ret = 0;
	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, event);
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	/* only expose event bits userspace is allowed to see */
	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}
	return ret;
}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
/*
 * Remove and return the pending permission event matching 'fd' from the
 * group's access list, or NULL if none.  Takes access_mutex internally.
 */
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
						  __s32 fd)
{
	struct fanotify_response_event *re, *return_re = NULL;
	mutex_lock(&group->fanotify_data.access_mutex);
	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
		if (re->fd != fd)
			continue;
		list_del_init(&re->list);
		return_re = re;
		break;
	}
	mutex_unlock(&group->fanotify_data.access_mutex);
	pr_debug("%s: found return_re=%p\n", __func__, return_re);
	return return_re;
}
/*
 * Handle a FAN_ALLOW/FAN_DENY verdict written by userspace: find the
 * blocked event by fd, record the response and wake the waiter.
 */
static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_response_event *re;
	__s32 fd = response_struct->fd;
	__u32 response = response_struct->response;
	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * make sure the response is valid, if invalid we do nothing and either
	 * userspace can send a valid response or we will clean it up after the
	 * timeout
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}
	if (fd < 0)
		return -EINVAL;
	re = dequeue_re(group, fd);
	if (!re)
		return -ENOENT;
	re->event->response = response;
	wake_up(&group->fanotify_data.access_waitq);
	kmem_cache_free(fanotify_response_event_cache, re);
	return 0;
}
/*
 * For permission events, queue a response record keyed by 'fd' so a
 * later userspace write can be matched back to this event.  If the
 * group is being torn down (bypass_perm set), auto-allow instead.
 */
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	struct fanotify_response_event *re;
	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return 0;
	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
	if (!re)
		return -ENOMEM;
	re->event = event;
	re->fd = fd;
	mutex_lock(&group->fanotify_data.access_mutex);
	if (atomic_read(&group->fanotify_data.bypass_perm)) {
		/* group is shutting down: don't block the event */
		mutex_unlock(&group->fanotify_data.access_mutex);
		kmem_cache_free(fanotify_response_event_cache, re);
		event->response = FAN_ALLOW;
		return 0;
	}
	list_add_tail(&re->list, &group->fanotify_data.access_list);
	mutex_unlock(&group->fanotify_data.access_mutex);
	return 0;
}
/*
 * Undo prepare_for_access_response() when delivering the event to
 * userspace failed: drop the queued response record, if still present.
 */
static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	struct fanotify_response_event *re;
	if (!(event->mask & FAN_ALL_PERM_EVENTS))
		return;
	re = dequeue_re(group, fd);
	if (!re)
		return;
	BUG_ON(re->event != event);
	kmem_cache_free(fanotify_response_event_cache, re);
	return;
}
#else
/* Permission events disabled: these become no-ops. */
static int prepare_for_access_response(struct fsnotify_group *group,
				       struct fsnotify_event *event,
				       __s32 fd)
{
	return 0;
}
static void remove_access_response(struct fsnotify_group *group,
				   struct fsnotify_event *event,
				   __s32 fd)
{
	return;
}
#endif
/*
 * Serialise one event into the user buffer.  On success returns the
 * number of bytes written; on failure unwinds the fd and, for
 * permission events, denies so the blocked task isn't left hanging.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	int fd, ret;
	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
	ret = fill_event_metadata(group, &fanotify_event_metadata, event);
	if (ret < 0)
		goto out;
	fd = fanotify_event_metadata.fd;
	ret = prepare_for_access_response(group, event, fd);
	if (ret)
		goto out_close_fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_kill_access_response;
	return fanotify_event_metadata.event_len;
out_kill_access_response:
	remove_access_response(group, event, fd);
out_close_fd:
	/* FAN_NOFD marks an overflow event: there is no fd to close */
	if (fd != FAN_NOFD)
		sys_close(fd);
out:
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS) {
		/* delivery failed: deny and wake the blocked task */
		event->response = FAN_DENY;
		wake_up(&group->fanotify_data.access_waitq);
	}
#endif
	return ret;
}
/* fanotify userspace file descriptor functions */
/* poll(): report readable when the notification queue is non-empty. */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	unsigned int events = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		events = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return events;
}
/*
 * read(): copy as many queued events as fit into 'buf', blocking (unless
 * O_NONBLOCK) until at least one event has been delivered.
 */
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);
	start = buf;
	group = file->private_data;
	pr_debug("%s: group=%p\n", __func__, group);
	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);
		if (kevent) {
			/* get_one_event() returns ERR_PTR when the remaining
			 * buffer is too small for another event */
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}
		/* queue empty: decide whether to sleep or bail out */
		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;
		/* already delivered something: return it rather than block */
		if (start != buf)
			break;
		schedule();
	}
	finish_wait(&group->notification_waitq, &wait);
	/* partial success wins over a trailing error, except EFAULT */
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
/*
 * write(): accept a struct fanotify_response carrying a userspace
 * FAN_ALLOW/FAN_DENY verdict for a pending permission event.
 */
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;
	group = file->private_data;
	/* clamp: only the leading struct-sized portion is meaningful */
	if (count > sizeof(response))
		count = sizeof(response);
	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);
	if (copy_from_user(&response, buf, count))
		return -EFAULT;
	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;
	return count;
#else
	return -EINVAL;
#endif
}
/*
 * Final fput on the fanotify fd: auto-allow every permission event still
 * waiting for a verdict (so no task stays blocked forever), then drop
 * the group reference taken at fanotify_init time.
 */
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response_event *re, *lre;
	mutex_lock(&group->fanotify_data.access_mutex);
	/* make future prepare_for_access_response() calls auto-allow too */
	atomic_inc(&group->fanotify_data.bypass_perm);
	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
			 re, re->event);
		list_del_init(&re->list);
		re->event->response = FAN_ALLOW;
		kmem_cache_free(fanotify_response_event_cache, re);
	}
	mutex_unlock(&group->fanotify_data.access_mutex);
	wake_up(&group->fanotify_data.access_waitq);
#endif
	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_put_group(group);
	return 0;
}
/*
 * ioctl(): only FIONREAD is supported - reports the number of bytes a
 * read() would currently return (queued events * metadata size).
 */
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;
	group = file->private_data;
	p = (void __user *) arg;
	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}
	return ret;
}
/* file_operations for the anonymous fd returned by fanotify_init(). */
static const struct file_operations fanotify_fops = {
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};
/* Destructor callback handed to fsnotify_init_mark(). */
static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}
/*
 * Resolve the (dfd, filename) pair from fanotify_mark() into a path.
 * filename == NULL means "mark dfd itself".  On success the caller owns
 * a reference on *path and must path_put() it.
 */
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;
	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);
	if (filename == NULL) {
		struct file *file;
		int fput_needed;
		ret = -EBADF;
		file = fget_light(dfd, &fput_needed);
		if (!file)
			goto out;
		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
			fput_light(file, fput_needed);
			goto out;
		}
		*path = file->f_path;
		path_get(path);
		fput_light(file, fput_needed);
	} else {
		unsigned int lookup_flags = 0;
		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;
		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}
	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}
/*
 * Clear 'mask' bits from the mark's event mask (or ignored mask, with
 * FAN_MARK_IGNORED_MASK).  Destroys the mark when nothing remains.
 * Returns the bits that were actually set before removal.
 */
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags)
{
	__u32 oldmask;
	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);
	/* no interesting bits left: the mark serves no purpose */
	if (!(oldmask & ~mask))
		fsnotify_destroy_mark(fsn_mark);
	return mask & oldmask;
}
/*
 * FAN_MARK_REMOVE on a mount: trim this group's vfsmount mark and
 * recompute the mount's aggregate mask if visible bits were dropped.
 */
static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark)
		return -ENOENT;
	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (removed & mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);
	return 0;
}
/*
 * FAN_MARK_REMOVE on an inode: trim this group's inode mark and
 * recompute the inode's aggregate mask if visible bits were dropped.
 */
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;
	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);
	return 0;
}
/*
 * OR 'mask' into the mark's event mask (or ignored mask, with
 * FAN_MARK_IGNORED_MASK).  Without FAN_MARK_ONDIR, directory events
 * are suppressed via the ignored mask.  Returns the newly-set bits.
 */
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;
	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}
	if (!(flags & FAN_MARK_ONDIR)) {
		/* caller didn't ask for events on directories: ignore them */
		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}
	spin_unlock(&fsn_mark->lock);
	return mask & ~oldmask;
}
/*
 * FAN_MARK_ADD on a mount: find or create this group's vfsmount mark,
 * extend its mask, and refresh the mount's aggregate mask as needed.
 */
static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;
	int ret = 0;
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		/* enforce the per-group mark limit before allocating */
		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
			return -ENOSPC;
		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;
		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
		if (ret)
			goto err;
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	if (added & ~mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);
err:
	fsnotify_put_mark(fsn_mark);
	return ret;
}
/*
 * FAN_MARK_ADD on an inode: find or create this group's inode mark,
 * extend its mask, and refresh the inode's aggregate mask as needed.
 */
static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;
	int ret = 0;
	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		/* enforce the per-group mark limit before allocating */
		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
			return -ENOSPC;
		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;
		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
		if (ret)
			goto err;
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);
err:
	fsnotify_put_mark(fsn_mark);
	return ret;
}
/* fanotify syscalls */
/*
 * fanotify_init(2): validate flags, allocate an fsnotify group, apply
 * the notification class and queue/mark limits, and return an
 * anonymous fd bound to fanotify_fops.  Requires CAP_SYS_ADMIN.
 */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;
	pr_debug("%s: flags=%d event_f_flags=%d\n",
		__func__, flags, event_f_flags);
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;
	/* per-user listener limit */
	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}
	/* FMODE_NONOTIFY keeps our own fd from generating events */
	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;
	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}
	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);
	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	mutex_init(&group->fanotify_data.access_mutex);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
	/* notification class decides priority relative to other groups */
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_put_group;
	}
	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_put_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}
	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_put_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}
	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_put_group;
	return fd;
out_put_group:
	fsnotify_put_group(group);
	return fd;
}
/*
 * fanotify_mark(2): add, remove or flush event marks on the inode or
 * mount named by (dfd, pathname) for the group behind fanotify_fd.
 */
SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
			      __u64 mask, int dfd,
			      const char  __user * pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct file *filp;
	struct path path;
	int ret, fput_needed;
	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags , dfd, pathname, mask);
	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;
	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
		/* fall through: flush takes no mask */
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}
	/* FAN_ONDIR in the event mask is carried internally as a flag */
	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;
	filp = fget_light(fanotify_fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;
	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &fanotify_fops))
		goto fput_and_out;
	group = filp->private_data;
	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;
	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;
	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;
	/* create/update an inode mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}
	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
/* Argument-widening wrapper required by the syscall-wrapper ABI. */
asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
				  long dfd, long pathname)
{
	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
				  mask, (int) dfd,
				  (const char  __user *) pathname);
}
SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif
/*
* fanotify_user_setup - Our initialization function. Note that we cannot return
* error because we have compiled-in VFS hooks. So an (unlikely) failure here
* must result in panic().
*/
static int __init fanotify_user_setup(void)
{
	/*
	 * Both caches are created with SLAB_PANIC: an allocation failure
	 * this early in boot is unrecoverable (see the comment above), so
	 * the slab layer panics for us and this function cannot fail.
	 */
	fanotify_response_event_cache =
		KMEM_CACHE(fanotify_response_event, SLAB_PANIC);
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);
| gpl-2.0 |
faux123/msm8660-htc-ics | drivers/infiniband/hw/mthca/mthca_main.c | 2529 | 37418 | /*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include "mthca_dev.h"
#include "mthca_config_reg.h"
#include "mthca_cmd.h"
#include "mthca_profile.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
/* Runtime-adjustable (0644) debug knob; only compiled in with
 * CONFIG_INFINIBAND_MTHCA_DEBUG. */
int mthca_debug_level = 0;
module_param_named(debug_level, mthca_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif /* CONFIG_INFINIBAND_MTHCA_DEBUG */

#ifdef CONFIG_PCI_MSI
static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#else /* CONFIG_PCI_MSI */
/* Without CONFIG_PCI_MSI the MSI-X path is compiled out entirely. */
#define msi_x (0)
#endif /* CONFIG_PCI_MSI */

/* PCI burst tuning is opt-in; BIOS defaults are left alone otherwise
 * (see mthca_tune_pci()). */
static int tune_pci = 0;
module_param(tune_pci, int, 0444);
MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");

/* Serializes probe/remove/restart across devices (see mthca_init_one()
 * and __mthca_restart_one()). */
DEFINE_MUTEX(mthca_device_mutex);

/* Compile-time defaults for the HCA resource profile below. */
#define MTHCA_DEFAULT_NUM_QP            (1 << 16)
#define MTHCA_DEFAULT_RDB_PER_QP        (1 << 2)
#define MTHCA_DEFAULT_NUM_CQ            (1 << 16)
#define MTHCA_DEFAULT_NUM_MCG           (1 << 13)
#define MTHCA_DEFAULT_NUM_MPT           (1 << 17)
#define MTHCA_DEFAULT_NUM_MTT           (1 << 20)
#define MTHCA_DEFAULT_NUM_UDAV          (1 << 15)
#define MTHCA_DEFAULT_NUM_RESERVED_MTTS (1 << 18)
#define MTHCA_DEFAULT_NUM_UARC_SIZE     (1 << 18)

/* Each field is overridable via the module parameters below; values are
 * rounded to powers of two in mthca_validate_profile(). */
static struct mthca_profile hca_profile = {
	.num_qp             = MTHCA_DEFAULT_NUM_QP,
	.rdb_per_qp         = MTHCA_DEFAULT_RDB_PER_QP,
	.num_cq             = MTHCA_DEFAULT_NUM_CQ,
	.num_mcg            = MTHCA_DEFAULT_NUM_MCG,
	.num_mpt            = MTHCA_DEFAULT_NUM_MPT,
	.num_mtt            = MTHCA_DEFAULT_NUM_MTT,
	.num_udav           = MTHCA_DEFAULT_NUM_UDAV,          /* Tavor only */
	.fmr_reserved_mtts  = MTHCA_DEFAULT_NUM_RESERVED_MTTS, /* Tavor only */
	.uarc_size          = MTHCA_DEFAULT_NUM_UARC_SIZE,     /* Arbel only */
};

module_param_named(num_qp, hca_profile.num_qp, int, 0444);
MODULE_PARM_DESC(num_qp, "maximum number of QPs per HCA");

module_param_named(rdb_per_qp, hca_profile.rdb_per_qp, int, 0444);
MODULE_PARM_DESC(rdb_per_qp, "number of RDB buffers per QP");

module_param_named(num_cq, hca_profile.num_cq, int, 0444);
MODULE_PARM_DESC(num_cq, "maximum number of CQs per HCA");

module_param_named(num_mcg, hca_profile.num_mcg, int, 0444);
MODULE_PARM_DESC(num_mcg, "maximum number of multicast groups per HCA");

module_param_named(num_mpt, hca_profile.num_mpt, int, 0444);
MODULE_PARM_DESC(num_mpt,
		"maximum number of memory protection table entries per HCA");

module_param_named(num_mtt, hca_profile.num_mtt, int, 0444);
MODULE_PARM_DESC(num_mtt,
		 "maximum number of memory translation table segments per HCA");

module_param_named(num_udav, hca_profile.num_udav, int, 0444);
MODULE_PARM_DESC(num_udav, "maximum number of UD address vectors per HCA");

module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
MODULE_PARM_DESC(fmr_reserved_mtts,
		 "number of memory translation table segments reserved for FMR");

/* Each MTT entry is 8 bytes; this sets the segment granularity used by
 * mthca_dev_lim() when computing limits.mtt_seg_size. */
static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");

/* Printed once at first probe (see mthca_init_one()). */
static char mthca_version[] __devinitdata =
	DRV_NAME ": Mellanox InfiniBand HCA driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
/*
 * Optionally raise PCI/PCI-X burst parameters above the BIOS defaults.
 * Gated by the tune_pci module parameter; returns 0 on success (or when
 * tuning is disabled) and -ENODEV if a capability write fails.
 */
static int mthca_tune_pci(struct mthca_dev *mdev)
{
	if (!tune_pci)
		return 0;

	/* PCI-X: max out the Memory Read Byte Count. */
	if (!pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) {
		if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
			mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");
	} else if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) {
		mthca_err(mdev, "Couldn't set PCI-X max read count, "
			  "aborting.\n");
		return -ENODEV;
	}

	/* PCI Express: bump the Max Read Request Size to 4KB. */
	if (!pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP)) {
		if (mdev->mthca_flags & MTHCA_FLAG_PCIE)
			mthca_info(mdev, "No PCI Express capability, "
				   "not setting Max Read Request Size.\n");
	} else if (pcie_set_readrq(mdev->pdev, 4096)) {
		mthca_err(mdev, "Couldn't write PCI Express read request, "
			  "aborting.\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * Query device limits from firmware (QUERY_DEV_LIM) and translate them
 * into mdev->limits and mdev->device_cap_flags.  Returns 0 on success,
 * a negative errno if the command fails or the reported limits are
 * incompatible with this driver/kernel.
 */
static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
{
	int err;
	u8 status;

	/* Segment size derives from the log_mtts_per_seg module
	 * parameter; each MTT entry is 8 bytes. */
	mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;
	err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "QUERY_DEV_LIM returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	/* Reject configurations the driver cannot accommodate. */
	if (dev_lim->min_page_sz > PAGE_SIZE) {
		mthca_err(mdev, "HCA minimum page size of %d bigger than "
			  "kernel PAGE_SIZE of %ld, aborting.\n",
			  dev_lim->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_lim->num_ports > MTHCA_MAX_PORTS) {
		mthca_err(mdev, "HCA has %d ports, but we only support %d, "
			  "aborting.\n",
			  dev_lim->num_ports, MTHCA_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) {
		mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than "
			  "PCI resource 2 size of 0x%llx, aborting.\n",
			  dev_lim->uar_size,
			  (unsigned long long)pci_resource_len(mdev->pdev, 2));
		return -ENODEV;
	}

	/* Copy the firmware-reported limits into the driver's view. */
	mdev->limits.num_ports      	= dev_lim->num_ports;
	mdev->limits.vl_cap             = dev_lim->max_vl;
	mdev->limits.mtu_cap            = dev_lim->max_mtu;
	mdev->limits.gid_table_len  	= dev_lim->max_gids;
	mdev->limits.pkey_table_len 	= dev_lim->max_pkeys;
	mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
	/*
	 * Need to allow for worst case send WQE overhead and check
	 * whether max_desc_sz imposes a lower limit than max_sg; UD
	 * send has the biggest overhead.
	 */
	mdev->limits.max_sg		= min_t(int, dev_lim->max_sg,
					      (dev_lim->max_desc_sz -
					       sizeof (struct mthca_next_seg) -
					       (mthca_is_memfree(mdev) ?
					       sizeof (struct mthca_arbel_ud_seg) :
					       sizeof (struct mthca_tavor_ud_seg))) /
						sizeof (struct mthca_data_seg));
	mdev->limits.max_wqes           = dev_lim->max_qp_sz;
	mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
	mdev->limits.reserved_qps       = dev_lim->reserved_qps;
	mdev->limits.max_srq_wqes       = dev_lim->max_srq_sz;
	mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
	mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
	mdev->limits.max_desc_sz        = dev_lim->max_desc_sz;
	mdev->limits.max_srq_sge	= mthca_max_srq_sge(mdev);
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	mdev->limits.max_cqes           = dev_lim->max_cq_sz - 1;
	mdev->limits.reserved_cqs       = dev_lim->reserved_cqs;
	mdev->limits.reserved_eqs       = dev_lim->reserved_eqs;
	mdev->limits.reserved_mtts      = dev_lim->reserved_mtts;
	mdev->limits.reserved_mrws      = dev_lim->reserved_mrws;
	mdev->limits.reserved_uars      = dev_lim->reserved_uars;
	mdev->limits.reserved_pds       = dev_lim->reserved_pds;
	mdev->limits.port_width_cap     = dev_lim->max_port_width;
	mdev->limits.page_size_cap      = ~(u32) (dev_lim->min_page_sz - 1);
	mdev->limits.flags              = dev_lim->flags;
	/*
	 * For old FW that doesn't return static rate support, use a
	 * value of 0x3 (only static rate values of 0 or 1 are handled),
	 * except on Sinai, where even old FW can handle static rate
	 * values of 2 and 3.
	 */
	if (dev_lim->stat_rate_support)
		mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
	else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		mdev->limits.stat_rate_support = 0xf;
	else
		mdev->limits.stat_rate_support = 0x3;

	/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
	   May be doable since hardware supports it for SRQ.

	   IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver.

	   IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not
	   supported by driver. */
	mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	/* Translate optional firmware feature flags into IB cap flags. */
	if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI)
		mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI;

	if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG)
		mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;

	if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE)
		mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;

	if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
		mdev->mthca_flags |= MTHCA_FLAG_SRQ;

	/* IPoIB checksum offload is only reported on MemFree HCAs. */
	if (mthca_is_memfree(mdev))
		if (dev_lim->flags & DEV_LIM_FLAG_IPOIB_CSUM)
			mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	return 0;
}
/*
 * Bring up a Tavor-mode HCA: enable the system (SYS_EN), query firmware
 * and DDR, read device limits, build a resource profile and INIT_HCA.
 * On any failure after SYS_EN the HCA is disabled again (SYS_DIS).
 */
static int mthca_init_tavor(struct mthca_dev *mdev)
{
	s64 size;
	u8 status;
	int err;
	struct mthca_dev_lim        dev_lim;
	struct mthca_profile        profile;
	struct mthca_init_hca_param init_hca;

	err = mthca_SYS_EN(mdev, &status);
	if (err) {
		mthca_err(mdev, "SYS_EN command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "SYS_EN returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_QUERY_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}
	err = mthca_QUERY_DDR(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_DDR command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "QUERY_DDR returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		goto err_disable;
	}

	/* Start from the module-parameter profile and fill in the
	 * Tavor-specific pieces; uarc_size is Arbel-only, so zero it. */
	profile = hca_profile;
	profile.num_uar   = dev_lim.uar_size / PAGE_SIZE;
	profile.uarc_size = 0;
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		profile.num_srq = dev_lim.max_srqs;

	/* make_profile returns total context size, or a negative errno. */
	size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (size < 0) {
		err = size;
		goto err_disable;
	}

	err = mthca_INIT_HCA(mdev, &init_hca, &status);
	if (err) {
		mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}

	return 0;

err_disable:
	mthca_SYS_DIS(mdev, &status);

	return err;
}
/*
 * Allocate host memory for the Arbel firmware image, map it (MAP_FA)
 * and start the firmware (RUN_FW).  On failure, the mapping and the
 * allocation are undone in reverse order via the goto ladder.
 */
static int mthca_load_fw(struct mthca_dev *mdev)
{
	u8 status;
	int err;

	/* FIXME: use HCA-attached memory for FW if present */

	mdev->fw.arbel.fw_icm =
		mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
				GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.fw_icm) {
		mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status);
	if (err) {
		mthca_err(mdev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}
	if (status) {
		mthca_err(mdev, "MAP_FA returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_free;
	}
	err = mthca_RUN_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}
	if (status) {
		mthca_err(mdev, "RUN_FW returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mthca_UNMAP_FA(mdev, &status);

err_free:
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
	return err;
}
/*
 * Set up the InfiniHost Context Memory (ICM) for a MemFree HCA: size
 * and map the auxiliary ICM area, then allocate one ICM table per
 * context object type (MTT, MPT, QP, EQP, RDB, CQ, SRQ, MCG) at the
 * base addresses chosen by mthca_make_profile().  On failure all
 * previously allocated tables are freed in reverse order.
 */
static int mthca_init_icm(struct mthca_dev *mdev,
			  struct mthca_dev_lim *dev_lim,
			  struct mthca_init_hca_param *init_hca,
			  u64 icm_size)
{
	u64 aux_pages;
	u8 status;
	int err;

	/* Ask the firmware how much aux memory this context size needs. */
	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status);
	if (err) {
		mthca_err(mdev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "SET_ICM_SIZE returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		  (unsigned long long) icm_size >> 10,
		  (unsigned long long) aux_pages << 2);

	mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
						 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.aux_icm) {
		mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status);
	if (err) {
		mthca_err(mdev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}
	if (status) {
		mthca_err(mdev, "MAP_ICM_AUX returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_free_aux;
	}

	err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
	if (err) {
		mthca_err(mdev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_aux;
	}

	/* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
					   dma_get_cache_alignment()) / mdev->limits.mtt_seg_size;

	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
							 mdev->limits.mtt_seg_size,
							 mdev->limits.num_mtt_segs,
							 mdev->limits.reserved_mtts,
							 1, 0);
	if (!mdev->mr_table.mtt_table) {
		mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_eq;
	}

	mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
							 dev_lim->mpt_entry_sz,
							 mdev->limits.num_mpts,
							 mdev->limits.reserved_mrws,
							 1, 1);
	if (!mdev->mr_table.mpt_table) {
		mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mtt;
	}

	mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
							dev_lim->qpc_entry_sz,
							mdev->limits.num_qps,
							mdev->limits.reserved_qps,
							0, 0);
	if (!mdev->qp_table.qp_table) {
		mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mpt;
	}

	mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
							 dev_lim->eqpc_entry_sz,
							 mdev->limits.num_qps,
							 mdev->limits.reserved_qps,
							 0, 0);
	if (!mdev->qp_table.eqp_table) {
		mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_qp;
	}

	/* RDB table is sized by num_qps scaled by rdb_shift; it has no
	 * reserved entries. */
	mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
							 MTHCA_RDB_ENTRY_SIZE,
							 mdev->limits.num_qps <<
							 mdev->qp_table.rdb_shift, 0,
							 0, 0);
	if (!mdev->qp_table.rdb_table) {
		mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
		err = -ENOMEM;
		goto err_unmap_eqp;
	}

       mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
						    dev_lim->cqc_entry_sz,
						    mdev->limits.num_cqs,
						    mdev->limits.reserved_cqs,
						    0, 0);
	if (!mdev->cq_table.table) {
		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_rdb;
	}

	/* SRQ context memory is only needed when the HCA supports SRQs. */
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
		mdev->srq_table.table =
			mthca_alloc_icm_table(mdev, init_hca->srqc_base,
					      dev_lim->srq_entry_sz,
					      mdev->limits.num_srqs,
					      mdev->limits.reserved_srqs,
					      0, 0);
		if (!mdev->srq_table.table) {
			mthca_err(mdev, "Failed to map SRQ context memory, "
				  "aborting.\n");
			err = -ENOMEM;
			goto err_unmap_cq;
		}
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
						      MTHCA_MGM_ENTRY_SIZE,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      0, 0);
	if (!mdev->mcg_table.table) {
		mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		mthca_free_icm_table(mdev, mdev->srq_table.table);

err_unmap_cq:
	mthca_free_icm_table(mdev, mdev->cq_table.table);

err_unmap_rdb:
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);

err_unmap_eqp:
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);

err_unmap_qp:
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);

err_unmap_mpt:
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);

err_unmap_mtt:
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);

err_unmap_eq:
	mthca_unmap_eq_icm(mdev);

err_unmap_aux:
	mthca_UNMAP_ICM_AUX(mdev, &status);

err_free_aux:
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);

	return err;
}
/*
 * Free all ICM tables in the reverse order of their allocation in
 * mthca_init_icm(), then unmap and release the auxiliary ICM area.
 */
static void mthca_free_icms(struct mthca_dev *mdev)
{
	u8 status;

	mthca_free_icm_table(mdev, mdev->mcg_table.table);
	/* SRQ table only exists when the HCA supports SRQs. */
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		mthca_free_icm_table(mdev, mdev->srq_table.table);
	mthca_free_icm_table(mdev, mdev->cq_table.table);
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
	mthca_unmap_eq_icm(mdev);

	mthca_UNMAP_ICM_AUX(mdev, &status);
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
}
/*
 * Bring up an Arbel-mode (MemFree) HCA: query firmware, enable LAM if
 * HCA-attached memory is present, load and start the firmware, set up
 * ICM, and finally INIT_HCA.  Teardown on failure follows the reverse
 * order via the goto ladder.
 */
static int mthca_init_arbel(struct mthca_dev *mdev)
{
	struct mthca_dev_lim        dev_lim;
	struct mthca_profile        profile;
	struct mthca_init_hca_param init_hca;
	s64 icm_size;
	u8 status;
	int err;

	err = mthca_QUERY_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_ENABLE_LAM(mdev, &status);
	if (err) {
		mthca_err(mdev, "ENABLE_LAM command failed, aborting.\n");
		return err;
	}
	/* LAM_NOT_PRE just means there is no HCA-attached memory; remember
	 * that so teardown paths skip DISABLE_LAM. */
	if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) {
		mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
		mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
	} else if (status) {
		mthca_err(mdev, "ENABLE_LAM returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_load_fw(mdev);
	if (err) {
		mthca_err(mdev, "Failed to start FW, aborting.\n");
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		goto err_stop_fw;
	}

	/* Arbel keeps UD address vectors in ICM, so num_udav is unused. */
	profile = hca_profile;
	profile.num_uar  = dev_lim.uar_size / PAGE_SIZE;
	profile.num_udav = 0;
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		profile.num_srq = dev_lim.max_srqs;

	/* make_profile returns the total ICM size, or a negative errno. */
	icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mthca_INIT_HCA(mdev, &init_hca, &status);
	if (err) {
		mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
		goto err_free_icm;
	}
	if (status) {
		mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_free_icm;
	}

	return 0;

err_free_icm:
	mthca_free_icms(mdev);

err_stop_fw:
	mthca_UNMAP_FA(mdev, &status);
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

err_disable:
	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
		mthca_DISABLE_LAM(mdev, &status);

	return err;
}
/*
 * Undo mthca_init_hca(): close the HCA, then tear down whatever the
 * Tavor or Arbel (MemFree) bring-up path set up.
 */
static void mthca_close_hca(struct mthca_dev *mdev)
{
	u8 status;

	mthca_CLOSE_HCA(mdev, 0, &status);

	if (!mthca_is_memfree(mdev)) {
		/* Tavor path: just disable the HCA. */
		mthca_SYS_DIS(mdev, &status);
		return;
	}

	/* MemFree path: release ICM, stop firmware, and disable LAM if
	 * it was enabled during init. */
	mthca_free_icms(mdev);
	mthca_UNMAP_FA(mdev, &status);
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
		mthca_DISABLE_LAM(mdev, &status);
}
/*
 * Initialize the HCA via the generation-appropriate path, then query
 * the adapter for its INTA pin, revision and board ID.  Closes the HCA
 * again if QUERY_ADAPTER fails.
 */
static int mthca_init_hca(struct mthca_dev *mdev)
{
	struct mthca_adapter adapter;
	u8 status;
	int err;

	err = mthca_is_memfree(mdev) ? mthca_init_arbel(mdev) :
				       mthca_init_tavor(mdev);
	if (err)
		return err;

	err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
	if (err) {
		mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}
	if (status) {
		mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_close;
	}

	mdev->eq_table.inta_pin = adapter.inta_pin;
	/* On MemFree HCAs the PCI revision ID is authoritative, so only
	 * take the adapter-reported revision on older chips. */
	if (!mthca_is_memfree(mdev))
		mdev->rev_id = adapter.revision_id;
	memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);

	return 0;

err_close:
	mthca_close_hca(mdev);
	return err;
}
/*
 * Set up all driver-side resource tables (UAR, PD, MR, EQ, CQ, SRQ, QP,
 * AV, MCG) and switch the firmware command interface to event-driven
 * mode.  The NOP interrupt test verifies interrupt delivery; the caller
 * retries without MSI-X on -EBUSY.  Teardown on failure runs the goto
 * ladder in reverse allocation order.
 */
static int mthca_setup_hca(struct mthca_dev *dev)
{
	int err;
	u8 status;

	MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);

	err = mthca_init_uar_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "user access region table, aborting.\n");
		return err;
	}

	err = mthca_uar_alloc(dev, &dev->driver_uar);
	if (err) {
		mthca_err(dev, "Failed to allocate driver access region, "
			  "aborting.\n");
		goto err_uar_table_free;
	}

	/* Map the kernel access region (doorbell page) for driver use. */
	dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->kar) {
		mthca_err(dev, "Couldn't map kernel access region, "
			  "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mthca_init_pd_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mthca_init_mr_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mthca_pd_alloc(dev, 1, &dev->driver_pd);
	if (err) {
		mthca_err(dev, "Failed to create driver PD, "
			  "aborting.\n");
		goto err_mr_table_free;
	}

	err = mthca_init_eq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "event queue table, aborting.\n");
		goto err_pd_free;
	}

	/* From here on firmware commands complete via EQ events rather
	 * than polling. */
	err = mthca_cmd_use_events(dev);
	if (err) {
		mthca_err(dev, "Failed to switch to event-driven "
			  "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	/* NOP generates a command-EQ interrupt; failure here means the
	 * interrupt path is broken (a known issue with some MSI-X
	 * configurations, hence the caller's non-MSI-X retry). */
	err = mthca_NOP(dev, &status);
	if (err || status) {
		if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
			mthca_warn(dev, "NOP command failed to generate interrupt "
				   "(IRQ %d).\n",
				   dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector);
			mthca_warn(dev, "Trying again with MSI-X disabled.\n");
		} else {
			mthca_err(dev, "NOP command failed to generate interrupt "
				  "(IRQ %d), aborting.\n",
				  dev->pdev->irq);
			mthca_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mthca_dbg(dev, "NOP command IRQ test passed\n");

	err = mthca_init_cq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mthca_init_srq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mthca_init_qp_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mthca_init_av_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "address vector table, aborting.\n");
		goto err_qp_table_free;
	}

	err = mthca_init_mcg_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "multicast group table, aborting.\n");
		goto err_av_table_free;
	}

	return 0;

err_av_table_free:
	mthca_cleanup_av_table(dev);

err_qp_table_free:
	mthca_cleanup_qp_table(dev);

err_srq_table_free:
	mthca_cleanup_srq_table(dev);

err_cq_table_free:
	mthca_cleanup_cq_table(dev);

err_cmd_poll:
	mthca_cmd_use_polling(dev);

err_eq_table_free:
	mthca_cleanup_eq_table(dev);

err_pd_free:
	mthca_pd_free(dev, &dev->driver_pd);

err_mr_table_free:
	mthca_cleanup_mr_table(dev);

err_pd_table_free:
	mthca_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(dev->kar);

err_uar_free:
	mthca_uar_free(dev, &dev->driver_uar);

err_uar_table_free:
	mthca_cleanup_uar_table(dev);
	return err;
}
/*
 * Try to enable three MSI-X vectors (completion, async and command
 * EQs).  Returns 0 and records the vectors on success; otherwise
 * returns the pci_enable_msix() result so the caller can fall back to
 * legacy interrupts.
 */
static int mthca_enable_msi_x(struct mthca_dev *mdev)
{
	struct msix_entry entries[3];
	int err;
	int i;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(mdev->pdev, entries, ARRAY_SIZE(entries));
	if (err) {
		/* A positive return means only that many vectors were
		 * available; we need all three, so don't use MSI-X. */
		if (err > 0)
			mthca_info(mdev, "Only %d MSI-X vectors available, "
				   "not using MSI-X\n", err);
		return err;
	}

	mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
	mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
	mdev->eq_table.eq[MTHCA_EQ_CMD  ].msi_x_vector = entries[2].vector;

	return 0;
}
/* Types of supported HCA */
enum {
	TAVOR,			/* MT23108                        */
	ARBEL_COMPAT,		/* MT25208 in Tavor compat mode   */
	ARBEL_NATIVE,		/* MT25208 with extended features */
	SINAI			/* MT25204 */
};

/* Pack a firmware version triple into a single comparable u64. */
#define MTHCA_FW_VER(major, minor, subminor) \
	(((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor))

/*
 * Per-HCA-type table, indexed by the enum above (and by the
 * driver_data field of mthca_pci_table): the newest known firmware
 * version (used only to warn about outdated FW) and the initial
 * mthca_flags for the device.
 */
static struct {
	u64 latest_fw;
	u32 flags;
} mthca_hca_table[] = {
	[TAVOR]        = { .latest_fw = MTHCA_FW_VER(3, 5, 0),
			   .flags     = 0 },
	[ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200),
			   .flags     = MTHCA_FLAG_PCIE },
	[ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 3, 0),
			   .flags     = MTHCA_FLAG_MEMFREE |
					MTHCA_FLAG_PCIE },
	[SINAI]        = { .latest_fw = MTHCA_FW_VER(1, 2, 0),
			   .flags     = MTHCA_FLAG_MEMFREE |
					MTHCA_FLAG_PCIE    |
					MTHCA_FLAG_SINAI_OPT }
};
/*
 * Core probe path (also used by __mthca_restart_one()): enable and map
 * the PCI device, reset the HCA, bring up the command interface and
 * firmware, set up all resource tables and register the IB device.
 * Returns 0 on success or a negative errno; on failure everything is
 * torn down in reverse order via the goto ladder.
 *
 * Caller must hold mthca_device_mutex.
 */
static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
{
	int ddr_hidden = 0;
	int err;
	struct mthca_dev *mdev;

	printk(KERN_INFO PFX "Initializing %s\n",
	       pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	/*
	 * Check for BARs.  We expect 0: 1MB, 2: 8MB, 4: DDR (may not
	 * be present)
	 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	/* BAR 4 absent just means the DDR is hidden from the host. */
	if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM))
		ddr_hidden = 1;

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, "
			"aborting.\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	/* Prefer 64-bit DMA masks but fall back to 32-bit. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_free_res;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_free_res;
		}
	}

	/* We can handle large RDMA requests, so allow larger segments. */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev);
	if (!mdev) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	mdev->pdev = pdev;

	mdev->mthca_flags = mthca_hca_table[hca_type].flags;
	if (ddr_hidden)
		mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;

	/*
	 * Now reset the HCA before we touch the PCI capabilities or
	 * attempt a firmware command, since a boot ROM may have left
	 * the HCA in an undefined state.
	 */
	err = mthca_reset(mdev);
	if (err) {
		mthca_err(mdev, "Failed to reset HCA, aborting.\n");
		goto err_free_dev;
	}

	/*
	 * Capture the return code: the original code tested the call's
	 * result without assigning it to err, so a command-interface
	 * init failure fell through to the error path with err still 0
	 * and the probe wrongly reported success.
	 */
	err = mthca_cmd_init(mdev);
	if (err) {
		mthca_err(mdev, "Failed to init command interface, aborting.\n");
		goto err_free_dev;
	}

	err = mthca_tune_pci(mdev);
	if (err)
		goto err_cmd;

	err = mthca_init_hca(mdev);
	if (err)
		goto err_cmd;

	if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
		mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n",
			   (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
			   (int) (mdev->fw_ver & 0xffff),
			   (int) (mthca_hca_table[hca_type].latest_fw >> 32),
			   (int) (mthca_hca_table[hca_type].latest_fw >> 16) & 0xffff,
			   (int) (mthca_hca_table[hca_type].latest_fw & 0xffff));
		mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n");
	}

	if (msi_x && !mthca_enable_msi_x(mdev))
		mdev->mthca_flags |= MTHCA_FLAG_MSI_X;

	err = mthca_setup_hca(mdev);
	if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
		/* NOP interrupt test failed under MSI-X (setup_hca
		 * returns -EBUSY); retry with legacy interrupts. */
		pci_disable_msix(pdev);
		mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;

		err = mthca_setup_hca(mdev);
	}

	if (err)
		goto err_close;

	err = mthca_register_device(mdev);
	if (err)
		goto err_cleanup;

	err = mthca_create_agents(mdev);
	if (err)
		goto err_unregister;

	pci_set_drvdata(pdev, mdev);
	mdev->hca_type = hca_type;

	mdev->active = true;

	return 0;

err_unregister:
	mthca_unregister_device(mdev);

err_cleanup:
	mthca_cleanup_mcg_table(mdev);
	mthca_cleanup_av_table(mdev);
	mthca_cleanup_qp_table(mdev);
	mthca_cleanup_srq_table(mdev);
	mthca_cleanup_cq_table(mdev);
	mthca_cmd_use_polling(mdev);
	mthca_cleanup_eq_table(mdev);

	mthca_pd_free(mdev, &mdev->driver_pd);

	mthca_cleanup_mr_table(mdev);
	mthca_cleanup_pd_table(mdev);
	mthca_cleanup_uar_table(mdev);

err_close:
	if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mthca_close_hca(mdev);

err_cmd:
	mthca_cmd_cleanup(mdev);

err_free_dev:
	ib_dealloc_device(&mdev->ib_dev);

err_free_res:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
/*
 * Core remove path (also used by __mthca_restart_one()): unregister the
 * IB device, close all ports, and tear down every resource set up by
 * __mthca_init_one() in reverse order.  A NULL drvdata (probe never
 * completed) makes this a no-op.
 *
 * Caller must hold mthca_device_mutex.
 */
static void __mthca_remove_one(struct pci_dev *pdev)
{
	struct mthca_dev *mdev = pci_get_drvdata(pdev);
	u8 status;
	int p;

	if (mdev) {
		mthca_free_agents(mdev);
		mthca_unregister_device(mdev);

		/* IB port numbers are 1-based. */
		for (p = 1; p <= mdev->limits.num_ports; ++p)
			mthca_CLOSE_IB(mdev, p, &status);

		mthca_cleanup_mcg_table(mdev);
		mthca_cleanup_av_table(mdev);
		mthca_cleanup_qp_table(mdev);
		mthca_cleanup_srq_table(mdev);
		mthca_cleanup_cq_table(mdev);
		mthca_cmd_use_polling(mdev);
		mthca_cleanup_eq_table(mdev);

		mthca_pd_free(mdev, &mdev->driver_pd);

		mthca_cleanup_mr_table(mdev);
		mthca_cleanup_pd_table(mdev);

		iounmap(mdev->kar);
		mthca_uar_free(mdev, &mdev->driver_uar);
		mthca_cleanup_uar_table(mdev);
		mthca_close_hca(mdev);
		mthca_cmd_cleanup(mdev);

		if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
			pci_disable_msix(pdev);

		ib_dealloc_device(&mdev->ib_dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
/*
 * Restart a device by running the full remove/probe cycle.  Returns
 * -ENODEV if the device was never successfully probed.
 *
 * Caller must hold mthca_device_mutex.
 */
int __mthca_restart_one(struct pci_dev *pdev)
{
	struct mthca_dev *mdev = pci_get_drvdata(pdev);
	int hca_type;

	if (!mdev)
		return -ENODEV;

	/* Remember the type before teardown frees the device struct. */
	hca_type = mdev->hca_type;
	__mthca_remove_one(pdev);

	return __mthca_init_one(pdev, hca_type);
}
/*
 * PCI probe entry point: validate the device-table index and run the
 * core init path under mthca_device_mutex.
 */
static int __devinit mthca_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *id)
{
	int ret = -ENODEV;

	mutex_lock(&mthca_device_mutex);

	printk_once(KERN_INFO "%s", mthca_version);

	/* driver_data indexes mthca_hca_table; reject anything else. */
	if (id->driver_data >= ARRAY_SIZE(mthca_hca_table))
		printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
		       pci_name(pdev), id->driver_data);
	else
		ret = __mthca_init_one(pdev, id->driver_data);

	mutex_unlock(&mthca_device_mutex);

	return ret;
}
/* PCI remove entry point: run the core teardown under the device mutex. */
static void __devexit mthca_remove_one(struct pci_dev *pdev)
{
	mutex_lock(&mthca_device_mutex);
	__mthca_remove_one(pdev);
	mutex_unlock(&mthca_device_mutex);
}
/*
 * PCI IDs of all Mellanox/Topspin HCAs handled by this driver.
 * .driver_data is the HCA generation, used as an index into
 * mthca_hca_table by mthca_init_one().
 *
 * Declared const: the table is never written, and pci_driver.id_table
 * takes a pointer to const, so this moves the data to rodata.
 */
static const struct pci_device_id mthca_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR),
	  .driver_data = TAVOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_TAVOR),
	  .driver_data = TAVOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
	  .driver_data = ARBEL_COMPAT },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
	  .driver_data = ARBEL_COMPAT },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL),
	  .driver_data = ARBEL_NATIVE },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL),
	  .driver_data = ARBEL_NATIVE },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
	  .driver_data = SINAI },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, mthca_pci_table);
/* PCI driver glue: binds probe/remove callbacks to the ID table above. */
static struct pci_driver mthca_driver = {
	.name		= DRV_NAME,
	.id_table	= mthca_pci_table,
	.probe		= mthca_init_one,
	.remove		= __devexit_p(mthca_remove_one)
};
/*
 * Sanitize one profile module parameter in place.
 *
 * The value must be positive and a power of two: non-positive values
 * fall back to pval_default, other values are rounded up to the next
 * power of two.  A warning is printed whenever the value was adjusted.
 */
static void __init __mthca_check_profile_val(const char *name, int *pval,
					     int pval_default)
{
	int requested = *pval;
	int corrected;

	if (requested <= 0)
		corrected = pval_default;
	else
		corrected = roundup_pow_of_two(requested);

	*pval = corrected;

	if (corrected != requested) {
		printk(KERN_WARNING PFX "Invalid value %d for %s in module parameter.\n",
		       requested, name);
		printk(KERN_WARNING PFX "Corrected %s to %d.\n", name, corrected);
	}
}
/* Convenience wrapper: stringify the field name and pass a pointer to
 * the corresponding member of the global hca_profile. */
#define mthca_check_profile_val(name, default)				\
	__mthca_check_profile_val(#name, &hca_profile.name, default)

/*
 * Validate all user-supplied profile parameters at module init,
 * clamping each to a positive power of two and enforcing the
 * cross-field constraint fmr_reserved_mtts < num_mtt.
 */
static void __init mthca_validate_profile(void)
{
	mthca_check_profile_val(num_qp,            MTHCA_DEFAULT_NUM_QP);
	mthca_check_profile_val(rdb_per_qp,        MTHCA_DEFAULT_RDB_PER_QP);
	mthca_check_profile_val(num_cq,            MTHCA_DEFAULT_NUM_CQ);
	mthca_check_profile_val(num_mcg,           MTHCA_DEFAULT_NUM_MCG);
	mthca_check_profile_val(num_mpt,           MTHCA_DEFAULT_NUM_MPT);
	mthca_check_profile_val(num_mtt,           MTHCA_DEFAULT_NUM_MTT);
	mthca_check_profile_val(num_udav,          MTHCA_DEFAULT_NUM_UDAV);
	mthca_check_profile_val(fmr_reserved_mtts, MTHCA_DEFAULT_NUM_RESERVED_MTTS);

	/* FMRs may not consume the whole MTT space */
	if (hca_profile.fmr_reserved_mtts >= hca_profile.num_mtt) {
		printk(KERN_WARNING PFX "Invalid fmr_reserved_mtts module parameter %d.\n",
		       hca_profile.fmr_reserved_mtts);
		printk(KERN_WARNING PFX "(Must be smaller than num_mtt %d)\n",
		       hca_profile.num_mtt);
		hca_profile.fmr_reserved_mtts = hca_profile.num_mtt / 2;
		printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n",
		       hca_profile.fmr_reserved_mtts);
	}

	/* log_mtts_per_seg outside 1..5 reverts to the compiled default */
	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
		printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n",
		       log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8));
		log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
	}
}
/*
 * Module init: sanitize profile parameters, start the catastrophic
 * error poller, then register the PCI driver.  The catas poller is
 * torn down again if driver registration fails.
 */
static int __init mthca_init(void)
{
	int err;

	mthca_validate_profile();

	err = mthca_catas_init();
	if (err)
		return err;

	err = pci_register_driver(&mthca_driver);
	if (err < 0)
		mthca_catas_cleanup();

	return err < 0 ? err : 0;
}
/* Module exit: unbind all devices, then stop the catas poller. */
static void __exit mthca_cleanup(void)
{
	pci_unregister_driver(&mthca_driver);
	mthca_catas_cleanup();
}

module_init(mthca_init);
module_exit(mthca_cleanup);
| gpl-2.0 |
sarwarbhuiyan/linux | drivers/net/wireless/cw1200/bh.c | 2529 | 14821 | /*
* Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers
*
* Copyright (c) 2010, ST-Ericsson
* Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
*
* Based on:
* ST-Ericsson UMAC CW1200 driver, which is
* Copyright (c) 2010, ST-Ericsson
* Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <net/mac80211.h>
#include <linux/kthread.h>
#include <linux/timer.h>
#include "cw1200.h"
#include "bh.h"
#include "hwio.h"
#include "wsm.h"
#include "hwbus.h"
#include "debug.h"
#include "fwio.h"
static int cw1200_bh(void *arg);
#define DOWNLOAD_BLOCK_SIZE_WR (0x1000 - 4)
/* An SPI message cannot be bigger than (2^12 - 1) * 2 bytes;
 * the "* 2" converts 16-bit words to bytes.
 */
#define MAX_SZ_RD_WR_BUFFERS (DOWNLOAD_BLOCK_SIZE_WR*2)
#define PIGGYBACK_CTRL_REG (2)
#define EFFECTIVE_BUF_SIZE (MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG)
/* Suspend state privates: handshake values exchanged between the host
 * (cw1200_bh_suspend/resume) and the BH thread via priv->bh_suspend. */
enum cw1200_bh_pm_state {
	CW1200_BH_RESUMED = 0,	/* BH running normally */
	CW1200_BH_SUSPEND,	/* host asked BH to suspend */
	CW1200_BH_SUSPENDED,	/* BH acknowledged the suspend */
	CW1200_BH_RESUME,	/* host asked BH to resume */
};
typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv,
u8 *data, size_t size);
/* Workqueue shim: runs the BH state machine (cw1200_bh) in process
 * context on the dedicated high-priority workqueue. */
static void cw1200_bh_work(struct work_struct *work)
{
	struct cw1200_common *priv =
		container_of(work, struct cw1200_common, bh_work);
	cw1200_bh(priv);
}
/*
 * Create the BH workqueue, reset all BH bookkeeping and kick off the
 * BH work item.
 *
 * Returns 0 on success, -ENOMEM if the workqueue cannot be allocated.
 * NOTE(review): if queue_work() ever returned false here, this would
 * return 1 rather than a negative errno — callers should treat any
 * non-zero value as failure.
 */
int cw1200_register_bh(struct cw1200_common *priv)
{
	int err = 0;
	/* Realtime workqueue */
	priv->bh_workqueue = alloc_workqueue("cw1200_bh",
				WQ_MEM_RECLAIM | WQ_HIGHPRI
				| WQ_CPU_INTENSIVE, 1);

	if (!priv->bh_workqueue)
		return -ENOMEM;

	/* Work item must be initialized before it can be queued below */
	INIT_WORK(&priv->bh_work, cw1200_bh_work);

	pr_debug("[BH] register.\n");

	/* Reset all wakeup/termination flags and buffer accounting */
	atomic_set(&priv->bh_rx, 0);
	atomic_set(&priv->bh_tx, 0);
	atomic_set(&priv->bh_term, 0);
	atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
	priv->bh_error = 0;
	priv->hw_bufs_used = 0;
	priv->buf_id_tx = 0;
	priv->buf_id_rx = 0;
	init_waitqueue_head(&priv->bh_wq);
	init_waitqueue_head(&priv->bh_evt_wq);

	err = !queue_work(priv->bh_workqueue, &priv->bh_work);
	WARN_ON(err);
	return err;
}
/*
 * Stop the BH thread and destroy its workqueue.
 * Setting bh_term then waking bh_wq makes the BH loop exit; the flush
 * waits for that exit before the workqueue is destroyed.
 */
void cw1200_unregister_bh(struct cw1200_common *priv)
{
	atomic_add(1, &priv->bh_term);
	wake_up(&priv->bh_wq);

	flush_workqueue(priv->bh_workqueue);

	destroy_workqueue(priv->bh_workqueue);
	priv->bh_workqueue = NULL;

	pr_debug("[BH] unregistered.\n");
}
/*
 * Device interrupt entry: mask further device interrupts (the BH loop
 * re-enables them when it has drained the device) and wake the BH
 * thread to service RX.  Only the 0 -> 1 transition of bh_rx needs to
 * wake the thread; higher counts mean it is already pending.
 */
void cw1200_irq_handler(struct cw1200_common *priv)
{
	pr_debug("[BH] irq.\n");

	/* Disable Interrupts! */
	/* NOTE:  hwbus_ops->lock already held */
	__cw1200_irq_enable(priv, 0);

	/* A fatal BH error means nobody will service this interrupt */
	if (/* WARN_ON */(priv->bh_error))
		return;

	if (atomic_add_return(1, &priv->bh_rx) == 1)
		wake_up(&priv->bh_wq);
}
EXPORT_SYMBOL_GPL(cw1200_irq_handler);
/*
 * Request TX service from the BH thread.  No-op (with an error log)
 * once the BH has hit a fatal error.
 */
void cw1200_bh_wakeup(struct cw1200_common *priv)
{
	pr_debug("[BH] wakeup.\n");

	if (!priv->bh_error) {
		/* Only the 0 -> 1 transition needs an actual wakeup */
		if (atomic_add_return(1, &priv->bh_tx) == 1)
			wake_up(&priv->bh_wq);
	} else {
		pr_err("[BH] wakeup failed (BH error)\n");
	}
}
/*
 * Ask the BH thread to suspend and wait (up to 1s) for it to
 * acknowledge.  Returns 0 on success, -EINVAL if the BH is already in
 * error state, -ETIMEDOUT if the acknowledgement never arrives.
 */
int cw1200_bh_suspend(struct cw1200_common *priv)
{
	long remaining;

	pr_debug("[BH] suspend.\n");

	if (priv->bh_error) {
		wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n");
		return -EINVAL;
	}

	atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND);
	wake_up(&priv->bh_wq);

	remaining = wait_event_timeout(priv->bh_evt_wq,
				       priv->bh_error ||
				       (atomic_read(&priv->bh_suspend) ==
					CW1200_BH_SUSPENDED),
				       1 * HZ);
	return remaining ? 0 : -ETIMEDOUT;
}
/*
 * Ask the BH thread to resume and wait (up to 1s) for it to confirm.
 * Returns 0 on success, -EINVAL on prior BH error, -ETIMEDOUT if the
 * confirmation never arrives.
 */
int cw1200_bh_resume(struct cw1200_common *priv)
{
	long remaining;

	pr_debug("[BH] resume.\n");

	if (priv->bh_error) {
		wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n");
		return -EINVAL;
	}

	atomic_set(&priv->bh_suspend, CW1200_BH_RESUME);
	wake_up(&priv->bh_wq);

	remaining = wait_event_timeout(priv->bh_evt_wq,
				       priv->bh_error ||
				       (atomic_read(&priv->bh_suspend) ==
					CW1200_BH_RESUMED),
				       1 * HZ);
	return remaining ? 0 : -ETIMEDOUT;
}
/* Account one more in-flight device TX buffer.
 * No locking here — presumably serialized by the single BH thread;
 * verify before calling from any other context. */
static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv)
{
	++priv->hw_bufs_used;
}
/*
 * Return `count` device TX buffers to the pool.
 *
 * Returns -1 (with a WARN) on underflow, 1 if the pool was previously
 * exhausted (so TX may now make progress), 0 otherwise.  Waiters on
 * bh_evt_wq are woken when the pool drains to zero.
 */
int wsm_release_tx_buffer(struct cw1200_common *priv, int count)
{
	int was_used = priv->hw_bufs_used;
	int ret = 0;

	priv->hw_bufs_used -= count;

	if (WARN_ON(priv->hw_bufs_used < 0))
		ret = -1;
	else if (was_used >= priv->wsm_caps.input_buffers)
		ret = 1;

	if (!priv->hw_bufs_used)
		wake_up(&priv->bh_evt_wq);

	return ret;
}
/*
 * Read the ST90TDS control register, retrying once on failure.
 * Returns 0 on success or the error code of the final attempt.
 */
static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv,
				   u16 *ctrl_reg)
{
	int attempt;
	int ret = 0;

	for (attempt = 0; attempt < 2; attempt++) {
		ret = cw1200_reg_read_16(priv,
					 ST90TDS_CONTROL_REG_ID, ctrl_reg);
		if (!ret)
			return 0;
	}

	pr_err("[BH] Failed to read control register.\n");
	return ret;
}
/*
 * Try to wake the device: program the DPLL, assert WLAN_UP, then poll
 * the control register once.
 *
 * Returns 1 if the device reports WLAN_RDY (awake), 0 if it has not
 * woken yet, or a negative error code on a register access failure.
 */
static int cw1200_device_wakeup(struct cw1200_common *priv)
{
	u16 ctrl_reg;
	int err;

	pr_debug("[BH] Device wakeup.\n");

	/* First, set the dpll register */
	err = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
				  cw1200_dpll_from_clk(priv->hw_refclk));
	if (WARN_ON(err))
		return err;

	/* To force the device to be always-on, the host sets WLAN_UP to 1 */
	err = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
				  ST90TDS_CONT_WUP_BIT);
	if (WARN_ON(err))
		return err;

	err = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg);
	if (WARN_ON(err))
		return err;

	/* WLAN_RDY set means the device is active and will remain active. */
	if (!(ctrl_reg & ST90TDS_CONT_RDY_BIT))
		return 0;

	pr_debug("[BH] Device awake.\n");
	return 1;
}
/* Must be called from the BH thread. */
/*
 * Record whether the BH thread is allowed to power the device down
 * when idle (read by the main cw1200_bh() loop).
 *
 * Fix: the debug message previously read "Powerave".
 */
void cw1200_enable_powersave(struct cw1200_common *priv,
			     bool enable)
{
	pr_debug("[BH] Powersave is %s.\n",
		 enable ? "enabled" : "disabled");
	priv->powersave_enabled = enable;
}
/*
 * Read and dispatch one WSM frame from the device.
 *
 * *ctrl_reg supplies the length of the next pending message (in 16-bit
 * words) and is updated in place from the piggybacked control word at
 * the end of the read buffer, so the caller can see whether more data
 * is pending without an extra register read.  *tx is set to 1 when a
 * TX-confirm frees a device buffer and TX may make progress.
 *
 * Returns 0 on success or when there is nothing to read, a negative
 * value on any error (the caller treats that as fatal for the BH).
 */
static int cw1200_bh_rx_helper(struct cw1200_common *priv,
			       uint16_t *ctrl_reg,
			       int *tx)
{
	size_t read_len = 0;
	struct sk_buff *skb_rx = NULL;
	struct wsm_hdr *wsm;
	size_t wsm_len;
	u16 wsm_id;
	u8 wsm_seq;
	int rx_resync = 1;

	size_t alloc_len;
	u8 *data;

	/* Length field is in 16-bit words; convert to bytes */
	read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2;
	if (!read_len)
		return 0; /* No more work */

	if (WARN_ON((read_len < sizeof(struct wsm_hdr)) ||
		    (read_len > EFFECTIVE_BUF_SIZE))) {
		pr_debug("Invalid read len: %zu (%04x)",
			 read_len, *ctrl_reg);
		goto err;
	}

	/* Add SIZE of PIGGYBACK reg (CONTROL Reg)
	 * to the NEXT Message length + 2 Bytes for SKB
	 */
	read_len = read_len + 2;

	alloc_len = priv->hwbus_ops->align_size(
		priv->hwbus_priv, read_len);

	/* Check if not exceeding CW1200 capabilities */
	if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) {
		pr_debug("Read aligned len: %zu\n",
			 alloc_len);
	}

	skb_rx = dev_alloc_skb(alloc_len);
	if (WARN_ON(!skb_rx))
		goto err;

	skb_trim(skb_rx, 0);
	skb_put(skb_rx, read_len);
	data = skb_rx->data;
	if (WARN_ON(!data))
		goto err;

	/* Read the whole aligned buffer, including the piggybacked word */
	if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) {
		pr_err("rx blew up, len %zu\n", alloc_len);
		goto err;
	}

	/* Piggyback: last 16-bit word is a fresh control register value */
	*ctrl_reg = __le16_to_cpu(
		((__le16 *)data)[alloc_len / 2 - 1]);

	wsm = (struct wsm_hdr *)data;
	wsm_len = __le16_to_cpu(wsm->len);
	if (WARN_ON(wsm_len > read_len))
		goto err;

	if (priv->wsm_enable_wsm_dumps)
		print_hex_dump_bytes("<-- ",
				     DUMP_PREFIX_NONE,
				     data, wsm_len);

	/* id[11:0] = message id, id[15:13] = sequence number */
	wsm_id  = __le16_to_cpu(wsm->id) & 0xFFF;
	wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7;

	skb_trim(skb_rx, wsm_len);

	if (wsm_id == 0x0800) {
		/* Firmware exception indication — fatal */
		wsm_handle_exception(priv,
				     &data[sizeof(*wsm)],
				     wsm_len - sizeof(*wsm));
		goto err;
	} else if (!rx_resync) {
		/* NOTE(review): rx_resync is always 1 at this point, so
		 * this sequence-number check is dead code — confirm
		 * whether it was meant to persist across calls. */
		if (WARN_ON(wsm_seq != priv->wsm_rx_seq))
			goto err;
	}
	priv->wsm_rx_seq = (wsm_seq + 1) & 7;
	rx_resync = 0;

	/* Bit 0x0400 marks a TX confirmation: a device buffer is freed */
	if (wsm_id & 0x0400) {
		int rc = wsm_release_tx_buffer(priv, 1);
		if (WARN_ON(rc < 0))
			return rc;
		else if (rc > 0)
			*tx = 1;
	}

	/* cw1200_wsm_rx takes care of SKB lifetime (may consume skb_rx) */
	if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
		goto err;

	if (skb_rx) {
		dev_kfree_skb(skb_rx);
		skb_rx = NULL;
	}

	return 0;

err:
	if (skb_rx) {
		dev_kfree_skb(skb_rx);
		skb_rx = NULL;
	}
	return -1;
}
/*
 * Send one pending WSM frame to the device.
 *
 * If the device is asleep, a wakeup is attempted first; while it has
 * not woken, *pending_tx is set so the caller retries later.  On send,
 * the per-frame sequence number is patched into the WSM header and
 * *tx_burst reports how many more frames could follow.
 *
 * Returns 1 if more work remains, 0 if done (or deferred), negative on
 * a bus error.
 */
static int cw1200_bh_tx_helper(struct cw1200_common *priv,
			       int *pending_tx,
			       int *tx_burst)
{
	size_t tx_len;
	u8 *data;
	int ret;
	struct wsm_hdr *wsm;

	if (priv->device_can_sleep) {
		ret = cw1200_device_wakeup(priv);
		if (WARN_ON(ret < 0)) { /* Error in wakeup */
			*pending_tx = 1;
			return 0;
		} else if (ret) { /* Woke up */
			priv->device_can_sleep = false;
		} else { /* Did not awake */
			*pending_tx = 1;
			return 0;
		}
	}

	/* Reserve a device buffer before asking WSM for a frame */
	wsm_alloc_tx_buffer(priv);
	ret = wsm_get_tx(priv, &data, &tx_len, tx_burst);
	if (ret <= 0) {
		/* Nothing to send (or error): give the buffer back */
		wsm_release_tx_buffer(priv, 1);
		if (WARN_ON(ret < 0))
			return ret; /* Error */
		return 0; /* No work */
	}

	wsm = (struct wsm_hdr *)data;
	BUG_ON(tx_len < sizeof(*wsm));
	BUG_ON(__le16_to_cpu(wsm->len) != tx_len);

	atomic_add(1, &priv->bh_tx);

	tx_len = priv->hwbus_ops->align_size(
		priv->hwbus_priv, tx_len);

	/* Check if not exceeding CW1200 capabilities */
	if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE))
		pr_debug("Write aligned len: %zu\n", tx_len);

	/* Patch the TX sequence number into the header's seq bits */
	wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX));
	wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq));

	if (WARN_ON(cw1200_data_write(priv, data, tx_len))) {
		pr_err("tx blew up, len %zu\n", tx_len);
		wsm_release_tx_buffer(priv, 1);
		return -1; /* Error */
	}

	if (priv->wsm_enable_wsm_dumps)
		print_hex_dump_bytes("--> ",
				     DUMP_PREFIX_NONE,
				     data,
				     __le16_to_cpu(wsm->len));

	wsm_txed(priv, data);
	priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX;

	if (*tx_burst > 1) {
		cw1200_debug_tx_burst(priv);
		return 1; /* Work remains */
	}

	return 0;
}
static int cw1200_bh(void *arg)
{
struct cw1200_common *priv = arg;
int rx, tx, term, suspend;
u16 ctrl_reg = 0;
int tx_allowed;
int pending_tx = 0;
int tx_burst;
long status;
u32 dummy;
int ret;
for (;;) {
if (!priv->hw_bufs_used &&
priv->powersave_enabled &&
!priv->device_can_sleep &&
!atomic_read(&priv->recent_scan)) {
status = 1 * HZ;
pr_debug("[BH] Device wakedown. No data.\n");
cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0);
priv->device_can_sleep = true;
} else if (priv->hw_bufs_used) {
/* Interrupt loss detection */
status = 1 * HZ;
} else {
status = MAX_SCHEDULE_TIMEOUT;
}
/* Dummy Read for SDIO retry mechanism*/
if ((priv->hw_type != -1) &&
(atomic_read(&priv->bh_rx) == 0) &&
(atomic_read(&priv->bh_tx) == 0))
cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID,
&dummy, sizeof(dummy));
pr_debug("[BH] waiting ...\n");
status = wait_event_interruptible_timeout(priv->bh_wq, ({
rx = atomic_xchg(&priv->bh_rx, 0);
tx = atomic_xchg(&priv->bh_tx, 0);
term = atomic_xchg(&priv->bh_term, 0);
suspend = pending_tx ?
0 : atomic_read(&priv->bh_suspend);
(rx || tx || term || suspend || priv->bh_error);
}), status);
pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n",
rx, tx, term, suspend, priv->bh_error, status);
/* Did an error occur? */
if ((status < 0 && status != -ERESTARTSYS) ||
term || priv->bh_error) {
break;
}
if (!status) { /* wait_event timed out */
unsigned long timestamp = jiffies;
long timeout;
int pending = 0;
int i;
/* Check to see if we have any outstanding frames */
if (priv->hw_bufs_used && (!rx || !tx)) {
wiphy_warn(priv->hw->wiphy,
"Missed interrupt? (%d frames outstanding)\n",
priv->hw_bufs_used);
rx = 1;
/* Get a timestamp of "oldest" frame */
for (i = 0; i < 4; ++i)
pending += cw1200_queue_get_xmit_timestamp(
&priv->tx_queue[i],
×tamp,
priv->pending_frame_id);
/* Check if frame transmission is timed out.
* Add an extra second with respect to possible
* interrupt loss.
*/
timeout = timestamp +
WSM_CMD_LAST_CHANCE_TIMEOUT +
1 * HZ -
jiffies;
/* And terminate BH thread if the frame is "stuck" */
if (pending && timeout < 0) {
wiphy_warn(priv->hw->wiphy,
"Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n",
priv->hw_bufs_used, pending,
timestamp, jiffies);
break;
}
} else if (!priv->device_can_sleep &&
!atomic_read(&priv->recent_scan)) {
pr_debug("[BH] Device wakedown. Timeout.\n");
cw1200_reg_write_16(priv,
ST90TDS_CONTROL_REG_ID, 0);
priv->device_can_sleep = true;
}
goto done;
} else if (suspend) {
pr_debug("[BH] Device suspend.\n");
if (priv->powersave_enabled) {
pr_debug("[BH] Device wakedown. Suspend.\n");
cw1200_reg_write_16(priv,
ST90TDS_CONTROL_REG_ID, 0);
priv->device_can_sleep = true;
}
atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED);
wake_up(&priv->bh_evt_wq);
status = wait_event_interruptible(priv->bh_wq,
CW1200_BH_RESUME == atomic_read(&priv->bh_suspend));
if (status < 0) {
wiphy_err(priv->hw->wiphy,
"Failed to wait for resume: %ld.\n",
status);
break;
}
pr_debug("[BH] Device resume.\n");
atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
wake_up(&priv->bh_evt_wq);
atomic_add(1, &priv->bh_rx);
goto done;
}
rx:
tx += pending_tx;
pending_tx = 0;
if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
break;
/* Don't bother trying to rx unless we have data to read */
if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
if (ret < 0)
break;
/* Double up here if there's more data.. */
if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
if (ret < 0)
break;
}
}
tx:
if (tx) {
tx = 0;
BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers);
tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used;
tx_allowed = tx_burst > 0;
if (!tx_allowed) {
/* Buffers full. Ensure we process tx
* after we handle rx..
*/
pending_tx = tx;
goto done_rx;
}
ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst);
if (ret < 0)
break;
if (ret > 0) /* More to transmit */
tx = ret;
/* Re-read ctrl reg */
if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
break;
}
done_rx:
if (priv->bh_error)
break;
if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK)
goto rx;
if (tx)
goto tx;
done:
/* Re-enable device interrupts */
priv->hwbus_ops->lock(priv->hwbus_priv);
__cw1200_irq_enable(priv, 1);
priv->hwbus_ops->unlock(priv->hwbus_priv);
}
/* Explicitly disable device interrupts */
priv->hwbus_ops->lock(priv->hwbus_priv);
__cw1200_irq_enable(priv, 0);
priv->hwbus_ops->unlock(priv->hwbus_priv);
if (!term) {
pr_err("[BH] Fatal error, exiting.\n");
priv->bh_error = 1;
/* TODO: schedule_work(recovery) */
}
return 0;
}
| gpl-2.0 |
XileForce/Linaro-LSK | drivers/net/wireless/rtlwifi/rtl8188ee/pwrseq.c | 2785 | 3418 | /******************************************************************************
*
* Copyright(c) 2009-2013 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "pwrseqcmd.h"
#include "pwrseq.h"
/* Drivers should parse the arrays below and perform the corresponding
 * register actions.  Each array is built from the RTL8188E_TRANS_*
 * macro blocks (defined in pwrseq.h) and is sized by the matching
 * *_STEPS constants, always ending with the RTL8188E_TRANS_END
 * terminator. */

/* Power-on: card-emulation state -> active */
struct wlan_pwr_cfg rtl8188e_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS +
					   RTL8188E_TRANS_END_STEPS] = {
	RTL8188E_TRANS_CARDEMU_TO_ACT
	RTL8188E_TRANS_END
};

/* Radio off: active -> card-emulation state */
struct wlan_pwr_cfg rtl8188e_radio_off_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS
					    + RTL8188E_TRANS_END_STEPS] = {
	RTL8188E_TRANS_ACT_TO_CARDEMU
	RTL8188E_TRANS_END
};

/* Card disable: active -> card-emulation -> card-disabled */
struct wlan_pwr_cfg rtl8188e_card_disable_flow
	[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS +
	 RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS +
	 RTL8188E_TRANS_END_STEPS] = {
	RTL8188E_TRANS_ACT_TO_CARDEMU
	RTL8188E_TRANS_CARDEMU_TO_CARDDIS
	RTL8188E_TRANS_END
};

/* Card enable: card-disabled -> card-emulation -> active */
struct wlan_pwr_cfg rtl8188e_card_enable_flow
	[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS +
	 RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS +
	 RTL8188E_TRANS_END_STEPS] = {
	RTL8188E_TRANS_CARDDIS_TO_CARDEMU
	RTL8188E_TRANS_CARDEMU_TO_ACT
	RTL8188E_TRANS_END
};

/* Suspend: active -> card-emulation -> suspended */
struct wlan_pwr_cfg rtl8188e_suspend_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS
					  + RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS
					  + RTL8188E_TRANS_END_STEPS] = {
	RTL8188E_TRANS_ACT_TO_CARDEMU
	RTL8188E_TRANS_CARDEMU_TO_SUS
	RTL8188E_TRANS_END
};

/* Resume: suspended -> card-emulation -> active */
struct wlan_pwr_cfg rtl8188e_resume_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS
					 + RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS
					 + RTL8188E_TRANS_END_STEPS] = {
	RTL8188E_TRANS_SUS_TO_CARDEMU
	RTL8188E_TRANS_CARDEMU_TO_ACT
	RTL8188E_TRANS_END
};

/* Hardware power-down: active -> card-emulation -> powered down */
struct wlan_pwr_cfg rtl8188e_hwpdn_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS
					+ RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS
					+ RTL8188E_TRANS_END_STEPS] = {
	RTL8188E_TRANS_ACT_TO_CARDEMU
	RTL8188E_TRANS_CARDEMU_TO_PDN
	RTL8188E_TRANS_END
};

/* Enter LPS (leisure power save) */
struct wlan_pwr_cfg rtl8188e_enter_lps_flow[RTL8188E_TRANS_ACT_TO_LPS_STEPS
					    + RTL8188E_TRANS_END_STEPS] = {
	/*FW behavior*/
	RTL8188E_TRANS_ACT_TO_LPS
	RTL8188E_TRANS_END
};

/* Leave LPS (leisure power save) */
struct wlan_pwr_cfg rtl8188e_leave_lps_flow[RTL8188E_TRANS_LPS_TO_ACT_STEPS
					    + RTL8188E_TRANS_END_STEPS] = {
	/*FW behavior*/
	RTL8188E_TRANS_LPS_TO_ACT
	RTL8188E_TRANS_END
};
| gpl-2.0 |
scotthartbti/android_kernel_samsung_trlte | drivers/video/sis/initextlfb.c | 4833 | 7140 | /*
* SiS 300/540/630[S]/730[S]
* SiS 315[E|PRO]/550/[M]65x/[M]66x[F|M|G]X/[M]74x[GX]/330/[M]76x[GX]
* XGI V3XT/V5/V8, Z7
* frame buffer driver for Linux kernels >= 2.4.14 and >=2.6.3
*
* Linux kernel specific extensions to init.c/init301.c
*
* Copyright (C) 2001-2005 Thomas Winischhofer, Vienna, Austria.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the named License,
* or any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
*
* Author: Thomas Winischhofer <thomas@winischhofer.net>
*/
#include "initdef.h"
#include "vgatypes.h"
#include "vstruct.h"
#include <linux/types.h>
#include <linux/fb.h>
int sisfb_mode_rate_to_dclock(struct SiS_Private *SiS_Pr,
unsigned char modeno, unsigned char rateindex);
int sisfb_mode_rate_to_ddata(struct SiS_Private *SiS_Pr, unsigned char modeno,
unsigned char rateindex, struct fb_var_screeninfo *var);
bool sisfb_gettotalfrommode(struct SiS_Private *SiS_Pr, unsigned char modeno,
int *htotal, int *vtotal, unsigned char rateindex);
extern bool SiSInitPtr(struct SiS_Private *SiS_Pr);
extern bool SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo,
unsigned short *ModeIdIndex);
extern void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
int xres, int yres, struct fb_var_screeninfo *var, bool writeres);
/*
 * Look up the dot clock (in Hz) for a given video mode and refresh-rate
 * index.  Falls back to 65000 when the init tables are unavailable or
 * the mode cannot be found.
 */
int
sisfb_mode_rate_to_dclock(struct SiS_Private *SiS_Pr, unsigned char modeno,
			  unsigned char rateindex)
{
	unsigned short ModeNo = modeno;
	unsigned short ModeIdIndex = 0;
	unsigned short rrti, vclkidx;

	if (!SiSInitPtr(SiS_Pr))
		return 65000;

	/* rateindex is 1-based from the caller */
	if (rateindex > 0)
		rateindex--;

#ifdef CONFIG_FB_SIS_315
	/* Map 315-series aliases onto their base modes */
	switch (ModeNo) {
	case 0x5a:
		ModeNo = 0x50;
		break;
	case 0x5b:
		ModeNo = 0x56;
	}
#endif

	if (!SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex)) {
		printk(KERN_ERR "Could not find mode %x\n", ModeNo);
		return 65000;
	}

	rrti = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].REFindex;

	if ((SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag & HaveWideTiming) &&
	    (SiS_Pr->SiS_UseWide == 1)) {
		/* Wide screen: Ignore rateindex */
		vclkidx = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK_WIDE;
	} else if (SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag & HaveWideTiming) {
		rrti += rateindex;
		vclkidx = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK_NORM;
	} else {
		rrti += rateindex;
		vclkidx = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK;
	}

	/* Table stores the clock in kHz */
	return SiS_Pr->SiS_VCLKData[vclkidx].CLOCK * 1000;
}
/*
 * Fill a struct fb_var_screeninfo's timing fields (sync polarity and
 * vmode) from the CRT1 CRTC table for the given mode/refresh index.
 * Returns 1 on success, 0 if the tables are unavailable or the mode is
 * not found.
 */
int
sisfb_mode_rate_to_ddata(struct SiS_Private *SiS_Pr, unsigned char modeno,
			 unsigned char rateindex, struct fb_var_screeninfo *var)
{
	unsigned short ModeNo = modeno;
	unsigned short ModeIdIndex = 0, index = 0, RRTI = 0;
	int j;

	if(!SiSInitPtr(SiS_Pr)) return 0;

	/* rateindex is 1-based from the caller */
	if(rateindex > 0) rateindex--;

#ifdef CONFIG_FB_SIS_315
	/* Map 315-series aliases onto their base modes */
	switch(ModeNo) {
	case 0x5a: ModeNo = 0x50; break;
	case 0x5b: ModeNo = 0x56;
	}
#endif

	if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) return 0;

	RRTI = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].REFindex;

	/* Pick the CRT1 CRTC table entry, honoring wide-screen timing */
	if(SiS_Pr->SiS_RefIndex[RRTI].Ext_InfoFlag & HaveWideTiming) {
		if(SiS_Pr->SiS_UseWide == 1) {
			/* Wide screen: Ignore rateindex */
			index = SiS_Pr->SiS_RefIndex[RRTI].Ext_CRT1CRTC_WIDE;
		} else {
			RRTI += rateindex;
			index = SiS_Pr->SiS_RefIndex[RRTI].Ext_CRT1CRTC_NORM;
		}
	} else {
		RRTI += rateindex;
		index = SiS_Pr->SiS_RefIndex[RRTI].Ext_CRT1CRTC;
	}

	/* Decode raw CRTC register data into margins/sync lengths */
	SiS_Generic_ConvertCRData(SiS_Pr,
			(unsigned char *)&SiS_Pr->SiS_CRT1Table[index].CR[0],
			SiS_Pr->SiS_RefIndex[RRTI].XRes,
			SiS_Pr->SiS_RefIndex[RRTI].YRes,
			var, false);

	/* InfoFlag bit 0x8000: vsync active low; 0x4000: hsync active low */
	if(SiS_Pr->SiS_RefIndex[RRTI].Ext_InfoFlag & 0x8000)
		var->sync &= ~FB_SYNC_VERT_HIGH_ACT;
	else
		var->sync |= FB_SYNC_VERT_HIGH_ACT;

	if(SiS_Pr->SiS_RefIndex[RRTI].Ext_InfoFlag & 0x4000)
		var->sync &= ~FB_SYNC_HOR_HIGH_ACT;
	else
		var->sync |= FB_SYNC_HOR_HIGH_ACT;

	var->vmode = FB_VMODE_NONINTERLACED;
	if(SiS_Pr->SiS_RefIndex[RRTI].Ext_InfoFlag & 0x0080)
		var->vmode = FB_VMODE_INTERLACED;
	else {
		/* Scan the mode table for a DoubleScan flag on this mode */
		j = 0;
		while(SiS_Pr->SiS_EModeIDTable[j].Ext_ModeID != 0xff) {
			if(SiS_Pr->SiS_EModeIDTable[j].Ext_ModeID ==
					SiS_Pr->SiS_RefIndex[RRTI].ModeID) {
				if(SiS_Pr->SiS_EModeIDTable[j].Ext_ModeFlag & DoubleScanMode) {
					var->vmode = FB_VMODE_DOUBLE;
				}
				break;
			}
			j++;
		}
	}

	if((var->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED) {
#if 0 /* Do this? */
		var->upper_margin <<= 1;
		var->lower_margin <<= 1;
		var->vsync_len <<= 1;
#endif
	} else if((var->vmode & FB_VMODE_MASK) == FB_VMODE_DOUBLE) {
		/* Double scan: table values are for the doubled frame */
		var->upper_margin >>= 1;
		var->lower_margin >>= 1;
		var->vsync_len >>= 1;
	}

	return 1;
}
/*
 * Compute the total horizontal (pixels) and vertical (lines) frame
 * size for a mode/refresh index by decoding the raw CRT1 CRTC
 * registers.  Returns true on success, false if the tables are
 * unavailable or the mode is not found.
 */
bool
sisfb_gettotalfrommode(struct SiS_Private *SiS_Pr, unsigned char modeno, int *htotal,
		       int *vtotal, unsigned char rateindex)
{
	unsigned short ModeNo = modeno;
	unsigned short ModeIdIndex = 0, CRT1Index = 0;
	unsigned short RRTI = 0;
	unsigned char  sr_data, cr_data, cr_data2;

	if(!SiSInitPtr(SiS_Pr)) return false;

	/* rateindex is 1-based from the caller */
	if(rateindex > 0) rateindex--;

#ifdef CONFIG_FB_SIS_315
	/* Map 315-series aliases onto their base modes */
	switch(ModeNo) {
	case 0x5a: ModeNo = 0x50; break;
	case 0x5b: ModeNo = 0x56;
	}
#endif

	if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) return false;

	RRTI = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].REFindex;

	/* Pick the CRT1 CRTC table entry, honoring wide-screen timing */
	if(SiS_Pr->SiS_RefIndex[RRTI].Ext_InfoFlag & HaveWideTiming) {
		if(SiS_Pr->SiS_UseWide == 1) {
			/* Wide screen: Ignore rateindex */
			CRT1Index = SiS_Pr->SiS_RefIndex[RRTI].Ext_CRT1CRTC_WIDE;
		} else {
			RRTI += rateindex;
			CRT1Index = SiS_Pr->SiS_RefIndex[RRTI].Ext_CRT1CRTC_NORM;
		}
	} else {
		RRTI += rateindex;
		CRT1Index = SiS_Pr->SiS_RefIndex[RRTI].Ext_CRT1CRTC;
	}

	/* Horizontal total: 10-bit value in character clocks (x8 pixels),
	 * low 8 bits in CR[0], bits 9:8 in CR[14], plus the VGA +5 bias */
	sr_data = SiS_Pr->SiS_CRT1Table[CRT1Index].CR[14];
	cr_data = SiS_Pr->SiS_CRT1Table[CRT1Index].CR[0];
	*htotal = (((cr_data & 0xff) | ((unsigned short) (sr_data & 0x03) << 8)) + 5) * 8;

	/* Vertical total: 11-bit value scattered across CR[6] (bits 7:0),
	 * CR[7] overflow bits, and CR[13] bit 0, plus the VGA +2 bias */
	sr_data = SiS_Pr->SiS_CRT1Table[CRT1Index].CR[13];
	cr_data = SiS_Pr->SiS_CRT1Table[CRT1Index].CR[6];
	cr_data2 = SiS_Pr->SiS_CRT1Table[CRT1Index].CR[7];
	*vtotal = ((cr_data & 0xFF) |
		   ((unsigned short)(cr_data2 & 0x01) <<  8) |
		   ((unsigned short)(cr_data2 & 0x20) <<  4) |
		   ((unsigned short)(sr_data  & 0x01) << 10)) + 2;

	/* Interlaced modes store half the frame height in the tables */
	if(SiS_Pr->SiS_RefIndex[RRTI].Ext_InfoFlag & InterlaceMode)
		*vtotal *= 2;

	return true;
}
| gpl-2.0 |
Kurre/kernel_exynos_KK | arch/x86/mm/tlb.c | 7649 | 9009 | #include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
= { &init_mm, 0, };
/*
* Smarter SMP flushing macros.
* c/o Linus Torvalds.
*
* These mean you can really definitely utterly forget about
* writing to user space from interrupts. (Its not allowed anyway).
*
* Optimizations Manfred Spraul <manfred@colorfullife.com>
*
* More scalable flush, from Andi Kleen
*
* To avoid global state use 8 different call vectors.
* Each CPU uses a specific vector to trigger flushes on other
* CPUs. Depending on the received vector the target CPUs look into
* the right array slot for the flush data.
*
* With more than 8 CPUs they are hashed to the 8 available
* vectors. The limited global vector space forces us to this right now.
* In future when interrupts are split into per CPU domains this could be
* fixed, at the cost of triggering multiple IPIs in some cases.
*/
/* Per-vector TLB shootdown mailbox shared between the IPI sender and
 * the target CPUs' interrupt handlers. */
union smp_flush_state {
	struct {
		struct mm_struct *flush_mm;	/* mm being flushed */
		unsigned long flush_va;		/* single VA, or TLB_FLUSH_ALL */
		raw_spinlock_t tlbstate_lock;	/* serializes senders sharing a slot */
		DECLARE_BITMAP(flush_cpumask, NR_CPUS); /* targets not yet done */
	};
	char pad[INTERNODE_CACHE_BYTES];	/* pad slot to a full cache line */
} ____cacheline_internodealigned_in_smp;

/* State is put into the per CPU data section, but padded
   to a full cache line because other CPUs can access it and we don't
   want false sharing in the per cpu data segment. */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

/* Which flush vector/slot this CPU uses when it is the sender;
 * assigned per node by calculate_tlb_offset(). */
static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * Drops this CPU out of a lazy-TLB mm: clearing the bit in
 * cpu_vm_mask stops further flush IPIs for that mm, and loading
 * swapper_pg_dir discards the stale user mappings.  Only valid when
 * the CPU is in lazy TLB state (hence the BUG on TLBSTATE_OK).
 */
void leave_mm(int cpu)
{
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	cpumask_clear_cpu(cpu,
			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
	load_cr3(swapper_pg_dir);
}
EXPORT_SYMBOL_GPL(leave_mm);
/*
*
* The flush IPI assumes that a thread switch happens in this order:
* [cpu0: the cpu that switches]
* 1) switch_mm() either 1a) or 1b)
* 1a) thread switch to a different mm
* 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
* Stop ipi delivery for the old mm. This is not synchronized with
* the other cpus, but smp_invalidate_interrupt ignore flush ipis
* for the wrong mm, and in the worst case we perform a superfluous
* tlb flush.
* 1a2) set cpu mmu_state to TLBSTATE_OK
* Now the smp_invalidate_interrupt won't call leave_mm if cpu0
* was in lazy tlb mode.
* 1a3) update cpu active_mm
* Now cpu0 accepts tlb flushes for the new mm.
* 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
* Now the other cpus will send tlb flush ipis.
* 1a4) change cr3.
* 1b) thread switch without mm change
* cpu active_mm is correct, cpu0 already handles
* flush ipis.
* 1b1) set cpu mmu_state to TLBSTATE_OK
* 1b2) test_and_set the cpu bit in cpu_vm_mask.
* Atomically set the bit [other cpus will start sending flush ipis],
* and test the bit.
* 1b3) if the bit was 0: leave_mm was called, flush the tlb.
* 2) switch %%esp, ie current
*
* The interrupt must handle 2 special cases:
* - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
* - the cpu performs speculative tlb reads, i.e. even if the cpu only
* runs in kernel space, the cpu could load tlb entries for user space
* pages.
*
* The good news is that cpu mmu_state is local to each cpu, no
* write/read ordering problems.
*/
/*
* TLB flush IPI:
*
* 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
* 2) Leave the mm if we are in the lazy tlb mode.
*
* Interrupts are disabled.
*/
/*
* FIXME: use of asmlinkage is not consistent. On x86_64 it's noop
* but still used for documentation purpose but the usage is slightly
* inconsistent. On x86_32, asmlinkage is regparm(0) but interrupt
* entry calls in with the first parameter in %eax. Maybe define
* intrlinkage?
*/
#ifdef CONFIG_X86_64
asmlinkage
#endif
/*
 * TLB flush IPI handler (interrupts disabled on entry).
 *
 * Recovers the sender's mailbox slot from the interrupt vector number,
 * flushes the local TLB if this CPU is actively using the flushed mm
 * (or leaves the mm when in lazy TLB mode), then acknowledges by
 * clearing its bit in the slot's cpumask.
 */
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	/* Stale IPI for an mm we already left: just ack below */
	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * its staying as a return
		 *
		 * BUG();
		 */

	if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
		if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			/* Active user of the mm: flush everything or one page */
			if (f->flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(f->flush_va);
		} else
			/* Lazy TLB: detach so we stop getting these IPIs */
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	/* Clearing our cpumask bit tells the spinning sender we are done;
	 * the barriers order the flush above against that release. */
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}
/*
 * Send a TLB flush IPI to every CPU in cpumask (except ourselves) and
 * spin until all targets have acknowledged by clearing their bit.
 * Caller must have preemption disabled.
 */
static void flush_tlb_others_ipi(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long va)
{
	unsigned int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = this_cpu_read(tlb_vector_offset);
	f = &flush_state[sender];

	/* With more CPUs than vectors a slot may be shared; lock it */
	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_va = va;
	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
		/*
		 * We have to send the IPI only to
		 * CPUs affected.
		 */
		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
				    INVALIDATE_TLB_VECTOR_START + sender);

		/* Busy-wait for every target to clear its bit */
		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
			cpu_relax();
	}

	f->flush_mm = NULL;
	f->flush_va = 0;
	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_unlock(&f->tlbstate_lock);
}
/*
 * Flush remote TLBs for @mm.  On SGI UV systems the Broadcast Assist
 * Unit handles (and may fully absorb) the shootdown; any CPUs it could
 * not cover are returned and flushed via the normal IPI path.
 */
void native_flush_tlb_others(const struct cpumask *cpumask,
			     struct mm_struct *mm, unsigned long va)
{
	if (!is_uv_system()) {
		flush_tlb_others_ipi(cpumask, mm, va);
		return;
	}

	cpumask = uv_flush_tlb_others(cpumask, mm, va, smp_processor_id());
	if (cpumask)
		flush_tlb_others_ipi(cpumask, mm, va);
}
/*
 * Partition the invalidate vectors among NUMA nodes and assign each
 * CPU a vector offset within its node's share, to spread IPI traffic.
 */
static void __cpuinit calculate_tlb_offset(void)
{
	int cpu, node, idx = 0;
	int vecs_per_node;

	/*
	 * we are changing tlb_vector_offset for each CPU in runtime, but this
	 * will not cause inconsistency, as the write is atomic under X86. we
	 * might see more lock contentions in a short time, but after all CPU's
	 * tlb_vector_offset are changed, everything should go normal
	 *
	 * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes !=0, we might
	 * waste some vectors.
	 **/
	if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
		vecs_per_node = 1;
	else
		vecs_per_node = NUM_INVALIDATE_TLB_VECTORS / nr_online_nodes;

	for_each_online_node(node) {
		/* Base vector for this node; wraps when nodes outnumber vectors. */
		int base = (idx % NUM_INVALIDATE_TLB_VECTORS) * vecs_per_node;
		int next = 0;

		for_each_cpu(cpu, cpumask_of_node(node)) {
			per_cpu(tlb_vector_offset, cpu) = base + next;
			next = (next + 1) % vecs_per_node;
		}
		idx++;
	}
}
/*
 * CPU hotplug callback: re-balance the per-CPU vector assignment
 * whenever a CPU comes online or goes away.
 */
static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	/* Mask off the FROZEN bit so suspend/resume events match too. */
	unsigned long what = action & 0xf;

	if (what == CPU_ONLINE || what == CPU_DEAD)
		calculate_tlb_offset();

	return NOTIFY_OK;
}
/*
 * One-time setup: initialize the per-vector flush-slot locks, compute
 * the initial vector assignment, and register for hotplug updates.
 */
static int __cpuinit init_smp_flush(void)
{
	union smp_flush_state *f;

	for (f = flush_state; f < flush_state + ARRAY_SIZE(flush_state); f++)
		raw_spin_lock_init(&f->tlbstate_lock);

	calculate_tlb_offset();
	hotcpu_notifier(tlb_cpuhp_notify, 0);
	return 0;
}
core_initcall(init_smp_flush);
/* Flush the current task's address space on all CPUs that use it. */
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	unsigned int cpu;

	preempt_disable();
	cpu = smp_processor_id();

	local_flush_tlb();
	/* Only IPI if some other CPU also has this mm loaded. */
	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);

	preempt_enable();
}
/* Flush all of @mm's translations, locally and on remote CPUs. */
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int cpu;

	preempt_disable();
	cpu = smp_processor_id();

	if (current->active_mm == mm) {
		/* A kernel thread merely borrows the mm: drop it lazily
		 * instead of flushing. */
		if (!current->mm)
			leave_mm(cpu);
		else
			local_flush_tlb();
	}
	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);

	preempt_enable();
}
/* Flush the translation for a single user address @va in @vma's mm. */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned int cpu;

	preempt_disable();
	cpu = smp_processor_id();

	if (current->active_mm == mm) {
		/* Kernel threads borrowing the mm drop it lazily. */
		if (!current->mm)
			leave_mm(cpu);
		else
			__flush_tlb_one(va);
	}
	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, va);

	preempt_enable();
}
/*
 * Per-CPU worker for flush_tlb_all(): flush everything, and drop a
 * lazily-held mm so we don't keep referencing it after the flush.
 */
static void do_flush_tlb_all(void *info)
{
	__flush_tlb_all();
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}
/* Flush the entire TLB on every online CPU, waiting for completion. */
void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}
| gpl-2.0 |
CM-Tab-S/android_kernel_samsung_klimtwifi | drivers/net/wireless/orinoco/hw.c | 8417 | 34700 | /* Encapsulate basic setting changes and retrieval on Hermes hardware
*
* See copyright notice in main.c
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/if_arp.h>
#include <linux/ieee80211.h>
#include <linux/wireless.h>
#include <net/cfg80211.h>
#include "hermes.h"
#include "hermes_rid.h"
#include "orinoco.h"
#include "hw.h"
/* Length of the Symbol firmware version string (see the parser in
 * determine_fw_capabilities()). */
#define SYMBOL_MAX_VER_LEN (14)
/* Symbol firmware has a bug allocating buffers larger than this */
#define TX_NICBUF_SIZE_BUG 1585
/********************************************************************/
/* Data tables                                                      */
/********************************************************************/

/* This table gives the actual meanings of the bitrate IDs returned
 * by the firmware.  The index into this table is the driver's
 * "ratemode"; Agere and Intersil firmware use different encodings
 * for the same rate, hence the two txratectrl columns. */
static const struct {
	int bitrate; /* in 100s of kilobits */
	int automatic;		/* non-zero: rate is an "up to" (auto) setting */
	u16 agere_txratectrl;
	u16 intersil_txratectrl;
} bitrate_table[] = {
	{110, 1,  3, 15}, /* Entry 0 is the default */
	{10,  0,  1,  1},
	{10,  1,  1,  1},
	{20,  0,  2,  2},
	{20,  1,  6,  3},
	{55,  0,  4,  4},
	{55,  1,  7,  7},
	{110, 0,  5,  8},
};
#define BITRATE_TABLE_SIZE ARRAY_SIZE(bitrate_table)
/* Firmware version encoding, as read raw from the card's NICID/STAID
 * records (fields arrive little-endian; callers byte-swap in place). */
struct comp_id {
	u16 id, variant, major, minor;
} __packed;
/*
 * Classify the firmware family from the NIC identity record:
 * IDs below 0x8000 are Agere; exactly 0x8000 with major 0 is Symbol;
 * anything else is Intersil.
 */
static inline enum fwtype determine_firmware_type(struct comp_id *nic_id)
{
	if (nic_id->id >= 0x8000) {
		if (nic_id->id == 0x8000 && nic_id->major == 0)
			return FIRMWARE_TYPE_SYMBOL;
		return FIRMWARE_TYPE_INTERSIL;
	}
	return FIRMWARE_TYPE_AGERE;
}
/* Set priv->firmware_type and determine firmware properties.
 * This function can be called before we have registered with netdev,
 * so all errors go out with dev_* rather than printk.
 *
 * If non-NULL, stores a firmware description in fw_name
 * (at most fw_name_len bytes, snprintf-truncated).
 * If non-NULL, stores a HW version in hw_ver.
 *
 * These are output via generic cfg80211 ethtool support.
 *
 * Returns 0 on success, a negative error if the identity records
 * cannot be read or the active firmware/chipset is unsupported.
 */
int determine_fw_capabilities(struct orinoco_private *priv,
			      char *fw_name, size_t fw_name_len,
			      u32 *hw_ver)
{
	struct device *dev = priv->dev;
	struct hermes *hw = &priv->hw;
	int err;
	struct comp_id nic_id, sta_id;
	unsigned int firmver;
	char tmp[SYMBOL_MAX_VER_LEN + 1] __attribute__((aligned(2)));

	/* Get the hardware version */
	err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_NICID, &nic_id);
	if (err) {
		dev_err(dev, "Cannot read hardware identity: error %d\n",
			err);
		return err;
	}

	/* Record fields arrive little-endian; convert in place. */
	le16_to_cpus(&nic_id.id);
	le16_to_cpus(&nic_id.variant);
	le16_to_cpus(&nic_id.major);
	le16_to_cpus(&nic_id.minor);
	dev_info(dev, "Hardware identity %04x:%04x:%04x:%04x\n",
		 nic_id.id, nic_id.variant, nic_id.major, nic_id.minor);

	/* Pack the low byte of each identity field into one u32. */
	if (hw_ver)
		*hw_ver = (((nic_id.id & 0xff) << 24) |
			   ((nic_id.variant & 0xff) << 16) |
			   ((nic_id.major & 0xff) << 8) |
			   (nic_id.minor & 0xff));

	priv->firmware_type = determine_firmware_type(&nic_id);

	/* Get the firmware version */
	err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_STAID, &sta_id);
	if (err) {
		dev_err(dev, "Cannot read station identity: error %d\n",
			err);
		return err;
	}
	le16_to_cpus(&sta_id.id);
	le16_to_cpus(&sta_id.variant);
	le16_to_cpus(&sta_id.major);
	le16_to_cpus(&sta_id.minor);
	dev_info(dev, "Station identity  %04x:%04x:%04x:%04x\n",
		 sta_id.id, sta_id.variant, sta_id.major, sta_id.minor);

	/* Only the station firmware variants are usable by this driver. */
	switch (sta_id.id) {
	case 0x15:
		dev_err(dev, "Primary firmware is active\n");
		return -ENODEV;
	case 0x14b:
		dev_err(dev, "Tertiary firmware is active\n");
		return -ENODEV;
	case 0x1f:	/* Intersil, Agere, Symbol Spectrum24 */
	case 0x21:	/* Symbol Spectrum24 Trilogy */
		break;
	default:
		dev_notice(dev, "Unknown station ID, please report\n");
		break;
	}

	/* Default capabilities */
	priv->has_sensitivity = 1;
	priv->has_mwo = 0;
	priv->has_preamble = 0;
	priv->has_port3 = 1;
	priv->has_ibss = 1;
	priv->has_wep = 0;
	priv->has_big_wep = 0;
	priv->has_alt_txcntl = 0;
	priv->has_ext_scan = 0;
	priv->has_wpa = 0;
	priv->do_fw_download = 0;

	/* Determine capabilities from the firmware version.  The firmver
	 * comparisons below encode major.minor as (major << 16) | minor. */
	switch (priv->firmware_type) {
	case FIRMWARE_TYPE_AGERE:
		/* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout,
		   ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */
		if (fw_name)
			snprintf(fw_name, fw_name_len, "Lucent/Agere %d.%02d",
				 sta_id.major, sta_id.minor);

		firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor;

		priv->has_ibss = (firmver >= 0x60006);
		priv->has_wep = (firmver >= 0x40020);
		priv->has_big_wep = 1; /* FIXME: this is wrong - how do we tell
					  Gold cards from the others? */
		priv->has_mwo = (firmver >= 0x60000);
		priv->has_pm = (firmver >= 0x40020); /* Don't work in 7.52 ? */
		priv->ibss_port = 1;
		priv->has_hostscan = (firmver >= 0x8000a);
		priv->do_fw_download = 1;
		priv->broken_monitor = (firmver >= 0x80000);
		priv->has_alt_txcntl = (firmver >= 0x90000); /* All 9.x ? */
		priv->has_ext_scan = (firmver >= 0x90000); /* All 9.x ? */
		priv->has_wpa = (firmver >= 0x9002a);
		/* Tested with Agere firmware :
		 *	1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II
		 * Tested CableTron firmware : 4.32 => Anton */
		break;
	case FIRMWARE_TYPE_SYMBOL:
		/* Symbol , 3Com AirConnect, Intel, Ericsson WLAN */
		/* Intel MAC : 00:02:B3:* */
		/* 3Com MAC : 00:50:DA:* */
		memset(tmp, 0, sizeof(tmp));
		/* Get the Symbol firmware version */
		err = hw->ops->read_ltv(hw, USER_BAP,
					HERMES_RID_SECONDARYVERSION_SYMBOL,
					SYMBOL_MAX_VER_LEN, NULL, &tmp);
		if (err) {
			dev_warn(dev, "Error %d reading Symbol firmware info. "
				 "Wildly guessing capabilities...\n", err);
			firmver = 0;
			tmp[0] = '\0';
		} else {
			/* The firmware revision is a string, the format is
			 * something like : "V2.20-01".
			 * Quick and dirty parsing... - Jean II
			 */
			firmver = ((tmp[1] - '0') << 16)
				| ((tmp[3] - '0') << 12)
				| ((tmp[4] - '0') << 8)
				| ((tmp[6] - '0') << 4)
				| (tmp[7] - '0');

			tmp[SYMBOL_MAX_VER_LEN] = '\0';
		}

		if (fw_name)
			snprintf(fw_name, fw_name_len, "Symbol %s", tmp);

		priv->has_ibss = (firmver >= 0x20000);
		priv->has_wep = (firmver >= 0x15012);
		priv->has_big_wep = (firmver >= 0x20000);
		priv->has_pm = (firmver >= 0x20000 && firmver < 0x22000) ||
			       (firmver >= 0x29000 && firmver < 0x30000) ||
			       firmver >= 0x31000;
		priv->has_preamble = (firmver >= 0x20000);
		priv->ibss_port = 4;

		/* Symbol firmware is found on various cards, but
		 * there has been no attempt to check firmware
		 * download on non-spectrum_cs based cards.
		 *
		 * Given that the Agere firmware download works
		 * differently, we should avoid doing a firmware
		 * download with the Symbol algorithm on non-spectrum
		 * cards.
		 *
		 * For now we can identify a spectrum_cs based card
		 * because it has a firmware reset function.
		 */
		priv->do_fw_download = (priv->stop_fw != NULL);

		priv->broken_disableport = (firmver == 0x25013) ||
				(firmver >= 0x30000 && firmver <= 0x31000);
		priv->has_hostscan = (firmver >= 0x31001) ||
				     (firmver >= 0x29057 && firmver < 0x30000);
		/* Tested with Intel firmware : 0x20015 => Jean II */
		/* Tested with 3Com firmware : 0x15012 & 0x22001 => Jean II */
		break;
	case FIRMWARE_TYPE_INTERSIL:
		/* D-Link, Linksys, Adtron, ZoomAir, and many others...
		 * Samsung, Compaq 100/200 and Proxim are slightly
		 * different and less well tested */
		/* D-Link MAC : 00:40:05:* */
		/* Addtron MAC : 00:90:D1:* */
		if (fw_name)
			snprintf(fw_name, fw_name_len, "Intersil %d.%d.%d",
				 sta_id.major, sta_id.minor, sta_id.variant);

		firmver = ((unsigned long)sta_id.major << 16) |
			((unsigned long)sta_id.minor << 8) | sta_id.variant;

		priv->has_ibss = (firmver >= 0x000700); /* FIXME */
		priv->has_big_wep = priv->has_wep = (firmver >= 0x000800);
		priv->has_pm = (firmver >= 0x000700);
		priv->has_hostscan = (firmver >= 0x010301);

		if (firmver >= 0x000800)
			priv->ibss_port = 0;
		else {
			dev_notice(dev, "Intersil firmware earlier than v0.8.x"
				   " - several features not supported\n");
			priv->ibss_port = 1;
		}
		break;
	}
	if (fw_name)
		dev_info(dev, "Firmware determined as %s\n", fw_name);

#ifndef CONFIG_HERMES_PRISM
	/* Intersil chips need the prism-specific support compiled in. */
	if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL) {
		dev_err(dev, "Support for Prism chipset is not enabled\n");
		return -ENODEV;
	}
#endif

	return 0;
}
/* Read settings from EEPROM into our private structure.
 * MAC address gets dropped into caller's buffer (dev_addr, ETH_ALEN
 * bytes).  Can be called before netdev registration.
 *
 * Returns 0 on success or the first fatal hermes read error.  A failed
 * AP-density read is non-fatal and merely disables sensitivity control.
 */
int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
{
	struct device *dev = priv->dev;
	struct hermes_idstring nickbuf;
	struct hermes *hw = &priv->hw;
	int len;
	int err;
	u16 reclen;

	/* Get the MAC address */
	err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
				ETH_ALEN, NULL, dev_addr);
	if (err) {
		dev_warn(dev, "Failed to read MAC address!\n");
		goto out;
	}

	dev_dbg(dev, "MAC address %pM\n", dev_addr);

	/* Get the station name */
	err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
				sizeof(nickbuf), &reclen, &nickbuf);
	if (err) {
		dev_err(dev, "failed to read station name\n");
		goto out;
	}
	/* Length comes from the record's own length field when present,
	 * otherwise from the record length (in 16-bit words). */
	if (nickbuf.len)
		len = min(IW_ESSID_MAX_SIZE, (int)le16_to_cpu(nickbuf.len));
	else
		len = min(IW_ESSID_MAX_SIZE, 2 * reclen);
	memcpy(priv->nick, &nickbuf.val, len);
	priv->nick[len] = '\0';

	dev_dbg(dev, "Station name \"%s\"\n", priv->nick);

	/* Get allowed channels */
	err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CHANNELLIST,
				  &priv->channel_mask);
	if (err) {
		dev_err(dev, "Failed to read channel list!\n");
		goto out;
	}

	/* Get initial AP density; out-of-range values (valid range 1..3)
	 * disable the sensitivity feature rather than failing. */
	err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE,
				  &priv->ap_density);
	if (err || priv->ap_density < 1 || priv->ap_density > 3)
		priv->has_sensitivity = 0;

	/* Get initial RTS threshold */
	err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
				  &priv->rts_thresh);
	if (err) {
		dev_err(dev, "Failed to read RTS threshold!\n");
		goto out;
	}

	/* Get initial fragmentation settings: Agere cards with MWO use
	 * a robustness setting instead of a fragmentation threshold. */
	if (priv->has_mwo)
		err = hermes_read_wordrec(hw, USER_BAP,
					  HERMES_RID_CNFMWOROBUST_AGERE,
					  &priv->mwo_robust);
	else
		err = hermes_read_wordrec(hw, USER_BAP,
					  HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
					  &priv->frag_thresh);
	if (err) {
		dev_err(dev, "Failed to read fragmentation settings!\n");
		goto out;
	}

	/* Power management setup */
	if (priv->has_pm) {
		priv->pm_on = 0;
		priv->pm_mcast = 1;
		err = hermes_read_wordrec(hw, USER_BAP,
					  HERMES_RID_CNFMAXSLEEPDURATION,
					  &priv->pm_period);
		if (err) {
			dev_err(dev, "Failed to read power management "
				"period!\n");
			goto out;
		}
		err = hermes_read_wordrec(hw, USER_BAP,
					  HERMES_RID_CNFPMHOLDOVERDURATION,
					  &priv->pm_timeout);
		if (err) {
			dev_err(dev, "Failed to read power management "
				"timeout!\n");
			goto out;
		}
	}

	/* Preamble setup */
	if (priv->has_preamble) {
		err = hermes_read_wordrec(hw, USER_BAP,
					  HERMES_RID_CNFPREAMBLE_SYMBOL,
					  &priv->preamble);
		if (err) {
			dev_err(dev, "Failed to read preamble setup\n");
			goto out;
		}
	}

	/* Retry settings */
	err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
				  &priv->short_retry_limit);
	if (err) {
		dev_err(dev, "Failed to read short retry limit\n");
		goto out;
	}

	err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
				  &priv->long_retry_limit);
	if (err) {
		dev_err(dev, "Failed to read long retry limit\n");
		goto out;
	}

	err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
				  &priv->retry_lifetime);
	if (err) {
		dev_err(dev, "Failed to read max retry lifetime\n");
		goto out;
	}

out:
	return err;
}
/* Allocate the transmit buffer (FID) on the card.
 * Can be called before netdev registration. */
int orinoco_hw_allocate_fid(struct orinoco_private *priv)
{
	struct device *dev = priv->dev;
	struct hermes *hw = &priv->hw;
	int err;

	err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid);
	if (err != -EIO || priv->nicbuf_size <= TX_NICBUF_SIZE_BUG)
		return err;

	/* Try workaround for old Symbol firmware bug: shrink the TX
	 * buffer to the largest size that firmware can allocate. */
	priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
	err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid);
	dev_warn(dev, "Firmware ALLOC bug detected "
		 "(old Symbol firmware?). Work around %s\n",
		 err ? "failed!" : "ok.");
	return err;
}
/*
 * Map a (bitrate, automatic) pair to its index in bitrate_table.
 * Bitrate is in 100 kb/s units; returns -1 for unknown combinations.
 */
int orinoco_get_bitratemode(int bitrate, int automatic)
{
	int i;

	/* Only the four 802.11b rates are representable. */
	switch (bitrate) {
	case 10:
	case 20:
	case 55:
	case 110:
		break;
	default:
		return -1;
	}

	for (i = 0; i < BITRATE_TABLE_SIZE; i++) {
		if (bitrate_table[i].bitrate == bitrate &&
		    bitrate_table[i].automatic == automatic)
			return i;
	}
	return -1;
}
/*
 * Decode a ratemode index back into a bitrate (in b/s) and its
 * automatic flag.  The index must be a valid bitrate_table entry.
 */
void orinoco_get_ratemode_cfg(int ratemode, int *bitrate, int *automatic)
{
	BUG_ON((ratemode < 0) || (ratemode >= BITRATE_TABLE_SIZE));

	/* Table stores rates in units of 100 kb/s. */
	*bitrate = 100000 * bitrate_table[ratemode].bitrate;
	*automatic = bitrate_table[ratemode].automatic;
}
/*
 * Push the driver's cached configuration (MAC address, port type,
 * channel, SSIDs, thresholds, rates, power management, encryption and
 * monitor mode) into the firmware's configuration RIDs.
 *
 * Returns 0 on success or the first hermes error encountered; a failed
 * SYSTEMSCALE write is non-fatal and just disables sensitivity control.
 */
int orinoco_hw_program_rids(struct orinoco_private *priv)
{
	struct net_device *dev = priv->ndev;
	struct wireless_dev *wdev = netdev_priv(dev);
	struct hermes *hw = &priv->hw;
	int err;
	struct hermes_idstring idbuf;

	/* Set the MAC address */
	err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
				 HERMES_BYTES_TO_RECLEN(ETH_ALEN),
				 dev->dev_addr);
	if (err) {
		printk(KERN_ERR "%s: Error %d setting MAC address\n",
		       dev->name, err);
		return err;
	}

	/* Set up the link mode */
	err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPORTTYPE,
				   priv->port_type);
	if (err) {
		printk(KERN_ERR "%s: Error %d setting port type\n",
		       dev->name, err);
		return err;
	}
	/* Set the channel/frequency; in station mode the AP dictates the
	 * channel, so only program it for other interface types. */
	if (priv->channel != 0 && priv->iw_mode != NL80211_IFTYPE_STATION) {
		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFOWNCHANNEL,
					   priv->channel);
		if (err) {
			printk(KERN_ERR "%s: Error %d setting channel %d\n",
			       dev->name, err, priv->channel);
			return err;
		}
	}

	if (priv->has_ibss) {
		u16 createibss;

		if ((strlen(priv->desired_essid) == 0) && (priv->createibss)) {
			printk(KERN_WARNING "%s: This firmware requires an "
			       "ESSID in IBSS-Ad-Hoc mode.\n", dev->name);
			/* With wvlan_cs, in this case, we would crash.
			 * hopefully, this driver will behave better...
			 * Jean II */
			createibss = 0;
		} else {
			createibss = priv->createibss;
		}

		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFCREATEIBSS,
					   createibss);
		if (err) {
			printk(KERN_ERR "%s: Error %d setting CREATEIBSS\n",
			       dev->name, err);
			return err;
		}
	}

	/* Set the desired BSSID */
	err = __orinoco_hw_set_wap(priv);
	if (err) {
		printk(KERN_ERR "%s: Error %d setting AP address\n",
		       dev->name, err);
		return err;
	}

	/* Set the desired ESSID */
	idbuf.len = cpu_to_le16(strlen(priv->desired_essid));
	memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val));
	/* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */
	err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
			HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid) + 2),
			&idbuf);
	if (err) {
		printk(KERN_ERR "%s: Error %d setting OWNSSID\n",
		       dev->name, err);
		return err;
	}
	err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
			HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid) + 2),
			&idbuf);
	if (err) {
		printk(KERN_ERR "%s: Error %d setting DESIREDSSID\n",
		       dev->name, err);
		return err;
	}

	/* Set the station name */
	idbuf.len = cpu_to_le16(strlen(priv->nick));
	memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val));
	err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
				 HERMES_BYTES_TO_RECLEN(strlen(priv->nick) + 2),
				 &idbuf);
	if (err) {
		printk(KERN_ERR "%s: Error %d setting nickname\n",
		       dev->name, err);
		return err;
	}

	/* Set AP density */
	if (priv->has_sensitivity) {
		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFSYSTEMSCALE,
					   priv->ap_density);
		if (err) {
			printk(KERN_WARNING "%s: Error %d setting SYSTEMSCALE. "
			       "Disabling sensitivity control\n",
			       dev->name, err);

			priv->has_sensitivity = 0;
		}
	}

	/* Set RTS threshold */
	err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD,
				   priv->rts_thresh);
	if (err) {
		printk(KERN_ERR "%s: Error %d setting RTS threshold\n",
		       dev->name, err);
		return err;
	}

	/* Set fragmentation threshold or MWO robustness */
	if (priv->has_mwo)
		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFMWOROBUST_AGERE,
					   priv->mwo_robust);
	else
		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
					   priv->frag_thresh);
	if (err) {
		printk(KERN_ERR "%s: Error %d setting fragmentation\n",
		       dev->name, err);
		return err;
	}

	/* Set bitrate */
	err = __orinoco_hw_set_bitrate(priv);
	if (err) {
		printk(KERN_ERR "%s: Error %d setting bitrate\n",
		       dev->name, err);
		return err;
	}

	/* Set power management */
	if (priv->has_pm) {
		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFPMENABLED,
					   priv->pm_on);
		if (err) {
			printk(KERN_ERR "%s: Error %d setting up PM\n",
			       dev->name, err);
			return err;
		}

		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFMULTICASTRECEIVE,
					   priv->pm_mcast);
		if (err) {
			printk(KERN_ERR "%s: Error %d setting up PM\n",
			       dev->name, err);
			return err;
		}
		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFMAXSLEEPDURATION,
					   priv->pm_period);
		if (err) {
			printk(KERN_ERR "%s: Error %d setting up PM\n",
			       dev->name, err);
			return err;
		}
		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFPMHOLDOVERDURATION,
					   priv->pm_timeout);
		if (err) {
			printk(KERN_ERR "%s: Error %d setting up PM\n",
			       dev->name, err);
			return err;
		}
	}

	/* Set preamble - only for Symbol so far... */
	if (priv->has_preamble) {
		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFPREAMBLE_SYMBOL,
					   priv->preamble);
		if (err) {
			printk(KERN_ERR "%s: Error %d setting preamble\n",
			       dev->name, err);
			return err;
		}
	}

	/* Set up encryption */
	if (priv->has_wep || priv->has_wpa) {
		err = __orinoco_hw_setup_enc(priv);
		if (err) {
			printk(KERN_ERR "%s: Error %d activating encryption\n",
			       dev->name, err);
			return err;
		}
	}

	if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
		/* Enable monitor mode */
		dev->type = ARPHRD_IEEE80211;
		err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
					    HERMES_TEST_MONITOR, 0, NULL);
	} else {
		/* Disable monitor mode */
		dev->type = ARPHRD_ETHER;
		err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
					    HERMES_TEST_STOP, 0, NULL);
	}
	if (err)
		return err;

	/* Reset promiscuity / multicast*/
	priv->promiscuous = 0;
	priv->mc_count = 0;

	/* Record mode change */
	wdev->iftype = priv->iw_mode;

	return 0;
}
/* Get tsc from the firmware.  @key selects one of the four default
 * keys; @tsc receives ORINOCO_SEQ_LEN bytes. */
int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
{
	struct hermes *hw = &priv->hw;
	u8 tsc_arr[4][ORINOCO_SEQ_LEN];
	int err;

	if (key < 0 || key >= 4)
		return -EINVAL;

	/* Firmware hands back the sequence counters for all four keys. */
	err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
				sizeof(tsc_arr), NULL, &tsc_arr);
	if (err)
		return err;

	memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0]));
	return 0;
}
int __orinoco_hw_set_bitrate(struct orinoco_private *priv)
{
struct hermes *hw = &priv->hw;
int ratemode = priv->bitratemode;
int err = 0;
if (ratemode >= BITRATE_TABLE_SIZE) {
printk(KERN_ERR "%s: BUG: Invalid bitrate mode %d\n",
priv->ndev->name, ratemode);
return -EINVAL;
}
switch (priv->firmware_type) {
case FIRMWARE_TYPE_AGERE:
err = hermes_write_wordrec(hw, USER_BAP,
HERMES_RID_CNFTXRATECONTROL,
bitrate_table[ratemode].agere_txratectrl);
break;
case FIRMWARE_TYPE_INTERSIL:
case FIRMWARE_TYPE_SYMBOL:
err = hermes_write_wordrec(hw, USER_BAP,
HERMES_RID_CNFTXRATECONTROL,
bitrate_table[ratemode].intersil_txratectrl);
break;
default:
BUG();
}
return err;
}
/*
 * Read the current transmit bitrate from the firmware and report it in
 * b/s via @bitrate.  Returns 0 on success, a hermes error, or -EIO if
 * the firmware reports a rate code we cannot decode.
 */
int orinoco_hw_get_act_bitrate(struct orinoco_private *priv, int *bitrate)
{
	struct hermes *hw = &priv->hw;
	u16 val;
	int err;
	int i;

	err = hermes_read_wordrec(hw, USER_BAP,
				  HERMES_RID_CURRENTTXRATE, &val);
	if (err)
		return err;

	switch (priv->firmware_type) {
	case FIRMWARE_TYPE_AGERE: /* Lucent style rate */
		/* Note : in Lucent firmware, the return value of
		 * HERMES_RID_CURRENTTXRATE is the bitrate in Mb/s,
		 * and therefore is totally different from the
		 * encoding of HERMES_RID_CNFTXRATECONTROL.
		 * Don't forget that 6Mb/s is really 5.5Mb/s */
		*bitrate = (val == 6) ? 5500000 : val * 1000000;
		return 0;
	case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */
	case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */
		/* Reverse-map the rate control code through the table. */
		for (i = 0; i < BITRATE_TABLE_SIZE; i++) {
			if (bitrate_table[i].intersil_txratectrl == val) {
				*bitrate = bitrate_table[i].bitrate * 100000;
				return 0;
			}
		}
		printk(KERN_INFO "%s: Unable to determine current bitrate (0x%04hx)\n",
		       priv->ndev->name, val);
		return -EIO;
	default:
		BUG();
	}
	return 0;
}
/* Set fixed AP address.
 * Intersil uses a roaming-mode word (2 = pinned to desired_bssid,
 * 1 = firmware roams freely); Symbol takes the BSSID directly; Agere
 * has no equivalent and silently succeeds. */
int __orinoco_hw_set_wap(struct orinoco_private *priv)
{
	struct hermes *hw = &priv->hw;

	switch (priv->firmware_type) {
	case FIRMWARE_TYPE_INTERSIL:
		return hermes_write_wordrec(hw, USER_BAP,
					    HERMES_RID_CNFROAMINGMODE,
					    priv->bssid_fixed ? 2 : 1);
	case FIRMWARE_TYPE_SYMBOL:
		return HERMES_WRITE_RECORD(hw, USER_BAP,
					   HERMES_RID_CNFMANDATORYBSSID_SYMBOL,
					   &priv->desired_bssid);
	case FIRMWARE_TYPE_AGERE:
	default:
		/* not supported */
		return 0;
	}
}
/* Change the WEP keys and/or the current keys.  Can be called
 * either from __orinoco_hw_setup_enc() or directly from
 * orinoco_ioctl_setiwencode().  In the later case the association
 * with the AP is not broken (if the firmware can handle it),
 * which is needed for 802.1x implementations.
 *
 * Returns 0 on success, a hermes write error, or -E2BIG if the TX
 * key is longer than LARGE_KEY_SIZE (Intersil/Symbol path only).
 */
int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv)
{
	struct hermes *hw = &priv->hw;
	int err = 0;
	int i;

	switch (priv->firmware_type) {
	case FIRMWARE_TYPE_AGERE:
	{
		/* Agere takes all four keys in one record, each with its
		 * own length rounded up to SMALL/LARGE_KEY_SIZE. */
		struct orinoco_key keys[ORINOCO_MAX_KEYS];

		memset(&keys, 0, sizeof(keys));
		for (i = 0; i < ORINOCO_MAX_KEYS; i++) {
			int len = min(priv->keys[i].key_len,
				      ORINOCO_MAX_KEY_SIZE);
			memcpy(&keys[i].data, priv->keys[i].key, len);
			if (len > SMALL_KEY_SIZE)
				keys[i].len = cpu_to_le16(LARGE_KEY_SIZE);
			else if (len > 0)
				keys[i].len = cpu_to_le16(SMALL_KEY_SIZE);
			else
				keys[i].len = cpu_to_le16(0);
		}

		err = HERMES_WRITE_RECORD(hw, USER_BAP,
					  HERMES_RID_CNFWEPKEYS_AGERE,
					  &keys);
		if (err)
			return err;
		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFTXKEY_AGERE,
					   priv->tx_key);
		if (err)
			return err;
		break;
	}
	case FIRMWARE_TYPE_INTERSIL:
	case FIRMWARE_TYPE_SYMBOL:
	{
		int keylen;

		/* Force uniform key length to work around
		 * firmware bugs.  The common length is derived from
		 * the TX key and applied to all four keys. */
		keylen = priv->keys[priv->tx_key].key_len;
		if (keylen > LARGE_KEY_SIZE) {
			printk(KERN_ERR "%s: BUG: Key %d has oversize length %d.\n",
			       priv->ndev->name, priv->tx_key, keylen);
			return -E2BIG;
		} else if (keylen > SMALL_KEY_SIZE)
			keylen = LARGE_KEY_SIZE;
		else if (keylen > 0)
			keylen = SMALL_KEY_SIZE;
		else
			keylen = 0;

		/* Write all 4 keys, one record each, zero-padded to keylen. */
		for (i = 0; i < ORINOCO_MAX_KEYS; i++) {
			u8 key[LARGE_KEY_SIZE] = { 0 };

			memcpy(key, priv->keys[i].key,
			       priv->keys[i].key_len);

			err = hw->ops->write_ltv(hw, USER_BAP,
						 HERMES_RID_CNFDEFAULTKEY0 + i,
						 HERMES_BYTES_TO_RECLEN(keylen),
						 key);
			if (err)
				return err;
		}

		/* Write the index of the key used in transmission */
		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFWEPDEFAULTKEYID,
					   priv->tx_key);
		if (err)
			return err;
	}
		break;
	}

	return 0;
}
/*
 * Program the firmware's encryption state (WEP keys, authentication
 * mode and privacy/WPA flags) from the settings cached in priv.
 *
 * Returns 0 on success or a negative hermes error code.
 *
 * Fixes over the previous version: the return value of
 * __orinoco_hw_setup_wepkeys() was silently discarded, and in the
 * Agere branch the error from the CNFAUTHENTICATION_AGERE write was a
 * dead store (immediately overwritten by the next write), so failures
 * to program keys or authentication mode were never reported.
 */
int __orinoco_hw_setup_enc(struct orinoco_private *priv)
{
	struct hermes *hw = &priv->hw;
	int err = 0;
	int master_wep_flag;
	int auth_flag;
	int enc_flag;

	/* Setup WEP keys */
	if (priv->encode_alg == ORINOCO_ALG_WEP) {
		err = __orinoco_hw_setup_wepkeys(priv);
		if (err)
			return err;
	}

	if (priv->wep_restrict)
		auth_flag = HERMES_AUTH_SHARED_KEY;
	else
		auth_flag = HERMES_AUTH_OPEN;

	/* 0 = no encryption, 1 = WEP, 2 = WPA */
	if (priv->wpa_enabled)
		enc_flag = 2;
	else if (priv->encode_alg == ORINOCO_ALG_WEP)
		enc_flag = 1;
	else
		enc_flag = 0;

	switch (priv->firmware_type) {
	case FIRMWARE_TYPE_AGERE: /* Agere style WEP */
		if (priv->encode_alg == ORINOCO_ALG_WEP) {
			/* Enable the shared-key authentication. */
			err = hermes_write_wordrec(hw, USER_BAP,
					HERMES_RID_CNFAUTHENTICATION_AGERE,
					auth_flag);
			if (err)
				return err;
		}
		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFWEPENABLED_AGERE,
					   enc_flag);
		if (err)
			return err;

		if (priv->has_wpa) {
			/* Set WPA key management */
			err = hermes_write_wordrec(hw, USER_BAP,
				HERMES_RID_CNFSETWPAAUTHMGMTSUITE_AGERE,
				priv->key_mgmt);
			if (err)
				return err;
		}
		break;

	case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */
	case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */
		if (priv->encode_alg == ORINOCO_ALG_WEP) {
			/* Symbol cards always drop unencrypted frames. */
			if (priv->wep_restrict ||
			    (priv->firmware_type == FIRMWARE_TYPE_SYMBOL))
				master_wep_flag = HERMES_WEP_PRIVACY_INVOKED |
						  HERMES_WEP_EXCL_UNENCRYPTED;
			else
				master_wep_flag = HERMES_WEP_PRIVACY_INVOKED;

			err = hermes_write_wordrec(hw, USER_BAP,
						   HERMES_RID_CNFAUTHENTICATION,
						   auth_flag);
			if (err)
				return err;
		} else
			master_wep_flag = 0;

		if (priv->iw_mode == NL80211_IFTYPE_MONITOR)
			master_wep_flag |= HERMES_WEP_HOST_DECRYPT;

		/* Master WEP setting : on/off */
		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFWEPFLAGS_INTERSIL,
					   master_wep_flag);
		if (err)
			return err;

		break;
	}

	return 0;
}
/* Program a default TKIP key into Agere firmware.
 *
 * key must be 32 bytes, including the tx and rx MIC keys
 * (TKIP key, then TX MIC, then RX MIC - copied as one block below).
 * rsc must be NULL or up to 8 bytes
 * tsc must be NULL or up to 8 bytes
 *
 * Waits up to 100ms for the TX queue to drain before installing the
 * key; the record is written even on timeout, and the timeout error
 * takes precedence in the return value.
 */
int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
			      int set_tx, u8 *key, u8 *rsc, size_t rsc_len,
			      u8 *tsc, size_t tsc_len)
{
	/* On-wire layout of the CNFADDDEFAULTTKIPKEY record. */
	struct {
		__le16 idx;
		u8 rsc[ORINOCO_SEQ_LEN];
		u8 key[TKIP_KEYLEN];
		u8 tx_mic[MIC_KEYLEN];
		u8 rx_mic[MIC_KEYLEN];
		u8 tsc[ORINOCO_SEQ_LEN];
	} __packed buf;
	struct hermes *hw = &priv->hw;
	int ret;
	int err;
	int k;
	u16 xmitting;

	key_idx &= 0x3;

	/* Bit 15 marks this as the transmit key. */
	if (set_tx)
		key_idx |= 0x8000;

	buf.idx = cpu_to_le16(key_idx);
	/* Single copy fills key + tx_mic + rx_mic from the 32-byte input. */
	memcpy(buf.key, key,
	       sizeof(buf.key) + sizeof(buf.tx_mic) + sizeof(buf.rx_mic));

	if (rsc_len > sizeof(buf.rsc))
		rsc_len = sizeof(buf.rsc);

	if (tsc_len > sizeof(buf.tsc))
		tsc_len = sizeof(buf.tsc);

	memset(buf.rsc, 0, sizeof(buf.rsc));
	memset(buf.tsc, 0, sizeof(buf.tsc));

	if (rsc != NULL)
		memcpy(buf.rsc, rsc, rsc_len);

	if (tsc != NULL)
		memcpy(buf.tsc, tsc, tsc_len);
	else
		/* No TSC supplied: seed a non-zero starting sequence. */
		buf.tsc[4] = 0x10;

	/* Wait up to 100ms for tx queue to empty */
	for (k = 100; k > 0; k--) {
		udelay(1000);
		ret = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_TXQUEUEEMPTY,
					  &xmitting);
		if (ret || !xmitting)
			break;
	}

	if (k == 0)
		ret = -ETIMEDOUT;

	err = HERMES_WRITE_RECORD(hw, USER_BAP,
				  HERMES_RID_CNFADDDEFAULTTKIPKEY_AGERE,
				  &buf);

	return ret ? ret : err;
}
/* Remove a default TKIP key from Agere firmware; a failure is only
 * logged, and the error is passed back to the caller. */
int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx)
{
	int err;

	err = hermes_write_wordrec(&priv->hw, USER_BAP,
				   HERMES_RID_CNFREMDEFAULTTKIPKEY_AGERE,
				   key_idx);
	if (err)
		printk(KERN_WARNING "%s: Error %d clearing TKIP key %d\n",
		       priv->ndev->name, err, key_idx);
	return err;
}
/*
 * Synchronize the card's promiscuous flag and multicast filter with
 * the requested state, updating priv->promiscuous / priv->mc_count
 * only on successful writes.  Returns the last hermes error (0 if all
 * writes succeeded or nothing needed updating).
 */
int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
				    struct net_device *dev,
				    int mc_count, int promisc)
{
	struct hermes *hw = &priv->hw;
	int err = 0;

	if (promisc != priv->promiscuous) {
		err = hermes_write_wordrec(hw, USER_BAP,
					   HERMES_RID_CNFPROMISCUOUSMODE,
					   promisc);
		if (err) {
			/* NOTE(review): message says "to 1" even when
			 * promisc is being cleared - confirm intent. */
			printk(KERN_ERR "%s: Error %d setting PROMISCUOUSMODE to 1.\n",
			       priv->ndev->name, err);
		} else
			priv->promiscuous = promisc;
	}

	/* If we're not in promiscuous mode, then we need to set the
	 * group address if either we want to multicast, or if we were
	 * multicasting and want to stop */
	if (!promisc && (mc_count || priv->mc_count)) {
		struct netdev_hw_addr *ha;
		struct hermes_multicast mclist;
		int i = 0;

		/* Copy at most mc_count addresses from the device list. */
		netdev_for_each_mc_addr(ha, dev) {
			if (i == mc_count)
				break;
			memcpy(mclist.addr[i++], ha->addr, ETH_ALEN);
		}

		err = hw->ops->write_ltv(hw, USER_BAP,
				   HERMES_RID_CNFGROUPADDRESSES,
				   HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
				   &mclist);
		if (err)
			printk(KERN_ERR "%s: Error %d setting multicast list.\n",
			       priv->ndev->name, err);
		else
			priv->mc_count = mc_count;
	}
	return err;
}
/* Read the current or configured ESSID from the firmware into @buf
 * and set *active to whether an ESSID is configured.
 * Return : < 0 -> error code ; >= 0 -> length */
int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
			 char buf[IW_ESSID_MAX_SIZE + 1])
{
	struct hermes *hw = &priv->hw;
	int err = 0;
	struct hermes_idstring essidbuf;
	char *p = (char *)(&essidbuf.val);
	int len;
	unsigned long flags;

	if (orinoco_lock(priv, &flags) != 0)
		return -EBUSY;

	if (strlen(priv->desired_essid) > 0) {
		/* We read the desired SSID from the hardware rather
		   than from priv->desired_essid, just in case the
		   firmware is allowed to change it on us. I'm not
		   sure about this */
		/* My guess is that the OWNSSID should always be whatever
		 * we set to the card, whereas CURRENT_SSID is the one that
		 * may change... - Jean II */
		u16 rid;

		*active = 1;

		/* port_type 3 (ad-hoc) uses OWNSSID; otherwise the
		 * configured DESIREDSSID record. */
		rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID :
			HERMES_RID_CNFDESIREDSSID;

		err = hw->ops->read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
					NULL, &essidbuf);
		if (err)
			goto fail_unlock;
	} else {
		*active = 0;

		err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
					sizeof(essidbuf), NULL, &essidbuf);
		if (err)
			goto fail_unlock;
	}

	len = le16_to_cpu(essidbuf.len);
	BUG_ON(len > IW_ESSID_MAX_SIZE);

	memset(buf, 0, IW_ESSID_MAX_SIZE);
	memcpy(buf, p, len);
	err = len;

 fail_unlock:
	orinoco_unlock(priv, &flags);

	return err;
}
/*
 * Return the current operating frequency in MHz (via the 802.11b DSSS
 * channel-to-frequency mapping), or a negative error.  A channel of 0
 * or out of range is reported as -EBUSY.
 */
int orinoco_hw_get_freq(struct orinoco_private *priv)
{
	struct hermes *hw = &priv->hw;
	int err = 0;
	u16 channel;
	int freq = 0;
	unsigned long flags;

	if (orinoco_lock(priv, &flags) != 0)
		return -EBUSY;

	err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CURRENTCHANNEL,
				  &channel);
	if (err)
		goto out;

	/* Intersil firmware 1.3.5 returns 0 when the interface is down */
	if (channel == 0) {
		err = -EBUSY;
		goto out;
	}

	if ((channel < 1) || (channel > NUM_CHANNELS)) {
		printk(KERN_WARNING "%s: Channel out of range (%d)!\n",
		       priv->ndev->name, channel);
		err = -EBUSY;
		goto out;

	}
	freq = ieee80211_dsss_chan_to_freq(channel);

 out:
	orinoco_unlock(priv, &flags);

	/* Normalize any positive value into -EBUSY so the return is
	 * unambiguously either an error code or a frequency. */
	if (err > 0)
		err = -EBUSY;
	return err ? err : freq;
}
/* Fetch the firmware's list of supported data rates.
 * @numrates: set to the total number of rates the firmware reports
 *            (may exceed @max; only @max entries are stored in @rates).
 * @rates:    filled with up to @max rate values converted to bps.
 * Returns 0 on success or a negative/firmware error from read_ltv. */
int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
			       int *numrates, s32 *rates, int max)
{
	struct hermes *hw = &priv->hw;
	struct hermes_idstring list;
	unsigned char *p = (unsigned char *)&list.val;
	int err = 0;
	int num;
	int i;
	unsigned long flags;

	if (orinoco_lock(priv, &flags) != 0)
		return -EBUSY;

	err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
				sizeof(list), NULL, &list);
	orinoco_unlock(priv, &flags);

	if (err)
		return err;

	num = le16_to_cpu(list.len);
	*numrates = num;
	num = min(num, max);

	for (i = 0; i < num; i++)
		rates[i] = (p[i] & 0x7f) * 500000; /* convert to bps */

	return 0;
}
/* Kick off a hardware scan.  The exact mechanism depends on the
 * firmware flavour (Symbol / Intersil / Agere); @ssid is only honoured
 * by Agere firmware, which supports directed scans.
 * Returns 0 on success or a negative error code. */
int orinoco_hw_trigger_scan(struct orinoco_private *priv,
			    const struct cfg80211_ssid *ssid)
{
	struct net_device *dev = priv->ndev;
	struct hermes *hw = &priv->hw;
	unsigned long flags;
	int err = 0;

	if (orinoco_lock(priv, &flags) != 0)
		return -EBUSY;

	/* Scanning with port 0 disabled would fail */
	if (!netif_running(dev)) {
		err = -ENETDOWN;
		goto out;
	}

	/* In monitor mode, the scan results are always empty.
	 * Probe responses are passed to the driver as received
	 * frames and could be processed in software. */
	if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (priv->has_hostscan) {
		switch (priv->firmware_type) {
		case FIRMWARE_TYPE_SYMBOL:
			/* Symbol: one-shot broadcast scan via a single
			 * word record. */
			err = hermes_write_wordrec(hw, USER_BAP,
						   HERMES_RID_CNFHOSTSCAN_SYMBOL,
						   HERMES_HOSTSCAN_SYMBOL_ONCE |
						   HERMES_HOSTSCAN_SYMBOL_BCAST);
			break;
		case FIRMWARE_TYPE_INTERSIL: {
			/* Intersil: channel mask, rate, ESSID triple. */
			__le16 req[3];

			req[0] = cpu_to_le16(0x3fff);	/* All channels */
			req[1] = cpu_to_le16(0x0001);	/* rate 1 Mbps */
			req[2] = 0;			/* Any ESSID */
			err = HERMES_WRITE_RECORD(hw, USER_BAP,
						  HERMES_RID_CNFHOSTSCAN, &req);
			break;
		}
		case FIRMWARE_TYPE_AGERE:
			/* Agere: optionally program a directed-scan SSID
			 * first, then trigger the scan via an inquiry. */
			if (ssid->ssid_len > 0) {
				struct hermes_idstring idbuf;
				size_t len = ssid->ssid_len;
				idbuf.len = cpu_to_le16(len);
				memcpy(idbuf.val, ssid->ssid, len);

				err = hw->ops->write_ltv(hw, USER_BAP,
					       HERMES_RID_CNFSCANSSID_AGERE,
					       HERMES_BYTES_TO_RECLEN(len + 2),
					       &idbuf);
			} else
				err = hermes_write_wordrec(hw, USER_BAP,
						   HERMES_RID_CNFSCANSSID_AGERE,
						   0);	/* Any ESSID */
			if (err)
				break;

			if (priv->has_ext_scan) {
				/* Extended scan: restrict to the 2.4GHz
				 * channel set, then request channel info. */
				err = hermes_write_wordrec(hw, USER_BAP,
						HERMES_RID_CNFSCANCHANNELS2GHZ,
						0x7FFF);
				if (err)
					goto out;

				err = hermes_inquire(hw,
						     HERMES_INQ_CHANNELINFO);
			} else
				err = hermes_inquire(hw, HERMES_INQ_SCAN);

			break;
		}
	} else
		/* No host-scan support: fall back to a plain inquiry. */
		err = hermes_inquire(hw, HERMES_INQ_SCAN);

 out:
	orinoco_unlock(priv, &flags);

	return err;
}
/* Disassociate from node with BSSID addr */
int orinoco_hw_disassociate(struct orinoco_private *priv,
			    u8 *addr, u16 reason_code)
{
	struct hermes *hw = &priv->hw;
	struct {
		u8 addr[ETH_ALEN];
		__le16 reason_code;
	} __packed req;

	/* Currently only supported by WPA enabled Agere fw */
	if (!priv->has_wpa)
		return -EOPNOTSUPP;

	/* Build the on-wire request: target BSSID + LE reason code. */
	req.reason_code = cpu_to_le16(reason_code);
	memcpy(req.addr, addr, ETH_ALEN);

	return HERMES_WRITE_RECORD(hw, USER_BAP,
				   HERMES_RID_CNFDISASSOCIATE,
				   &req);
}
/* Read the BSSID the firmware is currently associated with into @addr
 * (ETH_ALEN bytes).  Returns the read_ltv status. */
int orinoco_hw_get_current_bssid(struct orinoco_private *priv,
				 u8 *addr)
{
	struct hermes *hw = &priv->hw;

	return hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
				 ETH_ALEN, NULL, addr);
}
| gpl-2.0 |
Kali-/lge-kernel-msm7x30 | arch/sparc/kernel/sparc_ksyms_32.c | 9185 | 1127 | /*
* arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
*/
#include <linux/module.h>
#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/head.h>
#include <asm/dma.h>
/* NOTE(review): this struct appears unused in the visible code --
 * presumably kept for historical ksyms reasons; confirm before removal. */
struct poll {
	int fd;
	short events;
	short revents;
};

/* from entry.S */
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);

/* from head_32.S */
EXPORT_SYMBOL(__ret_efault);
EXPORT_SYMBOL(empty_zero_page);

/* Defined using magic */
/* Boot-time-fixed-up (BTFIXUP) entry points: which implementation each
 * symbol resolves to is patched at boot depending on the CPU/MMU type. */
#ifndef CONFIG_SMP
EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32));
#else
EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id));
#endif
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_lockarea));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_one));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_sgl));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_one));
EXPORT_SYMBOL(BTFIXUP_CALL(pgprot_noncached));

/* Exporting a symbol from /init/main.c */
EXPORT_SYMBOL(saved_command_line);
| gpl-2.0 |
jekkos/android_kernel_htc_msm8960 | drivers/uwb/drp-avail.c | 14817 | 8822 | /*
* Ultra Wide Band
* DRP availability management
*
* Copyright (C) 2005-2006 Intel Corporation
* Reinette Chatre <reinette.chatre@intel.com>
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* Manage DRP Availability (the MAS available for DRP
* reservations). Thus:
*
* - Handle DRP Availability Change notifications
*
* - Allow the reservation manager to indicate MAS reserved/released
* by local (owned by/targeted at the radio controller)
* reservations.
*
* - Based on the two sources above, generate a DRP Availability IE to
* be included in the beacon.
*
* See also the documentation for struct uwb_drp_avail.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitmap.h>
#include "uwb-internal.h"
/**
 * uwb_drp_avail_init - initialize an RC's MAS availability
 *
 * All MAS are available initially. The RC will inform us which
 * slots are used for the BP (it may change in size).
 */
void uwb_drp_avail_init(struct uwb_rc *rc)
{
	/* Start with every MAS marked available in all three views
	 * (global, local, pending). */
	bitmap_fill(rc->drp_avail.global, UWB_NUM_MAS);
	bitmap_fill(rc->drp_avail.local, UWB_NUM_MAS);
	bitmap_fill(rc->drp_avail.pending, UWB_NUM_MAS);
}
/*
 * Determine MAS available for new local reservations.
 *
 * avail = global & local & pending
 */
void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail)
{
	bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
	bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS);
}
/**
 * uwb_drp_avail_reserve_pending - reserve MAS for a new reservation
 * @rc: the radio controller
 * @mas: the MAS to reserve
 *
 * Returns 0 on success, or -EBUSY if the MAS requested aren't available.
 */
int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas)
{
	struct uwb_mas_bm avail;

	/* All requested MAS must currently be free in the combined
	 * (global & local & pending) availability view. */
	uwb_drp_available(rc, &avail);
	if (!bitmap_subset(mas->bm, avail.bm, UWB_NUM_MAS))
		return -EBUSY;

	/* Clearing bits in 'pending' marks them as tentatively taken. */
	bitmap_andnot(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
	return 0;
}
/**
 * uwb_drp_avail_reserve - reserve MAS for an established reservation
 * @rc: the radio controller
 * @mas: the MAS to reserve
 *
 * Moves the MAS from "pending" to "local" (set back in pending, cleared
 * in local) and invalidates the cached DRP Availability IE so it gets
 * regenerated.
 */
void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas)
{
	bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
	bitmap_andnot(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
	rc->drp_avail.ie_valid = false;
}
/**
 * uwb_drp_avail_release - release MAS from a pending or established reservation
 * @rc: the radio controller
 * @mas: the MAS to release
 *
 * Marks the MAS free again in both the local and pending views,
 * invalidates the cached IE, and notifies the reservation manager that
 * availability changed.
 */
void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas)
{
	bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
	bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
	rc->drp_avail.ie_valid = false;
	uwb_rsv_handle_drp_avail_change(rc);
}
/**
 * uwb_drp_avail_ie_update - update the DRP Availability IE
 * @rc: the radio controller
 *
 * avail = global & local
 *
 * Regenerates the IE payload (header + little-endian MAS bitmap) and
 * marks it valid for inclusion in the beacon.
 */
void uwb_drp_avail_ie_update(struct uwb_rc *rc)
{
	struct uwb_mas_bm avail;

	bitmap_and(avail.bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
	rc->drp_avail.ie.hdr.element_id = UWB_IE_DRP_AVAILABILITY;
	rc->drp_avail.ie.hdr.length = UWB_NUM_MAS / 8;
	uwb_mas_bm_copy_le(rc->drp_avail.ie.bmp, &avail);
	rc->drp_avail.ie_valid = true;
}
/**
 * Create an unsigned long from a buffer containing a byte stream.
 *
 * @array: pointer to buffer
 * @itr: index of buffer from where we start
 * @len: number of bytes to consume; must be <= sizeof(unsigned long)
 *       (BUG if not).  When @len is smaller, the result is shifted up
 *       so the unused low-order bytes are zero padding.
 *
 * The bytes are assembled least-significant first: array[itr] ends up
 * in the lowest consumed byte of the result.
 */
static
unsigned long get_val(u8 *array, size_t itr, size_t len)
{
	unsigned long val = 0;
	size_t i;

	BUG_ON(len > sizeof(val));

	/* Little-endian assembly: byte i goes into bit position 8*i. */
	for (i = 0; i < len; i++)
		val |= (unsigned long)array[itr + i] << (8 * i);

	val <<= 8 * (sizeof(val) - len); /* padding */
	return val;
}
/**
* Initialize bitmap from data buffer.
*
* The bitmap to be converted could come from a IE, for example a
* DRP Availability IE.
* From ECMA-368 1.0 [16.8.7]: "
* octets: 1 1 N * (0 to 32)
* Element ID Length (=N) DRP Availability Bitmap
*
* The DRP Availability Bitmap field is up to 256 bits long, one
* bit for each MAS in the superframe, where the least-significant
* bit of the field corresponds to the first MAS in the superframe
* and successive bits correspond to successive MASs."
*
* The DRP Availability bitmap is in octets from 0 to 32, so octet
* 32 contains bits for MAS 1-8, etc. If the bitmap is smaller than 32
* octets, the bits in octets not included at the end of the bitmap are
* treated as zero. In this case (when the bitmap is smaller than 32
* octets) the MAS represented range from MAS 1 to MAS (size of bitmap)
* with the last octet still containing bits for MAS 1-8, etc.
*
* For example:
* F00F0102 03040506 0708090A 0B0C0D0E 0F010203
* ^^^^
* ||||
* ||||
* |||\LSB of byte is MAS 9
* ||\MSB of byte is MAS 16
* |\LSB of first byte is MAS 1
* \ MSB of byte is MAS 8
*
* An example of this encoding can be found in ECMA-368 Annex-D [Table D.11]
*
* The resulting bitmap will have the following mapping:
* bit position 0 == MAS 1
* bit position 1 == MAS 2
* ...
* bit position (UWB_NUM_MAS - 1) == MAS UWB_NUM_MAS
*
* @bmp_itr: pointer to bitmap (can be declared with DECLARE_BITMAP)
* @buffer: pointer to buffer containing bitmap data in big endian
* format (MSB first)
* @buffer_size:number of bytes with which bitmap should be initialized
*/
static
void buffer_to_bmp(unsigned long *bmp_itr, void *_buffer,
		   size_t buffer_size)
{
	u8 *bytes = _buffer;
	size_t pos;

	/* Consume the byte stream one unsigned-long-sized chunk at a
	 * time; the final chunk may be shorter and is zero-padded by
	 * get_val(). */
	for (pos = 0; pos < buffer_size; pos += sizeof(unsigned long)) {
		size_t chunk = (buffer_size - pos >= sizeof(unsigned long)) ?
			sizeof(unsigned long) : buffer_size - pos;
		bmp_itr[pos / sizeof(unsigned long)] =
			get_val(bytes, pos, chunk);
	}
}
/**
 * Extract DRP Availability bitmap from the notification.
 *
 * The notification that comes in contains a bitmap of (UWB_NUM_MAS / 8) bytes
 * We convert that to our internal representation.
 *
 * Returns 0 on success, -EINVAL if the event payload is too short to
 * contain a full uwb_rc_evt_drp_avail structure.
 */
static
int uwbd_evt_get_drp_avail(struct uwb_event *evt, unsigned long *bmp)
{
	struct device *dev = &evt->rc->uwb_dev.dev;
	struct uwb_rc_evt_drp_avail *drp_evt;
	int result = -EINVAL;

	/* Is there enough data to decode the event? */
	if (evt->notif.size < sizeof(*drp_evt)) {
		dev_err(dev, "DRP Availability Change: Not enough "
			"data to decode event [%zu bytes, %zu "
			"needed]\n", evt->notif.size, sizeof(*drp_evt));
		goto error;
	}
	drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp_avail, rceb);
	buffer_to_bmp(bmp, drp_evt->bmp, UWB_NUM_MAS/8);
	result = 0;
error:
	return result;
}
/**
 * Process an incoming DRP Availability notification.
 *
 * @evt: Event information (packs the actual event data, which
 *       radio controller it came to, etc).
 *
 * @returns: 0 on success (so uwbd() frees the event buffer), < 0
 *           on error.
 *
 * According to ECMA-368 1.0 [16.8.7], bits set to ONE indicate that
 * the MAS slot is available, bits set to ZERO indicate that the slot
 * is busy.
 *
 * So we clear available slots, we set used slots :)
 *
 * The notification only marks non-availability based on the BP and
 * received DRP IEs that are not for this radio controller. A copy of
 * this bitmap is needed to generate the real availability (which
 * includes local and pending reservations).
 *
 * The DRP Availability IE that this radio controller emits will need
 * to be updated.
 */
int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt)
{
	int result;
	struct uwb_rc *rc = evt->rc;
	DECLARE_BITMAP(bmp, UWB_NUM_MAS);

	result = uwbd_evt_get_drp_avail(evt, bmp);
	if (result < 0)
		return result;

	/* Update the global view under rsvs_mutex so the reservation
	 * manager sees a consistent snapshot. */
	mutex_lock(&rc->rsvs_mutex);
	bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS);
	rc->drp_avail.ie_valid = false;
	uwb_rsv_handle_drp_avail_change(rc);
	mutex_unlock(&rc->rsvs_mutex);

	uwb_rsv_sched_update(rc);

	return 0;
}
| gpl-2.0 |
varigit/VAR-SOM-AM33-Kernel-3-14 | drivers/cpufreq/cpufreq_stats.c | 226 | 10004 | /*
* drivers/cpufreq/cpufreq_stats.c
*
* Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
* (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/cputime.h>
/* Protects time_in_state accounting and transition bookkeeping. */
static spinlock_t cpufreq_stats_lock;

/* Per-CPU frequency statistics. */
struct cpufreq_stats {
	unsigned int cpu;		/* CPU this table belongs to */
	unsigned int total_trans;	/* total frequency transitions */
	unsigned long long last_time;	/* jiffies64 of last accounting */
	unsigned int max_state;		/* allocated number of states */
	unsigned int state_num;		/* distinct frequencies recorded */
	unsigned int last_index;	/* index of current frequency */
	u64 *time_in_state;		/* jiffies spent in each state */
	unsigned int *freq_table;	/* frequency of each state */
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	unsigned int *trans_table;	/* state_num x state_num transition counts */
#endif
};

static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);

/* NOTE(review): appears unused in the visible code -- confirm before
 * removing. */
struct cpufreq_stats_attribute {
	struct attribute attr;
	ssize_t(*show) (struct cpufreq_stats *, char *);
};
/* Credit the time elapsed since last_time to the currently active
 * frequency state of @cpu.  Always returns 0. */
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	/* time_in_state can be NULL if allocation failed during setup. */
	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] +=
			cur_time - stat->last_time;
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
/* sysfs show handler: total number of frequency transitions.
 * Uses the stat pointer already fetched -- the original re-looked it up
 * via per_cpu(cpufreq_stats_table, stat->cpu), which is the same object
 * by construction. */
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return 0;
	return sprintf(buf, "%d\n", stat->total_trans);
}
/* sysfs show handler: one "<freq> <clock_t time>" line per known state,
 * after bringing the accounting up to date. */
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i;
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	for (i = 0; i < stat->state_num; i++) {
		len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
			(unsigned long long)
			jiffies_64_to_clock_t(stat->time_in_state[i]));
	}
	return len;
}
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
/* sysfs show handler: from/to transition-count matrix.  Every append is
 * bounds-checked against PAGE_SIZE because a sysfs buffer is exactly
 * one page; output is truncated (returning PAGE_SIZE) when it fills. */
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;

	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	/* Header: column labels are the known frequencies. */
	len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
	len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stat->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	/* One row per source frequency; trans_table is a flattened
	 * max_state x max_state matrix. */
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;

		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stat->freq_table[i]);

		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
					stat->trans_table[i*stat->max_state+j]);
		}
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	return len;
}
/* Read-only sysfs attributes exposed under cpufreq/<policy>/stats/. */
cpufreq_freq_attr_ro(trans_table);

cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);

static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	&trans_table.attr,
#endif
	NULL
};
static struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};
/* Map a frequency to its slot in stat->freq_table, or -1 if unknown. */
static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
	int i = 0;

	while (i < stat->max_state) {
		if (stat->freq_table[i] == freq)
			return i;
		i++;
	}
	return -1;
}
/* Tear down the stats table for @policy: remove the sysfs group and
 * free the single allocation backing time_in_state/freq_table
 * (trans_table shares the same buffer, so one kfree covers all). */
static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return;

	pr_debug("%s: Free stat table\n", __func__);

	sysfs_remove_group(&policy->kobj, &stats_attr_group);
	kfree(stat->time_in_state);
	kfree(stat);
	per_cpu(cpufreq_stats_table, policy->cpu) = NULL;
}
/* Free the stats table for @cpu, taking/dropping a policy reference
 * around the teardown. */
static void cpufreq_stats_free_table(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return;

	/* Only tear down if a frequency table exists for this CPU. */
	if (cpufreq_frequency_get_table(policy->cpu))
		__cpufreq_stats_free_table(policy);

	cpufreq_cpu_put(policy);
}
/* Allocate and register the per-CPU statistics table for @policy.
 * time_in_state, freq_table and (optionally) trans_table live in one
 * contiguous allocation.  Returns 0, -EBUSY if a table already exists,
 * -EINVAL/-ENOMEM on failure. */
static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table)
{
	unsigned int i, j, count = 0, ret = 0;
	struct cpufreq_stats *stat;
	struct cpufreq_policy *current_policy;
	unsigned int alloc_size;
	unsigned int cpu = policy->cpu;

	if (per_cpu(cpufreq_stats_table, cpu))
		return -EBUSY;
	stat = kzalloc(sizeof(*stat), GFP_KERNEL);
	if ((stat) == NULL)
		return -ENOMEM;

	current_policy = cpufreq_cpu_get(cpu);
	if (current_policy == NULL) {
		ret = -EINVAL;
		goto error_get_fail;
	}

	ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
	if (ret)
		goto error_out;

	stat->cpu = cpu;
	per_cpu(cpufreq_stats_table, cpu) = stat;

	/* First pass: count valid entries to size the allocation. */
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		count++;
	}

	/* One buffer holds: count u64 times, count freqs, and (with
	 * CPU_FREQ_STAT_DETAILS) a count x count transition matrix. */
	alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	alloc_size += count * count * sizeof(int);
#endif
	stat->max_state = count;
	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stat->time_in_state) {
		ret = -ENOMEM;
		goto error_out;
	}
	stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table = stat->freq_table + count;
#endif
	/* Second pass: record each distinct frequency once. */
	j = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq_table_get_index(stat, freq) == -1)
			stat->freq_table[j++] = freq;
	}
	stat->state_num = j;
	spin_lock(&cpufreq_stats_lock);
	stat->last_time = get_jiffies_64();
	stat->last_index = freq_table_get_index(stat, policy->cur);
	spin_unlock(&cpufreq_stats_lock);
	cpufreq_cpu_put(current_policy);
	return 0;
error_out:
	cpufreq_cpu_put(current_policy);
error_get_fail:
	kfree(stat);
	per_cpu(cpufreq_stats_table, cpu) = NULL;
	return ret;
}
/* Create the stats table for @cpu if a policy and frequency table
 * already exist (i.e. the cpufreq driver registered before us). */
static void cpufreq_stats_create_table(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *table;

	/*
	 * "likely(!policy)" because normally cpufreq_stats will be registered
	 * before cpufreq driver
	 */
	policy = cpufreq_cpu_get(cpu);
	if (likely(!policy))
		return;

	table = cpufreq_frequency_get_table(policy->cpu);
	if (likely(table))
		__cpufreq_stats_create_table(policy, table);

	cpufreq_cpu_put(policy);
}
/* Migrate the stats table when a policy changes its managing CPU:
 * move the per-CPU pointer from last_cpu to the new cpu and update the
 * table's owner field. */
static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
			policy->last_cpu);

	pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
			policy->cpu, policy->last_cpu);
	per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
			policy->last_cpu);
	per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
	stat->cpu = policy->cpu;
}
/* cpufreq policy notifier: create/destroy/migrate stats tables as
 * policies come and go.  @data is the struct cpufreq_policy. */
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	int ret = 0;
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *table;
	unsigned int cpu = policy->cpu;

	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
		cpufreq_stats_update_policy_cpu(policy);
		return 0;
	}

	/* No frequency table means nothing to track for this CPU. */
	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		return 0;

	if (val == CPUFREQ_CREATE_POLICY)
		ret = __cpufreq_stats_create_table(policy, table);
	else if (val == CPUFREQ_REMOVE_POLICY)
		__cpufreq_stats_free_table(policy);

	return ret;
}
/* cpufreq transition notifier: on POSTCHANGE, account the time spent
 * in the old state and bump the transition counters. */
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_stats *stat;
	int old_index, new_index;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	stat = per_cpu(cpufreq_stats_table, freq->cpu);
	if (!stat)
		return 0;

	old_index = stat->last_index;
	new_index = freq_table_get_index(stat, freq->new);

	/* We can't do stat->time_in_state[-1]= .. */
	if (old_index == -1 || new_index == -1)
		return 0;

	cpufreq_stats_update(freq->cpu);

	if (old_index == new_index)
		return 0;

	spin_lock(&cpufreq_stats_lock);
	stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
	stat->total_trans++;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
/* Notifier registrations: policy lifecycle and frequency transitions. */
static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_stat_notifier_policy
};

static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_stat_notifier_trans
};
/* Module init: register both notifiers and build tables for CPUs that
 * are already online.  On transition-notifier failure, fully unwind
 * (unregister the policy notifier and free any created tables). */
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);
	ret = cpufreq_register_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	for_each_online_cpu(cpu)
		cpufreq_stats_create_table(cpu);

	ret = cpufreq_register_notifier(&notifier_trans_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		for_each_online_cpu(cpu)
			cpufreq_stats_free_table(cpu);
		return ret;
	}

	return 0;
}
/* Module exit: unregister notifiers first so no new events arrive,
 * then free every online CPU's table. */
static void __exit cpufreq_stats_exit(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	for_each_online_cpu(cpu)
		cpufreq_stats_free_table(cpu);
}
MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
"through sysfs filesystem");
MODULE_LICENSE("GPL");
module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);
| gpl-2.0 |
turl/zeppelin_kernel | arch/sh/mm/fault_64.c | 226 | 8021 | /*
* The SH64 TLB miss.
*
* Original code from fault.c
* Copyright (C) 2000, 2001 Paolo Alberelli
*
* Fast PTE->TLB refill path
* Copyright (C) 2003 Richard.Curnow@superh.com
*
* IMPORTANT NOTES :
* The do_fast_page_fault function is called from a context in entry.S
* where very few registers have been saved. In particular, the code in
* this file must be compiled not to use ANY caller-save registers that
* are not part of the restricted save set. Also, it means that code in
* this file must not make calls to functions elsewhere in the kernel, or
* else the excepting context will see corruption in its caller-save
* registers. Plus, the entry.S save area is non-reentrant, so this code
* has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
* on any exception.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <cpu/registers.h>
/* Callable from fault.c, so not static */
/* Load the PTE for @address into the next ITLB/DTLB slot.
 * Runs in the restricted entry.S context described at the top of this
 * file: no calls out, restricted register set, SR.BL==1. */
inline void __do_tlb_refill(unsigned long address,
			    unsigned long long is_text_not_data, pte_t *pte)
{
	unsigned long long ptel;
	unsigned long long pteh=0;
	struct tlb_info *tlbp;
	unsigned long long next;

	/* Get PTEL first */
	ptel = pte_val(*pte);

	/*
	 * Set PTEH register
	 */
	pteh = address & MMU_VPN_MASK;

	/* Sign extend based on neff. */
#if (NEFF == 32)
	/* Faster sign extension */
	pteh = (unsigned long long)(signed long long)(signed long)pteh;
#else
	/* General case */
	pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh;
#endif

	/* Set the ASID. */
	pteh |= get_asid() << PTEH_ASID_SHIFT;
	pteh |= PTEH_VALID;

	/* Set PTEL register, set_pte has performed the sign extension */
	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */

	/* Pick the ITLB for text faults, the DTLB for data faults. */
	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
	next = tlbp->next;
	__flush_tlb_slot(next);
	/* PTEL must be written before PTEH validates the entry. */
	asm volatile ("putcfg %0,1,%2\n\n\t"
		      "putcfg %0,0,%1\n"
		      :  : "r" (next), "r" (pteh), "r" (ptel) );

	/* Round-robin replacement within the configured slot range. */
	next += TLB_STEP;
	if (next > tlbp->last) next = tlbp->first;
	tlbp->next = next;
}
/* Fast-path refill for faults in the vmalloc/ioremap region, which is
 * mapped via the kernel page tables (pgd_offset_k).  Returns 1 if the
 * TLB was refilled, 0 to punt to the general fault handler. */
static int handle_vmalloc_fault(struct mm_struct *mm,
				unsigned long protection_flags,
				unsigned long long textaccess,
				unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	/* Was erroneously declared "static pte_t *pte;": a static local
	 * serves no purpose here and makes the function non-reentrant
	 * (shared across CPUs/invocations).  Use an ordinary auto. */
	pte_t *pte;
	pte_t entry;

	dir = pgd_offset_k(address);

	pud = pud_offset(dir, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;

	if (pte_none(entry) || !pte_present(entry))
		return 0;

	/* Punt unless the PTE grants every permission this fault needs. */
	if ((pte_val(entry) & protection_flags) != protection_flags)
		return 0;

	__do_tlb_refill(address, textaccess, pte);

	return 1;
}
/* Fast-path refill for faults on user addresses: walk @mm's page
 * tables and refill the TLB if a sufficiently-permissive PTE exists.
 * Returns 1 on refill, 0 to punt to the general fault handler. */
static int handle_tlbmiss(struct mm_struct *mm,
			  unsigned long long protection_flags,
			  unsigned long long textaccess,
			  unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/* NB. The PGD currently only contains a single entry - there is no
	   page table tree stored for the top half of the address space since
	   virtual pages in that region should never be mapped in user mode.
	   (In kernel mode, the only things in that region are the 512Mb super
	   page (locked in), and vmalloc (modules) + I/O device pages (handled
	   by handle_vmalloc_fault), so no PGD for the upper half is required
	   by kernel mode either).

	   See how mm->pgd is allocated and initialised in pgd_alloc to see why
	   the next test is necessary. - RPC */
	if (address >= (unsigned long) TASK_SIZE)
		/* upper half - never has page table entries. */
		return 0;

	dir = pgd_offset(mm, address);
	/* The original repeated "if (!pgd_present(*dir)) return 0;" a
	 * second time immediately after this check; the duplicate was
	 * dead code and has been removed. */
	if (pgd_none(*dir) || !pgd_present(*dir))
		return 0;

	pud = pud_offset(dir, address);
	if (pud_none(*pud) || !pud_present(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || !pmd_present(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;

	if (pte_none(entry) || !pte_present(entry))
		return 0;

	/*
	 * If the page doesn't have sufficient protection bits set to
	 * service the kind of fault being handled, there's not much
	 * point doing the TLB refill.  Punt the fault to the general
	 * handler.
	 */
	if ((pte_val(entry) & protection_flags) != protection_flags)
		return 0;

	__do_tlb_refill(address, textaccess, pte);

	return 1;
}
/*
 * Put all this information into one structure so that everything is just
 * arithmetic relative to a single base address. This reduces the number
 * of movi/shori pairs needed just to load addresses of static data.
 */
struct expevt_lookup {
	unsigned short protection_flags[8];
	unsigned char  is_text_access[8];
	unsigned char  is_write_access[8];
};

/* PTE hardware protection bits. */
#define PRU (1<<9)
#define PRW (1<<8)
#define PRX (1<<7)
#define PRR (1<<6)

/* NOTE(review): DIRTY and YOUNG appear unused in the visible code. */
#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED)
#define YOUNG (_PAGE_ACCESSED)

/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
   the fault happened in user mode or privileged mode. */
static struct expevt_lookup expevt_lookup_table = {
	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
	.is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
};
/*
This routine handles page faults that can be serviced just by refilling a
TLB entry from an existing page table entry. (This case represents a very
large majority of page faults.) Return 1 if the fault was successfully
handled. Return 0 if the fault could not be handled. (This leads into the
general fault handling in fault.c which deals with mapping file-backed
pages, stack growth, segmentation faults, swapping etc etc)
*/
/* Entry point from entry.S (see the restrictions in the file header:
 * restricted register set, no external calls beyond this file,
 * interrupts blocked).  @ssr_md is nonzero for privileged-mode faults. */
asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
				  unsigned long long expevt,
				  unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long long textaccess;
	unsigned long long protection_flags;
	unsigned long long index;
	unsigned long long expevt4;

	/* The next few lines implement a way of hashing EXPEVT into a
	 * small array index which can be used to lookup parameters
	 * specific to the type of TLBMISS being handled.
	 *
	 * Note:
	 *	ITLBMISS has EXPEVT==0xa40
	 *	RTLBMISS has EXPEVT==0x040
	 *	WTLBMISS has EXPEVT==0x060
	 */
	expevt4 = (expevt >> 4);
	/* TODO : xor ssr_md into this expression too. Then we can check
	 * that PRU is set when it needs to be. */
	index = expevt4 ^ (expevt4 >> 5);
	index &= 7;
	protection_flags = expevt_lookup_table.protection_flags[index];
	textaccess       = expevt_lookup_table.is_text_access[index];

	/* SIM
	 * Note this is now called with interrupts still disabled
	 * This is to cope with being called for a missing IO port
	 * address with interrupts disabled. This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Plus take care how you try and debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */
	tsk = current;
	mm = tsk->mm;

	if ((address >= VMALLOC_START && address < VMALLOC_END) ||
	    (address >= IOBASE_VADDR && address < IOBASE_END)) {
		/* Kernel-range addresses: only serviceable in privileged
		 * mode (the inner if attaches to "if (ssr_md)", so a
		 * user-mode fault here falls through and returns 0). */
		if (ssr_md)
			/*
			 * Process-contexts can never have this address
			 * range mapped
			 */
			if (handle_vmalloc_fault(mm, protection_flags,
						 textaccess, address))
				return 1;
	} else if (!in_interrupt() && mm) {
		if (handle_tlbmiss(mm, protection_flags, textaccess, address))
			return 1;
	}

	return 0;
}
| gpl-2.0 |
GalaxyTab4/starlightknight_kernel_samsung_matissewifi | drivers/input/mouse/psmouse-base.c | 482 | 45762 | /*
* PS/2 mouse driver
*
* Copyright (c) 1999-2002 Vojtech Pavlik
* Copyright (c) 2003-2004 Dmitry Torokhov
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define psmouse_fmt(fmt) fmt
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/init.h>
#include <linux/libps2.h>
#include <linux/mutex.h>
#include "psmouse.h"
#include "synaptics.h"
#include "logips2pp.h"
#include "alps.h"
#include "hgpk.h"
#include "lifebook.h"
#include "trackpoint.h"
#include "touchkit_ps2.h"
#include "elantech.h"
#include "sentelic.h"
#define DRIVER_DESC "PS/2 mouse driver"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/* Module parameters and their sysfs plumbing. */
static unsigned int psmouse_max_proto = PSMOUSE_AUTO;
static int psmouse_set_maxproto(const char *val, const struct kernel_param *);
static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp);

/* "proto" is parsed/printed via protocol-name abbreviations rather than
 * as a raw integer, hence the custom param ops. */
static struct kernel_param_ops param_ops_proto_abbrev = {
	.set = psmouse_set_maxproto,
	.get = psmouse_get_maxproto,
};
#define param_check_proto_abbrev(name, p)	__param_check(name, p, unsigned int)
module_param_named(proto, psmouse_max_proto, proto_abbrev, 0644);
MODULE_PARM_DESC(proto, "Highest protocol extension to probe (bare, imps, exps, any). Useful for KVM switches.");

static unsigned int psmouse_resolution = 200;
module_param_named(resolution, psmouse_resolution, uint, 0644);
MODULE_PARM_DESC(resolution, "Resolution, in dpi.");

static unsigned int psmouse_rate = 100;
module_param_named(rate, psmouse_rate, uint, 0644);
MODULE_PARM_DESC(rate, "Report rate, in reports per second.");

static bool psmouse_smartscroll = 1;
module_param_named(smartscroll, psmouse_smartscroll, bool, 0644);
MODULE_PARM_DESC(smartscroll, "Logitech Smartscroll autorepeat, 1 = enabled (default), 0 = disabled.");

static unsigned int psmouse_resetafter = 5;
module_param_named(resetafter, psmouse_resetafter, uint, 0644);
MODULE_PARM_DESC(resetafter, "Reset device after so many bad packets (0 = never).");

static unsigned int psmouse_resync_time;
module_param_named(resync_time, psmouse_resync_time, uint, 0644);
MODULE_PARM_DESC(resync_time, "How long can mouse stay idle before forcing resync (in seconds, 0 = never).");

/* Per-device sysfs attributes; the offsetof-based ones share generic
 * integer show/set helpers. */
PSMOUSE_DEFINE_ATTR(protocol, S_IWUSR | S_IRUGO,
			NULL,
			psmouse_attr_show_protocol, psmouse_attr_set_protocol);
PSMOUSE_DEFINE_ATTR(rate, S_IWUSR | S_IRUGO,
			(void *) offsetof(struct psmouse, rate),
			psmouse_show_int_attr, psmouse_attr_set_rate);
PSMOUSE_DEFINE_ATTR(resolution, S_IWUSR | S_IRUGO,
			(void *) offsetof(struct psmouse, resolution),
			psmouse_show_int_attr, psmouse_attr_set_resolution);
PSMOUSE_DEFINE_ATTR(resetafter, S_IWUSR | S_IRUGO,
			(void *) offsetof(struct psmouse, resetafter),
			psmouse_show_int_attr, psmouse_set_int_attr);
PSMOUSE_DEFINE_ATTR(resync_time, S_IWUSR | S_IRUGO,
			(void *) offsetof(struct psmouse, resync_time),
			psmouse_show_int_attr, psmouse_set_int_attr);

static struct attribute *psmouse_attributes[] = {
	&psmouse_attr_protocol.dattr.attr,
	&psmouse_attr_rate.dattr.attr,
	&psmouse_attr_resolution.dattr.attr,
	&psmouse_attr_resetafter.dattr.attr,
	&psmouse_attr_resync_time.dattr.attr,
	NULL
};

static struct attribute_group psmouse_attribute_group = {
	.attrs	= psmouse_attributes,
};

/*
 * psmouse_mutex protects all operations changing state of mouse
 * (connecting, disconnecting, changing rate or resolution via
 * sysfs). We could use a per-device semaphore but since there
 * rarely more than one PS/2 mouse connected and since semaphore
 * is taken in "slow" paths it is not worth it.
 */
static DEFINE_MUTEX(psmouse_mutex);

static struct workqueue_struct *kpsmoused_wq;
struct psmouse_protocol {
enum psmouse_type type;
bool maxproto;
bool ignore_parity; /* Protocol should ignore parity errors from KBC */
const char *name;
const char *alias;
int (*detect)(struct psmouse *, bool);
int (*init)(struct psmouse *);
};
/*
 * psmouse_process_byte() analyzes the PS/2 data stream and reports
 * relevant events to the input module once full packet has arrived.
 *
 * Default protocol handler for bare and Intelli-style mice.  Returns
 * PSMOUSE_GOOD_DATA while a packet is still being accumulated and
 * PSMOUSE_FULL_PACKET once a complete packet has been pushed to the
 * input core.
 */
psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
{
	struct input_dev *dev = psmouse->dev;
	unsigned char *packet = psmouse->packet;

	/* Wait until pktsize bytes (3 or 4, protocol dependent) arrive */
	if (psmouse->pktcnt < psmouse->pktsize)
		return PSMOUSE_GOOD_DATA;

/*
 * Full packet accumulated, process it
 */

/*
 * Scroll wheel on IntelliMice, scroll buttons on NetMice
 *
 * Byte 3 carries a signed wheel delta; negated so positive means
 * scrolling toward the user.
 */

	if (psmouse->type == PSMOUSE_IMPS || psmouse->type == PSMOUSE_GENPS)
		input_report_rel(dev, REL_WHEEL, -(signed char) packet[3]);

/*
 * Scroll wheel and buttons on IntelliMouse Explorer
 *
 * Top two bits of byte 3 select the report flavor; the remaining bits
 * encode a sign-magnitude wheel delta (bit 5/bit 3 is the sign).
 */

	if (psmouse->type == PSMOUSE_IMEX) {
		switch (packet[3] & 0xC0) {
		case 0x80: /* vertical scroll on IntelliMouse Explorer 4.0 */
			input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
			break;
		case 0x40: /* horizontal scroll on IntelliMouse Explorer 4.0 */
			input_report_rel(dev, REL_HWHEEL, (int) (packet[3] & 32) - (int) (packet[3] & 31));
			break;
		case 0x00:
		case 0xC0:
			/* classic IMEX report: 4-bit wheel plus two extra buttons */
			input_report_rel(dev, REL_WHEEL, (int) (packet[3] & 8) - (int) (packet[3] & 7));
			input_report_key(dev, BTN_SIDE, (packet[3] >> 4) & 1);
			input_report_key(dev, BTN_EXTRA, (packet[3] >> 5) & 1);
			break;
		}
	}

/*
 * Extra buttons on Genius NewNet 3D
 */

	if (psmouse->type == PSMOUSE_GENPS) {
		input_report_key(dev, BTN_SIDE, (packet[0] >> 6) & 1);
		input_report_key(dev, BTN_EXTRA, (packet[0] >> 7) & 1);
	}

/*
 * Extra button on ThinkingMouse
 */
	if (psmouse->type == PSMOUSE_THINKPS) {
		input_report_key(dev, BTN_EXTRA, (packet[0] >> 3) & 1);
		/* Without this bit of weirdness moving up gives wildly high Y changes. */
		packet[1] |= (packet[0] & 0x40) << 1;
	}

/*
 * Cortron PS2 Trackball reports SIDE button on the 4th bit of the first
 * byte.
 */
	if (psmouse->type == PSMOUSE_CORTRON) {
		input_report_key(dev, BTN_SIDE, (packet[0] >> 3) & 1);
		/* force the "always 1" sync bit so generic decode below works */
		packet[0] |= 0x08;
	}

/*
 * Generic PS/2 Mouse
 *
 * Bits 4 and 5 of byte 0 are the X/Y sign bits; shifting them to bit 8
 * sign-extends the 8-bit deltas in bytes 1 and 2 to 9 bits.
 */

	input_report_key(dev, BTN_LEFT,    packet[0]       & 1);
	input_report_key(dev, BTN_MIDDLE, (packet[0] >> 2) & 1);
	input_report_key(dev, BTN_RIGHT,  (packet[0] >> 1) & 1);

	input_report_rel(dev, REL_X, packet[1] ? (int) packet[1] - (int) ((packet[0] << 4) & 0x100) : 0);
	input_report_rel(dev, REL_Y, packet[2] ? (int) ((packet[0] << 3) & 0x100) - (int) packet[2] : 0);

	input_sync(dev);

	return PSMOUSE_FULL_PACKET;
}
/*
 * psmouse_queue_work() schedules @work on the driver's private
 * kpsmoused workqueue after @delay jiffies.  Used for deferred
 * resync handling.
 */
void psmouse_queue_work(struct psmouse *psmouse, struct delayed_work *work,
		unsigned long delay)
{
	queue_delayed_work(kpsmoused_wq, work, delay);
}
/*
 * __psmouse_set_state() moves the mouse into @new_state and resets all
 * per-packet bookkeeping: packet counter, out-of-sync counter, pending
 * libps2 flags and the last-byte timestamp.  Lockless variant - the
 * caller must already be protected from the interrupt handler.
 */
static inline void __psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state)
{
	psmouse->state = new_state;
	psmouse->pktcnt = 0;
	psmouse->out_of_sync_cnt = 0;
	psmouse->ps2dev.flags = 0;
	psmouse->last = jiffies;
}
/*
 * psmouse_set_state() sets new psmouse state and resets all flags and
 * counters while holding serio lock so fighting with interrupt handler
 * is not a concern.
 */
void psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state)
{
	/* pause RX so the IRQ handler cannot observe a half-reset state */
	serio_pause_rx(psmouse->ps2dev.serio);
	__psmouse_set_state(psmouse, new_state);
	serio_continue_rx(psmouse->ps2dev.serio);
}
/*
 * psmouse_handle_byte() feeds one byte of the input stream to the
 * protocol handler and reacts to its verdict.  Returns -1 when a
 * reconnect was requested (too many bad packets), 0 otherwise.
 */
static int psmouse_handle_byte(struct psmouse *psmouse)
{
	psmouse_ret_t rc = psmouse->protocol_handler(psmouse);

	if (rc == PSMOUSE_BAD_DATA) {
		if (psmouse->state == PSMOUSE_ACTIVATED) {
			psmouse_warn(psmouse,
				     "%s at %s lost sync at byte %d\n",
				     psmouse->name, psmouse->phys,
				     psmouse->pktcnt);
			/*
			 * Too many consecutive bad packets - give up on
			 * resyncing and ask serio for a full reconnect.
			 * (resetafter == 0 means "never".)
			 */
			if (++psmouse->out_of_sync_cnt == psmouse->resetafter) {
				__psmouse_set_state(psmouse, PSMOUSE_IGNORE);
				psmouse_notice(psmouse,
						"issuing reconnect request\n");
				serio_reconnect(psmouse->ps2dev.serio);
				return -1;
			}
		}
		psmouse->pktcnt = 0;
	} else if (rc == PSMOUSE_FULL_PACKET) {
		psmouse->pktcnt = 0;
		if (psmouse->out_of_sync_cnt) {
			psmouse->out_of_sync_cnt = 0;
			psmouse_notice(psmouse,
					"%s at %s - driver resynced.\n",
					psmouse->name, psmouse->phys);
		}
	}

	/* PSMOUSE_GOOD_DATA: packet still accumulating, nothing to do */
	return 0;
}
/*
 * psmouse_interrupt() handles incoming characters, either passing them
 * for normal processing or gathering them as command response.
 *
 * Runs in serio RX context for every byte the KBC delivers; always
 * returns IRQ_HANDLED.
 */
static irqreturn_t psmouse_interrupt(struct serio *serio,
		unsigned char data, unsigned int flags)
{
	struct psmouse *psmouse = serio_get_drvdata(serio);

	if (psmouse->state == PSMOUSE_IGNORE)
		goto out;

	/*
	 * Drop bytes the KBC flagged as bad, unless the active protocol
	 * asked us to ignore parity errors.  Abort any in-flight command.
	 */
	if (unlikely((flags & SERIO_TIMEOUT) ||
		     ((flags & SERIO_PARITY) && !psmouse->ignore_parity))) {

		if (psmouse->state == PSMOUSE_ACTIVATED)
			psmouse_warn(psmouse,
				     "bad data from KBC -%s%s\n",
				     flags & SERIO_TIMEOUT ? " timeout" : "",
				     flags & SERIO_PARITY ? " bad parity" : "");
		ps2_cmd_aborted(&psmouse->ps2dev);
		goto out;
	}

	/* Byte may be ACK / command response - hand it to libps2 first */
	if (unlikely(psmouse->ps2dev.flags & PS2_FLAG_ACK))
		if (ps2_handle_ack(&psmouse->ps2dev, data))
			goto out;

	if (unlikely(psmouse->ps2dev.flags & PS2_FLAG_CMD))
		if (ps2_handle_response(&psmouse->ps2dev, data))
			goto out;

	/* Motion data is only processed in CMD_MODE/ACTIVATED states */
	if (psmouse->state <= PSMOUSE_RESYNCING)
		goto out;

	/*
	 * More than half a second of silence in the middle of a packet
	 * means we lost sync - discard partial packet and resync.
	 */
	if (psmouse->state == PSMOUSE_ACTIVATED &&
	    psmouse->pktcnt && time_after(jiffies, psmouse->last + HZ/2)) {
		psmouse_info(psmouse, "%s at %s lost synchronization, throwing %d bytes away.\n",
			     psmouse->name, psmouse->phys, psmouse->pktcnt);
		psmouse->badbyte = psmouse->packet[0];
		__psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
		psmouse_queue_work(psmouse, &psmouse->resync_work, 0);
		goto out;
	}

	psmouse->packet[psmouse->pktcnt++] = data;

/*
 * Check if this is a new device announcement (0xAA 0x00)
 */
	if (unlikely(psmouse->packet[0] == PSMOUSE_RET_BAT && psmouse->pktcnt <= 2)) {
		if (psmouse->pktcnt == 1) {
			psmouse->last = jiffies;
			goto out;
		}

		/* 0xAA 0xAA announcement is OLPC HGPK firmware quirk */
		if (psmouse->packet[1] == PSMOUSE_RET_ID ||
		    (psmouse->type == PSMOUSE_HGPK &&
		     psmouse->packet[1] == PSMOUSE_RET_BAT)) {
			__psmouse_set_state(psmouse, PSMOUSE_IGNORE);
			serio_reconnect(serio);
			goto out;
		}

/*
 * Not a new device, try processing first byte normally
 */
		psmouse->pktcnt = 1;
		if (psmouse_handle_byte(psmouse))
			goto out;

		/* re-append the current byte after reprocessing byte 0 */
		psmouse->packet[psmouse->pktcnt++] = data;
	}

/*
 * See if we need to force resync because mouse was idle for too long
 */
	if (psmouse->state == PSMOUSE_ACTIVATED &&
	    psmouse->pktcnt == 1 && psmouse->resync_time &&
	    time_after(jiffies, psmouse->last + psmouse->resync_time * HZ)) {
		psmouse->badbyte = psmouse->packet[0];
		__psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
		psmouse_queue_work(psmouse, &psmouse->resync_work, 0);
		goto out;
	}

	psmouse->last = jiffies;
	psmouse_handle_byte(psmouse);

 out:
	return IRQ_HANDLED;
}
/*
 * psmouse_sliced_command() sends an extended PS/2 command to the mouse
 * using sliced syntax, understood by advanced devices, such as Logitech
 * or Synaptics touchpads. The command is encoded as:
 * 0xE6 0xE8 rr 0xE8 ss 0xE8 tt 0xE8 uu where (rr*64)+(ss*16)+(tt*4)+uu
 * is the command.
 *
 * Returns 0 on success, -1 if any of the underlying commands failed.
 */
int psmouse_sliced_command(struct psmouse *psmouse, unsigned char command)
{
	int nibble;

	if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11))
		return -1;

	/* transmit the command two bits at a time, MSB pair first */
	for (nibble = 3; nibble >= 0; nibble--) {
		unsigned char d = (command >> (nibble * 2)) & 3;

		if (ps2_command(&psmouse->ps2dev, &d, PSMOUSE_CMD_SETRES))
			return -1;
	}

	return 0;
}
/*
 * psmouse_reset() resets the mouse into power-on state.
 *
 * Issues RESET_BAT and expects the 0xAA 0x00 (BAT-complete + mouse ID)
 * reply.  Returns 0 on success, -1 on failure.
 */
int psmouse_reset(struct psmouse *psmouse)
{
	unsigned char param[2];

	if (ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_RESET_BAT))
		return -1;

	/*
	 * NOTE(review): this deliberately uses && - the reset is accepted
	 * if EITHER byte matches, which is lenient toward devices that
	 * return a nonstandard reply.  Confirm before tightening to ||.
	 */
	if (param[0] != PSMOUSE_RET_BAT && param[1] != PSMOUSE_RET_ID)
		return -1;

	return 0;
}
/*
 * psmouse_set_resolution() programs the hardware resolution and records
 * the value actually achieved in psmouse->resolution.
 *
 * The device understands four settings (25/50/100/200 dpi, encoded as
 * exponents 0-3); requests are rounded down to the nearest supported
 * value and clamped to 200 dpi.
 */
void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution)
{
	static const unsigned char params[] = { 0, 1, 2, 2, 3 };
	unsigned char setting;

	if (resolution == 0 || resolution > 200)
		resolution = 200;

	setting = params[resolution / 50];
	ps2_command(&psmouse->ps2dev, &setting, PSMOUSE_CMD_SETRES);
	psmouse->resolution = 25 << setting;
}
/*
 * psmouse_set_rate() programs the report rate, picking the highest
 * hardware-supported rate that does not exceed the request, and records
 * the chosen value in psmouse->rate.  The table is terminated by 0 so
 * the scan always stops.
 */
static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate)
{
	static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 };
	unsigned char chosen;
	int i;

	for (i = 0; rates[i] > rate; i++)
		;

	chosen = rates[i];
	ps2_command(&psmouse->ps2dev, &chosen, PSMOUSE_CMD_SETRATE);
	psmouse->rate = chosen;
}
/*
 * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
 *
 * Requests pktsize bytes via the POLL command directly into the packet
 * buffer; returns the ps2_command() result (0 on success).
 */
static int psmouse_poll(struct psmouse *psmouse)
{
	return ps2_command(&psmouse->ps2dev, psmouse->packet,
			   PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
}
/*
 * Genius NetMouse magic init.
 *
 * Returns 0 when the signature matched (device is a NetMouse),
 * -1 otherwise.
 */
static int genius_detect(struct psmouse *psmouse, bool set_properties)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	unsigned char param[4];

	/* magic knock: resolution 3 followed by three "1:1 scaling" cmds */
	param[0] = 3;
	ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES);
	ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11);
	ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11);
	ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11);
	ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO);

	/* NetMouse answers GETINFO with the 00 33 55 signature */
	if (param[0] != 0x00 || param[1] != 0x33 || param[2] != 0x55)
		return -1;

	if (set_properties) {
		__set_bit(BTN_MIDDLE, psmouse->dev->keybit);
		__set_bit(BTN_EXTRA, psmouse->dev->keybit);
		__set_bit(BTN_SIDE, psmouse->dev->keybit);
		__set_bit(REL_WHEEL, psmouse->dev->relbit);

		psmouse->vendor = "Genius";
		psmouse->name = "Mouse";
		psmouse->pktsize = 4;
	}

	return 0;
}
/*
 * IntelliMouse magic init.
 *
 * The knock sequence is "set rate 200, 100, 80"; a wheel mouse then
 * reports device ID 3.  Returns 0 on success, -1 if the device did not
 * identify as an IntelliMouse.
 */
static int intellimouse_detect(struct psmouse *psmouse, bool set_properties)
{
	static const unsigned char knock[] = { 200, 100, 80 };
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	unsigned char param[2];
	int i;

	for (i = 0; i < ARRAY_SIZE(knock); i++) {
		param[0] = knock[i];
		ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
	}
	ps2_command(ps2dev, param, PSMOUSE_CMD_GETID);

	if (param[0] != 3)
		return -1;

	if (set_properties) {
		__set_bit(BTN_MIDDLE, psmouse->dev->keybit);
		__set_bit(REL_WHEEL, psmouse->dev->relbit);

		if (!psmouse->vendor)
			psmouse->vendor = "Generic";
		if (!psmouse->name)
			psmouse->name = "Wheel Mouse";
		psmouse->pktsize = 4;
	}

	return 0;
}
/*
 * Try IntelliMouse/Explorer magic init.
 *
 * After the basic IntelliMouse knock, "set rate 200, 200, 80" makes an
 * Explorer-class mouse report device ID 4.  A further rate sequence
 * enables horizontal scrolling.  Returns 0 on success, -1 otherwise.
 */
static int im_explorer_detect(struct psmouse *psmouse, bool set_properties)
{
	static const unsigned char id_knock[] = { 200, 200, 80 };
	static const unsigned char hscroll_knock[] = { 200, 80, 40 };
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	unsigned char param[2];
	int i;

	intellimouse_detect(psmouse, 0);

	for (i = 0; i < ARRAY_SIZE(id_knock); i++) {
		param[0] = id_knock[i];
		ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
	}
	ps2_command(ps2dev, param, PSMOUSE_CMD_GETID);

	if (param[0] != 4)
		return -1;

	/* Magic to enable horizontal scrolling on IntelliMouse 4.0 */
	for (i = 0; i < ARRAY_SIZE(hscroll_knock); i++) {
		param[0] = hscroll_knock[i];
		ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
	}

	if (set_properties) {
		__set_bit(BTN_MIDDLE, psmouse->dev->keybit);
		__set_bit(REL_WHEEL, psmouse->dev->relbit);
		__set_bit(REL_HWHEEL, psmouse->dev->relbit);
		__set_bit(BTN_SIDE, psmouse->dev->keybit);
		__set_bit(BTN_EXTRA, psmouse->dev->keybit);

		if (!psmouse->vendor)
			psmouse->vendor = "Generic";
		if (!psmouse->name)
			psmouse->name = "Explorer Mouse";
		psmouse->pktsize = 4;
	}

	return 0;
}
/*
 * Kensington ThinkingMouse / ExpertMouse magic init.
 *
 * Returns 0 when the device answered GETID with 2 (ThinkingMouse),
 * -1 otherwise.
 */
static int thinking_detect(struct psmouse *psmouse, bool set_properties)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	unsigned char param[2];
	/* magic rate knock recognized by ThinkingMouse firmware */
	static const unsigned char seq[] = { 20, 60, 40, 20, 20, 60, 40, 20, 20 };
	int i;

	param[0] = 10;
	ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
	param[0] = 0;
	ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES);
	for (i = 0; i < ARRAY_SIZE(seq); i++) {
		param[0] = seq[i];
		ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE);
	}
	ps2_command(ps2dev, param, PSMOUSE_CMD_GETID);

	if (param[0] != 2)
		return -1;

	if (set_properties) {
		__set_bit(BTN_MIDDLE, psmouse->dev->keybit);
		__set_bit(BTN_EXTRA, psmouse->dev->keybit);

		psmouse->vendor = "Kensington";
		psmouse->name = "ThinkingMouse";
	}

	return 0;
}
/*
 * Bare PS/2 protocol "detection". Always succeeds.
 */
static int ps2bare_detect(struct psmouse *psmouse, bool set_properties)
{
	if (!set_properties)
		return 0;

	if (!psmouse->vendor)
		psmouse->vendor = "Generic";
	if (!psmouse->name)
		psmouse->name = "Mouse";

	/*
	 * We have no way of figuring true number of buttons so let's
	 * assume that the device has 3.
	 */
	__set_bit(BTN_MIDDLE, psmouse->dev->keybit);

	return 0;
}
/*
 * Cortron PS/2 protocol detection. There's no special way to detect it, so it
 * must be forced by sysfs protocol writing.
 */
static int cortron_detect(struct psmouse *psmouse, bool set_properties)
{
	if (!set_properties)
		return 0;

	psmouse->vendor = "Cortron";
	psmouse->name = "PS/2 Trackball";

	__set_bit(BTN_MIDDLE, psmouse->dev->keybit);
	__set_bit(BTN_SIDE, psmouse->dev->keybit);

	return 0;
}
/*
 * Apply default settings to the psmouse structure. Most of them will
 * be overridden by individual protocol initialization routines.
 *
 * Resets the input device capability bitmaps to a plain 3-byte, 3-button
 * relative mouse and installs the generic handlers, so each protocol
 * probe starts from a clean slate.
 */
static void psmouse_apply_defaults(struct psmouse *psmouse)
{
	struct input_dev *input_dev = psmouse->dev;

	/* wipe capabilities possibly set by an earlier protocol probe */
	memset(input_dev->evbit, 0, sizeof(input_dev->evbit));
	memset(input_dev->keybit, 0, sizeof(input_dev->keybit));
	memset(input_dev->relbit, 0, sizeof(input_dev->relbit));
	memset(input_dev->absbit, 0, sizeof(input_dev->absbit));
	memset(input_dev->mscbit, 0, sizeof(input_dev->mscbit));

	/* baseline: 2-button relative pointer */
	__set_bit(EV_KEY, input_dev->evbit);
	__set_bit(EV_REL, input_dev->evbit);

	__set_bit(BTN_LEFT, input_dev->keybit);
	__set_bit(BTN_RIGHT, input_dev->keybit);

	__set_bit(REL_X, input_dev->relbit);
	__set_bit(REL_Y, input_dev->relbit);

	/* generic handlers; protocols override what they need */
	psmouse->set_rate = psmouse_set_rate;
	psmouse->set_resolution = psmouse_set_resolution;
	psmouse->poll = psmouse_poll;
	psmouse->protocol_handler = psmouse_process_byte;
	psmouse->pktsize = 3;

	/* per-protocol callbacks default to "none" */
	psmouse->reconnect = NULL;
	psmouse->disconnect = NULL;
	psmouse->cleanup = NULL;
	psmouse->pt_activate = NULL;
	psmouse->pt_deactivate = NULL;
}
/*
 * Apply default settings to the psmouse structure and call specified
 * protocol detection or initialization routine.
 *
 * Defaults are only re-applied when @set_properties is true, i.e. when
 * the probe is allowed to modify the device; pure detection passes run
 * against the current state.
 */
static int psmouse_do_detect(int (*detect)(struct psmouse *psmouse,
					   bool set_properties),
			     struct psmouse *psmouse, bool set_properties)
{
	if (set_properties)
		psmouse_apply_defaults(psmouse);

	return detect(psmouse, set_properties);
}
/*
 * psmouse_extensions() probes for any extensions to the basic PS/2 protocol
 * the mouse may have.
 *
 * Probes protocols from most to least capable, never exceeding
 * @max_proto, and returns the detected psmouse_type.  The probe order is
 * load-bearing: several touchpads are confused by each other's magic
 * sequences (see the comments below), so think twice before reordering.
 * When a rich protocol's init fails, max_proto is lowered and probing
 * falls through to the simpler relative protocols.
 */
static int psmouse_extensions(struct psmouse *psmouse,
			      unsigned int max_proto, bool set_properties)
{
	bool synaptics_hardware = false;

/*
 * We always check for lifebook because it does not disturb mouse
 * (it only checks DMI information).
 */
	if (psmouse_do_detect(lifebook_detect, psmouse, set_properties) == 0) {
		if (max_proto > PSMOUSE_IMEX) {
			if (!set_properties || lifebook_init(psmouse) == 0)
				return PSMOUSE_LIFEBOOK;
		}
	}

/*
 * Try Kensington ThinkingMouse (we try first, because synaptics probe
 * upsets the thinkingmouse).
 */
	if (max_proto > PSMOUSE_IMEX &&
	    psmouse_do_detect(thinking_detect, psmouse, set_properties) == 0) {
		return PSMOUSE_THINKPS;
	}

/*
 * Try Synaptics TouchPad. Note that probing is done even if Synaptics protocol
 * support is disabled in config - we need to know if it is synaptics so we
 * can reset it properly after probing for intellimouse.
 */
	if (max_proto > PSMOUSE_PS2 &&
	    psmouse_do_detect(synaptics_detect, psmouse, set_properties) == 0) {
		synaptics_hardware = true;

		if (max_proto > PSMOUSE_IMEX) {
/*
 * Try activating protocol, but check if support is enabled first, since
 * we try detecting Synaptics even when protocol is disabled.
 */
			if (synaptics_supported() &&
			    (!set_properties || synaptics_init(psmouse) == 0)) {
				return PSMOUSE_SYNAPTICS;
			}

/*
 * Some Synaptics touchpads can emulate extended protocols (like IMPS/2).
 * Unfortunately Logitech/Genius probes confuse some firmware versions so
 * we'll have to skip them.
 */
			max_proto = PSMOUSE_IMEX;
		}
/*
 * Make sure that touchpad is in relative mode, gestures (taps) are enabled
 */
		synaptics_reset(psmouse);
	}

/*
 * Try ALPS TouchPad
 */
	if (max_proto > PSMOUSE_IMEX) {
		/* reset first - ALPS detection expects a freshly reset pad */
		ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
		if (psmouse_do_detect(alps_detect,
				      psmouse, set_properties) == 0) {
			if (!set_properties || alps_init(psmouse) == 0)
				return PSMOUSE_ALPS;
/*
 * Init failed, try basic relative protocols
 */
			max_proto = PSMOUSE_IMEX;
		}
	}

/*
 * Try OLPC HGPK touchpad.
 */
	if (max_proto > PSMOUSE_IMEX &&
	    psmouse_do_detect(hgpk_detect, psmouse, set_properties) == 0) {
		if (!set_properties || hgpk_init(psmouse) == 0)
			return PSMOUSE_HGPK;
/*
 * Init failed, try basic relative protocols
 */
		max_proto = PSMOUSE_IMEX;
	}

/*
 * Try Elantech touchpad.
 */
	if (max_proto > PSMOUSE_IMEX &&
	    psmouse_do_detect(elantech_detect, psmouse, set_properties) == 0) {
		if (!set_properties || elantech_init(psmouse) == 0)
			return PSMOUSE_ELANTECH;
/*
 * Init failed, try basic relative protocols
 */
		max_proto = PSMOUSE_IMEX;
	}

	if (max_proto > PSMOUSE_IMEX) {
		if (psmouse_do_detect(genius_detect,
				      psmouse, set_properties) == 0)
			return PSMOUSE_GENPS;

		if (psmouse_do_detect(ps2pp_init,
				      psmouse, set_properties) == 0)
			return PSMOUSE_PS2PP;

		if (psmouse_do_detect(trackpoint_detect,
				      psmouse, set_properties) == 0)
			return PSMOUSE_TRACKPOINT;

		if (psmouse_do_detect(touchkit_ps2_detect,
				      psmouse, set_properties) == 0)
			return PSMOUSE_TOUCHKIT_PS2;
	}

/*
 * Try Finger Sensing Pad. We do it here because its probe upsets
 * Trackpoint devices (causing TP_READ_ID command to time out).
 */
	if (max_proto > PSMOUSE_IMEX) {
		if (psmouse_do_detect(fsp_detect,
				      psmouse, set_properties) == 0) {
			if (!set_properties || fsp_init(psmouse) == 0)
				return PSMOUSE_FSP;
/*
 * Init failed, try basic relative protocols
 */
			max_proto = PSMOUSE_IMEX;
		}
	}

/*
 * Reset to defaults in case the device got confused by extended
 * protocol probes. Note that we follow up with full reset because
 * some mice put themselves to sleep when they see PSMOUSE_RESET_DIS.
 */
	ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
	psmouse_reset(psmouse);

	if (max_proto >= PSMOUSE_IMEX &&
	    psmouse_do_detect(im_explorer_detect,
			      psmouse, set_properties) == 0) {
		return PSMOUSE_IMEX;
	}

	if (max_proto >= PSMOUSE_IMPS &&
	    psmouse_do_detect(intellimouse_detect,
			      psmouse, set_properties) == 0) {
		return PSMOUSE_IMPS;
	}

/*
 * Okay, all failed, we have a standard mouse here. The number of the buttons
 * is still a question, though. We assume 3.
 */
	psmouse_do_detect(ps2bare_detect, psmouse, set_properties);

	if (synaptics_hardware) {
/*
 * We detected Synaptics hardware but it did not respond to IMPS/2 probes.
 * We need to reset the touchpad because if there is a track point on the
 * pass through port it could get disabled while probing for protocol
 * extensions.
 */
		psmouse_reset(psmouse);
	}

	return PSMOUSE_PS2;
}
/*
 * Table of every protocol this driver can speak.  Entries flagged
 * .maxproto may be selected as the "highest protocol to probe" via the
 * 'proto' module parameter; .alias is the short name accepted there and
 * in sysfs.  Protocols whose support is compile-time optional are
 * guarded by their Kconfig #ifdefs.
 */
static const struct psmouse_protocol psmouse_protocols[] = {
	{
		.type		= PSMOUSE_PS2,
		.name		= "PS/2",
		.alias		= "bare",
		.maxproto	= true,
		.ignore_parity	= true,
		.detect		= ps2bare_detect,
	},
#ifdef CONFIG_MOUSE_PS2_LOGIPS2PP
	{
		.type		= PSMOUSE_PS2PP,
		.name		= "PS2++",
		.alias		= "logitech",
		.detect		= ps2pp_init,
	},
#endif
	{
		.type		= PSMOUSE_THINKPS,
		.name		= "ThinkPS/2",
		.alias		= "thinkps",
		.detect		= thinking_detect,
	},
	{
		.type		= PSMOUSE_GENPS,
		.name		= "GenPS/2",
		.alias		= "genius",
		.detect		= genius_detect,
	},
	{
		.type		= PSMOUSE_IMPS,
		.name		= "ImPS/2",
		.alias		= "imps",
		.maxproto	= true,
		.ignore_parity	= true,
		.detect		= intellimouse_detect,
	},
	{
		.type		= PSMOUSE_IMEX,
		.name		= "ImExPS/2",
		.alias		= "exps",
		.maxproto	= true,
		.ignore_parity	= true,
		.detect		= im_explorer_detect,
	},
#ifdef CONFIG_MOUSE_PS2_SYNAPTICS
	{
		.type		= PSMOUSE_SYNAPTICS,
		.name		= "SynPS/2",
		.alias		= "synaptics",
		.detect		= synaptics_detect,
		.init		= synaptics_init,
	},
	{
		.type		= PSMOUSE_SYNAPTICS_RELATIVE,
		.name		= "SynRelPS/2",
		.alias		= "synaptics-relative",
		.detect		= synaptics_detect,
		.init		= synaptics_init_relative,
	},
#endif
#ifdef CONFIG_MOUSE_PS2_ALPS
	{
		.type		= PSMOUSE_ALPS,
		.name		= "AlpsPS/2",
		.alias		= "alps",
		.detect		= alps_detect,
		.init		= alps_init,
	},
#endif
#ifdef CONFIG_MOUSE_PS2_LIFEBOOK
	{
		.type		= PSMOUSE_LIFEBOOK,
		.name		= "LBPS/2",
		.alias		= "lifebook",
		.init		= lifebook_init,
	},
#endif
#ifdef CONFIG_MOUSE_PS2_TRACKPOINT
	{
		.type		= PSMOUSE_TRACKPOINT,
		.name		= "TPPS/2",
		.alias		= "trackpoint",
		.detect		= trackpoint_detect,
	},
#endif
#ifdef CONFIG_MOUSE_PS2_TOUCHKIT
	{
		.type		= PSMOUSE_TOUCHKIT_PS2,
		.name		= "touchkitPS/2",
		.alias		= "touchkit",
		.detect		= touchkit_ps2_detect,
	},
#endif
#ifdef CONFIG_MOUSE_PS2_OLPC
	{
		.type		= PSMOUSE_HGPK,
		.name		= "OLPC HGPK",
		.alias		= "hgpk",
		.detect		= hgpk_detect,
	},
#endif
#ifdef CONFIG_MOUSE_PS2_ELANTECH
	{
		.type		= PSMOUSE_ELANTECH,
		.name		= "ETPS/2",
		.alias		= "elantech",
		.detect		= elantech_detect,
		.init		= elantech_init,
	},
#endif
#ifdef CONFIG_MOUSE_PS2_SENTELIC
	{
		.type		= PSMOUSE_FSP,
		.name		= "FSPPS/2",
		.alias		= "fsp",
		.detect		= fsp_detect,
		.init		= fsp_init,
	},
#endif
	{
		.type		= PSMOUSE_CORTRON,
		.name		= "CortronPS/2",
		.alias		= "cortps",
		.detect		= cortron_detect,
	},
	{
		.type		= PSMOUSE_AUTO,
		.name		= "auto",
		.alias		= "any",
		.maxproto	= true,
	},
};
static const struct psmouse_protocol *psmouse_protocol_by_type(enum psmouse_type type)
{
int i;
for (i = 0; i < ARRAY_SIZE(psmouse_protocols); i++)
if (psmouse_protocols[i].type == type)
return &psmouse_protocols[i];
WARN_ON(1);
return &psmouse_protocols[0];
}
/*
 * psmouse_protocol_by_name() looks up a protocol table entry by its full
 * name or short alias.  @name need not be NUL-terminated; @len gives its
 * length.  Returns NULL when nothing matches.
 */
static const struct psmouse_protocol *psmouse_protocol_by_name(const char *name, size_t len)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(psmouse_protocols); i++) {
		const struct psmouse_protocol *proto = &psmouse_protocols[i];

		if (strlen(proto->name) == len && !strncmp(proto->name, name, len))
			return proto;
		if (strlen(proto->alias) == len && !strncmp(proto->alias, name, len))
			return proto;
	}

	return NULL;
}
/*
 * psmouse_probe() probes for a PS/2 mouse.
 *
 * Returns 0 when a mouse appears to be present, -1 otherwise.
 */
static int psmouse_probe(struct psmouse *psmouse)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	unsigned char param[2];

/*
 * First, we check if it's a mouse. It should send 0x00 or 0x03
 * in case of an IntelliMouse in 4-byte mode or 0x04 for IM Explorer.
 * Sunrex K8561 IR Keyboard/Mouse reports 0xff on second and subsequent
 * ID queries, probably due to a firmware bug.
 */

	/*
	 * Preload with a value GETID can never legitimately return, so a
	 * reply that leaves param untouched is not mistaken for an ID.
	 */
	param[0] = 0xa5;
	if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETID))
		return -1;

	if (param[0] != 0x00 && param[0] != 0x03 &&
	    param[0] != 0x04 && param[0] != 0xff)
		return -1;

/*
 * Then we reset and disable the mouse so that it doesn't generate events.
 */

	/* failure to reset is logged but not fatal */
	if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_DIS))
		psmouse_warn(psmouse, "Failed to reset mouse on %s\n",
			     ps2dev->serio->phys);

	return 0;
}
/*
 * psmouse_initialize() initializes the mouse to a sane state: report
 * rate, resolution and 1:1 scaling.  Skipped entirely when the user
 * forced the bare PS/2 protocol, which some devices mishandle.
 */
static void psmouse_initialize(struct psmouse *psmouse)
{
	if (psmouse_max_proto == PSMOUSE_PS2)
		return;

	psmouse->set_rate(psmouse, psmouse->rate);
	psmouse->set_resolution(psmouse, psmouse->resolution);
	ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11);
}
/*
* psmouse_activate() enables the mouse so that we get motion reports from it.
*/
int psmouse_activate(struct psmouse *psmouse)
{
if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
psmouse_warn(psmouse, "Failed to enable mouse on %s\n",
psmouse->ps2dev.serio->phys);
return -1;
}
psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
return 0;
}
/*
* psmouse_deactivate() puts the mouse into poll mode so that we don't get motion
* reports from it unless we explicitly request it.
*/
int psmouse_deactivate(struct psmouse *psmouse)
{
if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE)) {
psmouse_warn(psmouse, "Failed to deactivate mouse on %s\n",
psmouse->ps2dev.serio->phys);
return -1;
}
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
return 0;
}
/*
 * psmouse_resync() attempts to re-validate current protocol.
 *
 * Runs from the kpsmoused workqueue (scheduled by the interrupt handler
 * when it detects lost sync).  Disables the mouse, polls a full packet
 * through the protocol handler, then re-enables.  If any step fails a
 * full serio reconnect is requested.
 */
static void psmouse_resync(struct work_struct *work)
{
	struct psmouse *parent = NULL, *psmouse =
		container_of(work, struct psmouse, resync_work.work);
	struct serio *serio = psmouse->ps2dev.serio;
	psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
	bool failed = false, enabled = false;
	int i;

	mutex_lock(&psmouse_mutex);

	/* state may have changed between scheduling and execution */
	if (psmouse->state != PSMOUSE_RESYNCING)
		goto out;

	/* quiesce the parent of a pass-through port while we work */
	if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
		parent = serio_get_drvdata(serio->parent);
		psmouse_deactivate(parent);
	}

/*
 * Some mice don't ACK commands sent while they are in the middle of
 * transmitting motion packet. To avoid delay we use ps2_sendbyte()
 * instead of ps2_command() which would wait for 200ms for an ACK
 * that may never come.
 * As an additional quirk ALPS touchpads may not only forget to ACK
 * disable command but will stop reporting taps, so if we see that
 * mouse at least once ACKs disable we will do full reconnect if ACK
 * is missing.
 */
	psmouse->num_resyncs++;

	if (ps2_sendbyte(&psmouse->ps2dev, PSMOUSE_CMD_DISABLE, 20)) {
		if (psmouse->num_resyncs < 3 || psmouse->acks_disable_command)
			failed = true;
	} else
		psmouse->acks_disable_command = true;

/*
 * Poll the mouse. If it was reset the packet will be shorter than
 * psmouse->pktsize and ps2_command will fail. We do not expect and
 * do not handle scenario when mouse "upgrades" its protocol while
 * disconnected since it would require additional delay. If we ever
 * see a mouse that does it we'll adjust the code.
 */
	if (!failed) {
		if (psmouse->poll(psmouse))
			failed = true;
		else {
			/* run polled bytes through the protocol handler */
			psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
			for (i = 0; i < psmouse->pktsize; i++) {
				psmouse->pktcnt++;
				rc = psmouse->protocol_handler(psmouse);
				if (rc != PSMOUSE_GOOD_DATA)
					break;
			}
			if (rc != PSMOUSE_FULL_PACKET)
				failed = true;
			psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
		}
	}
/*
 * Now try to enable mouse. We try to do that even if poll failed and also
 * repeat our attempts 5 times, otherwise we may be left out with disabled
 * mouse.
 */
	for (i = 0; i < 5; i++) {
		if (!ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
			enabled = true;
			break;
		}
		msleep(200);
	}

	if (!enabled) {
		psmouse_warn(psmouse, "failed to re-enable mouse on %s\n",
			     psmouse->ps2dev.serio->phys);
		failed = true;
	}

	if (failed) {
		psmouse_set_state(psmouse, PSMOUSE_IGNORE);
		psmouse_info(psmouse,
			     "resync failed, issuing reconnect request\n");
		serio_reconnect(serio);
	} else
		psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);

	if (parent)
		psmouse_activate(parent);
 out:
	mutex_unlock(&psmouse_mutex);
}
/*
 * psmouse_cleanup() resets the mouse into power-on state.
 *
 * serio ->cleanup callback, invoked e.g. on suspend/shutdown.  Leaves
 * the device speaking the bare PS/2 protocol with reporting enabled.
 */
static void psmouse_cleanup(struct serio *serio)
{
	struct psmouse *psmouse = serio_get_drvdata(serio);
	struct psmouse *parent = NULL;

	mutex_lock(&psmouse_mutex);

	/* quiesce the parent of a pass-through port first */
	if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
		parent = serio_get_drvdata(serio->parent);
		psmouse_deactivate(parent);
	}

	psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);

/*
 * Disable stream mode so cleanup routine can proceed undisturbed.
 */
	if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_DISABLE))
		psmouse_warn(psmouse, "Failed to disable mouse on %s\n",
			     psmouse->ps2dev.serio->phys);

	/* give the active protocol a chance to undo its setup */
	if (psmouse->cleanup)
		psmouse->cleanup(psmouse);

/*
 * Reset the mouse to defaults (bare PS/2 protocol).
 */
	ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);

/*
 * Some boxes, such as HP nx7400, get terribly confused if mouse
 * is not fully enabled before suspending/shutting down.
 */
	ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE);

	if (parent) {
		if (parent->pt_deactivate)
			parent->pt_deactivate(parent);

		psmouse_activate(parent);
	}

	mutex_unlock(&psmouse_mutex);
}
/*
 * psmouse_disconnect() closes and frees.
 *
 * serio ->disconnect callback.  Note the mutex drop/reacquire around
 * flush_workqueue(): a pending resync work item takes psmouse_mutex
 * itself, so holding it across the flush would deadlock.
 */
static void psmouse_disconnect(struct serio *serio)
{
	struct psmouse *psmouse, *parent = NULL;

	psmouse = serio_get_drvdata(serio);

	sysfs_remove_group(&serio->dev.kobj, &psmouse_attribute_group);

	mutex_lock(&psmouse_mutex);

	psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);

	/* make sure we don't have a resync in progress */
	mutex_unlock(&psmouse_mutex);
	flush_workqueue(kpsmoused_wq);
	mutex_lock(&psmouse_mutex);

	if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
		parent = serio_get_drvdata(serio->parent);
		psmouse_deactivate(parent);
	}

	if (psmouse->disconnect)
		psmouse->disconnect(psmouse);

	if (parent && parent->pt_deactivate)
		parent->pt_deactivate(parent);

	psmouse_set_state(psmouse, PSMOUSE_IGNORE);

	serio_close(serio);
	serio_set_drvdata(serio, NULL);
	input_unregister_device(psmouse->dev);
	kfree(psmouse);

	if (parent)
		psmouse_activate(parent);

	mutex_unlock(&psmouse_mutex);
}
/*
 * psmouse_switch_protocol() - initialize the device for a given protocol.
 *
 * When @proto is non-NULL (user forced a protocol via sysfs) only that
 * protocol's detect/init is tried; otherwise psmouse_extensions() picks
 * the best protocol automatically.  Fills in the input device identity
 * on success.  Returns 0 on success, -1 when the forced protocol did
 * not detect or initialize.
 */
static int psmouse_switch_protocol(struct psmouse *psmouse,
				   const struct psmouse_protocol *proto)
{
	const struct psmouse_protocol *selected_proto;
	struct input_dev *input_dev = psmouse->dev;

	input_dev->dev.parent = &psmouse->ps2dev.serio->dev;

	if (proto && (proto->detect || proto->init)) {
		psmouse_apply_defaults(psmouse);

		if (proto->detect && proto->detect(psmouse, true) < 0)
			return -1;

		if (proto->init && proto->init(psmouse) < 0)
			return -1;

		psmouse->type = proto->type;
		selected_proto = proto;
	} else {
		psmouse->type = psmouse_extensions(psmouse,
						   psmouse_max_proto, true);
		selected_proto = psmouse_protocol_by_type(psmouse->type);
	}

	psmouse->ignore_parity = selected_proto->ignore_parity;

/*
 * If mouse's packet size is 3 there is no point in polling the
 * device in hopes to detect protocol reset - we won't get less
 * than 3 bytes response anyhow.
 */
	if (psmouse->pktsize == 3)
		psmouse->resync_time = 0;

/*
 * Some smart KVMs fake response to POLL command returning just
 * 3 bytes and messing up our resync logic, so if initial poll
 * fails we won't try polling the device anymore. Hopefully
 * such KVM will maintain initially selected protocol.
 */
	if (psmouse->resync_time && psmouse->poll(psmouse))
		psmouse->resync_time = 0;

	snprintf(psmouse->devname, sizeof(psmouse->devname), "%s %s %s",
		 selected_proto->name, psmouse->vendor, psmouse->name);

	input_dev->name = psmouse->devname;
	input_dev->phys = psmouse->phys;
	input_dev->id.bustype = BUS_I8042;
	input_dev->id.vendor = 0x0002;
	input_dev->id.product = psmouse->type;
	input_dev->id.version = psmouse->model;

	return 0;
}
/*
* psmouse_connect() is a callback from the serio module when
* an unhandled serio port is found.
*/
/*
 * Allocate a struct psmouse + input device for a newly discovered serio
 * port, probe and initialize the mouse, register it with the input core
 * and publish the sysfs attribute group. On any failure the acquired
 * resources are unwound in reverse order via the goto ladder below.
 * Returns 0 on success or a negative errno.
 */
static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
{
	struct psmouse *psmouse, *parent = NULL;
	struct input_dev *input_dev;
	int retval = 0, error = -ENOMEM;

	mutex_lock(&psmouse_mutex);

	/*
	 * If this is a pass-through port deactivate parent so the device
	 * connected to this port can be successfully identified
	 */
	if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
		parent = serio_get_drvdata(serio->parent);
		psmouse_deactivate(parent);
	}

	psmouse = kzalloc(sizeof(struct psmouse), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!psmouse || !input_dev)
		goto err_free;

	ps2_init(&psmouse->ps2dev, serio);
	INIT_DELAYED_WORK(&psmouse->resync_work, psmouse_resync);
	psmouse->dev = input_dev;
	snprintf(psmouse->phys, sizeof(psmouse->phys), "%s/input0", serio->phys);

	psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);

	serio_set_drvdata(serio, psmouse);

	error = serio_open(serio, drv);
	if (error)
		goto err_clear_drvdata;

	/* Check there is actually a PS/2 device on the other end. */
	if (psmouse_probe(psmouse) < 0) {
		error = -ENODEV;
		goto err_close_serio;
	}

	/* Seed per-device settings from the module parameters. */
	psmouse->rate = psmouse_rate;
	psmouse->resolution = psmouse_resolution;
	psmouse->resetafter = psmouse_resetafter;
	/* Pass-through children are never polled for resync. */
	psmouse->resync_time = parent ? 0 : psmouse_resync_time;
	psmouse->smartscroll = psmouse_smartscroll;

	/* NULL proto => auto-detect the best supported protocol. */
	psmouse_switch_protocol(psmouse, NULL);

	psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
	psmouse_initialize(psmouse);

	error = input_register_device(psmouse->dev);
	if (error)
		goto err_protocol_disconnect;

	if (parent && parent->pt_activate)
		parent->pt_activate(parent);

	error = sysfs_create_group(&serio->dev.kobj, &psmouse_attribute_group);
	if (error)
		goto err_pt_deactivate;

	psmouse_activate(psmouse);

 out:
	/* If this is a pass-through port the parent needs to be re-activated */
	if (parent)
		psmouse_activate(parent);

	mutex_unlock(&psmouse_mutex);
	return retval;

 err_pt_deactivate:
	if (parent && parent->pt_deactivate)
		parent->pt_deactivate(parent);
	input_unregister_device(psmouse->dev);
	input_dev = NULL; /* so we don't try to free it below */
 err_protocol_disconnect:
	if (psmouse->disconnect)
		psmouse->disconnect(psmouse);
	psmouse_set_state(psmouse, PSMOUSE_IGNORE);
 err_close_serio:
	serio_close(serio);
 err_clear_drvdata:
	serio_set_drvdata(serio, NULL);
 err_free:
	input_free_device(input_dev);
	kfree(psmouse);

	retval = error;
	goto out;
}
/*
 * Re-validate and re-initialize a mouse after a serio reconnect (e.g.
 * resume). If the device still identifies as the previously selected
 * protocol type it is re-initialized in place; otherwise we bail out
 * and let a full disconnect/connect cycle handle it.
 * Returns 0 on success, -1 on failure.
 */
static int psmouse_reconnect(struct serio *serio)
{
	struct psmouse *psmouse = serio_get_drvdata(serio);
	struct psmouse *parent = NULL;
	struct serio_driver *drv = serio->drv;
	unsigned char type;
	int rc = -1;

	if (!drv || !psmouse) {
		printk(KERN_DEBUG "psmouse: reconnect request, but serio is disconnected, ignoring...\n");
		return -1;
	}

	mutex_lock(&psmouse_mutex);

	/* Quiesce the parent while we poke its pass-through port. */
	if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
		parent = serio_get_drvdata(serio->parent);
		psmouse_deactivate(parent);
	}

	psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);

	if (psmouse->reconnect) {
		/* Protocol provides its own reconnect handler. */
		if (psmouse->reconnect(psmouse))
			goto out;
	} else {
		/* Generic path: reset, reprobe, and verify same protocol. */
		psmouse_reset(psmouse);

		if (psmouse_probe(psmouse) < 0)
			goto out;

		type = psmouse_extensions(psmouse, psmouse_max_proto, false);
		if (psmouse->type != type)
			goto out;
	}

	/*
	 * OK, the device type (and capabilities) match the old one,
	 * we can continue using it, complete initialization
	 */
	psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);

	psmouse_initialize(psmouse);

	if (parent && parent->pt_activate)
		parent->pt_activate(parent);

	psmouse_activate(psmouse);
	rc = 0;

 out:
	/* If this is a pass-through port the parent needs to be re-activated */
	if (parent)
		psmouse_activate(parent);

	mutex_unlock(&psmouse_mutex);
	return rc;
}
/*
 * Serio bus match table: claim any device on an i8042 AUX port and any
 * PS/2 pass-through port (e.g. behind a Synaptics touchpad).
 */
static struct serio_device_id psmouse_serio_ids[] = {
	{
		.type	= SERIO_8042,
		.proto	= SERIO_ANY,
		.id	= SERIO_ANY,
		.extra	= SERIO_ANY,
	},
	{
		.type	= SERIO_PS_PSTHRU,
		.proto	= SERIO_ANY,
		.id	= SERIO_ANY,
		.extra	= SERIO_ANY,
	},
	{ 0 }	/* table terminator */
};
MODULE_DEVICE_TABLE(serio, psmouse_serio_ids);
/* Serio driver glue: wires the psmouse callbacks into the serio core. */
static struct serio_driver psmouse_drv = {
	.driver		= {
		.name	= "psmouse",
	},
	.description	= DRIVER_DESC,
	.id_table	= psmouse_serio_ids,
	.interrupt	= psmouse_interrupt,
	.connect	= psmouse_connect,
	.reconnect	= psmouse_reconnect,
	.disconnect	= psmouse_disconnect,
	.cleanup	= psmouse_cleanup,
};
/*
 * Generic sysfs "show" trampoline: recover the psmouse instance from the
 * serio port's drvdata and delegate to the attribute's show callback.
 */
ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *devattr,
				 char *buf)
{
	struct psmouse_attribute *attr = to_psmouse_attr(devattr);
	struct psmouse *psmouse = serio_get_drvdata(to_serio_port(dev));

	return attr->show(psmouse, attr->data, buf);
}
/*
 * Generic sysfs "set" trampoline. For attributes marked "protect" the
 * device (and its pass-through parent, if any) is deactivated around the
 * set callback so protocol commands don't race with motion reports, then
 * reactivated afterwards - unless the callback reported -ENODEV, in
 * which case the device is assumed gone.
 */
ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *devattr,
				const char *buf, size_t count)
{
	struct serio *serio = to_serio_port(dev);
	struct psmouse_attribute *attr = to_psmouse_attr(devattr);
	struct psmouse *psmouse, *parent = NULL;
	int retval;

	retval = mutex_lock_interruptible(&psmouse_mutex);
	if (retval)
		goto out;

	psmouse = serio_get_drvdata(serio);

	if (attr->protect) {
		if (psmouse->state == PSMOUSE_IGNORE) {
			retval = -ENODEV;
			goto out_unlock;
		}

		/* Quiesce the parent first if we sit on a pass-through port. */
		if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
			parent = serio_get_drvdata(serio->parent);
			psmouse_deactivate(parent);
		}

		psmouse_deactivate(psmouse);
	}

	retval = attr->set(psmouse, attr->data, buf, count);

	if (attr->protect) {
		/* -ENODEV means the device vanished; don't touch it again. */
		if (retval != -ENODEV)
			psmouse_activate(psmouse);

		if (parent)
			psmouse_activate(parent);
	}

 out_unlock:
	mutex_unlock(&psmouse_mutex);
 out:
	return retval;
}
/*
 * Print an unsigned int member of struct psmouse; @offset is the byte
 * offset of the member, smuggled through the attribute's data pointer.
 */
static ssize_t psmouse_show_int_attr(struct psmouse *psmouse, void *offset, char *buf)
{
	unsigned int *member =
		(unsigned int *)((char *)psmouse + (size_t)offset);

	return sprintf(buf, "%u\n", *member);
}
/*
 * Parse a decimal unsigned int from @buf and store it into the struct
 * psmouse member located @offset bytes into the structure.
 * Returns @count on success or the kstrtouint() error code.
 */
static ssize_t psmouse_set_int_attr(struct psmouse *psmouse, void *offset, const char *buf, size_t count)
{
	unsigned int *member =
		(unsigned int *)((char *)psmouse + (size_t)offset);
	unsigned int val;
	int rc;

	rc = kstrtouint(buf, 10, &val);
	if (rc)
		return rc;

	*member = val;

	return count;
}
/* Report the name of the currently selected protocol via sysfs. */
static ssize_t psmouse_attr_show_protocol(struct psmouse *psmouse, void *data, char *buf)
{
	return sprintf(buf, "%s\n", psmouse_protocol_by_type(psmouse->type)->name);
}
/*
 * Switch the device to a different protocol at runtime (sysfs write).
 *
 * This first destroys any pass-through child ports (which requires
 * temporarily dropping psmouse_mutex, so the world is re-validated after
 * each attempt), disconnects the old protocol, swaps in a freshly
 * allocated input device, and brings the new protocol up. If registering
 * the new input device fails, the old protocol and input device are
 * restored. Returns @count on success or a negative errno.
 */
static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, const char *buf, size_t count)
{
	struct serio *serio = psmouse->ps2dev.serio;
	struct psmouse *parent = NULL;
	struct input_dev *old_dev, *new_dev;
	const struct psmouse_protocol *proto, *old_proto;
	int error;
	int retry = 0;

	proto = psmouse_protocol_by_name(buf, count);
	if (!proto)
		return -EINVAL;

	if (psmouse->type == proto->type)
		return count;

	new_dev = input_allocate_device();
	if (!new_dev)
		return -ENOMEM;

	while (!list_empty(&serio->children)) {
		if (++retry > 3) {
			psmouse_warn(psmouse,
				     "failed to destroy children ports, protocol change aborted.\n");
			input_free_device(new_dev);
			return -EIO;
		}

		/*
		 * Unregistering a child port must happen without the mutex
		 * held; re-check everything once we get it back.
		 */
		mutex_unlock(&psmouse_mutex);
		serio_unregister_child_port(serio);
		mutex_lock(&psmouse_mutex);

		if (serio->drv != &psmouse_drv) {
			input_free_device(new_dev);
			return -ENODEV;
		}

		if (psmouse->type == proto->type) {
			input_free_device(new_dev);
			return count; /* switched by other thread */
		}
	}

	if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
		parent = serio_get_drvdata(serio->parent);
		if (parent->pt_deactivate)
			parent->pt_deactivate(parent);
	}

	/* Remember the old state so we can roll back on failure. */
	old_dev = psmouse->dev;
	old_proto = psmouse_protocol_by_type(psmouse->type);

	if (psmouse->disconnect)
		psmouse->disconnect(psmouse);

	psmouse_set_state(psmouse, PSMOUSE_IGNORE);

	psmouse->dev = new_dev;
	psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);

	if (psmouse_switch_protocol(psmouse, proto) < 0) {
		psmouse_reset(psmouse);
		/* default to PSMOUSE_PS2 */
		psmouse_switch_protocol(psmouse, &psmouse_protocols[0]);
	}

	psmouse_initialize(psmouse);
	psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);

	error = input_register_device(psmouse->dev);
	if (error) {
		/* Roll back to the previous protocol and input device. */
		if (psmouse->disconnect)
			psmouse->disconnect(psmouse);

		psmouse_set_state(psmouse, PSMOUSE_IGNORE);
		input_free_device(new_dev);
		psmouse->dev = old_dev;
		psmouse_set_state(psmouse, PSMOUSE_INITIALIZING);
		psmouse_switch_protocol(psmouse, old_proto);
		psmouse_initialize(psmouse);
		psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);

		return error;
	}

	input_unregister_device(old_dev);

	if (parent && parent->pt_activate)
		parent->pt_activate(parent);

	return count;
}
/*
 * Sysfs handler: parse a decimal report rate and hand it to the
 * protocol's set_rate hook. Returns @count or a parse error.
 */
static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, void *data, const char *buf, size_t count)
{
	unsigned int rate;
	int rc;

	rc = kstrtouint(buf, 10, &rate);
	if (rc)
		return rc;

	psmouse->set_rate(psmouse, rate);

	return count;
}
/*
 * Sysfs handler: parse a decimal resolution value and hand it to the
 * protocol's set_resolution hook. Returns @count or a parse error.
 */
static ssize_t psmouse_attr_set_resolution(struct psmouse *psmouse, void *data, const char *buf, size_t count)
{
	unsigned int res;
	int rc;

	rc = kstrtouint(buf, 10, &res);
	if (rc)
		return rc;

	psmouse->set_resolution(psmouse, res);

	return count;
}
/*
 * Module-parameter setter for "proto": look the name up in the protocol
 * table and store its type. Only protocols flagged as usable maxproto
 * limits are accepted.
 */
static int psmouse_set_maxproto(const char *val, const struct kernel_param *kp)
{
	const struct psmouse_protocol *p;

	if (val == NULL)
		return -EINVAL;

	p = psmouse_protocol_by_name(val, strlen(val));
	if (p == NULL || !p->maxproto)
		return -EINVAL;

	*(unsigned int *)kp->arg = p->type;

	return 0;
}
/*
 * Module-parameter getter for "proto": print the name of the protocol
 * type currently stored in the parameter.
 */
static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
{
	int proto_type = *((unsigned int *)kp->arg);

	return sprintf(buffer, "%s",
		       psmouse_protocol_by_type(proto_type)->name);
}
/*
 * Module entry point: initialize the sub-protocol modules, create the
 * single-threaded resync workqueue, then register with the serio bus.
 * The workqueue is torn down again if driver registration fails.
 */
static int __init psmouse_init(void)
{
	int error;

	lifebook_module_init();
	synaptics_module_init();
	hgpk_module_init();

	kpsmoused_wq = create_singlethread_workqueue("kpsmoused");
	if (!kpsmoused_wq) {
		pr_err("failed to create kpsmoused workqueue\n");
		return -ENOMEM;
	}

	error = serio_register_driver(&psmouse_drv);
	if (error)
		destroy_workqueue(kpsmoused_wq);

	return error;
}
/* Module exit: unregister from serio, then destroy the resync workqueue. */
static void __exit psmouse_exit(void)
{
	serio_unregister_driver(&psmouse_drv);
	destroy_workqueue(kpsmoused_wq);
}
/* Standard module load/unload hooks. */
module_init(psmouse_init);
module_exit(psmouse_exit);
| gpl-2.0 |
mturquette/linux-omap-android | arch/ia64/kernel/mca_drv.c | 738 | 21845 | /*
* File: mca_drv.c
* Purpose: Generic MCA handling layer
*
* Copyright (C) 2004 FUJITSU LIMITED
* Copyright (C) 2004 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
* Copyright (C) 2005 Silicon Graphics, Inc
* Copyright (C) 2005 Keith Owens <kaos@sgi.com>
* Copyright (C) 2006 Russ Anderson <rja@sgi.com>
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include "mca_drv.h"
/* max size of SAL error record (default); tunable via module parameter */
static int sal_rec_max = 10000;

/* from mca_drv_asm.S */
extern void *mca_handler_bhhook(void);

/* serializes mca_handler_bh() page-isolation bookkeeping */
static DEFINE_SPINLOCK(mca_bh_lock);

/* scope of a machine check: local to one CPU or system-wide */
typedef enum {
	MCA_IS_LOCAL  = 0,
	MCA_IS_GLOBAL = 1
} mca_type_t;

#define MAX_PAGE_ISOLATE 1024

/* pages already isolated because they contain poisoned memory */
static struct page *page_isolate[MAX_PAGE_ISOLATE];
static int num_page_isolate = 0;

/* result of a page-isolation attempt */
typedef enum {
	ISOLATE_NG,	/* isolation failed */
	ISOLATE_OK,	/* isolated (or already was) */
	ISOLATE_NONE	/* address not eligible for isolation */
} isolate_status_t;

typedef enum {
	MCA_NOT_RECOVERED = 0,
	MCA_RECOVERED	  = 1
} recovery_status_t;

/*
 * This pool keeps pointers to the section part of SAL error record
 */
static struct {
	slidx_list_t *buffer; /* section pointer list pool */
	int cur_idx; /* Current index of section pointer list pool */
	int max_idx; /* Maximum index of section pointer list pool */
} slidx_pool;
/*
 * Log a printf-style message at ALERT level and report the MCA as not
 * recovered. Convenience helper so recovery paths can
 * "return fatal_mca(...)".
 */
static int
fatal_mca(const char *fmt, ...)
{
	char msg[256];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(msg, sizeof(msg), fmt, ap);
	va_end(ap);

	ia64_mca_printk(KERN_ALERT "MCA: %s\n", msg);

	return MCA_NOT_RECOVERED;
}
/*
 * Log a printf-style message at INFO level and report the MCA as
 * recovered. Counterpart of fatal_mca().
 */
static int
mca_recovered(const char *fmt, ...)
{
	char msg[256];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(msg, sizeof(msg), fmt, ap);
	va_end(ap);

	ia64_mca_printk(KERN_INFO "MCA: %s\n", msg);

	return MCA_RECOVERED;
}
/**
 * mca_page_isolate - isolate a poisoned page in order not to use it later
 * @paddr: poisoned memory location
 *
 * Return value:
 *	one of isolate_status_t, ISOLATE_OK/NG/NONE.
 *
 * Caller must serialize access (done via mca_bh_lock in mca_handler_bh).
 */
static isolate_status_t
mca_page_isolate(unsigned long paddr)
{
	int i;
	struct page *p;

	/* whether physical address is valid or not */
	if (!ia64_phys_addr_valid(paddr))
		return ISOLATE_NONE;

	if (!pfn_valid(paddr >> PAGE_SHIFT))
		return ISOLATE_NONE;

	/* convert physical address to physical page number */
	p = pfn_to_page(paddr>>PAGE_SHIFT);

	/* check whether the page has already been registered or not */
	for (i = 0; i < num_page_isolate; i++)
		if (page_isolate[i] == p)
			return ISOLATE_OK; /* already listed */

	/* limitation check: the isolation table is full */
	if (num_page_isolate == MAX_PAGE_ISOLATE)
		return ISOLATE_NG;

	/* refuse pages having attribute 'SLAB' or 'Reserved' */
	if (PageSlab(p) || PageReserved(p))
		return ISOLATE_NG;

	/* add attribute 'Reserved' and register the page */
	get_page(p);
	SetPageReserved(p);
	page_isolate[num_page_isolate++] = p;

	return ISOLATE_OK;
}
/**
 * mca_handler_bh - Kill the process which occurred memory read error
 * @paddr: poisoned address received from MCA Handler
 * @iip:   instruction pointer of the interrupted context
 * @ipsr:  processor status register of the interrupted context
 *
 * Bottom half of MCA recovery: logs the event, tries to isolate the
 * poisoned page, then kills the current (affected) process. Never
 * returns - ends in do_exit(SIGKILL).
 */
void
mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
{
	ia64_mlogbuf_dump();
	printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, "
		"iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n",
	       raw_smp_processor_id(), current->pid, current_uid(),
	       iip, ipsr, paddr, current->comm);

	spin_lock(&mca_bh_lock);
	switch (mca_page_isolate(paddr)) {
	case ISOLATE_OK:
		printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr);
		break;
	case ISOLATE_NG:
		printk(KERN_CRIT "Page isolation: ( %lx ) failure.\n", paddr);
		break;
	default:
		break;
	}
	spin_unlock(&mca_bh_lock);

	/* This process is about to be killed itself */
	do_exit(SIGKILL);
}
/**
 * mca_make_peidx - Make index of processor error section
 * @slpi: pointer to record of processor error section
 * @peidx: pointer to index of processor error section
 *
 * The processor error section is laid out as: header + an array of
 * mod_error_info check entries + optional cpuid info + optional static
 * processor state. The mid/bottom pointers are NULL when the respective
 * optional part is absent (per the section's valid bits).
 */
static void
mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
{
	/*
	 * calculate the start address of
	 *   "struct cpuid_info" and "sal_processor_static_info_t".
	 */
	u64 total_check_num = slpi->valid.num_cache_check
				+ slpi->valid.num_tlb_check
				+ slpi->valid.num_bus_check
				+ slpi->valid.num_reg_file_check
				+ slpi->valid.num_ms_check;
	u64 head_size =	sizeof(sal_log_mod_error_info_t) * total_check_num
			+ sizeof(sal_log_processor_info_t);
	u64 mid_size  = slpi->valid.cpuid_info * sizeof(struct sal_cpuid_info);

	peidx_head(peidx)   = slpi;
	peidx_mid(peidx)    = (struct sal_cpuid_info *)
		(slpi->valid.cpuid_info ? ((char*)slpi + head_size) : NULL);
	peidx_bottom(peidx) = (sal_processor_static_info_t *)
		(slpi->valid.psi_static_struct ?
			((char*)slpi + head_size + mid_size) : NULL);
}
/**
* mca_make_slidx - Make index of SAL error record
* @buffer: pointer to SAL error record
* @slidx: pointer to index of SAL error record
*
* Return value:
* 1 if record has platform error / 0 if not
*/
/*
 * Append section header @ptr to list @sect, drawing the list node from
 * the preallocated slidx_pool (the index wraps when the pool is
 * exhausted). Wrapped in do { } while (0) so the multi-statement macro
 * expands to a single statement and stays safe in unbraced if/else
 * bodies (standard kernel macro idiom).
 */
#define LOG_INDEX_ADD_SECT_PTR(sect, ptr) \
do { \
	slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx]; \
	hl->hdr = ptr; \
	list_add(&hl->list, &(sect)); \
	slidx_pool.cur_idx = (slidx_pool.cur_idx + 1)%slidx_pool.max_idx; \
} while (0)
/*
 * Build the per-record section index (slidx) by walking every section
 * header in the SAL error record and filing it into the matching
 * category list, using GUID comparison. Sections with an unknown GUID
 * land on the "unsupported" list. Returns 1 if any platform error
 * section was seen, 0 otherwise.
 */
static int
mca_make_slidx(void *buffer, slidx_table_t *slidx)
{
	int platform_err = 0;
	int record_len = ((sal_log_record_header_t*)buffer)->len;
	u32 ercd_pos;
	int sects;
	sal_log_section_hdr_t *sp;

	/*
	 * Initialize index referring current record
	 */
	INIT_LIST_HEAD(&(slidx->proc_err));
	INIT_LIST_HEAD(&(slidx->mem_dev_err));
	INIT_LIST_HEAD(&(slidx->sel_dev_err));
	INIT_LIST_HEAD(&(slidx->pci_bus_err));
	INIT_LIST_HEAD(&(slidx->smbios_dev_err));
	INIT_LIST_HEAD(&(slidx->pci_comp_err));
	INIT_LIST_HEAD(&(slidx->plat_specific_err));
	INIT_LIST_HEAD(&(slidx->host_ctlr_err));
	INIT_LIST_HEAD(&(slidx->plat_bus_err));
	INIT_LIST_HEAD(&(slidx->unsupported));

	/*
	 * Extract a Record Header
	 */
	slidx->header = buffer;

	/*
	 * Extract each section records
	 * (arranged from "int ia64_log_platform_info_print()")
	 */
	for (ercd_pos = sizeof(sal_log_record_header_t), sects = 0;
	     ercd_pos < record_len; ercd_pos += sp->len, sects++) {
		sp = (sal_log_section_hdr_t *)((char*)buffer + ercd_pos);
		if (!efi_guidcmp(sp->guid, SAL_PROC_DEV_ERR_SECT_GUID)) {
			LOG_INDEX_ADD_SECT_PTR(slidx->proc_err, sp);
		} else if (!efi_guidcmp(sp->guid,
				SAL_PLAT_MEM_DEV_ERR_SECT_GUID)) {
			platform_err = 1;
			LOG_INDEX_ADD_SECT_PTR(slidx->mem_dev_err, sp);
		} else if (!efi_guidcmp(sp->guid,
				SAL_PLAT_SEL_DEV_ERR_SECT_GUID)) {
			platform_err = 1;
			LOG_INDEX_ADD_SECT_PTR(slidx->sel_dev_err, sp);
		} else if (!efi_guidcmp(sp->guid,
				SAL_PLAT_PCI_BUS_ERR_SECT_GUID)) {
			platform_err = 1;
			LOG_INDEX_ADD_SECT_PTR(slidx->pci_bus_err, sp);
		} else if (!efi_guidcmp(sp->guid,
				SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID)) {
			platform_err = 1;
			LOG_INDEX_ADD_SECT_PTR(slidx->smbios_dev_err, sp);
		} else if (!efi_guidcmp(sp->guid,
				SAL_PLAT_PCI_COMP_ERR_SECT_GUID)) {
			platform_err = 1;
			LOG_INDEX_ADD_SECT_PTR(slidx->pci_comp_err, sp);
		} else if (!efi_guidcmp(sp->guid,
				SAL_PLAT_SPECIFIC_ERR_SECT_GUID)) {
			platform_err = 1;
			LOG_INDEX_ADD_SECT_PTR(slidx->plat_specific_err, sp);
		} else if (!efi_guidcmp(sp->guid,
				SAL_PLAT_HOST_CTLR_ERR_SECT_GUID)) {
			platform_err = 1;
			LOG_INDEX_ADD_SECT_PTR(slidx->host_ctlr_err, sp);
		} else if (!efi_guidcmp(sp->guid,
				SAL_PLAT_BUS_ERR_SECT_GUID)) {
			platform_err = 1;
			LOG_INDEX_ADD_SECT_PTR(slidx->plat_bus_err, sp);
		} else {
			LOG_INDEX_ADD_SECT_PTR(slidx->unsupported, sp);
		}
	}
	slidx->n_sections = sects;

	return platform_err;
}
/**
* init_record_index_pools - Initialize pool of lists for SAL record index
*
* Return value:
* 0 on Success / -ENOMEM on Failure
*/
static int
init_record_index_pools(void)
{
int i;
int rec_max_size; /* Maximum size of SAL error records */
int sect_min_size; /* Minimum size of SAL error sections */
/* minimum size table of each section */
static int sal_log_sect_min_sizes[] = {
sizeof(sal_log_processor_info_t)
+ sizeof(sal_processor_static_info_t),
sizeof(sal_log_mem_dev_err_info_t),
sizeof(sal_log_sel_dev_err_info_t),
sizeof(sal_log_pci_bus_err_info_t),
sizeof(sal_log_smbios_dev_err_info_t),
sizeof(sal_log_pci_comp_err_info_t),
sizeof(sal_log_plat_specific_err_info_t),
sizeof(sal_log_host_ctlr_err_info_t),
sizeof(sal_log_plat_bus_err_info_t),
};
/*
* MCA handler cannot allocate new memory on flight,
* so we preallocate enough memory to handle a SAL record.
*
* Initialize a handling set of slidx_pool:
* 1. Pick up the max size of SAL error records
* 2. Pick up the min size of SAL error sections
* 3. Allocate the pool as enough to 2 SAL records
* (now we can estimate the maxinum of section in a record.)
*/
/* - 1 - */
rec_max_size = sal_rec_max;
/* - 2 - */
sect_min_size = sal_log_sect_min_sizes[0];
for (i = 1; i < sizeof sal_log_sect_min_sizes/sizeof(size_t); i++)
if (sect_min_size > sal_log_sect_min_sizes[i])
sect_min_size = sal_log_sect_min_sizes[i];
/* - 3 - */
slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
slidx_pool.buffer = (slidx_list_t *)
kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
return slidx_pool.buffer ? 0 : -ENOMEM;
}
/*****************************************************************************
* Recovery functions *
*****************************************************************************/
/**
 * is_mca_global - Check whether this MCA is global or not
 * @peidx: pointer of index of processor error section
 * @pbci: pointer to pal_bus_check_info_t
 * @sos: pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	MCA_IS_LOCAL / MCA_IS_GLOBAL
 */
static mca_type_t
is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
	      struct ia64_sal_os_state *sos)
{
	pal_processor_state_info_t *psp =
		(pal_processor_state_info_t*)peidx_psp(peidx);

	/*
	 * PAL can request a rendezvous, if the MCA has a global scope.
	 * If "rz_always" flag is set, SAL requests MCA rendezvous
	 * in spite of global MCA.
	 * Therefore it is local MCA when rendezvous has not been requested.
	 * Failed to rendezvous, the system must be down.
	 */
	switch (sos->rv_rc) {
	case -1: /* SAL rendezvous unsuccessful */
		return MCA_IS_GLOBAL;
	case  0: /* SAL rendezvous not required */
		return MCA_IS_LOCAL;
	case  1: /* SAL rendezvous successful int */
	case  2: /* SAL rendezvous successful int with init */
	default:
		break;
	}

	/*
	 * If One or more Cache/TLB/Reg_File/Uarch_Check is here,
	 * it would be a local MCA. (i.e. processor internal error)
	 */
	if (psp->tc || psp->cc || psp->rc || psp->uc)
		return MCA_IS_LOCAL;

	/*
	 * Bus_Check structure with Bus_Check.ib (internal bus error) flag set
	 * would be a global MCA. (e.g. a system bus address parity error)
	 */
	if (!pbci || pbci->ib)
		return MCA_IS_GLOBAL;

	/*
	 * Bus_Check structure with Bus_Check.eb (external bus error) flag set
	 * could be either a local MCA or a global MCA.
	 *
	 * Referring Bus_Check.bsi:
	 *   0: Unknown/unclassified
	 *   1: BERR#
	 *   2: BINIT#
	 *   3: Hard Fail
	 * (FIXME: Are these SGI specific or generic bsi values?)
	 */
	if (pbci->eb)
		switch (pbci->bsi) {
		case 0:
			/* e.g. a load from poisoned memory */
			return MCA_IS_LOCAL;
		case 1:
		case 2:
		case 3:
			return MCA_IS_GLOBAL;
		}

	/* Anything unclassified is treated as global (conservative). */
	return MCA_IS_GLOBAL;
}
/**
 * get_target_identifier - Get the valid Cache or Bus check target identifier.
 * @peidx: pointer of index of processor error section
 *
 * Return value:
 *	target address on Success / 0 on Failure
 *
 * Cache checks are preferred over the bus check; among cache checks the
 * one with the lowest cache level wins.
 */
static u64
get_target_identifier(peidx_table_t *peidx)
{
	u64 target_address = 0;
	sal_log_mod_error_info_t *smei;
	pal_cache_check_info_t *pcci;
	int i, level = 9;

	/*
	 * Look through the cache checks for a valid target identifier
	 * If more than one valid target identifier, return the one
	 * with the lowest cache level.
	 */
	for (i = 0; i < peidx_cache_check_num(peidx); i++) {
		smei = (sal_log_mod_error_info_t *)peidx_cache_check(peidx, i);
		if (smei->valid.target_identifier && smei->target_identifier) {
			pcci = (pal_cache_check_info_t *)&(smei->check_info);
			if (!target_address || (pcci->level < level)) {
				target_address = smei->target_identifier;
				level = pcci->level;
				continue;
			}
		}
	}
	if (target_address)
		return target_address;

	/*
	 * Look at the bus check for a valid target identifier
	 */
	smei = peidx_bus_check(peidx, 0);
	if (smei && smei->valid.target_identifier)
		return smei->target_identifier;

	return 0;
}
/**
 * recover_from_read_error - Try to recover the errors which type are "read"s.
 * @slidx: pointer of index of SAL error record
 * @peidx: pointer of index of processor error section
 * @pbci: pointer of pal_bus_check_info
 * @sos: pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	1 on Success / 0 on Failure
 *
 * Recovery is only attempted when the interrupted context was user mode
 * (or a marked-recoverable kernel range); the min-state area is then
 * patched so that execution resumes in mca_handler_bhhook, which kills
 * the affected process.
 */
static int
recover_from_read_error(slidx_table_t *slidx,
			peidx_table_t *peidx, pal_bus_check_info_t *pbci,
			struct ia64_sal_os_state *sos)
{
	u64 target_identifier;
	pal_min_state_area_t *pmsa;
	struct ia64_psr *psr1, *psr2;
	ia64_fptr_t *mca_hdlr_bh = (ia64_fptr_t*)mca_handler_bhhook;

	/* Is target address valid? */
	target_identifier = get_target_identifier(peidx);
	if (!target_identifier)
		return fatal_mca("target address not valid");

	/*
	 * cpu read or memory-mapped io read
	 *
	 *    offending process  affected process  OS MCA do
	 *     kernel mode        kernel mode       down system
	 *     kernel mode        user   mode       kill the process
	 *     user   mode        kernel mode       down system (*)
	 *     user   mode        user   mode       kill the process
	 *
	 * (*) You could terminate offending user-mode process
	 *    if (pbci->pv && pbci->pl != 0) *and* if you sure
	 *    the process not have any locks of kernel.
	 */

	/* Is minstate valid? */
	if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate))
		return fatal_mca("minstate not valid");
	psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr);
	psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr);

	/*
	 * Check the privilege level of interrupted context.
	 * If it is user-mode, then terminate affected process.
	 */

	pmsa = sos->pal_min_state;
	if (psr1->cpl != 0 ||
	   ((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) {
		/*
		 *  setup for resume to bottom half of MCA,
		 * "mca_handler_bhhook"
		 */
		/* pass to bhhook as argument (gr8, ...) */
		pmsa->pmsa_gr[8-1] = target_identifier;
		pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
		pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
		/* set interrupted return address (but no use) */
		pmsa->pmsa_br0 = pmsa->pmsa_iip;
		/* change resume address to bottom half */
		pmsa->pmsa_iip = mca_hdlr_bh->fp;
		pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;
		/* set cpl with kernel mode */
		psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
		psr2->cpl = 0;
		psr2->ri  = 0;
		psr2->bn  = 1;
		psr2->i  = 0;

		return mca_recovered("user memory corruption. "
				"kill affected process - recovered.");
	}

	return fatal_mca("kernel context not recovered, iip 0x%lx\n",
			 pmsa->pmsa_iip);
}
/**
 * recover_from_platform_error - Recover from platform error.
 * @slidx: pointer of index of SAL error record
 * @peidx: pointer of index of processor error section
 * @pbci: pointer of pal_bus_check_info
 * @sos: pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	1 on Success / 0 on Failure
 *
 * Only bus-check "read"-type transactions (partial/full-line/IO reads)
 * and pure cache errors are considered recoverable; everything else
 * falls through with status 0.
 */
static int
recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx,
			    pal_bus_check_info_t *pbci,
			    struct ia64_sal_os_state *sos)
{
	int status = 0;
	pal_processor_state_info_t *psp =
		(pal_processor_state_info_t*)peidx_psp(peidx);

	if (psp->bc && pbci->eb && pbci->bsi == 0) {
		switch(pbci->type) {
		case 1: /* partial read */
		case 3: /* full line(cpu) read */
		case 9: /* I/O space read */
			status = recover_from_read_error(slidx, peidx, pbci,
							 sos);
			break;
		case 0: /* unknown */
		case 2: /* partial write */
		case 4: /* full line write */
		case 5: /* implicit or explicit write-back operation */
		case 6: /* snoop probe */
		case 7: /* incoming or outgoing ptc.g */
		case 8: /* write coalescing transactions */
		case 10: /* I/O space write */
		case 11: /* inter-processor interrupt message(IPI) */
		case 12: /* interrupt acknowledge or
				external task priority cycle */
		default:
			break;
		}
	} else if (psp->cc && !psp->bc) {	/* Cache error */
		status = recover_from_read_error(slidx, peidx, pbci, sos);
	}

	return status;
}
/*
 * recover_from_tlb_check
 * @peidx:	pointer of index of processor error section
 *
 * Return value:
 *	1 on Success / 0 on Failure
 *
 * Inspects the first TLB check entry; a purge with no TR/TC bits set is
 * the signature of a duplicate DTC entry (a software bug) and is fatal.
 * Anything else is considered recovered by hardware/firmware.
 */
static int
recover_from_tlb_check(peidx_table_t *peidx)
{
	sal_log_mod_error_info_t *smei;
	pal_tlb_check_info_t *ptci;

	smei = (sal_log_mod_error_info_t *)peidx_tlb_check(peidx, 0);
	ptci = (pal_tlb_check_info_t *)&(smei->check_info);

	/*
	 * Look for signature of a duplicate TLB DTC entry, which is
	 * a SW bug and always fatal.
	 */
	if (ptci->op == PAL_TLB_CHECK_OP_PURGE
	    && !(ptci->itr || ptci->dtc || ptci->itc))
		return fatal_mca("Duplicate TLB entry");

	return mca_recovered("TLB check recovered");
}
/**
 * recover_from_processor_error
 * @platform:	whether there are some platform error section or not
 * @slidx:	pointer of index of SAL error record
 * @peidx:	pointer of index of processor error section
 * @pbci:	pointer of pal_bus_check_info
 * @sos:	pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	1 on Success / 0 on Failure
 */
static int
recover_from_processor_error(int platform, slidx_table_t *slidx,
			     peidx_table_t *peidx, pal_bus_check_info_t *pbci,
			     struct ia64_sal_os_state *sos)
{
	pal_processor_state_info_t *psp =
		(pal_processor_state_info_t*)peidx_psp(peidx);

	/*
	 * Processor recovery status must key off of the PAL recovery
	 * status in the Processor State Parameter.
	 */

	/*
	 * The machine check is corrected.
	 */
	if (psp->cm == 1)
		return mca_recovered("machine check is already corrected.");

	/*
	 * The error was not contained.  Software must be reset.
	 */
	if (psp->us || psp->ci == 0)
		return fatal_mca("error not contained");

	/*
	 * Look for recoverable TLB check
	 */
	if (psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc))
		return recover_from_tlb_check(peidx);

	/*
	 * The cache check and bus check bits have four possible states
	 *   cc bc
	 *    1  1	Memory error, attempt recovery
	 *    1  0	Cache error, attempt recovery
	 *    0  1	I/O error, attempt recovery
	 *    0  0	Other error type, not recovered
	 */
	if (psp->cc == 0 && (psp->bc == 0 || pbci == NULL))
		return fatal_mca("No cache or bus check");

	/*
	 * Cannot handle more than one bus check.
	 */
	if (peidx_bus_check_num(peidx) > 1)
		return fatal_mca("Too many bus checks");

	if (pbci->ib)
		return fatal_mca("Internal Bus error");
	if (pbci->eb && pbci->bsi > 0)
		return fatal_mca("External bus check fatal status");

	/*
	 * This is a local MCA and estimated as a recoverable error.
	 */
	if (platform)
		return recover_from_platform_error(slidx, peidx, pbci, sos);

	/*
	 * On account of strange SAL error record, we cannot recover.
	 */
	return fatal_mca("Strange SAL record");
}
/**
 * mca_try_to_recover - Try to recover from MCA
 * @rec:	pointer to a SAL error record
 * @sos:	pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	1 on Success / 0 on Failure
 *
 * Top-level recovery entry registered with ia64_reg_MCA_extension():
 * indexes the record, requires exactly one processor error section and a
 * local (non-global) MCA, then delegates to the processor-error path.
 */
static int
mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
{
	int platform_err;
	int n_proc_err;
	slidx_table_t slidx;
	peidx_table_t peidx;
	pal_bus_check_info_t pbci;

	/* Make index of SAL error record */
	platform_err = mca_make_slidx(rec, &slidx);

	/* Count processor error sections */
	n_proc_err = slidx_count(&slidx, proc_err);

	/* Now, OS can recover when there is one processor error section */
	if (n_proc_err > 1)
		return fatal_mca("Too Many Errors");
	else if (n_proc_err == 0)
		/* Weird SAL record ... We can't do anything */
		return fatal_mca("Weird SAL record");

	/* Make index of processor error section */
	mca_make_peidx((sal_log_processor_info_t*)
		slidx_first_entry(&slidx.proc_err)->hdr, &peidx);

	/* Extract Processor BUS_CHECK[0] */
	*((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0);

	/* Check whether MCA is global or not */
	if (is_mca_global(&peidx, &pbci, sos))
		return fatal_mca("global MCA");

	/* Try to recover a processor error */
	return recover_from_processor_error(platform_err, &slidx, &peidx,
					    &pbci, sos);
}
/*
* =============================================================================
*/
/*
 * Module init: preallocate the section-index pool, then register
 * mca_try_to_recover() as the external MCA handler. The pool is freed
 * again if registration fails.
 */
int __init mca_external_handler_init(void)
{
	if (init_record_index_pools() != 0)
		return -ENOMEM;

	/* register external mca handlers */
	if (ia64_reg_MCA_extension(mca_try_to_recover) != 0) {
		printk(KERN_ERR "ia64_reg_MCA_extension failed.\n");
		kfree(slidx_pool.buffer);
		return -EFAULT;
	}

	return 0;
}
/* Module exit: unregister the MCA extension and free the index pool. */
void __exit mca_external_handler_exit(void)
{
	/* unregister external mca handlers */
	ia64_unreg_MCA_extension();
	kfree(slidx_pool.buffer);
}
/* Module hooks, parameters and metadata. */
module_init(mca_external_handler_init);
module_exit(mca_external_handler_exit);

module_param(sal_rec_max, int, 0644);
MODULE_PARM_DESC(sal_rec_max, "Max size of SAL error record");

MODULE_DESCRIPTION("ia64 platform dependent mca handler driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
BrokenDev/kernel_samsung_msm8660-common | net/sunrpc/rpcb_clnt.c | 994 | 28229 | /*
* In-kernel rpcbind client supporting versions 2, 3, and 4 of the rpcbind
* protocol
*
* Based on RFC 1833: "Binding Protocols for ONC RPC Version 2" and
* RFC 3530: "Network File System (NFS) version 4 Protocol"
*
* Original: Gilles Quillard, Bull Open Source, 2005 <gilles.quillard@bull.net>
* Updated: Chuck Lever, Oracle Corporation, 2007 <chuck.lever@oracle.com>
*
* Descended from net/sunrpc/pmap_clnt.c,
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xprtsock.h>
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_BIND
#endif
#define RPCBIND_SOCK_PATHNAME "/var/run/rpcbind.sock"
#define RPCBIND_PROGRAM (100000u)
#define RPCBIND_PORT (111u)
#define RPCBVERS_2 (2u)
#define RPCBVERS_3 (3u)
#define RPCBVERS_4 (4u)
/*
 * rpcbind procedure numbers (per RFC 1833). Note the deliberate
 * aliases: GETADDR shares number 3 with GETPORT, and BCAST shares
 * number 5 with CALLIT.
 */
enum {
	RPCBPROC_NULL,
	RPCBPROC_SET,
	RPCBPROC_UNSET,
	RPCBPROC_GETPORT,
	RPCBPROC_GETADDR = 3,		/* alias for GETPORT */
	RPCBPROC_DUMP,
	RPCBPROC_CALLIT,
	RPCBPROC_BCAST = 5,		/* alias for CALLIT */
	RPCBPROC_GETTIME,
	RPCBPROC_UADDR2TADDR,
	RPCBPROC_TADDR2UADDR,
	RPCBPROC_GETVERSADDR,
	RPCBPROC_INDIRECT,
	RPCBPROC_GETADDRLIST,
	RPCBPROC_GETSTAT,
};
/*
* r_owner
*
* The "owner" is allowed to unset a service in the rpcbind database.
*
* For AF_LOCAL SET/UNSET requests, rpcbind treats this string as a
* UID which it maps to a local user name via a password lookup.
* In all other cases it is ignored.
*
* For SET/UNSET requests, user space provides a value, even for
* network requests, and GETADDR uses an empty string. We follow
* those precedents here.
*/
#define RPCB_OWNER_STRING "0"
#define RPCB_MAXOWNERLEN sizeof(RPCB_OWNER_STRING)
/*
* XDR data type sizes
*/
#define RPCB_program_sz (1)
#define RPCB_version_sz (1)
#define RPCB_protocol_sz (1)
#define RPCB_port_sz (1)
#define RPCB_boolean_sz (1)
#define RPCB_netid_sz (1 + XDR_QUADLEN(RPCBIND_MAXNETIDLEN))
#define RPCB_addr_sz (1 + XDR_QUADLEN(RPCBIND_MAXUADDRLEN))
#define RPCB_ownerstring_sz (1 + XDR_QUADLEN(RPCB_MAXOWNERLEN))
/*
* XDR argument and result sizes
*/
#define RPCB_mappingargs_sz (RPCB_program_sz + RPCB_version_sz + \
RPCB_protocol_sz + RPCB_port_sz)
#define RPCB_getaddrargs_sz (RPCB_program_sz + RPCB_version_sz + \
RPCB_netid_sz + RPCB_addr_sz + \
RPCB_ownerstring_sz)
#define RPCB_getportres_sz RPCB_port_sz
#define RPCB_setres_sz RPCB_boolean_sz
/*
* Note that RFC 1833 does not put any size restrictions on the
* address string returned by the remote rpcbind database.
*/
#define RPCB_getaddrres_sz RPCB_addr_sz
static void rpcb_getport_done(struct rpc_task *, void *);
static void rpcb_map_release(void *data);
static struct rpc_program rpcb_program;
static struct rpc_clnt * rpcb_local_clnt;
static struct rpc_clnt * rpcb_local_clnt4;
struct rpcbind_args {
struct rpc_xprt * r_xprt;
u32 r_prog;
u32 r_vers;
u32 r_prot;
unsigned short r_port;
const char * r_netid;
const char * r_addr;
const char * r_owner;
int r_status;
};
static struct rpc_procinfo rpcb_procedures2[];
static struct rpc_procinfo rpcb_procedures3[];
static struct rpc_procinfo rpcb_procedures4[];
struct rpcb_info {
u32 rpc_vers;
struct rpc_procinfo * rpc_proc;
};
static struct rpcb_info rpcb_next_version[];
static struct rpcb_info rpcb_next_version6[];
static const struct rpc_call_ops rpcb_getport_ops = {
.rpc_call_done = rpcb_getport_done,
.rpc_release = rpcb_map_release,
};
/*
 * Drop the "binding in progress" flag on @xprt, then release every task
 * sleeping on the transport's binding wait queue with @status.  The flag
 * is cleared first so a newly woken task can immediately retry the bind.
 */
static void rpcb_wake_rpcbind_waiters(struct rpc_xprt *xprt, int status)
{
	xprt_clear_binding(xprt);
	rpc_wake_up_status(&xprt->binding, status);
}
/*
 * rpc_release callback for an async rpcbind query: wake any tasks still
 * waiting on the transport's bind, then free the per-call state.
 * map->r_addr was kmalloc'd by rpc_sockaddr2uaddr() for rpcbind v3/v4
 * queries and is NULL for v2 (kfree(NULL) is a no-op).
 */
static void rpcb_map_release(void *data)
{
	struct rpcbind_args *map = data;

	rpcb_wake_rpcbind_waiters(map->r_xprt, map->r_status);
	xprt_put(map->r_xprt);		/* drop ref taken in rpcb_getport_async() */
	kfree(map->r_addr);
	kfree(map);
}
/*
* Returns zero on success, otherwise a negative errno value
* is returned.
*/
/*
 * Create the local rpcbind clients over the AF_LOCAL socket
 * RPCBIND_SOCK_PATHNAME.
 *
 * On success, rpcb_local_clnt (v2) is always set; rpcb_local_clnt4 is
 * set only if binding a v4 program to the same transport succeeded,
 * otherwise it stays NULL and v4 upcalls are disabled.
 *
 * Returns zero on success, otherwise a negative errno value
 * is returned.
 */
static int rpcb_create_local_unix(void)
{
	static const struct sockaddr_un rpcb_localaddr_rpcbind = {
		.sun_family		= AF_LOCAL,
		.sun_path		= RPCBIND_SOCK_PATHNAME,
	};
	struct rpc_create_args args = {
		.net		= &init_net,
		.protocol	= XPRT_TRANSPORT_LOCAL,
		.address	= (struct sockaddr *)&rpcb_localaddr_rpcbind,
		.addrsize	= sizeof(rpcb_localaddr_rpcbind),
		.servername	= "localhost",
		.program	= &rpcb_program,
		.version	= RPCBVERS_2,
		.authflavor	= RPC_AUTH_NULL,
	};
	struct rpc_clnt *clnt, *clnt4;
	int result = 0;

	/*
	 * Because we requested an RPC PING at transport creation time,
	 * this works only if the user space portmapper is rpcbind, and
	 * it's listening on AF_LOCAL on the named socket.
	 */
	clnt = rpc_create(&args);
	if (IS_ERR(clnt)) {
		dprintk("RPC: failed to create AF_LOCAL rpcbind "
			"client (errno %ld).\n", PTR_ERR(clnt));
		result = PTR_ERR(clnt);
		goto out;
	}

	/* Reuse the same transport for rpcbind v4 requests */
	clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4);
	if (IS_ERR(clnt4)) {
		dprintk("RPC: failed to bind second program to "
			"rpcbind v4 client (errno %ld).\n",
			PTR_ERR(clnt4));
		clnt4 = NULL;		/* v4 unavailable: not fatal */
	}

	/* Protected by rpcb_create_local_mutex */
	rpcb_local_clnt = clnt;
	rpcb_local_clnt4 = clnt4;

out:
	return result;
}
/*
* Returns zero on success, otherwise a negative errno value
* is returned.
*/
/*
 * Fallback when the AF_LOCAL socket is unavailable: create the local
 * rpcbind clients over TCP to 127.0.0.1:111.  NOPING is set because a
 * classic portmapper answers v2 but would fail a v4 ping.
 *
 * Returns zero on success, otherwise a negative errno value
 * is returned.
 */
static int rpcb_create_local_net(void)
{
	static const struct sockaddr_in rpcb_inaddr_loopback = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_LOOPBACK),
		.sin_port		= htons(RPCBIND_PORT),
	};
	struct rpc_create_args args = {
		.net		= &init_net,
		.protocol	= XPRT_TRANSPORT_TCP,
		.address	= (struct sockaddr *)&rpcb_inaddr_loopback,
		.addrsize	= sizeof(rpcb_inaddr_loopback),
		.servername	= "localhost",
		.program	= &rpcb_program,
		.version	= RPCBVERS_2,
		.authflavor	= RPC_AUTH_UNIX,
		.flags		= RPC_CLNT_CREATE_NOPING,
	};
	struct rpc_clnt *clnt, *clnt4;
	int result = 0;

	clnt = rpc_create(&args);
	if (IS_ERR(clnt)) {
		dprintk("RPC: failed to create local rpcbind "
			"client (errno %ld).\n", PTR_ERR(clnt));
		result = PTR_ERR(clnt);
		goto out;
	}

	/*
	 * This results in an RPC ping. On systems running portmapper,
	 * the v4 ping will fail. Proceed anyway, but disallow rpcb
	 * v4 upcalls.
	 */
	clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4);
	if (IS_ERR(clnt4)) {
		dprintk("RPC: failed to bind second program to "
			"rpcbind v4 client (errno %ld).\n",
			PTR_ERR(clnt4));
		clnt4 = NULL;		/* v4 unavailable: not fatal */
	}

	/* Protected by rpcb_create_local_mutex */
	rpcb_local_clnt = clnt;
	rpcb_local_clnt4 = clnt4;

out:
	return result;
}
/*
* Returns zero on success, otherwise a negative errno value
* is returned.
*/
static int rpcb_create_local(void)
{
static DEFINE_MUTEX(rpcb_create_local_mutex);
int result = 0;
if (rpcb_local_clnt)
return result;
mutex_lock(&rpcb_create_local_mutex);
if (rpcb_local_clnt)
goto out;
if (rpcb_create_local_unix() != 0)
result = rpcb_create_local_net();
out:
mutex_unlock(&rpcb_create_local_mutex);
return result;
}
/*
 * Build an RPC client for querying a remote host's rpcbind service.
 * The destination port in @srvaddr is overwritten with the well-known
 * rpcbind port; only AF_INET and AF_INET6 destinations are supported.
 */
static struct rpc_clnt *rpcb_create(char *hostname, struct sockaddr *srvaddr,
				    size_t salen, int proto, u32 version)
{
	struct rpc_create_args args = {
		.net		= &init_net,
		.protocol	= proto,
		.address	= srvaddr,
		.addrsize	= salen,
		.servername	= hostname,
		.program	= &rpcb_program,
		.version	= version,
		.authflavor	= RPC_AUTH_UNIX,
		.flags		= (RPC_CLNT_CREATE_NOPING |
					RPC_CLNT_CREATE_NONPRIVPORT),
	};

	/* Force the well-known rpcbind port onto the destination address */
	if (srvaddr->sa_family == AF_INET)
		((struct sockaddr_in *)srvaddr)->sin_port = htons(RPCBIND_PORT);
	else if (srvaddr->sa_family == AF_INET6)
		((struct sockaddr_in6 *)srvaddr)->sin6_port = htons(RPCBIND_PORT);
	else
		return ERR_PTR(-EAFNOSUPPORT);

	return rpc_create(&args);
}
/*
 * Issue a synchronous SET/UNSET call to the local rpcbind daemon.
 * The procedure's XDR decoder fills in an XDR boolean result; a FALSE
 * reply (daemon refused the registration) maps to -EACCES.
 */
static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
{
	int result, error;

	msg->rpc_resp = &result;

	error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN);
	if (error < 0) {
		dprintk("RPC: failed to contact local rpcbind "
			"server (errno %d).\n", -error);
		return error;
	}

	return result ? 0 : -EACCES;
}
/**
* rpcb_register - set or unset a port registration with the local rpcbind svc
* @prog: RPC program number to bind
* @vers: RPC version number to bind
* @prot: transport protocol to register
* @port: port value to register
*
* Returns zero if the registration request was dispatched successfully
* and the rpcbind daemon returned success. Otherwise, returns an errno
* value that reflects the nature of the error (request could not be
* dispatched, timed out, or rpcbind returned an error).
*
* RPC services invoke this function to advertise their contact
* information via the system's rpcbind daemon. RPC services
* invoke this function once for each [program, version, transport]
* tuple they wish to advertise.
*
* Callers may also unregister RPC services that are no longer
* available by setting the passed-in port to zero. This removes
* all registered transports for [program, version] from the local
* rpcbind database.
*
* This function uses rpcbind protocol version 2 to contact the
* local rpcbind daemon.
*
* Registration works over both AF_INET and AF_INET6, and services
* registered via this function are advertised as available for any
* address. If the local rpcbind daemon is listening on AF_INET6,
* services registered via this function will be advertised on
* IN6ADDR_ANY (ie available for all AF_INET and AF_INET6
* addresses).
*/
int rpcb_register(u32 prog, u32 vers, int prot, unsigned short port)
{
	struct rpcbind_args map = {
		.r_prog		= prog,
		.r_vers		= vers,
		.r_prot		= prot,
		.r_port		= port,
	};
	struct rpc_message msg = {
		.rpc_argp	= &map,
	};
	int error;

	/* Make sure the local rpcbind clients exist before upcalling */
	error = rpcb_create_local();
	if (error)
		return error;

	dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
		"rpcbind\n", (port ? "" : "un"),
		prog, vers, prot, port);

	/* A zero port requests removal of the registration */
	msg.rpc_proc = port ? &rpcb_procedures2[RPCBPROC_SET] :
			      &rpcb_procedures2[RPCBPROC_UNSET];

	return rpcb_register_call(rpcb_local_clnt, &msg);
}
/*
* Fill in AF_INET family-specific arguments to register
*/
static int rpcb_register_inet4(const struct sockaddr *sap,
struct rpc_message *msg)
{
const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
struct rpcbind_args *map = msg->rpc_argp;
unsigned short port = ntohs(sin->sin_port);
int result;
map->r_addr = rpc_sockaddr2uaddr(sap);
dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
"local rpcbind\n", (port ? "" : "un"),
map->r_prog, map->r_vers,
map->r_addr, map->r_netid);
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
if (port)
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
result = rpcb_register_call(rpcb_local_clnt4, msg);
kfree(map->r_addr);
return result;
}
/*
* Fill in AF_INET6 family-specific arguments to register
*/
static int rpcb_register_inet6(const struct sockaddr *sap,
struct rpc_message *msg)
{
const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
struct rpcbind_args *map = msg->rpc_argp;
unsigned short port = ntohs(sin6->sin6_port);
int result;
map->r_addr = rpc_sockaddr2uaddr(sap);
dprintk("RPC: %sregistering [%u, %u, %s, '%s'] with "
"local rpcbind\n", (port ? "" : "un"),
map->r_prog, map->r_vers,
map->r_addr, map->r_netid);
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
if (port)
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
result = rpcb_register_call(rpcb_local_clnt4, msg);
kfree(map->r_addr);
return result;
}
/*
 * Remove every registered transport for [program, version] via one
 * rpcbind v4 UNSET.  An empty universal address string tells rpcbind
 * to drop all protocol families at once.
 */
static int rpcb_unregister_all_protofamilies(struct rpc_message *msg)
{
	struct rpcbind_args *map = msg->rpc_argp;

	dprintk("RPC: unregistering [%u, %u, '%s'] with "
		"local rpcbind\n",
		map->r_prog, map->r_vers, map->r_netid);

	/* static string: rpcb_register_call() does not free r_addr here */
	map->r_addr = "";
	msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
	return rpcb_register_call(rpcb_local_clnt4, msg);
}
/**
* rpcb_v4_register - set or unset a port registration with the local rpcbind
* @program: RPC program number of service to (un)register
* @version: RPC version number of service to (un)register
* @address: address family, IP address, and port to (un)register
* @netid: netid of transport protocol to (un)register
*
* Returns zero if the registration request was dispatched successfully
* and the rpcbind daemon returned success. Otherwise, returns an errno
* value that reflects the nature of the error (request could not be
* dispatched, timed out, or rpcbind returned an error).
*
* RPC services invoke this function to advertise their contact
* information via the system's rpcbind daemon. RPC services
* invoke this function once for each [program, version, address,
* netid] tuple they wish to advertise.
*
* Callers may also unregister RPC services that are registered at a
* specific address by setting the port number in @address to zero.
* They may unregister all registered protocol families at once for
* a service by passing a NULL @address argument. If @netid is ""
* then all netids for [program, version, address] are unregistered.
*
* This function uses rpcbind protocol version 4 to contact the
* local rpcbind daemon. The local rpcbind daemon must support
* version 4 of the rpcbind protocol in order for these functions
* to register a service successfully.
*
* Supported netids include "udp" and "tcp" for UDP and TCP over
* IPv4, and "udp6" and "tcp6" for UDP and TCP over IPv6,
* respectively.
*
* The contents of @address determine the address family and the
* port to be registered. The usual practice is to pass INADDR_ANY
* as the raw address, but specifying a non-zero address is also
* supported by this API if the caller wishes to advertise an RPC
* service on a specific network interface.
*
* Note that passing in INADDR_ANY does not create the same service
* registration as IN6ADDR_ANY. The former advertises an RPC
* service on any IPv4 address, but not on IPv6. The latter
* advertises the service on all IPv4 and IPv6 addresses.
*/
int rpcb_v4_register(const u32 program, const u32 version,
		     const struct sockaddr *address, const char *netid)
{
	struct rpcbind_args map = {
		.r_prog		= program,
		.r_vers		= version,
		.r_netid	= netid,
		.r_owner	= RPCB_OWNER_STRING,
	};
	struct rpc_message msg = {
		.rpc_argp	= &map,
	};
	int error;

	error = rpcb_create_local();
	if (error)
		return error;

	/* v4 upcalls require the v4-capable local client */
	if (rpcb_local_clnt4 == NULL)
		return -EPROTONOSUPPORT;

	/* A NULL address means "unregister all protocol families" */
	if (address == NULL)
		return rpcb_unregister_all_protofamilies(&msg);

	if (address->sa_family == AF_INET)
		return rpcb_register_inet4(address, &msg);
	if (address->sa_family == AF_INET6)
		return rpcb_register_inet6(address, &msg);

	return -EAFNOSUPPORT;
}
/*
 * Kick off an asynchronous GETPORT/GETADDR query on @rpcb_clnt.
 * @map doubles as both argument and result buffer; rpcb_getport_ops
 * supplies the completion and release callbacks, which own @map's
 * lifetime from here on.
 */
static struct rpc_task *rpcb_call_async(struct rpc_clnt *rpcb_clnt, struct rpcbind_args *map, struct rpc_procinfo *proc)
{
	struct rpc_message msg = {
		.rpc_proc = proc,
		.rpc_argp = map,
		.rpc_resp = map,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = rpcb_clnt,
		.rpc_message = &msg,
		.callback_ops = &rpcb_getport_ops,
		.callback_data = map,
		.flags = RPC_TASK_ASYNC | RPC_TASK_SOFTCONN,
	};

	return rpc_run_task(&task_setup_data);
}
/*
* In the case where rpc clients have been cloned, we want to make
* sure that we use the program number/version etc of the actual
* owner of the xprt. To do so, we walk back up the tree of parents
* to find whoever created the transport and/or whoever has the
* autobind flag set.
*/
/*
 * In the case where rpc clients have been cloned, we want to make
 * sure that we use the program number/version etc of the actual
 * owner of the xprt. To do so, we walk back up the tree of parents
 * to find whoever created the transport and/or whoever has the
 * autobind flag set.
 */
static struct rpc_clnt *rpcb_find_transport_owner(struct rpc_clnt *clnt)
{
	struct rpc_clnt *owner = clnt;

	for (;;) {
		struct rpc_clnt *parent = owner->cl_parent;

		if (parent == owner)
			break;	/* reached the root of the clone tree */
		if (parent->cl_xprt != owner->cl_xprt)
			break;	/* parent uses a different transport */
		if (owner->cl_autobind)
			break;	/* this client handles its own binding */
		owner = parent;
	}
	return owner;
}
/**
* rpcb_getport_async - obtain the port for a given RPC service on a given host
* @task: task that is waiting for portmapper request
*
* This one can be called for an ongoing RPC request, and can be used in
* an async (rpciod) context.
*/
void rpcb_getport_async(struct rpc_task *task)
{
	struct rpc_clnt *clnt;
	struct rpc_procinfo *proc;
	u32 bind_version;
	struct rpc_xprt *xprt;
	struct rpc_clnt *rpcb_clnt;
	struct rpcbind_args *map;
	struct rpc_task *child;
	struct sockaddr_storage addr;
	struct sockaddr *sap = (struct sockaddr *)&addr;
	size_t salen;
	int status;

	/* Use the program/version of whoever actually owns the transport */
	clnt = rpcb_find_transport_owner(task->tk_client);
	xprt = clnt->cl_xprt;

	dprintk("RPC: %5u %s(%s, %u, %u, %d)\n",
		task->tk_pid, __func__,
		clnt->cl_server, clnt->cl_prog, clnt->cl_vers, xprt->prot);

	/* Put self on the wait queue to ensure we get notified if
	 * some other task is already attempting to bind the port */
	rpc_sleep_on(&xprt->binding, task, NULL);

	if (xprt_test_and_set_binding(xprt)) {
		/* Another task owns the bind; we stay queued until woken */
		dprintk("RPC: %5u %s: waiting for another binder\n",
			task->tk_pid, __func__);
		return;
	}

	/* Someone else may have bound if we slept */
	if (xprt_bound(xprt)) {
		status = 0;
		dprintk("RPC: %5u %s: already bound\n",
			task->tk_pid, __func__);
		goto bailout_nofree;
	}

	/* Parent transport's destination address */
	salen = rpc_peeraddr(clnt, sap, sizeof(addr));

	/* Don't ever use rpcbind v2 for AF_INET6 requests */
	switch (sap->sa_family) {
	case AF_INET:
		/* bind_index walks the version list on repeated attempts */
		proc = rpcb_next_version[xprt->bind_index].rpc_proc;
		bind_version = rpcb_next_version[xprt->bind_index].rpc_vers;
		break;
	case AF_INET6:
		proc = rpcb_next_version6[xprt->bind_index].rpc_proc;
		bind_version = rpcb_next_version6[xprt->bind_index].rpc_vers;
		break;
	default:
		status = -EAFNOSUPPORT;
		dprintk("RPC: %5u %s: bad address family\n",
			task->tk_pid, __func__);
		goto bailout_nofree;
	}
	if (proc == NULL) {
		/* Ran off the end of the version list: reset for next time */
		xprt->bind_index = 0;
		status = -EPFNOSUPPORT;
		dprintk("RPC: %5u %s: no more getport versions available\n",
			task->tk_pid, __func__);
		goto bailout_nofree;
	}

	dprintk("RPC: %5u %s: trying rpcbind version %u\n",
		task->tk_pid, __func__, bind_version);

	rpcb_clnt = rpcb_create(clnt->cl_server, sap, salen, xprt->prot,
				bind_version);
	if (IS_ERR(rpcb_clnt)) {
		status = PTR_ERR(rpcb_clnt);
		dprintk("RPC: %5u %s: rpcb_create failed, error %ld\n",
			task->tk_pid, __func__, PTR_ERR(rpcb_clnt));
		goto bailout_nofree;
	}

	/* GFP_ATOMIC: may be called from rpciod softirq-ish context —
	 * TODO confirm; sleeping allocation would be unsafe here */
	map = kzalloc(sizeof(struct rpcbind_args), GFP_ATOMIC);
	if (!map) {
		status = -ENOMEM;
		dprintk("RPC: %5u %s: no memory available\n",
			task->tk_pid, __func__);
		goto bailout_release_client;
	}
	map->r_prog = clnt->cl_prog;
	map->r_vers = clnt->cl_vers;
	map->r_prot = xprt->prot;
	map->r_port = 0;
	map->r_xprt = xprt_get(xprt);	/* released in rpcb_map_release() */
	map->r_status = -EIO;

	switch (bind_version) {
	case RPCBVERS_4:
	case RPCBVERS_3:
		/* v3/v4 queries carry netid + universal address + owner */
		map->r_netid = rpc_peeraddr2str(clnt, RPC_DISPLAY_NETID);
		map->r_addr = rpc_sockaddr2uaddr(sap);
		map->r_owner = "";
		break;
	case RPCBVERS_2:
		map->r_addr = NULL;	/* v2 uses prog/vers/prot/port only */
		break;
	default:
		BUG();
	}

	child = rpcb_call_async(rpcb_clnt, map, proc);
	rpc_release_client(rpcb_clnt);	/* child task holds its own ref */
	if (IS_ERR(child)) {
		/* rpcb_map_release() has freed the arguments */
		dprintk("RPC: %5u %s: rpc_run_task failed\n",
			task->tk_pid, __func__);
		return;
	}

	xprt->stat.bind_count++;
	rpc_put_task(child);
	return;

bailout_release_client:
	rpc_release_client(rpcb_clnt);
bailout_nofree:
	rpcb_wake_rpcbind_waiters(xprt, status);
	task->tk_status = status;
}
EXPORT_SYMBOL_GPL(rpcb_getport_async);
/*
* Rpcbind child task calls this callback via tk_exit.
*/
/*
 * Rpcbind child task calls this callback via tk_exit.
 *
 * Translates the child's completion status into a bind result:
 * protocol failures advance bind_index so the next attempt tries a
 * lesser rpcbind version; a zero port means the service exists on the
 * host but isn't registered.  The final status is stashed in
 * map->r_status for rpcb_map_release() to hand to the waiters.
 */
static void rpcb_getport_done(struct rpc_task *child, void *data)
{
	struct rpcbind_args *map = data;
	struct rpc_xprt *xprt = map->r_xprt;
	int status = child->tk_status;

	/* Garbage reply: retry with a lesser rpcbind version */
	if (status == -EIO)
		status = -EPROTONOSUPPORT;

	/* rpcbind server doesn't support this rpcbind protocol version */
	if (status == -EPROTONOSUPPORT)
		xprt->bind_index++;

	if (status < 0) {
		/* rpcbind server not available on remote host? */
		xprt->ops->set_port(xprt, 0);
	} else if (map->r_port == 0) {
		/* Requested RPC service wasn't registered on remote host */
		xprt->ops->set_port(xprt, 0);
		status = -EACCES;
	} else {
		/* Succeeded */
		xprt->ops->set_port(xprt, map->r_port);
		xprt_set_bound(xprt);
		status = 0;
	}

	dprintk("RPC: %5u rpcb_getport_done(status %d, port %u)\n",
		child->tk_pid, status, map->r_port);

	map->r_status = status;
}
/*
* XDR functions for rpcbind
*/
/*
 * XDR-encode a v2 PMAP mapping: four 32-bit words
 * (program, version, protocol, port).
 */
static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
			     const struct rpcbind_args *rpcb)
{
	struct rpc_task *task = req->rq_task;
	__be32 *p;

	dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n",
		task->tk_pid, task->tk_msg.rpc_proc->p_name,
		rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port);

	/* reserve all four words at once (sz is in 32-bit units) */
	p = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2);
	*p++ = cpu_to_be32(rpcb->r_prog);
	*p++ = cpu_to_be32(rpcb->r_vers);
	*p++ = cpu_to_be32(rpcb->r_prot);
	*p   = cpu_to_be32(rpcb->r_port);
}
/*
 * XDR-decode a v2 GETPORT reply: one 32-bit word holding the port.
 * A value above USHRT_MAX is a malformed reply (-EIO); r_port is
 * pre-zeroed so a short/garbage reply reads as "not registered".
 */
static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
			    struct rpcbind_args *rpcb)
{
	struct rpc_task *task = req->rq_task;
	unsigned long port;
	__be32 *p;

	rpcb->r_port = 0;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return -EIO;

	port = be32_to_cpup(p);
	dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
		task->tk_msg.rpc_proc->p_name, port);
	if (unlikely(port > USHRT_MAX))
		return -EIO;

	rpcb->r_port = port;
	return 0;
}
/*
 * XDR-decode a SET/UNSET reply: a single XDR boolean.  Any non-zero
 * wire value counts as "true".
 */
static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr,
			unsigned int *boolp)
{
	struct rpc_task *task = req->rq_task;
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		return -EIO;

	*boolp = (*p != xdr_zero) ? 1 : 0;

	dprintk("RPC: %5u RPCB_%s call %s\n",
		task->tk_pid, task->tk_msg.rpc_proc->p_name,
		(*boolp ? "succeeded" : "failed"));
	return 0;
}
/*
 * Encode a counted XDR string (4-byte length + padded bytes).
 * All callers pass compile-time-bounded strings, so an over-long
 * string here is a programming error — hence BUG_ON, not -E.
 */
static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
			       const u32 maxstrlen)
{
	__be32 *p;
	u32 len;

	len = strlen(string);
	BUG_ON(len > maxstrlen);
	p = xdr_reserve_space(xdr, 4 + len);
	xdr_encode_opaque(p, string, len);
}
/*
 * XDR-encode a v3/v4 rpcb argument: program and version words followed
 * by three counted strings (netid, universal address, owner).
 */
static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
			     const struct rpcbind_args *rpcb)
{
	struct rpc_task *task = req->rq_task;
	__be32 *p;

	dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n",
		task->tk_pid, task->tk_msg.rpc_proc->p_name,
		rpcb->r_prog, rpcb->r_vers,
		rpcb->r_netid, rpcb->r_addr);

	p = xdr_reserve_space(xdr, (RPCB_program_sz + RPCB_version_sz) << 2);
	*p++ = cpu_to_be32(rpcb->r_prog);
	*p = cpu_to_be32(rpcb->r_vers);

	encode_rpcb_string(xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN);
	encode_rpcb_string(xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN);
	encode_rpcb_string(xdr, rpcb->r_owner, RPCB_MAXOWNERLEN);
}
/*
 * XDR-decode a v3/v4 GETADDR reply: a counted universal-address string.
 * An empty string means the service is not registered (success with
 * r_port == 0); an oversized or unparseable address is -EIO.
 */
static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
			    struct rpcbind_args *rpcb)
{
	struct sockaddr_storage address;
	struct sockaddr *sap = (struct sockaddr *)&address;
	struct rpc_task *task = req->rq_task;
	__be32 *p;
	u32 len;

	rpcb->r_port = 0;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(p == NULL))
		goto out_fail;
	len = be32_to_cpup(p);

	/*
	 * If the returned universal address is a null string,
	 * the requested RPC service was not registered.
	 */
	if (len == 0) {
		dprintk("RPC: %5u RPCB reply: program not registered\n",
			task->tk_pid);
		return 0;
	}

	if (unlikely(len > RPCBIND_MAXUADDRLEN))
		goto out_fail;

	p = xdr_inline_decode(xdr, len);
	if (unlikely(p == NULL))
		goto out_fail;
	dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid,
		task->tk_msg.rpc_proc->p_name, (char *)p);

	/* parse "h1.h2.h3.h4.p1.p2"-style uaddr back into a sockaddr */
	if (rpc_uaddr2sockaddr((char *)p, len, sap, sizeof(address)) == 0)
		goto out_fail;
	rpcb->r_port = rpc_get_port(sap);

	return 0;

out_fail:
	dprintk("RPC: %5u malformed RPCB_%s reply\n",
		task->tk_pid, task->tk_msg.rpc_proc->p_name);
	return -EIO;
}
/*
* Not all rpcbind procedures described in RFC 1833 are implemented
* since the Linux kernel RPC code requires only these.
*/
/* rpcbind v2 (classic portmapper) procedures: flat prog/vers/prot/port
 * mappings encoded by rpcb_enc_mapping(). */
static struct rpc_procinfo rpcb_procedures2[] = {
	[RPCBPROC_SET] = {
		.p_proc		= RPCBPROC_SET,
		.p_encode	= (kxdreproc_t)rpcb_enc_mapping,
		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
		.p_arglen	= RPCB_mappingargs_sz,
		.p_replen	= RPCB_setres_sz,
		.p_statidx	= RPCBPROC_SET,
		.p_timer	= 0,
		.p_name		= "SET",
	},
	[RPCBPROC_UNSET] = {
		.p_proc		= RPCBPROC_UNSET,
		.p_encode	= (kxdreproc_t)rpcb_enc_mapping,
		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
		.p_arglen	= RPCB_mappingargs_sz,
		.p_replen	= RPCB_setres_sz,
		.p_statidx	= RPCBPROC_UNSET,
		.p_timer	= 0,
		.p_name		= "UNSET",
	},
	[RPCBPROC_GETPORT] = {
		.p_proc		= RPCBPROC_GETPORT,
		.p_encode	= (kxdreproc_t)rpcb_enc_mapping,
		.p_decode	= (kxdrdproc_t)rpcb_dec_getport,
		.p_arglen	= RPCB_mappingargs_sz,
		.p_replen	= RPCB_getportres_sz,
		.p_statidx	= RPCBPROC_GETPORT,
		.p_timer	= 0,
		.p_name		= "GETPORT",
	},
};
/* rpcbind v3 procedures: netid/uaddr/owner string arguments encoded by
 * rpcb_enc_getaddr(); GETADDR replaces v2's GETPORT. */
static struct rpc_procinfo rpcb_procedures3[] = {
	[RPCBPROC_SET] = {
		.p_proc		= RPCBPROC_SET,
		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
		.p_arglen	= RPCB_getaddrargs_sz,
		.p_replen	= RPCB_setres_sz,
		.p_statidx	= RPCBPROC_SET,
		.p_timer	= 0,
		.p_name		= "SET",
	},
	[RPCBPROC_UNSET] = {
		.p_proc		= RPCBPROC_UNSET,
		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
		.p_arglen	= RPCB_getaddrargs_sz,
		.p_replen	= RPCB_setres_sz,
		.p_statidx	= RPCBPROC_UNSET,
		.p_timer	= 0,
		.p_name		= "UNSET",
	},
	[RPCBPROC_GETADDR] = {
		.p_proc		= RPCBPROC_GETADDR,
		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
		.p_decode	= (kxdrdproc_t)rpcb_dec_getaddr,
		.p_arglen	= RPCB_getaddrargs_sz,
		.p_replen	= RPCB_getaddrres_sz,
		.p_statidx	= RPCBPROC_GETADDR,
		.p_timer	= 0,
		.p_name		= "GETADDR",
	},
};
/* rpcbind v4 procedures: same wire format as v3 for the subset the
 * kernel uses (SET/UNSET/GETADDR). */
static struct rpc_procinfo rpcb_procedures4[] = {
	[RPCBPROC_SET] = {
		.p_proc		= RPCBPROC_SET,
		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
		.p_arglen	= RPCB_getaddrargs_sz,
		.p_replen	= RPCB_setres_sz,
		.p_statidx	= RPCBPROC_SET,
		.p_timer	= 0,
		.p_name		= "SET",
	},
	[RPCBPROC_UNSET] = {
		.p_proc		= RPCBPROC_UNSET,
		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
		.p_arglen	= RPCB_getaddrargs_sz,
		.p_replen	= RPCB_setres_sz,
		.p_statidx	= RPCBPROC_UNSET,
		.p_timer	= 0,
		.p_name		= "UNSET",
	},
	[RPCBPROC_GETADDR] = {
		.p_proc		= RPCBPROC_GETADDR,
		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
		.p_decode	= (kxdrdproc_t)rpcb_dec_getaddr,
		.p_arglen	= RPCB_getaddrargs_sz,
		.p_replen	= RPCB_getaddrres_sz,
		.p_statidx	= RPCBPROC_GETADDR,
		.p_timer	= 0,
		.p_name		= "GETADDR",
	},
};
/*
 * Version-fallback tables indexed by xprt->bind_index; a NULL rpc_proc
 * terminates the list (see rpcb_getport_async()).  AF_INET only ever
 * uses v2 GETPORT; AF_INET6 tries v4 then v3, never v2.
 */
static struct rpcb_info rpcb_next_version[] = {
	{
		.rpc_vers	= RPCBVERS_2,
		.rpc_proc	= &rpcb_procedures2[RPCBPROC_GETPORT],
	},
	{
		.rpc_proc	= NULL,		/* end of list */
	},
};

static struct rpcb_info rpcb_next_version6[] = {
	{
		.rpc_vers	= RPCBVERS_4,
		.rpc_proc	= &rpcb_procedures4[RPCBPROC_GETADDR],
	},
	{
		.rpc_vers	= RPCBVERS_3,
		.rpc_proc	= &rpcb_procedures3[RPCBPROC_GETADDR],
	},
	{
		.rpc_proc	= NULL,		/* end of list */
	},
};
/* Per-version descriptors tying version numbers to procedure tables */
static struct rpc_version rpcb_version2 = {
	.number		= RPCBVERS_2,
	.nrprocs	= ARRAY_SIZE(rpcb_procedures2),
	.procs		= rpcb_procedures2
};

static struct rpc_version rpcb_version3 = {
	.number		= RPCBVERS_3,
	.nrprocs	= ARRAY_SIZE(rpcb_procedures3),
	.procs		= rpcb_procedures3
};

static struct rpc_version rpcb_version4 = {
	.number		= RPCBVERS_4,
	.nrprocs	= ARRAY_SIZE(rpcb_procedures4),
	.procs		= rpcb_procedures4
};

/* Indexed by version number; slots 0 and 1 are unused */
static struct rpc_version *rpcb_version[] = {
	NULL,
	NULL,
	&rpcb_version2,
	&rpcb_version3,
	&rpcb_version4
};

static struct rpc_stat rpcb_stats;

/* Top-level RPC program descriptor for rpcbind (program 100000) */
static struct rpc_program rpcb_program = {
	.name		= "rpcbind",
	.number		= RPCBIND_PROGRAM,
	.nrvers		= ARRAY_SIZE(rpcb_version),
	.version	= rpcb_version,
	.stats		= &rpcb_stats,
};
/**
* cleanup_rpcb_clnt - remove xprtsock's sysctls, unregister
*
*/
void cleanup_rpcb_clnt(void)
{
	/* Shut down the v4 client first: it was bound on top of the
	 * v2 client's transport by rpc_bind_new_program(). */
	if (rpcb_local_clnt4)
		rpc_shutdown_client(rpcb_local_clnt4);
	if (rpcb_local_clnt)
		rpc_shutdown_client(rpcb_local_clnt);
}
| gpl-2.0 |
kevin78/linux-kevin | lib/decompress_inflate.c | 994 | 3880 | #ifdef STATIC
/* Pre-boot environment: included */
/* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots
* errors about console_printk etc... on ARM */
#define _LINUX_KERNEL_H
#include "zlib_inflate/inftrees.c"
#include "zlib_inflate/inffast.c"
#include "zlib_inflate/inflate.c"
#else /* STATIC */
/* initramfs et al: linked */
#include <linux/zutil.h>
#include "zlib_inflate/inftrees.h"
#include "zlib_inflate/inffast.h"
#include "zlib_inflate/inflate.h"
#include "zlib_inflate/infutil.h"
#include <linux/decompress/inflate.h>
#endif /* STATIC */
#include <linux/decompress/mm.h>
#define GZIP_IOBUF_SIZE (16*1024)
/* Default fill callback when the caller supplies none: always report
 * "no more input", so decompression works purely from the given buffer. */
static long INIT nofill(void *buffer, unsigned long len)
{
	return -1;
}
/* Included from initramfs et al code */
/*
 * Decompress a gzip stream.
 *
 * @buf/@len: input buffer, or NULL to read via @fill in GZIP_IOBUF_SIZE
 *            chunks into an internally allocated buffer.
 * @fill:     refills the input buffer; NULL means "input is complete".
 * @flush:    consumes output; if non-NULL a 32K bounce buffer is
 *            allocated and @out_buf is ignored, otherwise output goes
 *            straight to @out_buf with no bound checking beyond
 *            address-space wraparound.
 * @pos:      if non-NULL, receives the number of input bytes consumed
 *            (including the 8-byte gzip trailer).
 * @error:    diagnostic callback; does not terminate execution here.
 *
 * Returns 0 on success, nonzero (-1 or a zlib error) on failure.
 */
STATIC int INIT gunzip(unsigned char *buf, long len,
		       long (*fill)(void*, unsigned long),
		       long (*flush)(void*, unsigned long),
		       unsigned char *out_buf,
		       long *pos,
		       void(*error)(char *x)) {
	u8 *zbuf;
	struct z_stream_s *strm;
	int rc;
	size_t out_len;

	rc = -1;
	if (flush) {
		out_len = 0x8000; /* 32 K */
		out_buf = malloc(out_len);
	} else {
		out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
	}
	if (!out_buf) {
		error("Out of memory while allocating output buffer");
		goto gunzip_nomem1;
	}

	if (buf)
		zbuf = buf;
	else {
		zbuf = malloc(GZIP_IOBUF_SIZE);
		len = 0;	/* force an initial fill() below */
	}
	if (!zbuf) {
		error("Out of memory while allocating input buffer");
		goto gunzip_nomem2;
	}

	strm = malloc(sizeof(*strm));
	if (strm == NULL) {
		error("Out of memory while allocating z_stream");
		goto gunzip_nomem3;
	}

	/* Streaming (flush) mode needs the full window; one-shot mode can
	 * use the output buffer itself as the window. */
	strm->workspace = malloc(flush ? zlib_inflate_workspacesize() :
				 sizeof(struct inflate_state));
	if (strm->workspace == NULL) {
		error("Out of memory while allocating workspace");
		goto gunzip_nomem4;
	}

	if (!fill)
		fill = nofill;

	if (len == 0)
		len = fill(zbuf, GZIP_IOBUF_SIZE);

	/* verify the gzip header */
	if (len < 10 ||
	   zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 0x08) {
		if (pos)
			*pos = 0;
		error("Not a gzip file");
		goto gunzip_5;
	}

	/* skip over gzip header (1f,8b,08... 10 bytes total +
	 * possible asciz filename)
	 */
	strm->next_in = zbuf + 10;
	strm->avail_in = len - 10;
	/* skip over asciz filename */
	if (zbuf[3] & 0x8) {
		do {
			/*
			 * If the filename doesn't fit into the buffer,
			 * the file is very probably corrupt. Don't try
			 * to read more data.
			 */
			if (strm->avail_in == 0) {
				error("header error");
				goto gunzip_5;
			}
			--strm->avail_in;
		} while (*strm->next_in++);
	}

	strm->next_out = out_buf;
	strm->avail_out = out_len;

	/* raw deflate: the gzip wrapper was skipped by hand above */
	rc = zlib_inflateInit2(strm, -MAX_WBITS);
	if (!flush) {
		/* one-shot mode: inflate writes history into out_buf itself */
		WS(strm)->inflate_state.wsize = 0;
		WS(strm)->inflate_state.window = NULL;
	}

	while (rc == Z_OK) {
		if (strm->avail_in == 0) {
			/* TODO: handle case where both pos and fill are set */
			len = fill(zbuf, GZIP_IOBUF_SIZE);
			if (len < 0) {
				rc = -1;
				error("read error");
				break;
			}
			strm->next_in = zbuf;
			strm->avail_in = len;
		}
		rc = zlib_inflate(strm, 0);

		/* Write any data generated */
		if (flush && strm->next_out > out_buf) {
			long l = strm->next_out - out_buf;
			if (l != flush(out_buf, l)) {
				rc = -1;
				error("write error");
				break;
			}
			strm->next_out = out_buf;
			strm->avail_out = out_len;
		}

		/* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
		if (rc == Z_STREAM_END) {
			rc = 0;
			break;
		} else if (rc != Z_OK) {
			error("uncompression error");
			rc = -1;
		}
	}

	zlib_inflateEnd(strm);
	if (pos)
		/* add + 8 to skip over trailer */
		*pos = strm->next_in - zbuf+8;

	/* unified cleanup: free in reverse order of allocation */
gunzip_5:
	free(strm->workspace);
gunzip_nomem4:
	free(strm);
gunzip_nomem3:
	if (!buf)
		free(zbuf);
gunzip_nomem2:
	if (flush)
		free(out_buf);
gunzip_nomem1:
	return rc; /* returns Z_OK (0) if successful */
}
#define decompress gunzip
| gpl-2.0 |
btolfa/kernel_tion_pro28 | drivers/serial/cpm_uart/cpm_uart_cpm1.c | 1250 | 4085 | /*
* linux/drivers/serial/cpm_uart.c
*
* Driver for CPM (SCC/SMC) serial ports; CPM1 definitions
*
* Maintainer: Kumar Gala (galak@kernel.crashing.org) (CPM2)
* Pantelis Antoniou (panto@intracom.gr) (CPM1)
*
* Copyright (C) 2004 Freescale Semiconductor, Inc.
* (C) 2004 Intracom, S.A.
* (C) 2006 MontaVista Software, Inc.
* Vitaly Bordug <vbordug@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/gfp.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/fs_pd.h>
#include <linux/serial_core.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include "cpm_uart.h"
/**************************************************************/
/* Issue a CP command (e.g. INIT RX/TX) for this serial port's channel */
void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd)
{
	cpm_command(port->command, cmd);
}
/* Map the port's parameter RAM: resource index 1 of the device node.
 * Returns an __iomem cookie, or NULL on mapping failure. */
void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port,
				struct device_node *np)
{
	return of_iomap(np, 1);
}
/* Undo cpm_uart_map_pram() */
void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram)
{
	iounmap(pram);
}
/*
* Allocate DP-Ram and memory buffers. We need to allocate a transmit and
* receive buffer descriptors from dual port ram, and a character
* buffer area from host mem. If we are allocating for the console we need
* to do it from bootmem
*/
int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con)
{
	int dpmemsz, memsz;
	u8 *dp_mem;
	unsigned long dp_offset;
	u8 *mem_addr;
	dma_addr_t dma_addr = 0;

	pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line);

	/* One buffer descriptor per RX and TX FIFO entry, from dual-port RAM */
	dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos);
	dp_offset = cpm_dpalloc(dpmemsz, 8);
	if (IS_ERR_VALUE(dp_offset)) {
		printk(KERN_ERR
		       "cpm_uart_cpm1.c: could not allocate buffer descriptors\n");
		return -ENOMEM;
	}
	dp_mem = cpm_dpram_addr(dp_offset);

	/* Character buffers, cache-line aligned per direction */
	memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
		L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
	if (is_con) {
		/* was hostalloc but changed cause it blows away the */
		/* large tlb mapping when pinning the kernel area */
		/* NOTE(review): this nested cpm_dpalloc() result is not
		 * error-checked before use — confirm it cannot fail for
		 * console-sized allocations */
		mem_addr = (u8 *) cpm_dpram_addr(cpm_dpalloc(memsz, 8));
		dma_addr = (u32)cpm_dpram_phys(mem_addr);
	} else
		mem_addr = dma_alloc_coherent(pinfo->port.dev, memsz, &dma_addr,
					      GFP_KERNEL);

	if (mem_addr == NULL) {
		cpm_dpfree(dp_offset);	/* roll back the BD allocation */
		printk(KERN_ERR
		       "cpm_uart_cpm1.c: could not allocate coherent memory\n");
		return -ENOMEM;
	}

	pinfo->dp_addr = dp_offset;
	pinfo->mem_addr = mem_addr;	/* virtual address*/
	pinfo->dma_addr = dma_addr;	/* physical address*/
	pinfo->mem_size = memsz;

	/* RX buffers first, TX buffers follow at the next cache line */
	pinfo->rx_buf = mem_addr;
	pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos
						       * pinfo->rx_fifosize);

	pinfo->rx_bd_base = (cbd_t __iomem __force *)dp_mem;
	pinfo->tx_bd_base = pinfo->rx_bd_base + pinfo->rx_nrfifos;

	return 0;
}
void cpm_uart_freebuf(struct uart_cpm_port *pinfo)
{
dma_free_coherent(pinfo->port.dev, L1_CACHE_ALIGN(pinfo->rx_nrfifos *
pinfo->rx_fifosize) +
L1_CACHE_ALIGN(pinfo->tx_nrfifos *
pinfo->tx_fifosize), pinfo->mem_addr,
pinfo->dma_addr);
cpm_dpfree(pinfo->dp_addr);
}
| gpl-2.0 |
Silentlys/android_kernel_xiaomi_redmi2 | arch/arm/mach-omap2/omap_device.c | 2018 | 23538 | /*
* omap_device implementation
*
* Copyright (C) 2009-2010 Nokia Corporation
* Paul Walmsley, Kevin Hilman
*
* Developed in collaboration with (alphabetical order): Benoit
* Cousson, Thara Gopinath, Tony Lindgren, Rajendra Nayak, Vikram
* Pandita, Sakari Poussa, Anand Sawant, Santosh Shilimkar, Richard
* Woodruff
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This code provides a consistent interface for OMAP device drivers
* to control power management and interconnect properties of their
* devices.
*
* In the medium- to long-term, this code should be implemented as a
* proper omap_bus/omap_device in Linux, no more platform_data func
* pointers
*
*
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/notifier.h>
#include "soc.h"
#include "omap_device.h"
#include "omap_hwmod.h"
/* Private functions */
/*
 * _add_clkdev - register clkdev alias <dev_name(pdev), @clk_alias> -> @clk_name
 *
 * Does nothing if either name is NULL or if the alias already exists;
 * lookup and allocation failures are only logged, never propagated.
 */
static void _add_clkdev(struct omap_device *od, const char *clk_alias,
			const char *clk_name)
{
	struct clk *clk;
	struct clk_lookup *lookup;

	if (!clk_alias || !clk_name)
		return;

	dev_dbg(&od->pdev->dev, "Creating %s -> %s\n", clk_alias, clk_name);

	/* Nothing to do when somebody already registered this alias */
	clk = clk_get_sys(dev_name(&od->pdev->dev), clk_alias);
	if (!IS_ERR(clk)) {
		dev_warn(&od->pdev->dev,
			 "alias %s already exists\n", clk_alias);
		clk_put(clk);
		return;
	}

	clk = clk_get(NULL, clk_name);
	if (IS_ERR(clk)) {
		dev_err(&od->pdev->dev,
			"clk_get for %s failed\n", clk_name);
		return;
	}

	lookup = clkdev_alloc(clk, clk_alias, dev_name(&od->pdev->dev));
	if (!lookup) {
		dev_err(&od->pdev->dev,
			"clkdev_alloc for %s failed\n", clk_alias);
		return;
	}

	clkdev_add(lookup);
}
/**
* _add_hwmod_clocks_clkdev - Add clkdev entry for hwmod optional clocks
* and main clock
* @od: struct omap_device *od
* @oh: struct omap_hwmod *oh
*
* For the main clock and every optional clock present per hwmod per
* omap_device, this function adds an entry in the clkdev table of the
* form <dev-id=dev_name, con-id=role> if it does not exist already.
*
* The function is called from inside omap_device_build_ss(), after
* omap_device_register.
*
* This allows drivers to get a pointer to its optional clocks based on its role
* by calling clk_get(<dev*>, <role>).
* In the case of the main clock, a "fck" alias is used.
*
* No return value.
*/
static void _add_hwmod_clocks_clkdev(struct omap_device *od,
				     struct omap_hwmod *oh)
{
	int idx;

	/* "fck" is the conventional con-id for the main functional clock */
	_add_clkdev(od, "fck", oh->main_clk);

	/* ...then one alias per optional clock, keyed by its role */
	for (idx = 0; idx < oh->opt_clks_cnt; idx++)
		_add_clkdev(od, oh->opt_clks[idx].role, oh->opt_clks[idx].clk);
}
/**
* omap_device_build_from_dt - build an omap_device with multiple hwmods
* @pdev_name: name of the platform_device driver to use
* @pdev_id: this platform_device's connection ID
* @oh: ptr to the single omap_hwmod that backs this omap_device
* @pdata: platform_data ptr to associate with the platform_device
* @pdata_len: amount of memory pointed to by @pdata
*
* Function for building an omap_device already registered from device-tree
*
* Returns 0 or PTR_ERR() on error.
*/
/**
 * omap_device_build_from_dt - attach an omap_device to a DT platform_device
 * @pdev: platform_device already created from the device tree
 *
 * Resolves every hwmod named in the node's "ti,hwmods" property,
 * allocates an omap_device wrapping them, names any anonymous
 * resources after the device, honours "ti,no_idle_on_suspend" and
 * installs the omap_device PM domain.
 *
 * Returns 0 on success, -ENODEV when the node lists no hwmods, or a
 * negative error code on lookup/allocation failure.
 */
static int omap_device_build_from_dt(struct platform_device *pdev)
{
	struct omap_hwmod **hwmods;
	struct omap_device *od;
	struct omap_hwmod *oh;
	struct device_node *node = pdev->dev.of_node;
	const char *oh_name;
	int oh_cnt, i, ret = 0;

	oh_cnt = of_property_count_strings(node, "ti,hwmods");
	if (oh_cnt <= 0) {
		dev_dbg(&pdev->dev, "No 'hwmods' to build omap_device\n");
		return -ENODEV;
	}

	hwmods = kzalloc(sizeof(struct omap_hwmod *) * oh_cnt, GFP_KERNEL);
	if (!hwmods) {
		ret = -ENOMEM;
		goto odbfd_exit;
	}

	for (i = 0; i < oh_cnt; i++) {
		of_property_read_string_index(node, "ti,hwmods", i, &oh_name);
		oh = omap_hwmod_lookup(oh_name);
		if (!oh) {
			dev_err(&pdev->dev, "Cannot lookup hwmod '%s'\n",
				oh_name);
			ret = -EINVAL;
			goto odbfd_exit1;
		}
		hwmods[i] = oh;
	}

	od = omap_device_alloc(pdev, hwmods, oh_cnt);
	/*
	 * omap_device_alloc() returns ERR_PTR() on failure, never NULL:
	 * the previous "if (!od)" check could never fire, and
	 * PTR_ERR(NULL) would have turned the failure into ret == 0.
	 */
	if (IS_ERR(od)) {
		dev_err(&pdev->dev, "Cannot allocate omap_device for :%s\n",
			oh_name);
		ret = PTR_ERR(od);
		goto odbfd_exit1;
	}

	/* Fix up missing resource names */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *r = &pdev->resource[i];

		if (r->name == NULL)
			r->name = dev_name(&pdev->dev);
	}

	if (of_get_property(node, "ti,no_idle_on_suspend", NULL))
		omap_device_disable_idle_on_suspend(pdev);

	pdev->dev.pm_domain = &omap_device_pm_domain;

odbfd_exit1:
	/* od keeps its own copy of the hwmod array (kmemdup) */
	kfree(hwmods);
odbfd_exit:
	return ret;
}
/*
 * Platform-bus notifier: builds the omap_device when a DT-created
 * platform_device is added, tears it down on removal, and records the
 * most recent bus event in od->_driver_status (consulted by the
 * suspend and late-idle paths to see whether a driver is bound).
 */
static int _omap_device_notifier_call(struct notifier_block *nb,
				      unsigned long event, void *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_device *od;

	switch (event) {
	case BUS_NOTIFY_DEL_DEVICE:
		if (pdev->archdata.od)
			omap_device_delete(pdev->archdata.od);
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		if (pdev->dev.of_node)
			omap_device_build_from_dt(pdev);
		/* fall through: record ADD_DEVICE in _driver_status too */
	default:
		od = to_omap_device(pdev);
		if (od)
			od->_driver_status = event;
	}
	return NOTIFY_DONE;
}
/**
* _omap_device_enable_hwmods - call omap_hwmod_enable() on all hwmods
* @od: struct omap_device *od
*
* Enable all underlying hwmods. Returns 0.
*/
/* Enable every hwmod backing @od, in array order.  Always returns 0. */
static int _omap_device_enable_hwmods(struct omap_device *od)
{
	int idx = 0;

	while (idx < od->hwmods_cnt)
		omap_hwmod_enable(od->hwmods[idx++]);

	/* XXX pass along return value here? */
	return 0;
}
/**
* _omap_device_idle_hwmods - call omap_hwmod_idle() on all hwmods
* @od: struct omap_device *od
*
* Idle all underlying hwmods. Returns 0.
*/
/* Idle every hwmod backing @od, in array order.  Always returns 0. */
static int _omap_device_idle_hwmods(struct omap_device *od)
{
	int idx = 0;

	while (idx < od->hwmods_cnt)
		omap_hwmod_idle(od->hwmods[idx++]);

	/* XXX pass along return value here? */
	return 0;
}
/* Public functions for use by core code */
/**
* omap_device_get_context_loss_count - get lost context count
* @od: struct omap_device *
*
* Using the primary hwmod, query the context loss count for this
* device.
*
* Callers should consider context for this device lost any time this
* function returns a value different than the value the caller got
* the last time it called this function.
*
* If any hwmods exist for the omap_device assoiated with @pdev,
* return the context loss counter for that hwmod, otherwise return
* zero.
*/
int omap_device_get_context_loss_count(struct platform_device *pdev)
{
	struct omap_device *od = to_omap_device(pdev);

	/* No hwmods means no counter to consult: report zero losses */
	if (!od->hwmods_cnt)
		return 0;

	/* The primary (first) hwmod tracks context loss for the device */
	return omap_hwmod_get_context_loss_count(od->hwmods[0]);
}
/**
* omap_device_count_resources - count number of struct resource entries needed
* @od: struct omap_device *
* @flags: Type of resources to include when counting (IRQ/DMA/MEM)
*
* Count the number of struct resource entries needed for this
* omap_device @od. Used by omap_device_build_ss() to determine how
* much memory to allocate before calling
* omap_device_fill_resources(). Returns the count.
*/
static int omap_device_count_resources(struct omap_device *od,
				       unsigned long flags)
{
	int total = 0;
	int idx;

	/* Sum the per-hwmod counts for the resource types in @flags */
	for (idx = 0; idx < od->hwmods_cnt; idx++)
		total += omap_hwmod_count_resources(od->hwmods[idx], flags);

	pr_debug("omap_device: %s: counted %d total resources across %d hwmods\n",
		 od->pdev->name, total, od->hwmods_cnt);

	return total;
}
/**
* omap_device_fill_resources - fill in array of struct resource
* @od: struct omap_device *
* @res: pointer to an array of struct resource to be filled in
*
* Populate one or more empty struct resource pointed to by @res with
* the resource data for this omap_device @od. Used by
* omap_device_build_ss() after calling omap_device_count_resources().
* Ideally this function would not be needed at all. If omap_device
* replaces platform_device, then we can specify our own
* get_resource()/ get_irq()/etc functions that use the underlying
* omap_hwmod information. Or if platform_device is extended to use
* subarchitecture-specific function pointers, the various
* platform_device functions can simply call omap_device internal
* functions to get device resources. Hacking around the existing
* platform_device code wastes memory. Returns 0.
*/
static int omap_device_fill_resources(struct omap_device *od,
				      struct resource *res)
{
	int idx;

	/* Each hwmod appends its resources and reports how many it wrote */
	for (idx = 0; idx < od->hwmods_cnt; idx++)
		res += omap_hwmod_fill_resources(od->hwmods[idx], res);

	return 0;
}
/**
* _od_fill_dma_resources - fill in array of struct resource with dma resources
* @od: struct omap_device *
* @res: pointer to an array of struct resource to be filled in
*
* Populate one or more empty struct resource pointed to by @res with
* the dma resource data for this omap_device @od. Used by
* omap_device_alloc() after calling omap_device_count_resources().
*
* Ideally this function would not be needed at all. If we have
* mechanism to get dma resources from DT.
*
* Returns 0.
*/
static int _od_fill_dma_resources(struct omap_device *od,
				  struct resource *res)
{
	int idx;

	/* Append only the DMA resources of each hwmod, advancing @res */
	for (idx = 0; idx < od->hwmods_cnt; idx++)
		res += omap_hwmod_fill_dma_resources(od->hwmods[idx], res);

	return 0;
}
/**
* omap_device_alloc - allocate an omap_device
* @pdev: platform_device that will be included in this omap_device
* @oh: ptr to the single omap_hwmod that backs this omap_device
* @pdata: platform_data ptr to associate with the platform_device
* @pdata_len: amount of memory pointed to by @pdata
*
* Convenience function for allocating an omap_device structure and filling
* hwmods, and resources.
*
* Returns an struct omap_device pointer or ERR_PTR() on error;
*/
/**
 * omap_device_alloc - allocate an omap_device
 * @pdev: platform_device that will be included in this omap_device
 * @ohs: array of omap_hwmod pointers backing this device
 * @oh_cnt: number of entries in @ohs
 *
 * Allocates an omap_device, copies the hwmod array, fills in the
 * platform_device's resources from the hwmods (fully for non-DT boots,
 * DMA-only for DT boots that lack DMA resources), and links every
 * hwmod back to the new omap_device.
 *
 * Returns a struct omap_device pointer or ERR_PTR() on error.
 */
struct omap_device *omap_device_alloc(struct platform_device *pdev,
					struct omap_hwmod **ohs, int oh_cnt)
{
	int ret = -ENOMEM;
	struct omap_device *od;
	struct resource *res = NULL;
	int i, res_count;
	struct omap_hwmod **hwmods;

	od = kzalloc(sizeof(struct omap_device), GFP_KERNEL);
	if (!od) {
		ret = -ENOMEM;
		goto oda_exit1;
	}
	od->hwmods_cnt = oh_cnt;

	/* Private copy so the caller's array may be stack-allocated */
	hwmods = kmemdup(ohs, sizeof(struct omap_hwmod *) * oh_cnt, GFP_KERNEL);
	if (!hwmods)
		goto oda_exit2;

	od->hwmods = hwmods;
	od->pdev = pdev;

	/*
	 * Non-DT Boot:
	 *   Here, pdev->num_resources = 0, and we should get all the
	 *   resources from hwmod.
	 *
	 * DT Boot:
	 *   OF framework will construct the resource structure (currently
	 *   does for MEM & IRQ resource) and we should respect/use these
	 *   resources, killing hwmod dependency.
	 *   If pdev->num_resources > 0, we assume that MEM & IRQ resources
	 *   have been allocated by OF layer already (through DTB).
	 *   As preparation for the future we examine the OF provided resources
	 *   to see if we have DMA resources provided already. In this case
	 *   there is no need to update the resources for the device, we use the
	 *   OF provided ones.
	 *
	 * TODO: Once DMA resource is available from OF layer, we should
	 *   kill filling any resources from hwmod.
	 */
	if (!pdev->num_resources) {
		/* Count all resources for the device */
		res_count = omap_device_count_resources(od, IORESOURCE_IRQ |
							    IORESOURCE_DMA |
							    IORESOURCE_MEM);
	} else {
		/* Take a look if we already have DMA resource via DT */
		for (i = 0; i < pdev->num_resources; i++) {
			struct resource *r = &pdev->resource[i];

			/* We have it, no need to touch the resources */
			if (r->flags == IORESOURCE_DMA)
				goto have_everything;
		}
		/* Count only DMA resources for the device */
		res_count = omap_device_count_resources(od, IORESOURCE_DMA);
		/* The device has no DMA resource, no need for update */
		if (!res_count)
			goto have_everything;

		/* New array = OF-provided resources + hwmod DMA resources */
		res_count += pdev->num_resources;
	}

	/* Allocate resources memory to account for new resources */
	res = kzalloc(sizeof(struct resource) * res_count, GFP_KERNEL);
	if (!res)
		goto oda_exit3;

	if (!pdev->num_resources) {
		dev_dbg(&pdev->dev, "%s: using %d resources from hwmod\n",
			__func__, res_count);
		omap_device_fill_resources(od, res);
	} else {
		dev_dbg(&pdev->dev,
			"%s: appending %d DMA resources from hwmod\n",
			__func__, res_count - pdev->num_resources);
		memcpy(res, pdev->resource,
		       sizeof(struct resource) * pdev->num_resources);
		_od_fill_dma_resources(od, &res[pdev->num_resources]);
	}

	/* platform_device_add_resources() keeps its own copy of @res */
	ret = platform_device_add_resources(pdev, res, res_count);
	kfree(res);

	if (ret)
		goto oda_exit3;

have_everything:
	pdev->archdata.od = od;

	for (i = 0; i < oh_cnt; i++) {
		hwmods[i]->od = od;
		_add_hwmod_clocks_clkdev(od, hwmods[i]);
	}

	return od;

oda_exit3:
	kfree(hwmods);
oda_exit2:
	kfree(od);
oda_exit1:
	dev_err(&pdev->dev, "omap_device: build failed (%d)\n", ret);

	return ERR_PTR(ret);
}
/*
 * omap_device_delete - free an omap_device allocated by omap_device_alloc()
 *
 * Clears the platform_device's back-pointer and releases the hwmod
 * array and the omap_device itself.  NULL is tolerated.
 */
void omap_device_delete(struct omap_device *od)
{
	if (od) {
		od->pdev->archdata.od = NULL;
		kfree(od->hwmods);
		kfree(od);
	}
}
/**
* omap_device_build - build and register an omap_device with one omap_hwmod
* @pdev_name: name of the platform_device driver to use
* @pdev_id: this platform_device's connection ID
* @oh: ptr to the single omap_hwmod that backs this omap_device
* @pdata: platform_data ptr to associate with the platform_device
* @pdata_len: amount of memory pointed to by @pdata
*
* Convenience function for building and registering a single
* omap_device record, which in turn builds and registers a
* platform_device record. See omap_device_build_ss() for more
* information. Returns ERR_PTR(-EINVAL) if @oh is NULL; otherwise,
* passes along the return value of omap_device_build_ss().
*/
struct platform_device __init *omap_device_build(const char *pdev_name,
						 int pdev_id,
						 struct omap_hwmod *oh,
						 void *pdata, int pdata_len)
{
	/* Wrap the single hwmod and delegate to the subsystem builder */
	struct omap_hwmod *ohs[] = { oh };

	if (!oh)
		return ERR_PTR(-EINVAL);

	return omap_device_build_ss(pdev_name, pdev_id, ohs, ARRAY_SIZE(ohs),
				    pdata, pdata_len);
}
/**
* omap_device_build_ss - build and register an omap_device with multiple hwmods
* @pdev_name: name of the platform_device driver to use
* @pdev_id: this platform_device's connection ID
* @oh: ptr to the single omap_hwmod that backs this omap_device
* @pdata: platform_data ptr to associate with the platform_device
* @pdata_len: amount of memory pointed to by @pdata
*
* Convenience function for building and registering an omap_device
* subsystem record. Subsystem records consist of multiple
* omap_hwmods. This function in turn builds and registers a
* platform_device record. Returns an ERR_PTR() on error, or passes
* along the return value of omap_device_register().
*/
struct platform_device __init *omap_device_build_ss(const char *pdev_name,
						    int pdev_id,
						    struct omap_hwmod **ohs,
						    int oh_cnt, void *pdata,
						    int pdata_len)
{
	int ret = -ENOMEM;
	struct platform_device *pdev;
	struct omap_device *od;

	/* Validate arguments: need a name, at least one hwmod, and
	 * pdata whenever a non-zero pdata_len is given */
	if (!ohs || oh_cnt == 0 || !pdev_name)
		return ERR_PTR(-EINVAL);

	if (!pdata && pdata_len > 0)
		return ERR_PTR(-EINVAL);

	pdev = platform_device_alloc(pdev_name, pdev_id);
	if (!pdev) {
		ret = -ENOMEM;
		goto odbs_exit;
	}

	/* Set the dev_name early to allow dev_xxx in omap_device_alloc */
	if (pdev->id != -1)
		dev_set_name(&pdev->dev, "%s.%d", pdev->name,  pdev->id);
	else
		dev_set_name(&pdev->dev, "%s", pdev->name);

	od = omap_device_alloc(pdev, ohs, oh_cnt);
	if (IS_ERR(od))
		goto odbs_exit1;

	ret = platform_device_add_data(pdev, pdata, pdata_len);
	if (ret)
		goto odbs_exit2;

	ret = omap_device_register(pdev);
	if (ret)
		goto odbs_exit2;

	return pdev;

	/* Unwind in reverse order of construction */
odbs_exit2:
	omap_device_delete(od);
odbs_exit1:
	platform_device_put(pdev);
odbs_exit:

	pr_err("omap_device: %s: build failed (%d)\n", pdev_name, ret);

	return ERR_PTR(ret);
}
#ifdef CONFIG_PM_RUNTIME
/* Runtime-suspend callback: let the driver suspend first, then idle
 * the device's hwmods only if the driver suspended cleanly. */
static int _od_runtime_suspend(struct device *dev)
{
	int ret = pm_generic_runtime_suspend(dev);

	if (!ret)
		omap_device_idle(to_platform_device(dev));

	return ret;
}
/* Runtime-idle callback: defer entirely to the generic PM helper. */
static int _od_runtime_idle(struct device *dev)
{
	return pm_generic_runtime_idle(dev);
}
/* Runtime-resume callback: re-enable the hwmods before invoking the
 * driver's resume handler, so the device is accessible when it runs. */
static int _od_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	omap_device_enable(pdev);

	return pm_generic_runtime_resume(dev);
}
#endif
#ifdef CONFIG_SUSPEND
/*
 * Late (noirq) system-suspend hook: after the driver's own
 * suspend_noirq, runtime-suspend any device that is still active and
 * idle its hwmods, recording OMAP_DEVICE_SUSPENDED so that
 * _od_resume_noirq() knows to undo this on wakeup.
 */
static int _od_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_device *od = to_omap_device(pdev);
	int ret;

	/* Don't attempt late suspend on a driver that is not bound */
	if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER)
		return 0;

	ret = pm_generic_suspend_noirq(dev);

	if (!ret && !pm_runtime_status_suspended(dev)) {
		if (pm_generic_runtime_suspend(dev) == 0) {
			/* Boards may veto idling via ti,no_idle_on_suspend */
			if (!(od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND))
				omap_device_idle(pdev);
			od->flags |= OMAP_DEVICE_SUSPENDED;
		}
	}

	return ret;
}
/*
 * Early (noirq) system-resume hook: mirror of _od_suspend_noirq().
 * Re-enables and runtime-resumes only devices that this driver's
 * suspend path put down (OMAP_DEVICE_SUSPENDED), then runs the
 * generic resume_noirq.
 */
static int _od_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_device *od = to_omap_device(pdev);

	if ((od->flags & OMAP_DEVICE_SUSPENDED) &&
	    !pm_runtime_status_suspended(dev)) {
		od->flags &= ~OMAP_DEVICE_SUSPENDED;
		if (!(od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND))
			omap_device_enable(pdev);
		pm_generic_runtime_resume(dev);
	}

	return pm_generic_resume_noirq(dev);
}
#else
#define _od_suspend_noirq NULL
#define _od_resume_noirq NULL
#endif
/*
 * PM domain installed on every omap_device-backed platform_device:
 * routes runtime-PM through the _od_runtime_* wrappers and the
 * noirq system-sleep phases through _od_{suspend,resume}_noirq.
 */
struct dev_pm_domain omap_device_pm_domain = {
	.ops = {
		SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
				   _od_runtime_idle)
		USE_PLATFORM_PM_SLEEP_OPS
		.suspend_noirq = _od_suspend_noirq,
		.resume_noirq = _od_resume_noirq,
	}
};
/**
* omap_device_register - register an omap_device with one omap_hwmod
* @od: struct omap_device * to register
*
* Register the omap_device structure. This currently just calls
* platform_device_register() on the underlying platform_device.
* Returns the return value of platform_device_register().
*/
/**
 * omap_device_register - register an omap_device with one omap_hwmod
 * @pdev: the underlying platform_device to register
 *
 * Installs the omap_device PM domain on @pdev, then adds it to the
 * platform bus.  Returns the platform_device_add() result.
 */
int omap_device_register(struct platform_device *pdev)
{
	pr_debug("omap_device: %s: registering\n", pdev->name);

	pdev->dev.pm_domain = &omap_device_pm_domain;
	return platform_device_add(pdev);
}
/* Public functions for use by device drivers through struct platform_data */
/**
* omap_device_enable - fully activate an omap_device
* @od: struct omap_device * to activate
*
* Do whatever is necessary for the hwmods underlying omap_device @od
* to be accessible and ready to operate. This generally involves
* enabling clocks, setting SYSCONFIG registers; and in the future may
* involve remuxing pins. Device drivers should call this function
* indirectly via pm_runtime_get*(). Returns -EINVAL if called when
* the omap_device is already enabled, or passes along the return
* value of _omap_device_enable_hwmods().
*/
int omap_device_enable(struct platform_device *pdev)
{
	struct omap_device *od = to_omap_device(pdev);
	int ret;

	/* Enabling an already-enabled device is a caller bug */
	if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
		dev_warn(&pdev->dev,
			 "omap_device: %s() called from invalid state %d\n",
			 __func__, od->_state);
		return -EINVAL;
	}

	ret = _omap_device_enable_hwmods(od);

	od->_state = OMAP_DEVICE_STATE_ENABLED;

	return ret;
}
/**
* omap_device_idle - idle an omap_device
* @od: struct omap_device * to idle
*
* Idle omap_device @od. Device drivers call this function indirectly
* via pm_runtime_put*(). Returns -EINVAL if the omap_device is not
* currently enabled, or passes along the return value of
* _omap_device_idle_hwmods().
*/
int omap_device_idle(struct platform_device *pdev)
{
	struct omap_device *od = to_omap_device(pdev);
	int ret;

	/* Only an enabled device can be idled */
	if (od->_state != OMAP_DEVICE_STATE_ENABLED) {
		dev_warn(&pdev->dev,
			 "omap_device: %s() called from invalid state %d\n",
			 __func__, od->_state);
		return -EINVAL;
	}

	ret = _omap_device_idle_hwmods(od);

	od->_state = OMAP_DEVICE_STATE_IDLE;

	return ret;
}
/**
* omap_device_assert_hardreset - set a device's hardreset line
* @pdev: struct platform_device * to reset
* @name: const char * name of the reset line
*
* Set the hardreset line identified by @name on the IP blocks
* associated with the hwmods backing the platform_device @pdev. All
* of the hwmods associated with @pdev must have the same hardreset
* line linked to them for this to work. Passes along the return value
* of omap_hwmod_assert_hardreset() in the event of any failure, or
* returns 0 upon success.
*/
int omap_device_assert_hardreset(struct platform_device *pdev, const char *name)
{
	struct omap_device *od = to_omap_device(pdev);
	int i;

	/* Stop at the first hwmod that fails to assert its line */
	for (i = 0; i < od->hwmods_cnt; i++) {
		int ret = omap_hwmod_assert_hardreset(od->hwmods[i], name);

		if (ret)
			return ret;
	}

	return 0;
}
/**
* omap_device_deassert_hardreset - release a device's hardreset line
* @pdev: struct platform_device * to reset
* @name: const char * name of the reset line
*
* Release the hardreset line identified by @name on the IP blocks
* associated with the hwmods backing the platform_device @pdev. All
* of the hwmods associated with @pdev must have the same hardreset
* line linked to them for this to work. Passes along the return
* value of omap_hwmod_deassert_hardreset() in the event of any
* failure, or returns 0 upon success.
*/
int omap_device_deassert_hardreset(struct platform_device *pdev,
				   const char *name)
{
	struct omap_device *od = to_omap_device(pdev);
	int i;

	/* Stop at the first hwmod that fails to release its line */
	for (i = 0; i < od->hwmods_cnt; i++) {
		int ret = omap_hwmod_deassert_hardreset(od->hwmods[i], name);

		if (ret)
			return ret;
	}

	return 0;
}
/**
* omap_device_get_by_hwmod_name() - convert a hwmod name to
* device pointer.
* @oh_name: name of the hwmod device
*
* Returns back a struct device * pointer associated with a hwmod
* device represented by a hwmod_name
*/
struct device *omap_device_get_by_hwmod_name(const char *oh_name)
{
	struct omap_hwmod *oh;

	/* WARN() returns its condition, so each check both warns and bails */
	if (WARN(!oh_name, "%s: no hwmod name!\n", __func__))
		return ERR_PTR(-EINVAL);

	oh = omap_hwmod_lookup(oh_name);
	if (WARN(!oh, "%s: no hwmod for %s\n", __func__, oh_name))
		return ERR_PTR(-ENODEV);

	if (WARN(!oh->od, "%s: no omap_device for %s\n", __func__, oh_name))
		return ERR_PTR(-ENODEV);

	return &oh->od->pdev->dev;
}
/* Bus notifier that creates/destroys omap_devices as platform
 * devices come and go (see _omap_device_notifier_call()). */
static struct notifier_block platform_nb = {
	.notifier_call = _omap_device_notifier_call,
};

/* Register the platform-bus notifier early in boot. */
static int __init omap_device_init(void)
{
	bus_register_notifier(&platform_bus_type, &platform_nb);
	return 0;
}
omap_core_initcall(omap_device_init);
/**
* omap_device_late_idle - idle devices without drivers
* @dev: struct device * associated with omap_device
* @data: unused
*
* Check the driver bound status of this device, and idle it
* if there is no driver attached.
*/
/**
 * omap_device_late_idle - idle devices without drivers
 * @dev: struct device * associated with omap_device
 * @data: unused
 *
 * Late-boot sweep callback: a device that is still enabled but never
 * got a driver bound is just burning power, so idle it.
 */
static int __init omap_device_late_idle(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_device *od = to_omap_device(pdev);

	if (!od)
		return 0;

	if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER &&
	    od->_state == OMAP_DEVICE_STATE_ENABLED) {
		dev_warn(dev, "%s: enabled but no driver.  Idling\n",
			 __func__);
		omap_device_idle(pdev);
	}

	return 0;
}
/* Walk every platform device late in boot and idle the driverless ones. */
static int __init omap_device_late_init(void)
{
	bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle);
	return 0;
}
omap_late_initcall_sync(omap_device_late_init);
| gpl-2.0 |
maxwen/primou-kernel-HELLBOY | arch/arm/mach-cns3xxx/core.c | 2530 | 6505 | /*
* Copyright 1999 - 2003 ARM Limited
* Copyright 2000 Deep Blue Solutions Ltd
* Copyright 2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <asm/mach/irq.h>
#include <asm/hardware/gic.h>
#include <mach/cns3xxx.h>
#include "core.h"
/*
 * Static virtual->physical I/O mappings installed at boot by
 * cns3xxx_map_io(): TWD, GIC CPU/distributor, timers, GPIO banks,
 * misc block and power-management block, one 4K device page each.
 */
static struct map_desc cns3xxx_io_desc[] __initdata = {
	{
		.virtual	= CNS3XXX_TC11MP_TWD_BASE_VIRT,
		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT,
		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT,
		.pfn		= __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= CNS3XXX_TIMER1_2_3_BASE_VIRT,
		.pfn		= __phys_to_pfn(CNS3XXX_TIMER1_2_3_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= CNS3XXX_GPIOA_BASE_VIRT,
		.pfn		= __phys_to_pfn(CNS3XXX_GPIOA_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= CNS3XXX_GPIOB_BASE_VIRT,
		.pfn		= __phys_to_pfn(CNS3XXX_GPIOB_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= CNS3XXX_MISC_BASE_VIRT,
		.pfn		= __phys_to_pfn(CNS3XXX_MISC_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= CNS3XXX_PM_BASE_VIRT,
		.pfn		= __phys_to_pfn(CNS3XXX_PM_BASE),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};
/* Install the static device mappings declared in cns3xxx_io_desc[]. */
void __init cns3xxx_map_io(void)
{
	iotable_init(cns3xxx_io_desc, ARRAY_SIZE(cns3xxx_io_desc));
}
/* used by entry-macro.S */
/* used by entry-macro.S */
/* Initialise the GIC: IRQs below 29 are reserved/private on this SoC. */
void __init cns3xxx_init_irq(void)
{
	gic_init(0, 29, __io(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT),
		 __io(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT));
}
/*
 * Power the system down by programming 0x5 ("Hibernate") into the
 * 3-bit field at bits [11:9] of the PM system clock control register.
 */
void cns3xxx_power_off(void)
{
	u32 __iomem *pm_base = __io(CNS3XXX_PM_BASE_VIRT);
	u32 clkctrl;

	printk(KERN_INFO "powering system down...\n");

	clkctrl = readl(pm_base + PM_SYS_CLK_CTRL_OFFSET);
	clkctrl &= ~(0x7 << 9);		/* clear bits [11:9] */
	clkctrl |= 0x5 << 9;		/* Hibernate */
	writel(clkctrl, pm_base + PM_SYS_CLK_CTRL_OFFSET);
}
/*
* Timer
*/
/* Base of the mapped timer1/2/3 register block (set in cns3xxx_timer_init) */
static void __iomem *cns3xxx_tmr1;

/*
 * Clockevent set_mode hook for timer1.  Bit meanings used below:
 * bit 0 = timer1 enable, bit 2 = timer1 overflow interrupt enable,
 * bit 9 = timer1 down-count (consistent with __cns3xxx_timer_init()).
 */
static void cns3xxx_timer_set_mode(enum clock_event_mode mode,
				   struct clock_event_device *clk)
{
	unsigned long ctrl = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
	int pclk = cns3xxx_cpu_clock() / 8;
	int reload;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* reload value scaled from pclk for a HZ-rate period;
		 * NOTE(review): the 20/3 * 0x25000 scaling constants are
		 * board-derived magic -- confirm against SoC docs */
		reload = pclk * 20 / (3 * HZ) * 0x25000;
		writel(reload, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET);
		ctrl |= (1 << 0) | (1 << 2) | (1 << 9);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* period set, and timer enabled in 'next_event' hook */
		ctrl |= (1 << 2) | (1 << 9);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		/* disable the timer entirely */
		ctrl = 0;
	}

	writel(ctrl, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
}
/* One-shot programming: load @evt into the auto-reload register, then
 * set bit 0 (timer1 enable) while preserving the other control bits. */
static int cns3xxx_timer_set_next_event(unsigned long evt,
					struct clock_event_device *unused)
{
	unsigned long ctrl = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);

	writel(evt, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET);
	writel(ctrl | (1 << 0), cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);

	return 0;
}
/* Clockevent device backed by hardware timer1 (periodic + one-shot). */
static struct clock_event_device cns3xxx_tmr1_clockevent = {
	.name		= "cns3xxx timer1",
	.shift		= 8,
	.features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= cns3xxx_timer_set_mode,
	.set_next_event	= cns3xxx_timer_set_next_event,
	.rating		= 350,
	.cpumask	= cpu_all_mask,
};

/* Compute the mult/min/max conversion factors from the timer input
 * clock (cpu clock / 8) and register the clockevent device. */
static void __init cns3xxx_clockevents_init(unsigned int timer_irq)
{
	cns3xxx_tmr1_clockevent.irq = timer_irq;
	cns3xxx_tmr1_clockevent.mult =
		div_sc((cns3xxx_cpu_clock() >> 3) * 1000000, NSEC_PER_SEC,
		       cns3xxx_tmr1_clockevent.shift);
	cns3xxx_tmr1_clockevent.max_delta_ns =
		clockevent_delta2ns(0xffffffff, &cns3xxx_tmr1_clockevent);
	cns3xxx_tmr1_clockevent.min_delta_ns =
		clockevent_delta2ns(0xf, &cns3xxx_tmr1_clockevent);

	clockevents_register_device(&cns3xxx_tmr1_clockevent);
}
/*
* IRQ handler for the timer
*/
/*
 * IRQ handler for the timer
 */
static irqreturn_t cns3xxx_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &cns3xxx_tmr1_clockevent;
	u32 __iomem *stat = cns3xxx_tmr1 + TIMER1_2_INTERRUPT_STATUS_OFFSET;
	u32 val;

	/* Clear the interrupt */
	/* NOTE(review): writes status with bit 2 (timer1 overflow)
	 * cleared, which looks like write-back-to-clear semantics --
	 * confirm against the CNS3xxx timer register description */
	val = readl(stat);
	writel(val & ~(1 << 2), stat);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

/* irqaction wired to the system timer interrupt in __cns3xxx_timer_init() */
static struct irqaction cns3xxx_timer_irq = {
	.name		= "timer",
	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= cns3xxx_timer_interrupt,
};
/*
* Set up the clock source and clock events devices
*/
/*
 * Set up the clock source and clock events devices
 */
static void __init __cns3xxx_timer_init(unsigned int timer_irq)
{
	u32 val;
	u32 irq_mask;

	/*
	 * Initialise to a known state (all timers off)
	 */

	/* disable timer1 and timer2 */
	writel(0, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
	/* stop free running timer3 */
	writel(0, cns3xxx_tmr1 + TIMER_FREERUN_CONTROL_OFFSET);

	/* timer1: initial count/reload 0x5C800, no match values */
	writel(0x5C800, cns3xxx_tmr1 + TIMER1_COUNTER_OFFSET);
	writel(0x5C800, cns3xxx_tmr1 + TIMER1_AUTO_RELOAD_OFFSET);
	writel(0, cns3xxx_tmr1 + TIMER1_MATCH_V1_OFFSET);
	writel(0, cns3xxx_tmr1 + TIMER1_MATCH_V2_OFFSET);

	/* mask irq, non-mask timer1 overflow */
	irq_mask = readl(cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET);
	irq_mask &= ~(1 << 2);		/* unmask timer1 overflow */
	irq_mask |= 0x03;		/* mask timer1 match interrupts */
	writel(irq_mask, cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET);

	/* down counter */
	val = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
	val |= (1 << 9);		/* timer1 counts down */
	writel(val, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);

	/* timer2: clear match values, leave it unused */
	writel(0, cns3xxx_tmr1 + TIMER2_MATCH_V1_OFFSET);
	writel(0, cns3xxx_tmr1 + TIMER2_MATCH_V2_OFFSET);

	/* mask irq */
	irq_mask = readl(cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET);
	irq_mask |= ((1 << 3) | (1 << 4) | (1 << 5));
	writel(irq_mask, cns3xxx_tmr1 + TIMER1_2_INTERRUPT_MASK_OFFSET);

	/* down counter */
	val = readl(cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);
	val |= (1 << 10);		/* timer2 counts down */
	writel(val, cns3xxx_tmr1 + TIMER1_2_CONTROL_OFFSET);

	/* Make irqs happen for the system timer */
	setup_irq(timer_irq, &cns3xxx_timer_irq);

	cns3xxx_clockevents_init(timer_irq);
}
/* Machine timer-init entry point: resolve the mapped register base
 * and program the timers, using timer0's interrupt for the tick. */
static void __init cns3xxx_timer_init(void)
{
	cns3xxx_tmr1 = __io(CNS3XXX_TIMER1_2_3_BASE_VIRT);

	__cns3xxx_timer_init(IRQ_CNS3XXX_TIMER0);
}

/* Exported system timer descriptor referenced by the machine record */
struct sys_timer cns3xxx_timer = {
	.init = cns3xxx_timer_init,
};
| gpl-2.0 |
WhiteDawn/Whatever-Flo-Android-Kernel | drivers/gpu/drm/nouveau/nouveau_fbcon.c | 3042 | 14816 | /*
* Copyright © 2007 David Airlie
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* David Airlie
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/screen_info.h>
#include <linux/vga_switcheroo.h>
#include <linux/console.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_crtc.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "nouveau_dma.h"
/*
 * Accelerated fbcon fillrect: dispatch to the generation-specific
 * (NV04/NV50/NVC0) fill path when the channel mutex is available and
 * acceleration is allowed; otherwise fall back to software
 * cfb_fillrect().  A non-ENODEV failure marks the GPU as locked up.
 */
static void
nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_nouveau_private *dev_priv = nfbdev->dev->dev_private;
	int ret = -ENODEV;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
	    mutex_trylock(&dev_priv->channel->mutex)) {
		if (dev_priv->card_type < NV_50)
			ret = nv04_fbcon_fillrect(info, rect);
		else if (dev_priv->card_type < NV_C0)
			ret = nv50_fbcon_fillrect(info, rect);
		else
			ret = nvc0_fbcon_fillrect(info, rect);
		mutex_unlock(&dev_priv->channel->mutex);
	}

	if (ret == 0)
		return;

	if (ret != -ENODEV)
		nouveau_fbcon_gpu_lockup(info);
	cfb_fillrect(info, rect);
}
/*
 * Accelerated fbcon copyarea: same dispatch/fallback scheme as
 * nouveau_fbcon_fillrect(), ending in software cfb_copyarea() when
 * acceleration is unavailable or fails.
 */
static void
nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
{
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_nouveau_private *dev_priv = nfbdev->dev->dev_private;
	int ret = -ENODEV;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
	    mutex_trylock(&dev_priv->channel->mutex)) {
		if (dev_priv->card_type < NV_50)
			ret = nv04_fbcon_copyarea(info, image);
		else if (dev_priv->card_type < NV_C0)
			ret = nv50_fbcon_copyarea(info, image);
		else
			ret = nvc0_fbcon_copyarea(info, image);
		mutex_unlock(&dev_priv->channel->mutex);
	}

	if (ret == 0)
		return;

	if (ret != -ENODEV)
		nouveau_fbcon_gpu_lockup(info);
	cfb_copyarea(info, image);
}
/* fbcon imageblit: accelerated glyph/image draw, cfb_imageblit fallback. */
static void
nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret = -ENODEV;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	/* Same gating as fillrect/copyarea: sleepable context, acceleration
	 * enabled, and an uncontended channel mutex. */
	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
	    mutex_trylock(&dev_priv->channel->mutex)) {
		if (dev_priv->card_type < NV_50)
			ret = nv04_fbcon_imageblit(info, image);
		else if (dev_priv->card_type < NV_C0)
			ret = nv50_fbcon_imageblit(info, image);
		else
			ret = nvc0_fbcon_imageblit(info, image);
		mutex_unlock(&dev_priv->channel->mutex);
	}

	if (ret) {
		if (ret != -ENODEV)
			nouveau_fbcon_gpu_lockup(info);
		cfb_imageblit(info, image);
	}
}
/*
 * Wait for previously queued accelerated fbcon rendering to complete.
 *
 * Always returns 0: when syncing is impossible (no channel, atomic context,
 * accel disabled, mutex contended) it is simply skipped, and a timeout
 * switches fbcon to software rendering via nouveau_fbcon_gpu_lockup().
 */
static int
nouveau_fbcon_sync(struct fb_info *info)
{
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	int ret, i;

	/* Nothing to do if no accel work is outstanding or we must not
	 * sleep / touch the channel here. */
	if (!chan || !chan->accel_done || in_interrupt() ||
	    info->state != FBINFO_STATE_RUNNING ||
	    info->flags & FBINFO_HWACCEL_DISABLED)
		return 0;

	if (!mutex_trylock(&chan->mutex))
		return 0;

	ret = RING_SPACE(chan, 4);
	if (ret) {
		/* Can't even get ring space: treat as a GPU lockup */
		mutex_unlock(&chan->mutex);
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}

	/* Queue a 2D-engine serialisation; the method layout differs between
	 * Fermi (NV_C0+) and earlier generations. */
	if (dev_priv->card_type >= NV_C0) {
		BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
		OUT_RING  (chan, 0);
		BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
		OUT_RING  (chan, 0);
	} else {
		BEGIN_RING(chan, 0, 0x0104, 1);
		OUT_RING  (chan, 0);
		BEGIN_RING(chan, 0, 0x0100, 1);
		OUT_RING  (chan, 0);
	}

	/* Arm the notifier word; the GPU clears it when the work completes */
	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
	FIRE_RING(chan);
	mutex_unlock(&chan->mutex);

	/* Busy-wait (up to ~100ms) for the notifier to be cleared */
	ret = -EBUSY;
	for (i = 0; i < 100000; i++) {
		if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
			ret = 0;
			break;
		}

		DRM_UDELAY(1);
	}

	if (ret) {
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}

	chan->accel_done = false;
	return 0;
}
/* fb_ops installed once hardware acceleration is up: rendering goes through
 * the nouveau_fbcon_* accelerated wrappers above. */
static struct fb_ops nouveau_fbcon_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = nouveau_fbcon_fillrect,
	.fb_copyarea = nouveau_fbcon_copyarea,
	.fb_imageblit = nouveau_fbcon_imageblit,
	.fb_sync = nouveau_fbcon_sync,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
};

/* Software-only fb_ops: the initial set, and the one used when acceleration
 * is disabled or fails to initialise.  Rendering uses the generic cfb_*
 * CPU routines and needs no fb_sync. */
static struct fb_ops nouveau_fbcon_sw_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
};
/* Store one palette entry into the CRTC's software LUT cache. */
static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
				    u16 blue, int regno)
{
	struct nouveau_crtc *priv = nouveau_crtc(crtc);

	priv->lut.r[regno] = red;
	priv->lut.g[regno] = green;
	priv->lut.b[regno] = blue;
}
/* Read one palette entry back from the CRTC's software LUT cache. */
static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
				    u16 *blue, int regno)
{
	struct nouveau_crtc *priv = nouveau_crtc(crtc);

	*red = priv->lut.r[regno];
	*green = priv->lut.g[regno];
	*blue = priv->lut.b[regno];
}
static void
nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
struct fb_info *info = nfbdev->helper.fbdev;
struct fb_fillrect rect;
/* Clear the entire fbcon. The drm will program every connector
* with it's preferred mode. If the sizes differ, one display will
* quite likely have garbage around the console.
*/
rect.dx = rect.dy = 0;
rect.width = info->var.xres_virtual;
rect.height = info->var.yres_virtual;
rect.color = 0;
rect.rop = ROP_COPY;
info->fbops->fb_fillrect(info, &rect);
}
/*
 * Allocate a VRAM buffer object, wrap it in a DRM framebuffer and register
 * it as the fbdev console surface.  Acceleration is hooked up at the end
 * when a GPU channel exists and nouveau_nofbaccel is not set.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the out_unref error path only drops struct_mutex; the
 * pinned/mapped BO, the fb_info and its cmap are not released on those
 * paths — looks like a leak on error, confirm against later upstream fixes.
 */
static int
nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
		     struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct nouveau_framebuffer *nouveau_fb;
	struct nouveau_channel *chan;
	struct nouveau_bo *nvbo;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct pci_dev *pdev = dev->pdev;
	struct device *device = &pdev->dev;
	int size, ret;

	/* Pitch = bytes per line, rounded up to the 256-byte hw alignment */
	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;
	mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
	mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);
	size = mode_cmd.pitches[0] * mode_cmd.height;
	size = roundup(size, PAGE_SIZE);

	/* Back the console with a pinned, CPU-mapped VRAM buffer object */
	ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
			      0, 0x0000, &nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to allocate framebuffer\n");
		goto out;
	}

	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "failed to pin fb: %d\n", ret);
		nouveau_bo_ref(NULL, &nvbo);
		goto out;
	}

	ret = nouveau_bo_map(nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to map fb: %d\n", ret);
		nouveau_bo_unpin(nvbo);
		nouveau_bo_ref(NULL, &nvbo);
		goto out;
	}

	/* NV50+ additionally needs the BO mapped into the channel's VM for
	 * accelerated rendering; failure just disables acceleration. */
	chan = nouveau_nofbaccel ? NULL : dev_priv->channel;
	if (chan && dev_priv->card_type >= NV_50) {
		ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma);
		if (ret) {
			NV_ERROR(dev, "failed to map fb into chan: %d\n", ret);
			chan = NULL;
		}
	}

	mutex_lock(&dev->struct_mutex);

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unref;
	}

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->par = nfbdev;

	nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);

	nouveau_fb = &nfbdev->nouveau_fb;
	fb = &nouveau_fb->base;

	/* setup helper */
	nfbdev->helper.fb = fb;
	nfbdev->helper.fbdev = info;

	strcpy(info->fix.id, "nouveaufb");
	if (nouveau_nofbaccel)
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
	else
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
			      FBINFO_HWACCEL_FILLRECT |
			      FBINFO_HWACCEL_IMAGEBLIT;
	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
	/* Start on the software ops; switched to the accelerated set below
	 * only if accel init succeeds. */
	info->fbops = &nouveau_fbcon_sw_ops;
	info->fix.smem_start = nvbo->bo.mem.bus.base +
			       nvbo->bo.mem.bus.offset;
	info->fix.smem_len = size;

	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
	info->screen_size = size;

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);

	/* Set aperture base/size for vesafb takeover */
	info->apertures = dev_priv->apertures;
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unref;
	}

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	mutex_unlock(&dev->struct_mutex);

	/* Try to bring up 2D acceleration for this generation */
	if (dev_priv->channel && !nouveau_nofbaccel) {
		ret = -ENODEV;
		if (dev_priv->card_type < NV_50)
			ret = nv04_fbcon_accel_init(info);
		else
		if (dev_priv->card_type < NV_C0)
			ret = nv50_fbcon_accel_init(info);
		else
			ret = nvc0_fbcon_accel_init(info);

		if (ret == 0)
			info->fbops = &nouveau_fbcon_ops;
	}

	nouveau_fbcon_zfill(dev, nfbdev);

	/* To allow resizeing without swapping buffers */
	NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
		nouveau_fb->base.width,
		nouveau_fb->base.height,
		nvbo->bo.offset, nvbo);

	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unref:
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}
/* drm_fb_helper "fb_probe" hook: create the console framebuffer on first
 * call.  Returns 1 when a new fb was created, 0 when one already existed,
 * or a negative errno on failure. */
static int
nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
				    struct drm_fb_helper_surface_size *sizes)
{
	struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper;
	int ret;

	/* An fb already bound to the helper means nothing new to create */
	if (helper->fb)
		return 0;

	ret = nouveau_fbcon_create(nfbdev, sizes);
	if (ret)
		return ret;

	return 1;
}
/* Forward connector hotplug/poll events to the fbdev helper. */
void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_fb_helper *helper = &dev_priv->nfbdev->helper;

	drm_fb_helper_hotplug_event(helper);
}
/*
 * Tear down the fbdev console: unregister the fb_info first, then drop the
 * CPU mapping, per-channel VMA and GEM reference on the backing BO, and
 * finally clean up the helper and framebuffer objects.  Always returns 0.
 */
static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
	struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
	struct fb_info *info;

	if (nfbdev->helper.fbdev) {
		info = nfbdev->helper.fbdev;
		unregister_framebuffer(info);
		/* cmap.len is non-zero only if fb_alloc_cmap() succeeded */
		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info);
	}

	if (nouveau_fb->nvbo) {
		nouveau_bo_unmap(nouveau_fb->nvbo);
		nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
		nouveau_fb->nvbo = NULL;
	}

	drm_fb_helper_fini(&nfbdev->helper);
	drm_framebuffer_cleanup(&nouveau_fb->base);
	return 0;
}
/* Permanently fall back to software rendering after a GPU lockup: setting
 * FBINFO_HWACCEL_DISABLED makes every later fbcon op take the cfb_* path. */
void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
	struct nouveau_fbdev *nfbdev = info->par;

	NV_ERROR(nfbdev->dev, "GPU lockup - switching to software fbcon\n");
	info->flags |= FBINFO_HWACCEL_DISABLED;
}
/* drm_fb_helper callbacks: LUT accessors and the one-shot fb creation hook */
static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
	.gamma_set = nouveau_fbcon_gamma_set,
	.gamma_get = nouveau_fbcon_gamma_get,
	.fb_probe = nouveau_fbcon_find_or_create_single,
};
int nouveau_fbcon_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fbdev *nfbdev;
int preferred_bpp;
int ret;
nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
if (!nfbdev)
return -ENOMEM;
nfbdev->dev = dev;
dev_priv->nfbdev = nfbdev;
nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
ret = drm_fb_helper_init(dev, &nfbdev->helper,
nv_two_heads(dev) ? 2 : 1, 4);
if (ret) {
kfree(nfbdev);
return ret;
}
drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
if (dev_priv->vram_size <= 32 * 1024 * 1024)
preferred_bpp = 8;
else if (dev_priv->vram_size <= 64 * 1024 * 1024)
preferred_bpp = 16;
else
preferred_bpp = 32;
drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp);
return 0;
}
/* Undo nouveau_fbcon_init(): destroy the console and free the wrapper.
 * Safe to call when init never ran (nfbdev == NULL). */
void nouveau_fbcon_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fbdev *nfbdev = dev_priv->nfbdev;

	if (!nfbdev)
		return;

	nouveau_fbcon_destroy(dev, nfbdev);
	kfree(nfbdev);
	dev_priv->nfbdev = NULL;
}
void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags;
dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
}
void nouveau_fbcon_restore_accel(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags;
}
/* Suspend (state != 0) or resume (state == 0... see below) the fbdev console
 * under the console lock.  Acceleration is disabled on the state == 0 path
 * and restored on the state == 1 path, bracketing fb_set_suspend(). */
void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct fb_info *info = dev_priv->nfbdev->helper.fbdev;

	console_lock();
	if (state == 0)
		nouveau_fbcon_save_disable_accel(dev);
	fb_set_suspend(info, state);
	if (state == 1)
		nouveau_fbcon_restore_accel(dev);
	console_unlock();
}
/* Clear the whole console framebuffer (see nouveau_fbcon_zfill). */
void nouveau_fbcon_zfill_all(struct drm_device *dev)
{
	struct drm_nouveau_private *priv = dev->dev_private;

	nouveau_fbcon_zfill(dev, priv->nfbdev);
}
| gpl-2.0 |
bruce2728/android_kernel_htc_msm8660 | drivers/acpi/acpica/utdebug.c | 3042 | 19311 | /******************************************************************************
*
* Module Name: utdebug - Debug print routines
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utdebug")
#ifdef ACPI_DEBUG_OUTPUT
static acpi_thread_id acpi_gbl_prev_thread_id;
static char *acpi_gbl_fn_entry_str = "----Entry";
static char *acpi_gbl_fn_exit_str = "----Exit-";
/* Local prototypes */
static const char *acpi_ut_trim_function_name(const char *function_name);
/*******************************************************************************
*
* FUNCTION: acpi_ut_init_stack_ptr_trace
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Save the current CPU stack pointer at subsystem startup
*
******************************************************************************/
/* Record the CPU stack pointer at subsystem startup (uses the address of a
 * local as an approximation of the current stack pointer). */
void acpi_ut_init_stack_ptr_trace(void)
{
	acpi_size current_sp;

	/* Fix: "&current_sp" had been garbled into mojibake ("&curren;"
	 * HTML-entity corruption), which does not compile; restore the
	 * address-of expression. */
	acpi_gbl_entry_stack_pointer = &current_sp;
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_track_stack_ptr
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Save the current CPU stack pointer
*
******************************************************************************/
/* Track the deepest stack usage and nesting level seen so far.  The address
 * of a local approximates the current stack pointer. */
void acpi_ut_track_stack_ptr(void)
{
	acpi_size current_sp;

	/* Fix: both uses of "&current_sp" had been garbled into mojibake
	 * ("&curren;" HTML-entity corruption); restore the address-of
	 * expressions so the comparison and assignment compile again. */
	if (&current_sp < acpi_gbl_lowest_stack_pointer) {
		acpi_gbl_lowest_stack_pointer = &current_sp;
	}

	if (acpi_gbl_nesting_level > acpi_gbl_deepest_nesting) {
		acpi_gbl_deepest_nesting = acpi_gbl_nesting_level;
	}
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_trim_function_name
*
* PARAMETERS: function_name - Ascii string containing a procedure name
*
* RETURN: Updated pointer to the function name
*
* DESCRIPTION: Remove the "Acpi" prefix from the function name, if present.
* This allows compiler macros such as __func__ to be used
* with no change to the debug output.
*
******************************************************************************/
/* Strip the "Acpi"/"acpi_" prefix by comparing the first 4 bytes of the
 * name against the known prefix constants. */
static const char *acpi_ut_trim_function_name(const char *function_name)
{

	/* All Function names are longer than 4 chars, check is safe */

	/* NOTE(review): reads 4 bytes through a u32 cast — assumes the
	 * platform's ACPI_CAST_PTR tolerates this access pattern; confirm
	 * on strict-alignment targets. */
	if (*(ACPI_CAST_PTR(u32, function_name)) == ACPI_PREFIX_MIXED) {

		/* This is the case where the original source has not been modified */

		return (function_name + 4);
	}

	if (*(ACPI_CAST_PTR(u32, function_name)) == ACPI_PREFIX_LOWER) {

		/* This is the case where the source has been 'linuxized' */

		return (function_name + 5);
	}

	return (function_name);
}
/*******************************************************************************
*
* FUNCTION: acpi_debug_print
*
* PARAMETERS: requested_debug_level - Requested debug print level
* line_number - Caller's line number (for error output)
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
* Format - Printf format field
* ... - Optional printf arguments
*
* RETURN: None
*
* DESCRIPTION: Print error message with prefix consisting of the module name,
* line number, and component ID.
*
******************************************************************************/
/* Print a debug message with module/line/nesting prefix; filtered by the
 * global debug level and component layer masks. */
void ACPI_INTERNAL_VAR_XFACE
acpi_debug_print(u32 requested_debug_level,
		 u32 line_number,
		 const char *function_name,
		 const char *module_name,
		 u32 component_id, const char *format, ...)
{
	acpi_thread_id thread_id;
	va_list args;

	/*
	 * Stay silent if the debug level or component ID is disabled
	 */
	if (!(requested_debug_level & acpi_dbg_level) ||
	    !(component_id & acpi_dbg_layer)) {
		return;
	}

	/*
	 * Thread tracking and context switch notification
	 */
	thread_id = acpi_os_get_thread_id();
	if (thread_id != acpi_gbl_prev_thread_id) {
		if (ACPI_LV_THREADS & acpi_dbg_level) {
			acpi_os_printf
			    ("\n**** Context Switch from TID %u to TID %u ****\n\n",
			     (u32)acpi_gbl_prev_thread_id, (u32)thread_id);
		}

		acpi_gbl_prev_thread_id = thread_id;
	}

	/*
	 * Display the module name, current line number, thread ID (if requested),
	 * current procedure nesting level, and the current procedure name
	 *
	 * Fix: line_number and acpi_gbl_nesting_level are u32 (promoted to
	 * unsigned int in varargs); the old "%04ld"/"%02ld" specifiers told
	 * printf to read a long — a format/argument mismatch that is
	 * undefined behavior on LP64 targets.  Use "%u" forms instead.
	 */
	acpi_os_printf("%8s-%04u ", module_name, line_number);

	if (ACPI_LV_THREADS & acpi_dbg_level) {
		acpi_os_printf("[%u] ", (u32)thread_id);
	}

	acpi_os_printf("[%02u] %-22.22s: ",
		       acpi_gbl_nesting_level,
		       acpi_ut_trim_function_name(function_name));

	va_start(args, format);
	acpi_os_vprintf(format, args);
	va_end(args);
}
ACPI_EXPORT_SYMBOL(acpi_debug_print)
/*******************************************************************************
*
* FUNCTION: acpi_debug_print_raw
*
* PARAMETERS: requested_debug_level - Requested debug print level
* line_number - Caller's line number
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
* Format - Printf format field
* ... - Optional printf arguments
*
* RETURN: None
*
* DESCRIPTION: Print message with no headers. Has same interface as
* debug_print so that the same macros can be used.
*
******************************************************************************/
/* Print a debug message with no prefix; same filtering as acpi_debug_print
 * so the same macros can drive both. */
void ACPI_INTERNAL_VAR_XFACE
acpi_debug_print_raw(u32 requested_debug_level,
		     u32 line_number,
		     const char *function_name,
		     const char *module_name,
		     u32 component_id, const char *format, ...)
{
	va_list args;

	/* Emit nothing unless both the level and the component are enabled */
	if (!(acpi_dbg_level & requested_debug_level))
		return;
	if (!(acpi_dbg_layer & component_id))
		return;

	va_start(args, format);
	acpi_os_vprintf(format, args);
	va_end(args);
}
ACPI_EXPORT_SYMBOL(acpi_debug_print_raw)
/*******************************************************************************
*
* FUNCTION: acpi_ut_trace
*
* PARAMETERS: line_number - Caller's line number
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
*
* RETURN: None
*
* DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
void
acpi_ut_trace(u32 line_number,
	      const char *function_name,
	      const char *module_name, u32 component_id)
{
	/* One level deeper; sample the stack pointer before printing entry */
	acpi_gbl_nesting_level++;
	acpi_ut_track_stack_ptr();

	acpi_debug_print(ACPI_LV_FUNCTIONS,
			 line_number, function_name, module_name, component_id,
			 "%s\n", acpi_gbl_fn_entry_str);
}
ACPI_EXPORT_SYMBOL(acpi_ut_trace)
/*******************************************************************************
*
* FUNCTION: acpi_ut_trace_ptr
*
* PARAMETERS: line_number - Caller's line number
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
* Pointer - Pointer to display
*
* RETURN: None
*
* DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
void
acpi_ut_trace_ptr(u32 line_number,
		  const char *function_name,
		  const char *module_name, u32 component_id, void *pointer)
{
	/* Same as acpi_ut_trace, with the pointer appended to the entry line */
	acpi_gbl_nesting_level++;
	acpi_ut_track_stack_ptr();

	acpi_debug_print(ACPI_LV_FUNCTIONS,
			 line_number, function_name, module_name, component_id,
			 "%s %p\n", acpi_gbl_fn_entry_str, pointer);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_trace_str
*
* PARAMETERS: line_number - Caller's line number
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
* String - Additional string to display
*
* RETURN: None
*
* DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
void
acpi_ut_trace_str(u32 line_number,
		  const char *function_name,
		  const char *module_name, u32 component_id, char *string)
{
	/* Same as acpi_ut_trace, with an extra string on the entry line */
	acpi_gbl_nesting_level++;
	acpi_ut_track_stack_ptr();

	acpi_debug_print(ACPI_LV_FUNCTIONS,
			 line_number, function_name, module_name, component_id,
			 "%s %s\n", acpi_gbl_fn_entry_str, string);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_trace_u32
*
* PARAMETERS: line_number - Caller's line number
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
* Integer - Integer to display
*
* RETURN: None
*
* DESCRIPTION: Function entry trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
void
acpi_ut_trace_u32(u32 line_number,
		  const char *function_name,
		  const char *module_name, u32 component_id, u32 integer)
{
	/* Same as acpi_ut_trace, with a hex integer on the entry line */
	acpi_gbl_nesting_level++;
	acpi_ut_track_stack_ptr();

	acpi_debug_print(ACPI_LV_FUNCTIONS,
			 line_number, function_name, module_name, component_id,
			 "%s %08X\n", acpi_gbl_fn_entry_str, integer);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_exit
*
* PARAMETERS: line_number - Caller's line number
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
*
* RETURN: None
*
* DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level
*
******************************************************************************/
void
acpi_ut_exit(u32 line_number,
	     const char *function_name,
	     const char *module_name, u32 component_id)
{
	acpi_debug_print(ACPI_LV_FUNCTIONS,
			 line_number, function_name, module_name, component_id,
			 "%s\n", acpi_gbl_fn_exit_str);

	/* Print first, then pop one nesting level (mirrors acpi_ut_trace) */
	acpi_gbl_nesting_level--;
}
ACPI_EXPORT_SYMBOL(acpi_ut_exit)
/*******************************************************************************
*
* FUNCTION: acpi_ut_status_exit
*
* PARAMETERS: line_number - Caller's line number
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
* Status - Exit status code
*
* RETURN: None
*
* DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level. Prints exit status also.
*
******************************************************************************/
void
acpi_ut_status_exit(u32 line_number,
		    const char *function_name,
		    const char *module_name,
		    u32 component_id, acpi_status status)
{
	/* Successful exits print the status plainly; failures are flagged
	 * with an "****Exception****" marker for easy log scanning. */
	if (ACPI_SUCCESS(status)) {
		acpi_debug_print(ACPI_LV_FUNCTIONS,
				 line_number, function_name, module_name,
				 component_id, "%s %s\n", acpi_gbl_fn_exit_str,
				 acpi_format_exception(status));
	} else {
		acpi_debug_print(ACPI_LV_FUNCTIONS,
				 line_number, function_name, module_name,
				 component_id, "%s ****Exception****: %s\n",
				 acpi_gbl_fn_exit_str,
				 acpi_format_exception(status));
	}

	acpi_gbl_nesting_level--;
}
ACPI_EXPORT_SYMBOL(acpi_ut_status_exit)
/*******************************************************************************
*
* FUNCTION: acpi_ut_value_exit
*
* PARAMETERS: line_number - Caller's line number
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
* Value - Value to be printed with exit msg
*
* RETURN: None
*
* DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level. Prints exit value also.
*
******************************************************************************/
void
acpi_ut_value_exit(u32 line_number,
		   const char *function_name,
		   const char *module_name, u32 component_id, u64 value)
{
	/* Exit trace with the 64-bit return value shown as two hex words */
	acpi_debug_print(ACPI_LV_FUNCTIONS,
			 line_number, function_name, module_name, component_id,
			 "%s %8.8X%8.8X\n", acpi_gbl_fn_exit_str,
			 ACPI_FORMAT_UINT64(value));

	acpi_gbl_nesting_level--;
}
ACPI_EXPORT_SYMBOL(acpi_ut_value_exit)
/*******************************************************************************
*
* FUNCTION: acpi_ut_ptr_exit
*
* PARAMETERS: line_number - Caller's line number
* function_name - Caller's procedure name
* module_name - Caller's module name
* component_id - Caller's component ID
* Ptr - Pointer to display
*
* RETURN: None
*
* DESCRIPTION: Function exit trace. Prints only if TRACE_FUNCTIONS bit is
* set in debug_level. Prints exit value also.
*
******************************************************************************/
void
acpi_ut_ptr_exit(u32 line_number,
		 const char *function_name,
		 const char *module_name, u32 component_id, u8 *ptr)
{
	/* Exit trace with the returned pointer value appended */
	acpi_debug_print(ACPI_LV_FUNCTIONS,
			 line_number, function_name, module_name, component_id,
			 "%s %p\n", acpi_gbl_fn_exit_str, ptr);

	acpi_gbl_nesting_level--;
}
#endif
/*******************************************************************************
*
* FUNCTION: acpi_ut_dump_buffer
*
* PARAMETERS: Buffer - Buffer to dump
* Count - Amount to dump, in bytes
* Display - BYTE, WORD, DWORD, or QWORD display
* component_iD - Caller's component ID
*
* RETURN: None
*
* DESCRIPTION: Generic dump buffer in both hex and ascii.
*
******************************************************************************/
/*
 * Hex/ASCII dump of a buffer, 16 bytes per line, grouped per the Display
 * argument (DB_BYTE/WORD/DWORD/QWORD_DISPLAY).
 *
 * NOTE(review): the bounds check only guarantees i + j < count before each
 * group; the WORD/DWORD/QWORD cases then read 2/4/8 bytes from that offset,
 * so a count that is not a multiple of the group size can read past the
 * buffer end — confirm callers always pass aligned counts.
 */
void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
{
	u32 i = 0;
	u32 j;
	u32 temp32;
	u8 buf_char;

	if (!buffer) {
		acpi_os_printf("Null Buffer Pointer in DumpBuffer!\n");
		return;
	}

	/* Short or odd-sized buffers are always dumped byte-wise */
	if ((count < 4) || (count & 0x01)) {
		display = DB_BYTE_DISPLAY;
	}

	/* Nasty little dump buffer routine! */

	while (i < count) {

		/* Print current offset */

		acpi_os_printf("%6.4X: ", i);

		/* Print 16 hex chars */

		for (j = 0; j < 16;) {
			if (i + j >= count) {

				/* Dump fill spaces */

				acpi_os_printf("%*s", ((display * 2) + 1), " ");
				j += display;
				continue;
			}

			switch (display) {
			case DB_BYTE_DISPLAY:
			default:	/* Default is BYTE display */

				acpi_os_printf("%02X ",
					       buffer[(acpi_size) i + j]);
				break;

			case DB_WORD_DISPLAY:

				ACPI_MOVE_16_TO_32(&temp32,
						   &buffer[(acpi_size) i + j]);
				acpi_os_printf("%04X ", temp32);
				break;

			case DB_DWORD_DISPLAY:

				ACPI_MOVE_32_TO_32(&temp32,
						   &buffer[(acpi_size) i + j]);
				acpi_os_printf("%08X ", temp32);
				break;

			case DB_QWORD_DISPLAY:

				/* A quadword is printed as two back-to-back
				 * dwords (low then high 4 bytes) */
				ACPI_MOVE_32_TO_32(&temp32,
						   &buffer[(acpi_size) i + j]);
				acpi_os_printf("%08X", temp32);

				ACPI_MOVE_32_TO_32(&temp32,
						   &buffer[(acpi_size) i + j +
							   4]);
				acpi_os_printf("%08X ", temp32);
				break;
			}

			j += display;
		}

		/*
		 * Print the ASCII equivalent characters but watch out for the bad
		 * unprintable ones (printable chars are 0x20 through 0x7E)
		 */
		acpi_os_printf(" ");
		for (j = 0; j < 16; j++) {
			if (i + j >= count) {
				acpi_os_printf("\n");
				return;
			}

			buf_char = buffer[(acpi_size) i + j];
			if (ACPI_IS_PRINT(buf_char)) {
				acpi_os_printf("%c", buf_char);
			} else {
				acpi_os_printf(".");
			}
		}

		/* Done with that line. */

		acpi_os_printf("\n");
		i += 16;
	}

	return;
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_dump_buffer
*
* PARAMETERS: Buffer - Buffer to dump
* Count - Amount to dump, in bytes
* Display - BYTE, WORD, DWORD, or QWORD display
* component_iD - Caller's component ID
*
* RETURN: None
*
* DESCRIPTION: Generic dump buffer in both hex and ascii.
*
******************************************************************************/
/* Wrapper around acpi_ut_dump_buffer2 that dumps only when table-level
 * tracing is enabled for the caller's component. */
void acpi_ut_dump_buffer(u8 * buffer, u32 count, u32 display, u32 component_id)
{

	/* Only dump the buffer if tracing is enabled */

	if (!(acpi_dbg_level & ACPI_LV_TABLES))
		return;
	if (!(acpi_dbg_layer & component_id))
		return;

	acpi_ut_dump_buffer2(buffer, count, display);
}
| gpl-2.0 |
olexiyt/telechips-linux | drivers/media/video/hexium_orion.c | 3042 | 12980 | /*
hexium_orion.c - v4l2 driver for the Hexium Orion frame grabber cards
Visit http://www.mihu.de/linux/saa7146/ and follow the link
to "hexium" for further details about this card.
Copyright (C) 2003 Michael Hunold <michael@mihu.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define DEBUG_VARIABLE debug
#include <media/saa7146_vv.h>
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "debug verbosity");
/* global variables */
static int hexium_num;
#define HEXIUM_HV_PCI6_ORION 1
#define HEXIUM_ORION_1SVHS_3BNC 2
#define HEXIUM_ORION_4BNC 3
#define HEXIUM_INPUTS 9
/* The nine video inputs exposed to V4L2: six composite (CVBS) connectors
 * followed by three S-Video (Y/C) connectors, each accepting PAL-B/G or
 * NTSC-M. */
static struct v4l2_input hexium_inputs[HEXIUM_INPUTS] = {
	{ 0, "CVBS 1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 1, "CVBS 2", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 2, "CVBS 3", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 3, "CVBS 4", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 4, "CVBS 5", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 5, "CVBS 6", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 6, "Y/C 1", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 7, "Y/C 2", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
	{ 8, "Y/C 3", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG|V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD },
};
#define HEXIUM_AUDIOS 0
/* One decoder register write: address + value (used by the input-routing
 * tables below). */
struct hexium_data
{
	s8 adr;		/* SAA7110 register address */
	u8 byte;	/* value written to that register */
};

/* Per-card driver state; stored in dev->ext_priv (see hexium_probe). */
struct hexium
{
	int type;	/* HEXIUM_* board variant */
	struct video_device *video_dev;
	struct i2c_adapter i2c_adapter;

	int cur_input;	/* current input */
};
/* Philips SAA7110 decoder default registers */
/* Philips SAA7110 decoder default registers */
/* 53 initial register values (0x00..0x34), written at decoder setup. */
static u8 hexium_saa7110[53]={
/*00*/ 0x4C,0x3C,0x0D,0xEF,0xBD,0xF0,0x00,0x00,
/*08*/ 0xF8,0xF8,0x60,0x60,0x40,0x86,0x18,0x90,
/*10*/ 0x00,0x2C,0x40,0x46,0x42,0x1A,0xFF,0xDA,
/*18*/ 0xF0,0x8B,0x00,0x00,0x00,0x00,0x00,0x00,
/*20*/ 0xD9,0x17,0x40,0x41,0x80,0x41,0x80,0x4F,
/*28*/ 0xFE,0x01,0x0F,0x0F,0x03,0x01,0x81,0x03,
/*30*/ 0x44,0x75,0x01,0x8C,0x03
};
/* Per-input SAA7110 register write sequences (8 writes each), indexed by
 * hexium_inputs[] position: entries 0-5 route the CVBS connectors, 6-8 the
 * Y/C connectors.  Values come from vendor code; the "??" markers and
 * alternative values in the original inline comments are preserved. */
static struct {
	struct hexium_data data[8];
} hexium_input_select[] = {
	{
		{	/* cvbs 1 */
			{ 0x06, 0x00 },
			{ 0x20, 0xD9 },
			{ 0x21, 0x17 },	// 0x16,
			{ 0x22, 0x40 },
			{ 0x2C, 0x03 },
			{ 0x30, 0x44 },
			{ 0x31, 0x75 },	// ??
			{ 0x21, 0x16 },	// 0x03,
		}
	}, {
		{	/* cvbs 2 */
			{ 0x06, 0x00 },
			{ 0x20, 0x78 },
			{ 0x21, 0x07 },	// 0x03,
			{ 0x22, 0xD2 },
			{ 0x2C, 0x83 },
			{ 0x30, 0x60 },
			{ 0x31, 0xB5 },	// ?
			{ 0x21, 0x03 },
		}
	}, {
		{	/* cvbs 3 */
			{ 0x06, 0x00 },
			{ 0x20, 0xBA },
			{ 0x21, 0x07 },	// 0x05,
			{ 0x22, 0x91 },
			{ 0x2C, 0x03 },
			{ 0x30, 0x60 },
			{ 0x31, 0xB5 },	// ??
			{ 0x21, 0x05 },	// 0x03,
		}
	}, {
		{	/* cvbs 4 */
			{ 0x06, 0x00 },
			{ 0x20, 0xD8 },
			{ 0x21, 0x17 },	// 0x16,
			{ 0x22, 0x40 },
			{ 0x2C, 0x03 },
			{ 0x30, 0x44 },
			{ 0x31, 0x75 },	// ??
			{ 0x21, 0x16 },	// 0x03,
		}
	}, {
		{	/* cvbs 5 */
			{ 0x06, 0x00 },
			{ 0x20, 0xB8 },
			{ 0x21, 0x07 },	// 0x05,
			{ 0x22, 0x91 },
			{ 0x2C, 0x03 },
			{ 0x30, 0x60 },
			{ 0x31, 0xB5 },	// ??
			{ 0x21, 0x05 },	// 0x03,
		}
	}, {
		{	/* cvbs 6 */
			{ 0x06, 0x00 },
			{ 0x20, 0x7C },
			{ 0x21, 0x07 },	// 0x03
			{ 0x22, 0xD2 },
			{ 0x2C, 0x83 },
			{ 0x30, 0x60 },
			{ 0x31, 0xB5 },	// ??
			{ 0x21, 0x03 },
		}
	}, {
		{	/* y/c 1 */
			{ 0x06, 0x80 },
			{ 0x20, 0x59 },
			{ 0x21, 0x17 },
			{ 0x22, 0x42 },
			{ 0x2C, 0xA3 },
			{ 0x30, 0x44 },
			{ 0x31, 0x75 },
			{ 0x21, 0x12 },
		}
	}, {
		{	/* y/c 2 */
			{ 0x06, 0x80 },
			{ 0x20, 0x9A },
			{ 0x21, 0x17 },
			{ 0x22, 0xB1 },
			{ 0x2C, 0x13 },
			{ 0x30, 0x60 },
			{ 0x31, 0xB5 },
			{ 0x21, 0x14 },
		}
	}, {
		{	/* y/c 3 */
			{ 0x06, 0x80 },
			{ 0x20, 0x3C },
			{ 0x21, 0x27 },
			{ 0x22, 0xC1 },
			{ 0x2C, 0x23 },
			{ 0x30, 0x44 },
			{ 0x31, 0x75 },
			{ 0x21, 0x21 },
		}
	}
};
static struct saa7146_standard hexium_standards[] = {
{
.name = "PAL", .id = V4L2_STD_PAL,
.v_offset = 16, .v_field = 288,
.h_offset = 1, .h_pixels = 680,
.v_max_out = 576, .h_max_out = 768,
}, {
.name = "NTSC", .id = V4L2_STD_NTSC,
.v_offset = 16, .v_field = 240,
.h_offset = 1, .h_pixels = 640,
.v_max_out = 480, .h_max_out = 640,
}, {
.name = "SECAM", .id = V4L2_STD_SECAM,
.v_offset = 16, .v_field = 288,
.h_offset = 1, .h_pixels = 720,
.v_max_out = 576, .h_max_out = 768,
}
};
/* this is only called for old HV-PCI6/Orion cards
without eeprom */
/* Probe callback for the saa7146 core: decide whether this board is a
 * Hexium Orion and which variant.  Newer cards are identified by PCI
 * subsystem IDs; old HV-PCI6/Orion cards (no eeprom) are detected by
 * finding a SAA7110 decoder answering at i2c address 0x4e.  On success
 * the per-card state is hung off dev->ext_priv (freed in
 * hexium_detach()); on failure everything allocated here is torn down. */
static int hexium_probe(struct saa7146_dev *dev)
{
struct hexium *hexium = NULL;
union i2c_smbus_data data;
int err = 0;
DEB_EE((".\n"));
/* there are no hexium orion cards with revision 0 saa7146s */
if (0 == dev->revision) {
return -EFAULT;
}
hexium = kzalloc(sizeof(struct hexium), GFP_KERNEL);
if (NULL == hexium) {
printk("hexium_orion: hexium_probe: not enough kernel memory.\n");
return -ENOMEM;
}
/* enable i2c-port pins */
saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));
saa7146_write(dev, DD1_INIT, 0x01000100);
saa7146_write(dev, DD1_STREAM_B, 0x00000000);
saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
hexium->i2c_adapter = (struct i2c_adapter) {
.name = "hexium orion",
};
saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
DEB_S(("cannot register i2c-device. skipping.\n"));
kfree(hexium);
return -EFAULT;
}
/* set SAA7110 control GPIO 0 */
saa7146_setgpio(dev, 0, SAA7146_GPIO_OUTHI);
/* set HWControl GPIO number 2 */
saa7146_setgpio(dev, 2, SAA7146_GPIO_OUTHI);
/* give the decoder time to come up before probing it over i2c */
mdelay(10);
/* detect newer Hexium Orion cards by subsystem ids */
if (0x17c8 == dev->pci->subsystem_vendor && 0x0101 == dev->pci->subsystem_device) {
printk("hexium_orion: device is a Hexium Orion w/ 1 SVHS + 3 BNC inputs.\n");
/* we store the pointer in our private data field */
dev->ext_priv = hexium;
hexium->type = HEXIUM_ORION_1SVHS_3BNC;
return 0;
}
if (0x17c8 == dev->pci->subsystem_vendor && 0x2101 == dev->pci->subsystem_device) {
printk("hexium_orion: device is a Hexium Orion w/ 4 BNC inputs.\n");
/* we store the pointer in our private data field */
dev->ext_priv = hexium;
hexium->type = HEXIUM_ORION_4BNC;
return 0;
}
/* check if this is an old hexium Orion card by looking at
a saa7110 at address 0x4e */
if (0 == (err = i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_READ, 0x00, I2C_SMBUS_BYTE_DATA, &data))) {
printk("hexium_orion: device is a Hexium HV-PCI6/Orion (old).\n");
/* we store the pointer in our private data field */
dev->ext_priv = hexium;
hexium->type = HEXIUM_HV_PCI6_ORION;
return 0;
}
/* nothing recognised: undo the adapter registration and allocation */
i2c_del_adapter(&hexium->i2c_adapter);
kfree(hexium);
return -EFAULT;
}
/* bring hardware to a sane state. this has to be done, just in case someone
wants to capture from this device before it has been properly initialized.
the capture engine would badly fail, because no valid signal arrives on the
saa7146, thus leading to timeouts and stuff. */
/* Bring the SAA7110 decoder to a sane default state by writing the
 * hexium_saa7110[] register table over i2c.  Called from
 * hexium_attach() so that a capture attempt before the first S_INPUT
 * does not leave the saa7146 engine on an undefined signal. */
static int hexium_init_done(struct saa7146_dev *dev)
{
struct hexium *hexium = (struct hexium *) dev->ext_priv;
union i2c_smbus_data data;
int i = 0;
DEB_D(("hexium_init_done called.\n"));
/* initialize the helper ics to useful values */
for (i = 0; i < sizeof(hexium_saa7110); i++) {
data.byte = hexium_saa7110[i];
/* register index == table index; failures are logged but non-fatal */
if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_WRITE, i, I2C_SMBUS_BYTE_DATA, &data)) {
printk("hexium_orion: failed for address 0x%02x\n", i);
}
}
return 0;
}
/* Program the SAA7110 for the given input by writing the 8-entry
 * register sequence from hexium_input_select[].  Returns 0 on success,
 * -1 when any i2c write fails (legacy convention kept for callers). */
static int hexium_set_input(struct hexium *hexium, int input)
{
	union i2c_smbus_data data;
	int i = 0;

	DEB_D((".\n"));

	for (i = 0; i < 8; i++) {
		int adr = hexium_input_select[input].data[i].adr;
		data.byte = hexium_input_select[input].data[i].byte;
		if (0 != i2c_smbus_xfer(&hexium->i2c_adapter, 0x4e, 0, I2C_SMBUS_WRITE, adr, I2C_SMBUS_BYTE_DATA, &data)) {
			return -1;
		}
		/* register trace demoted to debug: the unconditional
		 * printk() here spammed the kernel log (with no KERN_
		 * level) on every single input switch */
		DEB_D(("%d: 0x%02x => 0x%02x\n", input, adr, data.byte));
	}

	return 0;
}
/* VIDIOC_ENUMINPUT handler: copy the table entry for the requested
 * index into *i, or -EINVAL when the index is out of range. */
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
	DEB_EE(("VIDIOC_ENUMINPUT %d.\n", i->index));

	/* only HEXIUM_INPUTS entries exist in the static table */
	if (i->index >= HEXIUM_INPUTS)
		return -EINVAL;

	*i = hexium_inputs[i->index];	/* struct copy instead of memcpy */

	DEB_D(("v4l2_ioctl: VIDIOC_ENUMINPUT %d.\n", i->index));
	return 0;
}
/* VIDIOC_G_INPUT handler: report the input last programmed via
 * VIDIOC_S_INPUT (cached in the per-card state). */
static int vidioc_g_input(struct file *file, void *fh, unsigned int *input)
{
	struct saa7146_dev *sdev = ((struct saa7146_fh *)fh)->dev;
	struct hexium *hex = (struct hexium *)sdev->ext_priv;

	*input = hex->cur_input;

	DEB_D(("VIDIOC_G_INPUT: %d\n", *input));
	return 0;
}
static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
{
struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev;
struct hexium *hexium = (struct hexium *) dev->ext_priv;
if (input >= HEXIUM_INPUTS)
return -EINVAL;
hexium->cur_input = input;
hexium_set_input(hexium, input);
return 0;
}
static struct saa7146_ext_vv vv_data;
/* this function only gets called when the probing was successful */
/* Attach callback, called by the saa7146 core after hexium_probe()
 * succeeded: set up the video core, install our ioctl overrides,
 * register the capture device and initialise the decoder.
 * @info is the matched pci extension data (unused here). */
static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info)
{
	struct hexium *hexium = (struct hexium *) dev->ext_priv;

	DEB_EE((".\n"));

	saa7146_vv_init(dev, &vv_data);
	vv_data.ops.vidioc_enum_input = vidioc_enum_input;
	vv_data.ops.vidioc_g_input = vidioc_g_input;
	vv_data.ops.vidioc_s_input = vidioc_s_input;
	if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
		printk("hexium_orion: cannot register capture v4l2 device. skipping.\n");
		/* undo saa7146_vv_init() so the core is not left half
		 * initialised on this error path (was leaked before) */
		saa7146_vv_release(dev);
		return -1;
	}

	printk("hexium_orion: found 'hexium orion' frame grabber-%d.\n", hexium_num);
	hexium_num++;

	/* the rest */
	hexium->cur_input = 0;
	hexium_init_done(dev);

	return 0;
}
/* Detach callback: unregister the video device, release the saa7146
 * video core, then drop the i2c adapter and free the per-card state
 * that hexium_probe() allocated. */
static int hexium_detach(struct saa7146_dev *dev)
{
struct hexium *hexium = (struct hexium *) dev->ext_priv;
DEB_EE(("dev:%p\n", dev));
saa7146_unregister_device(&hexium->video_dev, dev);
saa7146_vv_release(dev);
hexium_num--;
i2c_del_adapter(&hexium->i2c_adapter);
kfree(hexium);
return 0;
}
static int std_callback(struct saa7146_dev *dev, struct saa7146_standard *std)
{
return 0;
}
static struct saa7146_extension extension;
static struct saa7146_pci_extension_data hexium_hv_pci6 = {
.ext_priv = "Hexium HV-PCI6 / Orion",
.ext = &extension,
};
static struct saa7146_pci_extension_data hexium_orion_1svhs_3bnc = {
.ext_priv = "Hexium HV-PCI6 / Orion (1 SVHS/3 BNC)",
.ext = &extension,
};
static struct saa7146_pci_extension_data hexium_orion_4bnc = {
.ext_priv = "Hexium HV-PCI6 / Orion (4 BNC)",
.ext = &extension,
};
static struct pci_device_id pci_tbl[] = {
{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7146,
.subvendor = 0x0000,
.subdevice = 0x0000,
.driver_data = (unsigned long) &hexium_hv_pci6,
},
{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7146,
.subvendor = 0x17c8,
.subdevice = 0x0101,
.driver_data = (unsigned long) &hexium_orion_1svhs_3bnc,
},
{
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7146,
.subvendor = 0x17c8,
.subdevice = 0x2101,
.driver_data = (unsigned long) &hexium_orion_4bnc,
},
{
.vendor = 0,
}
};
MODULE_DEVICE_TABLE(pci, pci_tbl);
/* saa7146 video core configuration: static standards table plus the
 * per-board input count; the vidioc_* overrides are patched in at
 * attach time. */
static struct saa7146_ext_vv vv_data = {
	.inputs = HEXIUM_INPUTS,
	.capabilities = 0,
	.stds = &hexium_standards[0],
	/* ARRAY_SIZE is the kernel idiom for element counts and stays
	 * correct if the element type ever changes */
	.num_stds = ARRAY_SIZE(hexium_standards),
	.std_callback = &std_callback,
};
static struct saa7146_extension extension = {
.name = "hexium HV-PCI6 Orion",
.flags = 0, // SAA7146_USE_I2C_IRQ,
.pci_tbl = &pci_tbl[0],
.module = THIS_MODULE,
.probe = hexium_probe,
.attach = hexium_attach,
.detach = hexium_detach,
.irq_mask = 0,
.irq_func = NULL,
};
/* Module entry point: hook into the saa7146 core.  probe/attach are
 * then invoked by the core once per matching PCI device. */
static int __init hexium_init_module(void)
{
	int ret = saa7146_register_extension(&extension);

	if (ret != 0) {
		DEB_S(("failed to register extension.\n"));
		return -ENODEV;
	}

	return 0;
}
static void __exit hexium_cleanup_module(void)
{
saa7146_unregister_extension(&extension);
}
module_init(hexium_init_module);
module_exit(hexium_cleanup_module);
MODULE_DESCRIPTION("video4linux-2 driver for Hexium Orion frame grabber cards");
MODULE_AUTHOR("Michael Hunold <michael@mihu.de>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
CyanogenMod/android_kernel_sony_msm8x60 | fs/fcntl.c | 4322 | 19144 | /*
* linux/fs/fcntl.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>
/* Set (@flag != 0) or clear the close-on-exec bit for @fd in the
 * current process's file table.  The fdtable pointer must be
 * re-fetched under file_lock because the table can be replaced
 * concurrently by expand_files(). */
void set_close_on_exec(unsigned int fd, int flag)
{
struct files_struct *files = current->files;
struct fdtable *fdt;
spin_lock(&files->file_lock);
fdt = files_fdtable(files);
if (flag)
__set_close_on_exec(fd, fdt);
else
__clear_close_on_exec(fd, fdt);
spin_unlock(&files->file_lock);
}
static bool get_close_on_exec(unsigned int fd)
{
struct files_struct *files = current->files;
struct fdtable *fdt;
bool res;
rcu_read_lock();
fdt = files_fdtable(files);
res = close_on_exec(fd, fdt);
rcu_read_unlock();
return res;
}
/* dup3(2): make @newfd refer to the same open file as @oldfd,
 * closing whatever @newfd referred to before.  Only O_CLOEXEC is
 * accepted in @flags and, unlike dup2(2), oldfd == newfd is an
 * error.  Returns @newfd or a negative errno. */
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
int err = -EBADF;
struct file * file, *tofree;
struct files_struct * files = current->files;
struct fdtable *fdt;
if ((flags & ~O_CLOEXEC) != 0)
return -EINVAL;
if (unlikely(oldfd == newfd))
return -EINVAL;
spin_lock(&files->file_lock);
/* grow the fdtable first: newfd may lie beyond its current size */
err = expand_files(files, newfd);
file = fcheck(oldfd);
if (unlikely(!file))
goto Ebadf;
if (unlikely(err < 0)) {
if (err == -EMFILE)
goto Ebadf;
goto out_unlock;
}
/*
* We need to detect attempts to do dup2() over allocated but still
* not finished descriptor. NB: OpenBSD avoids that at the price of
* extra work in their equivalent of fget() - they insert struct
* file immediately after grabbing descriptor, mark it larval if
* more work (e.g. actual opening) is needed and make sure that
* fget() treats larval files as absent. Potentially interesting,
* but while extra work in fget() is trivial, locking implications
* and amount of surgery on open()-related paths in VFS are not.
* FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
* deadlocks in rather amusing ways, AFAICS. All of that is out of
* scope of POSIX or SUS, since neither considers shared descriptor
* tables and this condition does not arise without those.
*/
err = -EBUSY;
fdt = files_fdtable(files);
tofree = fdt->fd[newfd];
/* open bit set but no file installed yet: larval fd, see above */
if (!tofree && fd_is_open(newfd, fdt))
goto out_unlock;
get_file(file);
rcu_assign_pointer(fdt->fd[newfd], file);
__set_open_fd(newfd, fdt);
if (flags & O_CLOEXEC)
__set_close_on_exec(newfd, fdt);
else
__clear_close_on_exec(newfd, fdt);
spin_unlock(&files->file_lock);
/* close the displaced file only after dropping the spinlock */
if (tofree)
filp_close(tofree, files);
return newfd;
Ebadf:
err = -EBADF;
out_unlock:
spin_unlock(&files->file_lock);
return err;
}
/* dup2(2): equivalent to dup3(oldfd, newfd, 0) except that
 * oldfd == newfd is a no-op which merely validates that oldfd is
 * open, returning it (or -EBADF). */
SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
if (unlikely(newfd == oldfd)) { /* corner case */
struct files_struct *files = current->files;
int retval = oldfd;
rcu_read_lock();
if (!fcheck_files(files, oldfd))
retval = -EBADF;
rcu_read_unlock();
return retval;
}
return sys_dup3(oldfd, newfd, 0);
}
SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
int ret = -EBADF;
struct file *file = fget_raw(fildes);
if (file) {
ret = get_unused_fd();
if (ret >= 0)
fd_install(ret, file);
else
fput(file);
}
return ret;
}
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
/* F_SETFL helper: apply @arg to filp->f_flags.  Only the bits in
 * SETFL_MASK may change; permission checks guard clearing O_APPEND
 * on append-only inodes and setting O_NOATIME, and the fs gets a
 * veto via ->check_flags().  ->fasync() owns the FASYNC bit. */
static int setfl(int fd, struct file * filp, unsigned long arg)
{
struct inode * inode = filp->f_path.dentry->d_inode;
int error = 0;
/*
* O_APPEND cannot be cleared if the file is marked as append-only
* and the file is open for write.
*/
if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
return -EPERM;
/* O_NOATIME can only be set by the owner or superuser */
if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
if (!inode_owner_or_capable(inode))
return -EPERM;
/* required for strict SunOS emulation */
if (O_NONBLOCK != O_NDELAY)
if (arg & O_NDELAY)
arg |= O_NONBLOCK;
/* O_DIRECT only makes sense when the mapping supports direct I/O */
if (arg & O_DIRECT) {
if (!filp->f_mapping || !filp->f_mapping->a_ops ||
!filp->f_mapping->a_ops->direct_IO)
return -EINVAL;
}
if (filp->f_op && filp->f_op->check_flags)
error = filp->f_op->check_flags(arg);
if (error)
return error;
/*
* ->fasync() is responsible for setting the FASYNC bit.
*/
if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op &&
filp->f_op->fasync) {
error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
if (error < 0)
goto out;
if (error > 0)
error = 0;
}
/* f_lock serialises f_flags updates against concurrent F_SETFL */
spin_lock(&filp->f_lock);
filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
spin_unlock(&filp->f_lock);
out:
return error;
}
/* Install (@pid, @type) as the owner of @filp for SIGIO/SIGURG
 * delivery.  With !@force an already-set owner is left untouched
 * (used by the fasync path).  The caller's uid/euid are recorded so
 * sigio_perm() can later check delivery permission. */
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
int force)
{
write_lock_irq(&filp->f_owner.lock);
if (force || !filp->f_owner.pid) {
put_pid(filp->f_owner.pid);
filp->f_owner.pid = get_pid(pid);
filp->f_owner.pid_type = type;
if (pid) {
const struct cred *cred = current_cred();
filp->f_owner.uid = cred->uid;
filp->f_owner.euid = cred->euid;
}
}
write_unlock_irq(&filp->f_owner.lock);
}
int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
int force)
{
int err;
err = security_file_set_fowner(filp);
if (err)
return err;
f_modown(filp, pid, type, force);
return 0;
}
EXPORT_SYMBOL(__f_setown);
/* F_SETOWN: negative @arg selects a process group (-arg), positive a
 * single pid.  The pid lookup and __f_setown() both run under
 * rcu_read_lock() so the struct pid cannot go away in between. */
int f_setown(struct file *filp, unsigned long arg, int force)
{
enum pid_type type;
struct pid *pid;
int who = arg;
int result;
type = PIDTYPE_PID;
if (who < 0) {
type = PIDTYPE_PGID;
who = -who;
}
rcu_read_lock();
pid = find_vpid(who);
result = __f_setown(filp, pid, type, force);
rcu_read_unlock();
return result;
}
EXPORT_SYMBOL(f_setown);
void f_delown(struct file *filp)
{
f_modown(filp, NULL, PIDTYPE_PID, 1);
}
pid_t f_getown(struct file *filp)
{
pid_t pid;
read_lock(&filp->f_owner.lock);
pid = pid_vnr(filp->f_owner.pid);
if (filp->f_owner.pid_type == PIDTYPE_PGID)
pid = -pid;
read_unlock(&filp->f_owner.lock);
return pid;
}
/* F_SETOWN_EX: like F_SETOWN but with an explicit owner kind from
 * userspace.  F_OWNER_TID maps to the internal PIDTYPE_MAX marker,
 * meaning "this specific thread" rather than a whole thread group. */
static int f_setown_ex(struct file *filp, unsigned long arg)
{
struct f_owner_ex * __user owner_p = (void * __user)arg;
struct f_owner_ex owner;
struct pid *pid;
int type;
int ret;
ret = copy_from_user(&owner, owner_p, sizeof(owner));
if (ret)
return -EFAULT;
switch (owner.type) {
case F_OWNER_TID:
type = PIDTYPE_MAX;
break;
case F_OWNER_PID:
type = PIDTYPE_PID;
break;
case F_OWNER_PGRP:
type = PIDTYPE_PGID;
break;
default:
return -EINVAL;
}
rcu_read_lock();
pid = find_vpid(owner.pid);
/* pid 0 clears the owner; a nonzero pid that no longer exists is -ESRCH */
if (owner.pid && !pid)
ret = -ESRCH;
else
ret = __f_setown(filp, pid, type, 1);
rcu_read_unlock();
return ret;
}
static int f_getown_ex(struct file *filp, unsigned long arg)
{
struct f_owner_ex * __user owner_p = (void * __user)arg;
struct f_owner_ex owner;
int ret = 0;
read_lock(&filp->f_owner.lock);
owner.pid = pid_vnr(filp->f_owner.pid);
switch (filp->f_owner.pid_type) {
case PIDTYPE_MAX:
owner.type = F_OWNER_TID;
break;
case PIDTYPE_PID:
owner.type = F_OWNER_PID;
break;
case PIDTYPE_PGID:
owner.type = F_OWNER_PGRP;
break;
default:
WARN_ON(1);
ret = -EINVAL;
break;
}
read_unlock(&filp->f_owner.lock);
if (!ret) {
ret = copy_to_user(owner_p, &owner, sizeof(owner));
if (ret)
ret = -EFAULT;
}
return ret;
}
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
struct file *filp)
{
long err = -EINVAL;
switch (cmd) {
case F_DUPFD:
case F_DUPFD_CLOEXEC:
if (arg >= rlimit(RLIMIT_NOFILE))
break;
err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
if (err >= 0) {
get_file(filp);
fd_install(err, filp);
}
break;
case F_GETFD:
err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
break;
case F_SETFD:
err = 0;
set_close_on_exec(fd, arg & FD_CLOEXEC);
break;
case F_GETFL:
err = filp->f_flags;
break;
case F_SETFL:
err = setfl(fd, filp, arg);
break;
case F_GETLK:
err = fcntl_getlk(filp, (struct flock __user *) arg);
break;
case F_SETLK:
case F_SETLKW:
err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
break;
case F_GETOWN:
/*
* XXX If f_owner is a process group, the
* negative return value will get converted
* into an error. Oops. If we keep the
* current syscall conventions, the only way
* to fix this will be in libc.
*/
err = f_getown(filp);
force_successful_syscall_return();
break;
case F_SETOWN:
err = f_setown(filp, arg, 1);
break;
case F_GETOWN_EX:
err = f_getown_ex(filp, arg);
break;
case F_SETOWN_EX:
err = f_setown_ex(filp, arg);
break;
case F_GETSIG:
err = filp->f_owner.signum;
break;
case F_SETSIG:
/* arg == 0 restores default behaviour. */
if (!valid_signal(arg)) {
break;
}
err = 0;
filp->f_owner.signum = arg;
break;
case F_GETLEASE:
err = fcntl_getlease(filp);
break;
case F_SETLEASE:
err = fcntl_setlease(fd, filp, arg);
break;
case F_NOTIFY:
err = fcntl_dirnotify(fd, filp, arg);
break;
case F_SETPIPE_SZ:
case F_GETPIPE_SZ:
err = pipe_fcntl(filp, cmd, arg);
break;
default:
break;
}
return err;
}
/* Which fcntl commands remain usable on O_PATH descriptors: the dup
 * family plus the fd-flag and file-flag queries.  Returns 1 if @cmd
 * is allowed, 0 otherwise. */
static int check_fcntl_cmd(unsigned cmd)
{
	return cmd == F_DUPFD || cmd == F_DUPFD_CLOEXEC ||
	       cmd == F_GETFD || cmd == F_SETFD || cmd == F_GETFL;
}
/* fcntl(2) entry point: look up @fd with fget_raw() (so O_PATH
 * descriptors are visible), restrict O_PATH files to the commands
 * permitted by check_fcntl_cmd(), run the LSM hook, then dispatch
 * to do_fcntl(). */
SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
struct file *filp;
long err = -EBADF;
filp = fget_raw(fd);
if (!filp)
goto out;
/* O_PATH files support only a small command subset */
if (unlikely(filp->f_mode & FMODE_PATH)) {
if (!check_fcntl_cmd(cmd)) {
fput(filp);
goto out;
}
}
err = security_file_fcntl(filp, cmd, arg);
if (err) {
fput(filp);
return err;
}
err = do_fcntl(fd, cmd, arg, filp);
fput(filp);
out:
return err;
}
#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
unsigned long, arg)
{
struct file * filp;
long err;
err = -EBADF;
filp = fget_raw(fd);
if (!filp)
goto out;
if (unlikely(filp->f_mode & FMODE_PATH)) {
if (!check_fcntl_cmd(cmd)) {
fput(filp);
goto out;
}
}
err = security_file_fcntl(filp, cmd, arg);
if (err) {
fput(filp);
return err;
}
err = -EBADF;
switch (cmd) {
case F_GETLK64:
err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
break;
case F_SETLK64:
case F_SETLKW64:
err = fcntl_setlk64(fd, filp, cmd,
(struct flock64 __user *) arg);
break;
default:
err = do_fcntl(fd, cmd, arg, filp);
break;
}
fput(filp);
out:
return err;
}
#endif
/* Table to convert sigio signal codes into poll band bitmaps */
static const long band_table[NSIGPOLL] = {
POLLIN | POLLRDNORM, /* POLL_IN */
POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */
POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */
POLLERR, /* POLL_ERR */
POLLPRI | POLLRDBAND, /* POLL_PRI */
POLLHUP | POLLERR /* POLL_HUP */
};
/* May the owner recorded in @fown (uid/euid captured at F_SETOWN
 * time) deliver @sig to task @p?  Allows root-owned files and
 * uid/euid matches against the target's credentials, then gives the
 * LSM a veto.  Credentials are read under rcu_read_lock(). */
static inline int sigio_perm(struct task_struct *p,
struct fown_struct *fown, int sig)
{
const struct cred *cred;
int ret;
rcu_read_lock();
cred = __task_cred(p);
ret = ((fown->euid == 0 ||
fown->euid == cred->suid || fown->euid == cred->uid ||
fown->uid == cred->suid || fown->uid == cred->uid) &&
!security_file_send_sigiotask(p, fown, sig));
rcu_read_unlock();
return ret;
}
/* Deliver the owner's SIGIO (or F_SETSIG-configured rt signal) to a
 * single task.  @reason is a POLL_* si_code; @group selects whole
 * thread-group vs single-thread delivery. */
static void send_sigio_to_task(struct task_struct *p,
struct fown_struct *fown,
int fd, int reason, int group)
{
/*
* F_SETSIG can change ->signum lockless in parallel, make
* sure we read it once and use the same value throughout.
*/
int signum = ACCESS_ONCE(fown->signum);
if (!sigio_perm(p, fown, signum))
return;
switch (signum) {
siginfo_t si; /* declaration only: the case labels jump over it */
default:
/* Queue a rt signal with the appropriate fd as its
value. We use SI_SIGIO as the source, not
SI_KERNEL, since kernel signals always get
delivered even if we can't queue. Failure to
queue in this case _should_ be reported; we fall
back to SIGIO in that case. --sct */
si.si_signo = signum;
si.si_errno = 0;
si.si_code = reason;
/* Make sure we are called with one of the POLL_*
reasons, otherwise we could leak kernel stack into
userspace. */
BUG_ON((reason & __SI_MASK) != __SI_POLL);
if (reason - POLL_IN >= NSIGPOLL)
si.si_band = ~0L;
else
si.si_band = band_table[reason - POLL_IN];
si.si_fd = fd;
if (!do_send_sig_info(signum, &si, p, group))
break;
/* fall-through: fall back on the old plain SIGIO signal */
case 0:
do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, group);
}
}
/* Fan SIGIO out to every task matching the owner stored in @fown.
 * The internal PIDTYPE_MAX marker means a specific thread
 * (F_OWNER_TID): deliver to that task alone, not its whole group. */
void send_sigio(struct fown_struct *fown, int fd, int band)
{
struct task_struct *p;
enum pid_type type;
struct pid *pid;
int group = 1;
read_lock(&fown->lock);
type = fown->pid_type;
if (type == PIDTYPE_MAX) {
group = 0;
type = PIDTYPE_PID;
}
pid = fown->pid;
if (!pid)
goto out_unlock_fown;
read_lock(&tasklist_lock);
do_each_pid_task(pid, type, p) {
send_sigio_to_task(p, fown, fd, band, group);
} while_each_pid_task(pid, type, p);
read_unlock(&tasklist_lock);
out_unlock_fown:
read_unlock(&fown->lock);
}
static void send_sigurg_to_task(struct task_struct *p,
struct fown_struct *fown, int group)
{
if (sigio_perm(p, fown, SIGURG))
do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, group);
}
int send_sigurg(struct fown_struct *fown)
{
struct task_struct *p;
enum pid_type type;
struct pid *pid;
int group = 1;
int ret = 0;
read_lock(&fown->lock);
type = fown->pid_type;
if (type == PIDTYPE_MAX) {
group = 0;
type = PIDTYPE_PID;
}
pid = fown->pid;
if (!pid)
goto out_unlock_fown;
ret = 1;
read_lock(&tasklist_lock);
do_each_pid_task(pid, type, p) {
send_sigurg_to_task(p, fown, group);
} while_each_pid_task(pid, type, p);
read_unlock(&tasklist_lock);
out_unlock_fown:
read_unlock(&fown->lock);
return ret;
}
static DEFINE_SPINLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;
static void fasync_free_rcu(struct rcu_head *head)
{
kmem_cache_free(fasync_cache,
container_of(head, struct fasync_struct, fa_rcu));
}
/*
* Remove a fasync entry. If successfully removed, return
* positive and clear the FASYNC flag. If no entry exists,
* do nothing and return 0.
*
* NOTE! It is very important that the FASYNC flag always
* match the state "is the filp on a fasync list".
*
*/
int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
struct fasync_struct *fa, **fp;
int result = 0;
/* f_lock guards the FASYNC flag, fasync_lock the list itself */
spin_lock(&filp->f_lock);
spin_lock(&fasync_lock);
for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
if (fa->fa_file != filp)
continue;
/* clear fa_file under fa_lock so kill_fasync_rcu() skips us */
spin_lock_irq(&fa->fa_lock);
fa->fa_file = NULL;
spin_unlock_irq(&fa->fa_lock);
/* unlink now; free only after a grace period for RCU walkers */
*fp = fa->fa_next;
call_rcu(&fa->fa_rcu, fasync_free_rcu);
filp->f_flags &= ~FASYNC;
result = 1;
break;
}
spin_unlock(&fasync_lock);
spin_unlock(&filp->f_lock);
return result;
}
struct fasync_struct *fasync_alloc(void)
{
return kmem_cache_alloc(fasync_cache, GFP_KERNEL);
}
/*
* NOTE! This can be used only for unused fasync entries:
* entries that actually got inserted on the fasync list
* need to be released by rcu - see fasync_remove_entry.
*/
void fasync_free(struct fasync_struct *new)
{
kmem_cache_free(fasync_cache, new);
}
/*
* Insert a new entry into the fasync list. Return the pointer to the
* old one if we didn't use the new one.
*
* NOTE! It is very important that the FASYNC flag always
* match the state "is the filp on a fasync list".
*/
struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new)
{
struct fasync_struct *fa, **fp;
/* f_lock guards the FASYNC flag, fasync_lock the list itself */
spin_lock(&filp->f_lock);
spin_lock(&fasync_lock);
for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
if (fa->fa_file != filp)
continue;
/* entry already present: just refresh the fd under fa_lock */
spin_lock_irq(&fa->fa_lock);
fa->fa_fd = fd;
spin_unlock_irq(&fa->fa_lock);
goto out;
}
spin_lock_init(&new->fa_lock);
new->magic = FASYNC_MAGIC;
new->fa_file = filp;
new->fa_fd = fd;
new->fa_next = *fapp;
/* publish to RCU readers only once fully initialised */
rcu_assign_pointer(*fapp, new);
filp->f_flags |= FASYNC;
out:
spin_unlock(&fasync_lock);
spin_unlock(&filp->f_lock);
return fa; /* NULL if @new was inserted, else the pre-existing entry */
}
/*
* Add a fasync entry. Return negative on error, positive if
* added, and zero if did nothing but change an existing one.
*/
static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
{
struct fasync_struct *new;
new = fasync_alloc();
if (!new)
return -ENOMEM;
/*
* fasync_insert_entry() returns the old (update) entry if
* it existed.
*
* So free the (unused) new entry and return 0 to let the
* caller know that we didn't add any new fasync entries.
*/
if (fasync_insert_entry(fd, filp, fapp, new)) {
fasync_free(new);
return 0;
}
return 1;
}
/*
* fasync_helper() is used by almost all character device drivers
* to set up the fasync queue, and for regular files by the file
* lease code. It returns negative on error, 0 if it did no changes
* and positive if it added/deleted the entry.
*/
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
	/* "on" selects between registering and unregistering @filp */
	return on ? fasync_add_entry(fd, filp, fapp)
		  : fasync_remove_entry(filp, fapp);
}
EXPORT_SYMBOL(fasync_helper);
/*
* rcu_read_lock() is held
*/
/* Walk one fasync list under RCU and raise @sig for each live entry.
 * Entries whose fa_file was NULLed by fasync_remove_entry() are in
 * the middle of removal and are skipped. */
static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
{
while (fa) {
struct fown_struct *fown;
unsigned long flags;
/* corrupted list / use-after-free canary */
if (fa->magic != FASYNC_MAGIC) {
printk(KERN_ERR "kill_fasync: bad magic number in "
"fasync_struct!\n");
return;
}
spin_lock_irqsave(&fa->fa_lock, flags);
if (fa->fa_file) {
fown = &fa->fa_file->f_owner;
/* Don't send SIGURG to processes which have not set a
queued signum: SIGURG has its own default signalling
mechanism. */
if (!(sig == SIGURG && fown->signum == 0))
send_sigio(fown, fa->fa_fd, band);
}
spin_unlock_irqrestore(&fa->fa_lock, flags);
fa = rcu_dereference(fa->fa_next);
}
}
/* Public entry point: signal everyone on the fasync list at @fp.
 * The unlocked *fp test is a benign fast path for the common empty
 * list; the real walk happens under rcu_read_lock(). */
void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
/* First a quick test without locking: usually
* the list is empty.
*/
if (*fp) {
rcu_read_lock();
kill_fasync_rcu(rcu_dereference(*fp), sig, band);
rcu_read_unlock();
}
}
EXPORT_SYMBOL(kill_fasync);
static int __init fcntl_init(void)
{
/*
* Please add new bits here to ensure allocation uniqueness.
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
O_RDONLY | O_WRONLY | O_RDWR |
O_CREAT | O_EXCL | O_NOCTTY |
O_TRUNC | O_APPEND | /* O_NONBLOCK | */
__O_SYNC | O_DSYNC | FASYNC |
O_DIRECT | O_LARGEFILE | O_DIRECTORY |
O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
__FMODE_EXEC | O_PATH
));
fasync_cache = kmem_cache_create("fasync_cache",
sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
return 0;
}
module_init(fcntl_init)
| gpl-2.0 |
songbaby/linux | drivers/platform/x86/intel_scu_ipcutil.c | 8674 | 3046 | /*
* intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism
*
* (C) Copyright 2008-2010 Intel Corporation
* Author: Sreedhara DS (sreedhara.ds@intel.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*
* This driver provides ioctl interfaces to call intel scu ipc driver api
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <asm/intel_scu_ipc.h>
static int major;
/* ioctl commnds */
#define INTE_SCU_IPC_REGISTER_READ 0
#define INTE_SCU_IPC_REGISTER_WRITE 1
#define INTE_SCU_IPC_REGISTER_UPDATE 2
struct scu_ipc_data {
u32 count; /* No. of registers */
u16 addr[5]; /* Register addresses */
u8 data[5]; /* Register data */
u8 mask; /* Valid for read-modify-write */
};
/**
* scu_reg_access - implement register access ioctls
* @cmd: command we are doing (read/write/update)
* @data: kernel copy of ioctl data
*
* Allow the user to perform register accesses on the SCU via the
* kernel interface
*/
/**
 * scu_reg_access - implement register access ioctls
 * @cmd: command we are doing (read/write/update)
 * @data: kernel copy of ioctl data
 *
 * Allow the user to perform register accesses on the SCU via the
 * kernel interface.  Only 1, 2 or 4 register operations are valid.
 */
static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
{
	/* keep the user-supplied count unsigned: with a signed int, a
	 * value above INT_MAX turned negative, slipped past the "> 4"
	 * bound check below, and indexed past the 5-element addr[] /
	 * data[] arrays (kernel info leak / memory corruption) */
	u32 count = data->count;

	if (count == 0 || count == 3 || count > 4)
		return -EINVAL;

	switch (cmd) {
	case INTE_SCU_IPC_REGISTER_READ:
		return intel_scu_ipc_readv(data->addr, data->data, count);
	case INTE_SCU_IPC_REGISTER_WRITE:
		return intel_scu_ipc_writev(data->addr, data->data, count);
	case INTE_SCU_IPC_REGISTER_UPDATE:
		return intel_scu_ipc_update_register(data->addr[0],
						     data->data[0], data->mask);
	default:
		return -ENOTTY;
	}
}
/**
* scu_ipc_ioctl - control ioctls for the SCU
* @fp: file handle of the SCU device
* @cmd: ioctl coce
* @arg: pointer to user passed structure
*
* Support the I/O and firmware flashing interfaces of the SCU
*/
static long scu_ipc_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg)
{
int ret;
struct scu_ipc_data data;
void __user *argp = (void __user *)arg;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
if (copy_from_user(&data, argp, sizeof(struct scu_ipc_data)))
return -EFAULT;
ret = scu_reg_access(cmd, &data);
if (ret < 0)
return ret;
if (copy_to_user(argp, &data, sizeof(struct scu_ipc_data)))
return -EFAULT;
return 0;
}
static const struct file_operations scu_ipc_fops = {
.unlocked_ioctl = scu_ipc_ioctl,
};
/* Register the SCU char device with a dynamically-allocated major
 * number; register_chrdev() returns the major or a negative errno. */
static int __init ipc_module_init(void)
{
	major = register_chrdev(0, "intel_mid_scu", &scu_ipc_fops);
	return major < 0 ? major : 0;
}
static void __exit ipc_module_exit(void)
{
unregister_chrdev(major, "intel_mid_scu");
}
module_init(ipc_module_init);
module_exit(ipc_module_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Utility driver for intel scu ipc");
MODULE_AUTHOR("Sreedhara <sreedhara.ds@intel.com>");
| gpl-2.0 |
jderrick/linux-block | arch/sh/kernel/cpu/sh3/clock-sh7705.c | 9186 | 2170 | /*
* arch/sh/kernel/cpu/sh3/clock-sh7705.c
*
* SH7705 support for the clock framework
*
* Copyright (C) 2005 Paul Mundt
*
* FRQCR parsing hacked out of arch/sh/kernel/time.c
*
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
* Copyright (C) 2002, 2003, 2004 Paul Mundt
* Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
/*
* SH7705 uses the same divisors as the generic SH-3 case, it's just the
* FRQCR layout that is a bit different..
*/
static int stc_multipliers[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
static int ifc_divisors[] = { 1, 2, 3, 4, 1, 1, 1, 1 };
static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
/* Scale the master clock rate by the peripheral-clock divisor encoded
 * in FRQCR bits [1:0]; invoked once by the clock framework via the
 * .init hook. */
static void master_clk_init(struct clk *clk)
{
clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0003];
}
static struct sh_clk_ops sh7705_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = __raw_readw(FRQCR) & 0x0003;
return clk->parent->rate / pfc_divisors[idx];
}
static struct sh_clk_ops sh7705_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FRQCR) & 0x0300) >> 8;
return clk->parent->rate / stc_multipliers[idx];
}
static struct sh_clk_ops sh7705_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FRQCR) & 0x0030) >> 4;
return clk->parent->rate / ifc_divisors[idx];
}
static struct sh_clk_ops sh7705_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh7705_clk_ops[] = {
&sh7705_master_clk_ops,
&sh7705_module_clk_ops,
&sh7705_bus_clk_ops,
&sh7705_cpu_clk_ops,
};
/* Hand the generic clock framework the ops for clock @idx
 * (0 = master, 1 = module, 2 = bus, 3 = cpu); out-of-range indices
 * leave *ops untouched. */
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh7705_clk_ops))
*ops = sh7705_clk_ops[idx];
}
| gpl-2.0 |
LuckJC/cubie-linux | arch/sh/kernel/cpu/sh3/clock-sh7705.c | 9186 | 2170 | /*
* arch/sh/kernel/cpu/sh3/clock-sh7705.c
*
* SH7705 support for the clock framework
*
* Copyright (C) 2005 Paul Mundt
*
* FRQCR parsing hacked out of arch/sh/kernel/time.c
*
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
* Copyright (C) 2002, 2003, 2004 Paul Mundt
* Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
/*
* SH7705 uses the same divisors as the generic SH-3 case, it's just the
* FRQCR layout that is a bit different..
*/
/*
 * Divisor/multiplier tables indexed directly by FRQCR bit-fields.
 * Trailing 1s cover reserved register encodings so an unexpected
 * setting degrades to "no scaling" rather than an out-of-bounds read.
 */
static int stc_multipliers[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
static int ifc_divisors[] = { 1, 2, 3, 4, 1, 1, 1, 1 };
static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
/* Scale the registered rate by the peripheral divisor in FRQCR[1:0]. */
static void master_clk_init(struct clk *clk)
{
	unsigned int pfc = __raw_readw(FRQCR) & 0x0003;

	clk->rate *= pfc_divisors[pfc];
}

/* One-shot init; the master clock has no recalc. */
static struct sh_clk_ops sh7705_master_clk_ops = {
	.init	= master_clk_init,
};
/* Peripheral module clock: parent rate / PFC divisor (FRQCR[1:0]). */
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = __raw_readw(FRQCR) & 0x0003;
return clk->parent->rate / pfc_divisors[idx];
}
static struct sh_clk_ops sh7705_module_clk_ops = {
.recalc = module_clk_recalc,
};
/* Bus clock: parent rate / STC table entry (FRQCR[9:8]). */
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FRQCR) & 0x0300) >> 8;
return clk->parent->rate / stc_multipliers[idx];
}
static struct sh_clk_ops sh7705_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
/* CPU clock: parent rate / IFC divisor (FRQCR[5:4]). */
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FRQCR) & 0x0030) >> 4;
return clk->parent->rate / ifc_divisors[idx];
}
static struct sh_clk_ops sh7705_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
/* One entry per core clock, in framework index order. */
static struct sh_clk_ops *sh7705_clk_ops[] = {
	&sh7705_master_clk_ops,
	&sh7705_module_clk_ops,
	&sh7705_bus_clk_ops,
	&sh7705_cpu_clk_ops,
};

/*
 * Export the ops matching @idx to the generic SH clock framework.
 * Indices beyond the table (including negative values, rejected by the
 * unsigned comparison) leave *ops unmodified.
 */
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
	if (idx >= ARRAY_SIZE(sh7705_clk_ops))
		return;

	*ops = sh7705_clk_ops[idx];
}
| gpl-2.0 |
LeeDroid-/LeeDrOiD-Hima-M9 | arch/sh/kernel/cpu/sh4/clock-sh4.c | 9186 | 1997 | /*
* arch/sh/kernel/cpu/sh4/clock-sh4.c
*
* Generic SH-4 support for the clock framework
*
* Copyright (C) 2005 Paul Mundt
*
* FRQCR parsing hacked out of arch/sh/kernel/time.c
*
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
* Copyright (C) 2002, 2003, 2004 Paul Mundt
* Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
/*
 * SH-4 divisor tables, indexed by 3-bit FRQCR fields; trailing repeats
 * cover reserved encodings.
 */
static int ifc_divisors[] = { 1, 2, 3, 4, 6, 8, 1, 1 };
#define bfc_divisors ifc_divisors /* Same */
static int pfc_divisors[] = { 2, 3, 4, 6, 8, 2, 2, 2 };
/* Scale the registered master rate by the PFC divisor (FRQCR[2:0]). */
static void master_clk_init(struct clk *clk)
{
clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0007];
}
static struct sh_clk_ops sh4_master_clk_ops = {
.init = master_clk_init,
};
/* Peripheral module clock: parent rate / PFC divisor (FRQCR[2:0]). */
static unsigned long module_clk_recalc(struct clk *clk)
{
	unsigned int sel = __raw_readw(FRQCR) & 0x0007;

	return clk->parent->rate / pfc_divisors[sel];
}

static struct sh_clk_ops sh4_module_clk_ops = {
	.recalc	= module_clk_recalc,
};
/* Bus clock: parent rate / BFC divisor (FRQCR[5:3], same table as IFC). */
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(FRQCR) >> 3) & 0x0007;
return clk->parent->rate / bfc_divisors[idx];
}
static struct sh_clk_ops sh4_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
/* CPU clock: parent rate / IFC divisor (FRQCR[8:6]). */
static unsigned long cpu_clk_recalc(struct clk *clk)
{
	unsigned int sel = (__raw_readw(FRQCR) >> 6) & 0x0007;

	return clk->parent->rate / ifc_divisors[sel];
}

static struct sh_clk_ops sh4_cpu_clk_ops = {
	.recalc	= cpu_clk_recalc,
};
/* Ops indexed by framework clock id: master, module, bus, cpu. */
static struct sh_clk_ops *sh4_clk_ops[] = {
&sh4_master_clk_ops,
&sh4_module_clk_ops,
&sh4_bus_clk_ops,
&sh4_cpu_clk_ops,
};
/*
 * Hand the clock framework the ops for clock @idx.  Out-of-range (and,
 * via unsigned promotion, negative) indices leave *ops untouched.
 */
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
if (idx < ARRAY_SIZE(sh4_clk_ops))
*ops = sh4_clk_ops[idx];
}
| gpl-2.0 |
redmi/android_kernel_HM2014811 | arch/um/drivers/mconsole_user.c | 9698 | 5347 | /*
* Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org)
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
* Licensed under the GPL
*/
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/un.h>
#include "mconsole.h"
/*
 * Dispatch table for mconsole requests.  The third field selects the
 * execution context: MCONSOLE_INTR handlers run directly from the
 * interrupt, MCONSOLE_PROC handlers are deferred to process context.
 */
static struct mconsole_command commands[] = {
/*
 * With uts namespaces, uts information becomes process-specific, so
 * we need a process context. If we try handling this in interrupt
 * context, we may hit an exiting process without a valid uts
 * namespace.
 */
{ "version", mconsole_version, MCONSOLE_PROC },
{ "halt", mconsole_halt, MCONSOLE_PROC },
{ "reboot", mconsole_reboot, MCONSOLE_PROC },
{ "config", mconsole_config, MCONSOLE_PROC },
{ "remove", mconsole_remove, MCONSOLE_PROC },
{ "sysrq", mconsole_sysrq, MCONSOLE_INTR },
{ "help", mconsole_help, MCONSOLE_INTR },
{ "cad", mconsole_cad, MCONSOLE_INTR },
{ "stop", mconsole_stop, MCONSOLE_PROC },
{ "go", mconsole_go, MCONSOLE_INTR },
{ "log", mconsole_log, MCONSOLE_INTR },
{ "proc", mconsole_proc, MCONSOLE_PROC },
{ "stack", mconsole_stack, MCONSOLE_INTR },
};
/* Initialized in mconsole_init, which is an initcall */
char mconsole_socket_name[256];
/*
 * Send a plain-text reply to a pre-versioned (v0) client.  v0 clients
 * expect a raw datagram with no header, so the string goes out as-is
 * to the address the request arrived from.
 */
static int mconsole_reply_v0(struct mc_request *req, char *reply)
{
	struct iovec iov = {
		.iov_base	= reply,
		.iov_len	= strlen(reply),
	};
	struct msghdr msg = {
		.msg_name	= &req->origin,
		.msg_namelen	= req->originlen,
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
	};

	return sendmsg(req->originating_fd, &msg, 0);
}
/*
 * Match the incoming request against the command table by prefix.
 * Returns the first command whose name is a prefix of the request
 * data, or NULL when nothing matches.
 */
static struct mconsole_command *mconsole_parse(struct mc_request *req)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(commands); i++) {
		struct mconsole_command *cmd = &commands[i];

		if (strncmp(req->request.data, cmd->command,
			    strlen(cmd->command)) == 0)
			return cmd;
	}

	return NULL;
}
/* NOTE(review): evaluates both arguments twice - keep side effects out. */
#define MIN(a,b) ((a)<(b) ? (a):(b))
/* Two-step expansion so STRING(MCONSOLE_VERSION) stringifies the value. */
#define STRINGX(x) #x
#define STRING(x) STRINGX(x)
/*
 * mconsole_get_request - receive and validate one request datagram.
 *
 * Fills @req from @fd and records the sender's address for replies.
 * Returns 1 when a parsed, dispatchable request is in *req; 0 when the
 * datagram was consumed but must not be dispatched (recv error,
 * unversioned client, oversized request, unknown command).
 *
 * NOTE(review): a version mismatch only sends a warning reply and then
 * falls through to normal parsing - confirm this leniency is intended.
 */
int mconsole_get_request(int fd, struct mc_request *req)
{
int len;
req->originlen = sizeof(req->origin);
req->len = recvfrom(fd, &req->request, sizeof(req->request), 0,
(struct sockaddr *) req->origin, &req->originlen);
if (req->len < 0)
return 0;
req->originating_fd = fd;
if (req->request.magic != MCONSOLE_MAGIC) {
/* Unversioned request */
len = MIN(sizeof(req->request.data) - 1,
strlen((char *) &req->request));
/* Rebuild the raw datagram as a v1-shaped request so the reply
 * helpers can address the sender, then reject it. */
memmove(req->request.data, &req->request, len);
req->request.data[len] = '\0';
req->request.magic = MCONSOLE_MAGIC;
req->request.version = 0;
req->request.len = len;
mconsole_reply_v0(req, "ERR Version 0 mconsole clients are "
"not supported by this driver");
return 0;
}
if (req->request.len >= MCONSOLE_MAX_DATA) {
mconsole_reply(req, "Request too large", 1, 0);
return 0;
}
if (req->request.version != MCONSOLE_VERSION) {
mconsole_reply(req, "This driver only supports version "
STRING(MCONSOLE_VERSION) " clients", 1, 0);
}
/* NUL-terminate so the prefix match in mconsole_parse is bounded. */
req->request.data[req->request.len] = '\0';
req->cmd = mconsole_parse(req);
if (req->cmd == NULL) {
mconsole_reply(req, "Unknown command", 1, 0);
return 0;
}
return 1;
}
/*
 * mconsole_reply_len - send @total bytes of @str back to the requester,
 * fragmenting into MCONSOLE_MAX_DATA-1 sized packets.  @err is reported
 * only on the first packet; @more marks a continued conversation after
 * the final fragment.  Returns 0 or -errno from sendto().
 */
int mconsole_reply_len(struct mc_request *req, const char *str, int total,
int err, int more)
{
/*
 * XXX This is a stack consumption problem. It'd be nice to
 * make it global and serialize access to it, but there are a
 * ton of callers to this function.
 */
struct mconsole_reply reply;
int len, n;
do {
reply.err = err;
/* err can only be true on the first packet */
err = 0;
len = MIN(total, MCONSOLE_MAX_DATA - 1);
if (len == total) reply.more = more;
else reply.more = 1;
memcpy(reply.data, str, len);
reply.data[len] = '\0';
total -= len;
str += len;
reply.len = len + 1;
/* Send only the used part of reply.data, not the whole buffer. */
len = sizeof(reply) + reply.len - sizeof(reply.data);
n = sendto(req->originating_fd, &reply, len, 0,
(struct sockaddr *) req->origin, req->originlen);
if (n < 0)
return -errno;
} while (total > 0);
return 0;
}
/* Convenience wrapper: reply with a whole NUL-terminated string. */
int mconsole_reply(struct mc_request *req, const char *str, int err, int more)
{
	int total = strlen(str);

	return mconsole_reply_len(req, str, total, err, more);
}
/* Remove the request socket from the filesystem; always reports success. */
int mconsole_unlink_socket(void)
{
unlink(mconsole_socket_name);
return 0;
}
/* Lazily-created datagram socket shared by all notifications. */
static int notify_sock = -1;
/*
 * mconsole_notify - push an asynchronous notification to @sock_name.
 *
 * Creates the shared notify socket on first use (creation is guarded
 * by lock_notify()/unlock_notify()), then sends a versioned packet of
 * @type carrying up to sizeof(packet.data) bytes of @data (silently
 * truncated).  Returns 0 or -errno.
 *
 * NOTE(review): sock_name is copied into target.sun_path with an
 * unbounded strcpy - callers must guarantee it fits sun_path.
 */
int mconsole_notify(char *sock_name, int type, const void *data, int len)
{
struct sockaddr_un target;
struct mconsole_notify packet;
int n, err = 0;
lock_notify();
if (notify_sock < 0) {
notify_sock = socket(PF_UNIX, SOCK_DGRAM, 0);
if (notify_sock < 0) {
err = -errno;
printk(UM_KERN_ERR "mconsole_notify - socket failed, "
"errno = %d\n", errno);
}
}
unlock_notify();
if (err)
return err;
target.sun_family = AF_UNIX;
strcpy(target.sun_path, sock_name);
packet.magic = MCONSOLE_MAGIC;
packet.version = MCONSOLE_VERSION;
packet.type = type;
packet.len = len;
memcpy(packet.data, data, len);
err = 0;
/* Send only the used part of packet.data. */
len = sizeof(packet) + packet.len - sizeof(packet.data);
n = sendto(notify_sock, &packet, len, 0, (struct sockaddr *) &target,
sizeof(target));
if (n < 0) {
err = -errno;
printk(UM_KERN_ERR "mconsole_notify - sendto failed, "
"errno = %d\n", errno);
}
return err;
}
| gpl-2.0 |
p500-ics-cm9/caf-msm-kernel | drivers/media/dvb/mantis/mantis_vp3030.c | 11234 | 2653 | /*
Mantis VP-3030 driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"
#include "zl10353.h"
#include "tda665x.h"
#include "mantis_common.h"
#include "mantis_ioc.h"
#include "mantis_dvb.h"
#include "mantis_vp3030.h"
/* ZL10353 DVB-T demodulator on I2C address 0x0f. */
struct zl10353_config mantis_vp3030_config = {
.demod_address = 0x0f,
};
/* ENV57H12D5 tuner parameters (frequencies in Hz). */
struct tda665x_config env57h12d5_config = {
.name = "ENV57H12D5 (ET-50DT)",
.addr = 0x60,
.frequency_min = 47000000,
.frequency_max = 862000000,
.frequency_offst = 3616667,
.ref_multiplier = 6, /* 1/6 MHz */
.ref_divider = 100000, /* 1/6 MHz */
};
#define MANTIS_MODEL_NAME "VP-3030"
#define MANTIS_DEV_TYPE "DVB-T"
/*
 * vp3030_frontend_init - power up and attach the VP-3030 frontend.
 *
 * Pulses the reset GPIO around frontend power-on, then attaches the
 * ZL10353 demod and TDA665x tuner.  On success mantis->fe points at
 * the attached frontend.
 *
 * NOTE(review): the power-on error is only examined after the reset
 * line is released, and a failed demod attach returns -1 rather than
 * a -E* code - confirm callers only test for non-zero.
 */
static int vp3030_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
{
struct i2c_adapter *adapter = &mantis->adapter;
struct mantis_hwconfig *config = mantis->hwconfig;
int err = 0;
mantis_gpio_set_bits(mantis, config->reset, 0);
msleep(100);
err = mantis_frontend_power(mantis, POWER_ON);
msleep(100);
mantis_gpio_set_bits(mantis, config->reset, 1);
if (err == 0) {
msleep(250);
dprintk(MANTIS_ERROR, 1, "Probing for 10353 (DVB-T)");
fe = dvb_attach(zl10353_attach, &mantis_vp3030_config, adapter);
if (!fe)
return -1;
dvb_attach(tda665x_attach, fe, &env57h12d5_config, adapter);
} else {
dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
adapter->name,
err);
return -EIO;
}
mantis->fe = fe;
dprintk(MANTIS_ERROR, 1, "Done!");
return 0;
}
/* Board description handed to the mantis core driver. */
struct mantis_hwconfig vp3030_config = {
.model_name = MANTIS_MODEL_NAME,
.dev_type = MANTIS_DEV_TYPE,
.ts_size = MANTIS_TS_188,
.baud_rate = MANTIS_BAUD_9600,
.parity = MANTIS_PARITY_NONE,
.bytes = 0,
.frontend_init = vp3030_frontend_init,
.power = GPIF_A12,
.reset = GPIF_A13,
.i2c_mode = MANTIS_BYTE_MODE
};
| gpl-2.0 |
santod/NuK3rn3l_m7_sense_lollipop | drivers/media/dvb/mantis/mantis_vp3030.c | 11234 | 2653 | /*
Mantis VP-3030 driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"
#include "zl10353.h"
#include "tda665x.h"
#include "mantis_common.h"
#include "mantis_ioc.h"
#include "mantis_dvb.h"
#include "mantis_vp3030.h"
/* ZL10353 DVB-T demodulator on I2C address 0x0f. */
struct zl10353_config mantis_vp3030_config = {
.demod_address = 0x0f,
};
/* ENV57H12D5 tuner parameters (frequencies in Hz). */
struct tda665x_config env57h12d5_config = {
.name = "ENV57H12D5 (ET-50DT)",
.addr = 0x60,
.frequency_min = 47000000,
.frequency_max = 862000000,
.frequency_offst = 3616667,
.ref_multiplier = 6, /* 1/6 MHz */
.ref_divider = 100000, /* 1/6 MHz */
};
#define MANTIS_MODEL_NAME "VP-3030"
#define MANTIS_DEV_TYPE "DVB-T"
/*
 * Power up and attach the VP-3030 frontend: pulse the reset GPIO
 * around frontend power-on, then attach the ZL10353 demod and the
 * TDA665x tuner; on success mantis->fe holds the frontend.
 *
 * NOTE(review): a failed demod attach returns -1 (not a -E* code) and
 * the power-on error is checked only after reset is released.
 */
static int vp3030_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
{
struct i2c_adapter *adapter = &mantis->adapter;
struct mantis_hwconfig *config = mantis->hwconfig;
int err = 0;
mantis_gpio_set_bits(mantis, config->reset, 0);
msleep(100);
err = mantis_frontend_power(mantis, POWER_ON);
msleep(100);
mantis_gpio_set_bits(mantis, config->reset, 1);
if (err == 0) {
msleep(250);
dprintk(MANTIS_ERROR, 1, "Probing for 10353 (DVB-T)");
fe = dvb_attach(zl10353_attach, &mantis_vp3030_config, adapter);
if (!fe)
return -1;
dvb_attach(tda665x_attach, fe, &env57h12d5_config, adapter);
} else {
dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
adapter->name,
err);
return -EIO;
}
mantis->fe = fe;
dprintk(MANTIS_ERROR, 1, "Done!");
return 0;
}
/* Board description handed to the mantis core driver. */
struct mantis_hwconfig vp3030_config = {
.model_name = MANTIS_MODEL_NAME,
.dev_type = MANTIS_DEV_TYPE,
.ts_size = MANTIS_TS_188,
.baud_rate = MANTIS_BAUD_9600,
.parity = MANTIS_PARITY_NONE,
.bytes = 0,
.frontend_init = vp3030_frontend_init,
.power = GPIF_A12,
.reset = GPIF_A13,
.i2c_mode = MANTIS_BYTE_MODE
};
| gpl-2.0 |
tamlok/linux | drivers/pinctrl/intel/pinctrl-intel.c | 227 | 29169 | /*
* Intel pinctrl/GPIO core driver.
*
* Copyright (C) 2015, Intel Corporation
* Authors: Mathias Nyman <mathias.nyman@linux.intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include "pinctrl-intel.h"
/* Maximum number of pads in each group */
#define NPADS_IN_GPP 24
/* Offset from regs */
#define PADBAR 0x00c
#define GPI_IS 0x100
#define GPI_GPE_STS 0x140
#define GPI_GPE_EN 0x160
#define PADOWN_BITS 4
#define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS)
#define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p))
/* Offset from pad_regs */
#define PADCFG0 0x000
#define PADCFG0_RXEVCFG_SHIFT 25
#define PADCFG0_RXEVCFG_MASK (3 << PADCFG0_RXEVCFG_SHIFT)
#define PADCFG0_RXEVCFG_LEVEL 0
#define PADCFG0_RXEVCFG_EDGE 1
#define PADCFG0_RXEVCFG_DISABLED 2
#define PADCFG0_RXEVCFG_EDGE_BOTH 3
#define PADCFG0_RXINV BIT(23)
#define PADCFG0_GPIROUTIOXAPIC BIT(20)
#define PADCFG0_GPIROUTSCI BIT(19)
#define PADCFG0_GPIROUTSMI BIT(18)
#define PADCFG0_GPIROUTNMI BIT(17)
#define PADCFG0_PMODE_SHIFT 10
#define PADCFG0_PMODE_MASK (0xf << PADCFG0_PMODE_SHIFT)
#define PADCFG0_GPIORXDIS BIT(9)
#define PADCFG0_GPIOTXDIS BIT(8)
#define PADCFG0_GPIORXSTATE BIT(1)
#define PADCFG0_GPIOTXSTATE BIT(0)
#define PADCFG1 0x004
#define PADCFG1_TERM_UP BIT(13)
#define PADCFG1_TERM_SHIFT 10
#define PADCFG1_TERM_MASK (7 << PADCFG1_TERM_SHIFT)
#define PADCFG1_TERM_20K 4
#define PADCFG1_TERM_2K 3
#define PADCFG1_TERM_5K 2
#define PADCFG1_TERM_1K 1
/* Per-pad register snapshot (PADCFG0/PADCFG1) saved across suspend. */
struct intel_pad_context {
u32 padcfg0;
u32 padcfg1;
};
/* Per-community saved state: one interrupt-mask word per GPP. */
struct intel_community_context {
u32 *intmask;
};
/* Aggregate of everything restored on resume; see @context in intel_pinctrl. */
struct intel_pinctrl_context {
struct intel_pad_context *pads;
struct intel_community_context *communities;
};
/**
 * struct intel_pinctrl - Intel pinctrl private structure
 * @dev: Pointer to the device structure
 * @lock: Lock to serialize register access
 * @pctldesc: Pin controller description
 * @pctldev: Pointer to the pin controller device
 * @chip: GPIO chip in this pin controller
 * @soc: SoC/PCH specific pin configuration data
 * @communities: All communities in this pin controller
 * @ncommunities: Number of communities in this pin controller
 * @context: Configuration saved over system sleep
 */
struct intel_pinctrl {
struct device *dev;
/* Taken from hard-IRQ context too (see intel_gpio_irq_ack). */
spinlock_t lock;
struct pinctrl_desc pctldesc;
struct pinctrl_dev *pctldev;
struct gpio_chip chip;
const struct intel_pinctrl_soc_data *soc;
struct intel_community *communities;
size_t ncommunities;
struct intel_pinctrl_context context;
};
#define gpiochip_to_pinctrl(c) container_of(c, struct intel_pinctrl, chip)
#define pin_to_padno(c, p) ((p) - (c)->pin_base)
/*
 * Look up the community owning @pin via its [pin_base, pin_base+npins)
 * window.  Returns NULL (after warning) when no community claims it.
 */
static struct intel_community *intel_get_community(struct intel_pinctrl *pctrl,
						   unsigned pin)
{
	int i;

	for (i = 0; i < pctrl->ncommunities; i++) {
		struct intel_community *c = &pctrl->communities[i];

		if (pin >= c->pin_base && pin < c->pin_base + c->npins)
			return c;
	}

	dev_warn(pctrl->dev, "failed to find community for pin %u\n", pin);
	return NULL;
}
/*
 * Map (pin, PADCFGn offset) to the register address for that pad.
 * Each pad occupies 8 bytes: PADCFG0 at +0 and PADCFG1 at +4.
 * Returns NULL when the pin belongs to no community.
 */
static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, unsigned pin,
unsigned reg)
{
const struct intel_community *community;
unsigned padno;
community = intel_get_community(pctrl, pin);
if (!community)
return NULL;
padno = pin_to_padno(community, pin);
return community->pad_regs + reg + padno * 8;
}
/*
 * True when the PADOWN register assigns this pad to the host (ACPI)
 * software controller.  Communities without a PADOWN block are treated
 * as fully host-owned.
 */
static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
{
const struct intel_community *community;
unsigned padno, gpp, gpp_offset, offset;
void __iomem *padown;
community = intel_get_community(pctrl, pin);
if (!community)
return false;
if (!community->padown_offset)
return true;
padno = pin_to_padno(community, pin);
gpp = padno / NPADS_IN_GPP;
gpp_offset = padno % NPADS_IN_GPP;
/* PADOWN packs 8 pads per 32-bit register, 4 bits each. */
offset = community->padown_offset + gpp * 16 + (gpp_offset / 8) * 4;
padown = community->regs + offset;
return !(readl(padown) & PADOWN_MASK(padno));
}
/*
 * True when HOSTSW_OWN routes the pad to ACPI instead of the GPIO
 * driver.  Unknown pins are conservatively reported as reserved.
 */
static bool intel_pad_reserved_for_acpi(struct intel_pinctrl *pctrl,
unsigned pin)
{
const struct intel_community *community;
unsigned padno, gpp, offset;
void __iomem *hostown;
community = intel_get_community(pctrl, pin);
if (!community)
return true;
if (!community->hostown_offset)
return false;
padno = pin_to_padno(community, pin);
gpp = padno / NPADS_IN_GPP;
offset = community->hostown_offset + gpp * 4;
hostown = community->regs + offset;
return !(readl(hostown) & BIT(padno % NPADS_IN_GPP));
}
/*
 * intel_pad_locked - report whether firmware locked this pad's config.
 *
 * Checks the PADCFGLOCK and PADCFGLOCKTX registers of the pad's group.
 * Unknown pins are conservatively reported as locked.
 *
 * Fix: the lock bits are indexed by the community-relative pad number
 * (padno % NPADS_IN_GPP), not the raw global pin number.  The old code
 * used BIT(pin % NPADS_IN_GPP), which tests the wrong bit for any
 * community with a non-zero, non-aligned pin_base; this now matches
 * the indexing used by intel_pad_reserved_for_acpi() and upstream.
 */
static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
{
	struct intel_community *community;
	unsigned padno, gpp, gpp_offset, offset;
	u32 value;

	community = intel_get_community(pctrl, pin);
	if (!community)
		return true;
	if (!community->padcfglock_offset)
		return false;

	padno = pin_to_padno(community, pin);
	gpp = padno / NPADS_IN_GPP;
	gpp_offset = padno % NPADS_IN_GPP;

	/*
	 * If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad,
	 * the pad is considered unlocked. Any other case means that it is
	 * either fully or partially locked and we don't touch it.
	 */
	offset = community->padcfglock_offset + gpp * 8;
	value = readl(community->regs + offset);
	if (value & BIT(gpp_offset))
		return true;

	offset = community->padcfglock_offset + 4 + gpp * 8;
	value = readl(community->regs + offset);
	if (value & BIT(gpp_offset))
		return true;

	return false;
}
static bool intel_pad_usable(struct intel_pinctrl *pctrl, unsigned pin)
{
return intel_pad_owned_by_host(pctrl, pin) &&
!intel_pad_reserved_for_acpi(pctrl, pin) &&
!intel_pad_locked(pctrl, pin);
}
/* Number of pin groups declared by the SoC data. */
static int intel_get_groups_count(struct pinctrl_dev *pctldev)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->ngroups;
}
/* Name of group @group from the SoC data. */
static const char *intel_get_group_name(struct pinctrl_dev *pctldev,
unsigned group)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->groups[group].name;
}
/* Pin list of group @group from the SoC data. */
static int intel_get_group_pins(struct pinctrl_dev *pctldev, unsigned group,
const unsigned **pins, unsigned *npins)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
*pins = pctrl->soc->groups[group].pins;
*npins = pctrl->soc->groups[group].npins;
return 0;
}
/*
 * Debugfs dump for one pin: mux mode (or GPIO), raw PADCFG0/1, and
 * LOCKED/ACPI flags.  Pads not owned by the host are shown as
 * "not available".
 */
static void intel_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
unsigned pin)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
u32 cfg0, cfg1, mode;
bool locked, acpi;
if (!intel_pad_owned_by_host(pctrl, pin)) {
seq_puts(s, "not available");
return;
}
cfg0 = readl(intel_get_padcfg(pctrl, pin, PADCFG0));
cfg1 = readl(intel_get_padcfg(pctrl, pin, PADCFG1));
/* PMODE of 0 means native GPIO, anything else is a mux function. */
mode = (cfg0 & PADCFG0_PMODE_MASK) >> PADCFG0_PMODE_SHIFT;
if (!mode)
seq_puts(s, "GPIO ");
else
seq_printf(s, "mode %d ", mode);
seq_printf(s, "0x%08x 0x%08x", cfg0, cfg1);
locked = intel_pad_locked(pctrl, pin);
acpi = intel_pad_reserved_for_acpi(pctrl, pin);
if (locked || acpi) {
seq_puts(s, " [");
if (locked) {
seq_puts(s, "LOCKED");
if (acpi)
seq_puts(s, ", ");
}
if (acpi)
seq_puts(s, "ACPI");
seq_puts(s, "]");
}
}
static const struct pinctrl_ops intel_pinctrl_ops = {
.get_groups_count = intel_get_groups_count,
.get_group_name = intel_get_group_name,
.get_group_pins = intel_get_group_pins,
.pin_dbg_show = intel_pin_dbg_show,
};
/* Number of mux functions declared by the SoC data. */
static int intel_get_functions_count(struct pinctrl_dev *pctldev)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->nfunctions;
}
/* Name of mux function @function from the SoC data. */
static const char *intel_get_function_name(struct pinctrl_dev *pctldev,
unsigned function)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
return pctrl->soc->functions[function].name;
}
/* Groups selectable for mux function @function from the SoC data. */
static int intel_get_function_groups(struct pinctrl_dev *pctldev,
unsigned function,
const char * const **groups,
unsigned * const ngroups)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
*groups = pctrl->soc->functions[function].groups;
*ngroups = pctrl->soc->functions[function].ngroups;
return 0;
}
/*
 * Program the PMODE field of every pin in @group to the group's mux
 * mode.  Fails with -EBUSY (writing nothing) if any pin in the group
 * is not usable, so the group is muxed all-or-nothing.
 */
static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
unsigned group)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
const struct intel_pingroup *grp = &pctrl->soc->groups[group];
unsigned long flags;
int i;
spin_lock_irqsave(&pctrl->lock, flags);
/*
 * All pins in the groups needs to be accessible and writable
 * before we can enable the mux for this group.
 */
for (i = 0; i < grp->npins; i++) {
if (!intel_pad_usable(pctrl, grp->pins[i])) {
spin_unlock_irqrestore(&pctrl->lock, flags);
return -EBUSY;
}
}
/* Now enable the mux setting for each pin in the group */
for (i = 0; i < grp->npins; i++) {
void __iomem *padcfg0;
u32 value;
padcfg0 = intel_get_padcfg(pctrl, grp->pins[i], PADCFG0);
value = readl(padcfg0);
value &= ~PADCFG0_PMODE_MASK;
value |= grp->mode << PADCFG0_PMODE_SHIFT;
writel(value, padcfg0);
}
spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
/*
 * Claim @pin for GPIO use: switch PMODE to GPIO, cut SCI/SMI/NMI/IOxAPIC
 * routing, and start as an input (RX enabled, TX disabled).  Returns
 * -EBUSY when the pad is not usable by the host.
 */
static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned pin)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
unsigned long flags;
u32 value;
spin_lock_irqsave(&pctrl->lock, flags);
if (!intel_pad_usable(pctrl, pin)) {
spin_unlock_irqrestore(&pctrl->lock, flags);
return -EBUSY;
}
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
/* Put the pad into GPIO mode */
value = readl(padcfg0) & ~PADCFG0_PMODE_MASK;
/* Disable SCI/SMI/NMI generation */
value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
/* Disable TX buffer and enable RX (this will be input) */
value &= ~PADCFG0_GPIORXDIS;
value |= PADCFG0_GPIOTXDIS;
writel(value, padcfg0);
spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
/*
 * Flip the TX-disable bit: set for input direction, clear for output.
 * RX stays enabled so outputs can still be read back.
 */
static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned pin, bool input)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
unsigned long flags;
u32 value;
spin_lock_irqsave(&pctrl->lock, flags);
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
value = readl(padcfg0);
if (input)
value |= PADCFG0_GPIOTXDIS;
else
value &= ~PADCFG0_GPIOTXDIS;
writel(value, padcfg0);
spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
}
static const struct pinmux_ops intel_pinmux_ops = {
.get_functions_count = intel_get_functions_count,
.get_function_name = intel_get_function_name,
.get_function_groups = intel_get_function_groups,
.set_mux = intel_pinmux_set_mux,
.gpio_request_enable = intel_gpio_request_enable,
.gpio_set_direction = intel_gpio_set_direction,
};
/*
 * Read back the bias configuration of @pin from PADCFG1.  Supports
 * BIAS_DISABLE / PULL_UP / PULL_DOWN; the argument is the pull
 * strength in ohms.  Returns -EINVAL when the requested param does not
 * match the programmed state, -ENOTSUPP for other params or pads not
 * owned by the host.
 */
static int intel_config_get(struct pinctrl_dev *pctldev, unsigned pin,
unsigned long *config)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
enum pin_config_param param = pinconf_to_config_param(*config);
u32 value, term;
u16 arg = 0;
if (!intel_pad_owned_by_host(pctrl, pin))
return -ENOTSUPP;
value = readl(intel_get_padcfg(pctrl, pin, PADCFG1));
term = (value & PADCFG1_TERM_MASK) >> PADCFG1_TERM_SHIFT;
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
if (term)
return -EINVAL;
break;
case PIN_CONFIG_BIAS_PULL_UP:
if (!term || !(value & PADCFG1_TERM_UP))
return -EINVAL;
switch (term) {
case PADCFG1_TERM_1K:
arg = 1000;
break;
case PADCFG1_TERM_2K:
arg = 2000;
break;
case PADCFG1_TERM_5K:
arg = 5000;
break;
case PADCFG1_TERM_20K:
arg = 20000;
break;
}
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
if (!term || value & PADCFG1_TERM_UP)
return -EINVAL;
/* Pull-downs only exist in 5k and 20k strengths. */
switch (term) {
case PADCFG1_TERM_5K:
arg = 5000;
break;
case PADCFG1_TERM_20K:
arg = 20000;
break;
}
break;
default:
return -ENOTSUPP;
}
*config = pinconf_to_config_packed(param, arg);
return 0;
}
/*
 * Program the PADCFG1 termination field for one bias setting.  The
 * register is only written when the requested strength is valid;
 * otherwise -EINVAL is returned and the pad is left untouched.
 */
static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned pin,
unsigned long config)
{
unsigned param = pinconf_to_config_param(config);
unsigned arg = pinconf_to_config_argument(config);
void __iomem *padcfg1;
unsigned long flags;
int ret = 0;
u32 value;
spin_lock_irqsave(&pctrl->lock, flags);
padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1);
value = readl(padcfg1);
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
value &= ~(PADCFG1_TERM_MASK | PADCFG1_TERM_UP);
break;
case PIN_CONFIG_BIAS_PULL_UP:
value &= ~PADCFG1_TERM_MASK;
value |= PADCFG1_TERM_UP;
switch (arg) {
case 20000:
value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
break;
case 5000:
value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
break;
case 2000:
value |= PADCFG1_TERM_2K << PADCFG1_TERM_SHIFT;
break;
case 1000:
value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
break;
default:
ret = -EINVAL;
}
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
value &= ~(PADCFG1_TERM_UP | PADCFG1_TERM_MASK);
switch (arg) {
case 20000:
value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
break;
case 5000:
value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
break;
default:
ret = -EINVAL;
}
break;
}
if (!ret)
writel(value, padcfg1);
spin_unlock_irqrestore(&pctrl->lock, flags);
return ret;
}
/*
 * Apply a list of pinconf settings to @pin; only bias parameters are
 * supported.  Stops at the first failing config.
 */
static int intel_config_set(struct pinctrl_dev *pctldev, unsigned pin,
unsigned long *configs, unsigned nconfigs)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
int i, ret;
if (!intel_pad_usable(pctrl, pin))
return -ENOTSUPP;
for (i = 0; i < nconfigs; i++) {
switch (pinconf_to_config_param(configs[i])) {
case PIN_CONFIG_BIAS_DISABLE:
case PIN_CONFIG_BIAS_PULL_UP:
case PIN_CONFIG_BIAS_PULL_DOWN:
ret = intel_config_set_pull(pctrl, pin, configs[i]);
if (ret)
return ret;
break;
default:
return -ENOTSUPP;
}
}
return 0;
}
static const struct pinconf_ops intel_pinconf_ops = {
.is_generic = true,
.pin_config_get = intel_config_get,
.pin_config_set = intel_config_set,
};
/* Template pinctrl descriptor; per-device fields are filled at probe. */
static const struct pinctrl_desc intel_pinctrl_desc = {
.pctlops = &intel_pinctrl_ops,
.pmxops = &intel_pinmux_ops,
.confops = &intel_pinconf_ops,
.owner = THIS_MODULE,
};
/* Delegate GPIO request/free bookkeeping to the pinctrl core. */
static int intel_gpio_request(struct gpio_chip *chip, unsigned offset)
{
return pinctrl_request_gpio(chip->base + offset);
}
static void intel_gpio_free(struct gpio_chip *chip, unsigned offset)
{
pinctrl_free_gpio(chip->base + offset);
}
/* Read the RX state bit of the pad backing GPIO @offset. */
static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
	void __iomem *reg = intel_get_padcfg(pctrl, offset, PADCFG0);

	if (!reg)
		return -EINVAL;

	return (readl(reg) & PADCFG0_GPIORXSTATE) ? 1 : 0;
}
/*
 * Drive the TX state bit of the pad backing GPIO @offset; silently a
 * no-op when the pin maps to no community.
 */
static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
void __iomem *reg;
reg = intel_get_padcfg(pctrl, offset, PADCFG0);
if (reg) {
unsigned long flags;
u32 padcfg0;
spin_lock_irqsave(&pctrl->lock, flags);
padcfg0 = readl(reg);
if (value)
padcfg0 |= PADCFG0_GPIOTXSTATE;
else
padcfg0 &= ~PADCFG0_GPIOTXSTATE;
writel(padcfg0, reg);
spin_unlock_irqrestore(&pctrl->lock, flags);
}
}
/* Direction changes are routed through the pinctrl core. */
static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
return pinctrl_gpio_direction_input(chip->base + offset);
}
/* Set the output level first so the pin never glitches to a stale value. */
static int intel_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
intel_gpio_set(chip, offset, value);
return pinctrl_gpio_direction_output(chip->base + offset);
}
/* Template gpio_chip; copied and completed per device at probe time. */
static const struct gpio_chip intel_gpio_chip = {
.owner = THIS_MODULE,
.request = intel_gpio_request,
.free = intel_gpio_free,
.direction_input = intel_gpio_direction_input,
.direction_output = intel_gpio_direction_output,
.get = intel_gpio_get,
.set = intel_gpio_set,
};
/*
 * Acknowledge a pad interrupt by writing its bit to the per-group
 * GPI_IS status register (write-one-to-clear).  Runs in hard-IRQ
 * context, hence the plain spin_lock.
 */
static void intel_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
const struct intel_community *community;
unsigned pin = irqd_to_hwirq(d);
spin_lock(&pctrl->lock);
community = intel_get_community(pctrl, pin);
if (community) {
unsigned padno = pin_to_padno(community, pin);
unsigned gpp_offset = padno % NPADS_IN_GPP;
unsigned gpp = padno / NPADS_IN_GPP;
writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
}
spin_unlock(&pctrl->lock);
}
/*
 * Set (@mask true) or clear the pad's bit in the per-group interrupt
 * enable register; unknown pins are ignored.
 */
static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
const struct intel_community *community;
unsigned pin = irqd_to_hwirq(d);
unsigned long flags;
spin_lock_irqsave(&pctrl->lock, flags);
community = intel_get_community(pctrl, pin);
if (community) {
unsigned padno = pin_to_padno(community, pin);
unsigned gpp_offset = padno % NPADS_IN_GPP;
unsigned gpp = padno / NPADS_IN_GPP;
void __iomem *reg;
u32 value;
reg = community->regs + community->ie_offset + gpp * 4;
value = readl(reg);
if (mask)
value &= ~BIT(gpp_offset);
else
value |= BIT(gpp_offset);
writel(value, reg);
}
spin_unlock_irqrestore(&pctrl->lock, flags);
}
static void intel_gpio_irq_mask(struct irq_data *d)
{
intel_gpio_irq_mask_unmask(d, true);
}
static void intel_gpio_irq_unmask(struct irq_data *d)
{
intel_gpio_irq_mask_unmask(d, false);
}
/*
 * intel_gpio_irq_type - program the pad's RXEVCFG/RXINV trigger fields.
 *
 * Fix: IRQ_TYPE_LEVEL_HIGH previously fell through to the final else
 * and programmed RXEVCFG_DISABLED, so level-high interrupts never
 * fired even though the level handler was installed.  Level triggers
 * use RXEVCFG = 0 (left cleared here); only the polarity (RXINV)
 * differs between level-low and level-high, matching the upstream fix.
 */
static int intel_gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
	unsigned pin = irqd_to_hwirq(d);
	unsigned long flags;
	void __iomem *reg;
	u32 value;

	reg = intel_get_padcfg(pctrl, pin, PADCFG0);
	if (!reg)
		return -EINVAL;

	spin_lock_irqsave(&pctrl->lock, flags);

	value = readl(reg);
	value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);

	if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
		value |= PADCFG0_RXEVCFG_EDGE_BOTH << PADCFG0_RXEVCFG_SHIFT;
	} else if (type & IRQ_TYPE_EDGE_FALLING) {
		value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
		value |= PADCFG0_RXINV;
	} else if (type & IRQ_TYPE_EDGE_RISING) {
		value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT;
	} else if (type & IRQ_TYPE_LEVEL_MASK) {
		/* RXEVCFG of 0 means level trigger; invert for active-low. */
		if (type & IRQ_TYPE_LEVEL_LOW)
			value |= PADCFG0_RXINV;
	} else {
		value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT;
	}

	writel(value, reg);

	if (type & IRQ_TYPE_EDGE_BOTH)
		__irq_set_handler_locked(d->irq, handle_edge_irq);
	else if (type & IRQ_TYPE_LEVEL_MASK)
		__irq_set_handler_locked(d->irq, handle_level_irq);

	spin_unlock_irqrestore(&pctrl->lock, flags);

	return 0;
}
/*
 * irq_chip .irq_set_wake callback: arm or disarm the pad's general purpose
 * event (GPE) so the controller can wake the system.  Returns 0 on success
 * or -EINVAL when the pin does not belong to any community.
 */
static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
	const struct intel_community *community;
	unsigned pin = irqd_to_hwirq(d);
	unsigned padno, gpp, gpp_offset;
	u32 gpe_en;

	community = intel_get_community(pctrl, pin);
	if (!community)
		return -EINVAL;

	/* Locate the pad's group register and bit inside the community */
	padno = pin_to_padno(community, pin);
	gpp = padno / NPADS_IN_GPP;
	gpp_offset = padno % NPADS_IN_GPP;

	/* Clear the existing wake status */
	writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4);

	/*
	 * The controller will generate wake when GPE of the corresponding
	 * pad is enabled and it is not routed to SCI (GPIROUTSCI is not
	 * set).
	 */
	gpe_en = readl(community->regs + GPI_GPE_EN + gpp * 4);
	if (on)
		gpe_en |= BIT(gpp_offset);
	else
		gpe_en &= ~BIT(gpp_offset);
	writel(gpe_en, community->regs + GPI_GPE_EN + gpp * 4);

	dev_dbg(pctrl->dev, "%sable wake for pin %u\n", on ? "en" : "dis", pin);
	return 0;
}
/*
 * Demultiplex pending GPIO interrupts of a single community: for every
 * group, dispatch each pending *and* enabled bit to its mapped Linux IRQ.
 */
static void intel_gpio_community_irq_handler(struct gpio_chip *gc,
	const struct intel_community *community)
{
	int gpp;

	for (gpp = 0; gpp < community->ngpps; gpp++) {
		unsigned long pending, enabled, gpp_offset;

		pending = readl(community->regs + GPI_IS + gpp * 4);
		enabled = readl(community->regs + community->ie_offset +
				gpp * 4);

		/* Only interrupts that are enabled */
		pending &= enabled;

		for_each_set_bit(gpp_offset, &pending, NPADS_IN_GPP) {
			unsigned padno, irq;

			/*
			 * The last group in community can have less pins
			 * than NPADS_IN_GPP.
			 */
			padno = gpp_offset + gpp * NPADS_IN_GPP;
			if (padno >= community->npins)
				break;

			irq = irq_find_mapping(gc->irqdomain,
					       community->pin_base + padno);
			generic_handle_irq(irq);
		}
	}
}
/*
 * Chained interrupt handler for the parent IRQ: walk every community and
 * dispatch whatever pad interrupts are pending in each of them.
 */
static void intel_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
	struct gpio_chip *gc = irq_desc_get_handler_data(desc);
	struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
	struct irq_chip *chip = irq_get_chip(irq);
	int community;

	chained_irq_enter(chip, desc);

	/* Need to check all communities for pending interrupts */
	for (community = 0; community < pctrl->ncommunities; community++)
		intel_gpio_community_irq_handler(gc,
					&pctrl->communities[community]);

	chained_irq_exit(chip, desc);
}
/* irq_chip for GPIO-capable pads; .irq_ack is defined earlier in this file. */
static struct irq_chip intel_gpio_irqchip = {
	.name = "intel-gpio",
	.irq_ack = intel_gpio_irq_ack,
	.irq_mask = intel_gpio_irq_mask,
	.irq_unmask = intel_gpio_irq_unmask,
	.irq_set_type = intel_gpio_irq_type,
	.irq_set_wake = intel_gpio_irq_wake,
};
/* Put the interrupt hardware in a known state: mask and clear everything. */
static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
{
	size_t c;

	for (c = 0; c < pctrl->ncommunities; c++) {
		const struct intel_community *community =
			&pctrl->communities[c];
		unsigned gpp;

		for (gpp = 0; gpp < community->ngpps; gpp++) {
			/* Mask the group, then clear any pending status */
			writel(0, community->regs + community->ie_offset +
			       gpp * 4);
			writel(0xffff, community->regs + GPI_IS + gpp * 4);
		}
	}
}
/*
 * Register the GPIO chip, map its pins to the pinctrl device and hook up
 * the chained interrupt handler.  On any failure the partially registered
 * gpiochip is removed before returning the error.
 */
static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
{
	int ret;

	pctrl->chip = intel_gpio_chip;

	pctrl->chip.ngpio = pctrl->soc->npins;
	pctrl->chip.label = dev_name(pctrl->dev);
	pctrl->chip.dev = pctrl->dev;
	pctrl->chip.base = -1;	/* let gpiolib pick the GPIO number base */

	ret = gpiochip_add(&pctrl->chip);
	if (ret) {
		dev_err(pctrl->dev, "failed to register gpiochip\n");
		return ret;
	}

	/* GPIO numbers map 1:1 onto the pin numbers */
	ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev),
				     0, 0, pctrl->soc->npins);
	if (ret) {
		dev_err(pctrl->dev, "failed to add GPIO pin range\n");
		gpiochip_remove(&pctrl->chip);
		return ret;
	}

	/* handlers are switched per-pin in intel_gpio_irq_type() */
	ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0,
				   handle_simple_irq, IRQ_TYPE_NONE);
	if (ret) {
		dev_err(pctrl->dev, "failed to add irqchip\n");
		gpiochip_remove(&pctrl->chip);
		return ret;
	}

	gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq,
				     intel_gpio_irq_handler);
	return 0;
}
/*
 * Allocate the suspend/resume context buffers (per-pad register copies and
 * per-group interrupt masks).  Compiles to a no-op without CONFIG_PM_SLEEP.
 * All allocations are devm-managed; returns 0 or -ENOMEM.
 */
static int intel_pinctrl_pm_init(struct intel_pinctrl *pctrl)
{
#ifdef CONFIG_PM_SLEEP
	const struct intel_pinctrl_soc_data *soc = pctrl->soc;
	struct intel_community_context *communities;
	struct intel_pad_context *pads;
	int i;

	pads = devm_kcalloc(pctrl->dev, soc->npins, sizeof(*pads), GFP_KERNEL);
	if (!pads)
		return -ENOMEM;

	communities = devm_kcalloc(pctrl->dev, pctrl->ncommunities,
				   sizeof(*communities), GFP_KERNEL);
	if (!communities)
		return -ENOMEM;

	/* One saved interrupt-enable word per group in each community */
	for (i = 0; i < pctrl->ncommunities; i++) {
		struct intel_community *community = &pctrl->communities[i];
		u32 *intmask;

		intmask = devm_kcalloc(pctrl->dev, community->ngpps,
				       sizeof(*intmask), GFP_KERNEL);
		if (!intmask)
			return -ENOMEM;

		communities[i].intmask = intmask;
	}

	pctrl->context.pads = pads;
	pctrl->context.communities = communities;
#endif

	return 0;
}
/*
 * Common probe entry used by the SoC-specific drivers: allocates driver
 * state, maps each community's MMIO region, then registers the pinctrl
 * device and the GPIO chip.  Returns 0 on success or a negative errno.
 */
int intel_pinctrl_probe(struct platform_device *pdev,
			const struct intel_pinctrl_soc_data *soc_data)
{
	struct intel_pinctrl *pctrl;
	int i, ret, irq;

	if (!soc_data)
		return -EINVAL;

	pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
	if (!pctrl)
		return -ENOMEM;

	pctrl->dev = &pdev->dev;
	pctrl->soc = soc_data;
	spin_lock_init(&pctrl->lock);

	/*
	 * Make a copy of the communities which we can use to hold pointers
	 * to the registers.
	 */
	pctrl->ncommunities = pctrl->soc->ncommunities;
	pctrl->communities = devm_kcalloc(&pdev->dev, pctrl->ncommunities,
				  sizeof(*pctrl->communities), GFP_KERNEL);
	if (!pctrl->communities)
		return -ENOMEM;

	for (i = 0; i < pctrl->ncommunities; i++) {
		struct intel_community *community = &pctrl->communities[i];
		struct resource *res;
		void __iomem *regs;
		u32 padbar;

		*community = pctrl->soc->communities[i];

		res = platform_get_resource(pdev, IORESOURCE_MEM,
					    community->barno);
		regs = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(regs))
			return PTR_ERR(regs);

		/* Read offset of the pad configuration registers */
		padbar = readl(regs + PADBAR);

		community->regs = regs;
		community->pad_regs = regs + padbar;
		community->ngpps = DIV_ROUND_UP(community->npins, NPADS_IN_GPP);
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get interrupt number\n");
		return irq;
	}

	ret = intel_pinctrl_pm_init(pctrl);
	if (ret)
		return ret;

	pctrl->pctldesc = intel_pinctrl_desc;
	pctrl->pctldesc.name = dev_name(&pdev->dev);
	pctrl->pctldesc.pins = pctrl->soc->pins;
	pctrl->pctldesc.npins = pctrl->soc->npins;

	pctrl->pctldev = pinctrl_register(&pctrl->pctldesc, &pdev->dev, pctrl);
	if (!pctrl->pctldev) {
		dev_err(&pdev->dev, "failed to register pinctrl driver\n");
		return -ENODEV;
	}

	ret = intel_gpio_probe(pctrl, irq);
	if (ret) {
		/* Unwind the pinctrl registration on GPIO failure */
		pinctrl_unregister(pctrl->pctldev);
		return ret;
	}

	platform_set_drvdata(pdev, pctrl);

	return 0;
}
EXPORT_SYMBOL_GPL(intel_pinctrl_probe);
/*
 * Remove counterpart of intel_pinctrl_probe(): tear down in reverse probe
 * order - GPIO chip first, then the pinctrl device.
 */
int intel_pinctrl_remove(struct platform_device *pdev)
{
	struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);

	gpiochip_remove(&pctrl->chip);
	pinctrl_unregister(pctrl->pctldev);

	return 0;
}
EXPORT_SYMBOL_GPL(intel_pinctrl_remove);
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend hook: snapshot PADCFG0/PADCFG1 of every host-owned pad
 * (GPIORXSTATE is excluded - it is a live input bit) and the per-group
 * interrupt-enable masks, into the context allocated by
 * intel_pinctrl_pm_init().
 */
int intel_pinctrl_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
	struct intel_community_context *communities;
	struct intel_pad_context *pads;
	int i;

	pads = pctrl->context.pads;
	for (i = 0; i < pctrl->soc->npins; i++) {
		const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
		u32 val;

		/* Skip pads not owned/usable by the host driver */
		if (!intel_pad_usable(pctrl, desc->number))
			continue;

		val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG0));
		pads[i].padcfg0 = val & ~PADCFG0_GPIORXSTATE;
		val = readl(intel_get_padcfg(pctrl, desc->number, PADCFG1));
		pads[i].padcfg1 = val;
	}

	communities = pctrl->context.communities;
	for (i = 0; i < pctrl->ncommunities; i++) {
		struct intel_community *community = &pctrl->communities[i];
		void __iomem *base;
		unsigned gpp;

		base = community->regs + community->ie_offset;
		for (gpp = 0; gpp < community->ngpps; gpp++)
			communities[i].intmask[gpp] = readl(base + gpp * 4);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(intel_pinctrl_suspend);
/*
 * System resume hook: re-mask all interrupts, then write back any pad
 * registers and interrupt masks that differ from the values captured in
 * intel_pinctrl_suspend().
 */
int intel_pinctrl_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct intel_pinctrl *pctrl = platform_get_drvdata(pdev);
	const struct intel_community_context *communities;
	const struct intel_pad_context *pads;
	int i;

	/* Mask all interrupts */
	intel_gpio_irq_init(pctrl);

	pads = pctrl->context.pads;
	for (i = 0; i < pctrl->soc->npins; i++) {
		const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
		void __iomem *padcfg;
		u32 val;

		if (!intel_pad_usable(pctrl, desc->number))
			continue;

		/* Compare without the live RX state bit, as in suspend */
		padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
		val = readl(padcfg) & ~PADCFG0_GPIORXSTATE;
		if (val != pads[i].padcfg0) {
			writel(pads[i].padcfg0, padcfg);
			dev_dbg(dev, "restored pin %u padcfg0 %#08x\n",
				desc->number, readl(padcfg));
		}

		padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG1);
		val = readl(padcfg);
		if (val != pads[i].padcfg1) {
			writel(pads[i].padcfg1, padcfg);
			dev_dbg(dev, "restored pin %u padcfg1 %#08x\n",
				desc->number, readl(padcfg));
		}
	}

	communities = pctrl->context.communities;
	for (i = 0; i < pctrl->ncommunities; i++) {
		struct intel_community *community = &pctrl->communities[i];
		void __iomem *base;
		unsigned gpp;

		base = community->regs + community->ie_offset;
		for (gpp = 0; gpp < community->ngpps; gpp++) {
			writel(communities[i].intmask[gpp], base + gpp * 4);
			dev_dbg(dev, "restored mask %d/%u %#08x\n", i, gpp,
				readl(base + gpp * 4));
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(intel_pinctrl_resume);
#endif
MODULE_AUTHOR("Mathias Nyman <mathias.nyman@linux.intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Intel pinctrl/GPIO core driver");
MODULE_LICENSE("GPL v2");
/******************************************************************************
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
#include <linux/string.h>
#include "rtl_core.h"
#include "rtl_wx.h"
/* Legacy (802.11b/g) bit rates in bit/s, reported via the WEXT range info. */
#define RATE_COUNT 12
static u32 rtl8192_rates[] = {
	1000000, 2000000, 5500000, 11000000, 6000000, 9000000, 12000000,
	18000000, 24000000, 36000000, 48000000, 54000000
};

#ifndef ENETDOWN
#define ENETDOWN 1
#endif
/* SIOCGIWFREQ: report the current channel/frequency via rtllib. */
static int r8192_wx_get_freq(struct net_device *dev,
			     struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);

	return rtllib_wx_get_freq(wpriv->rtllib, a, wrqu, b);
}
/* SIOCGIWMODE: report the current operating mode via rtllib. */
static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);

	return rtllib_wx_get_mode(wpriv->rtllib, a, wrqu, b);
}
/* SIOCGIWRATE: report the current bit rate via rtllib. */
static int r8192_wx_get_rate(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);

	return rtllib_wx_get_rate(wpriv->rtllib, info, wrqu, extra);
}
/* SIOCSIWRATE: set the bit rate; ignored while the HW RF kill is active. */
static int r8192_wx_set_rate(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);
	int rc;

	if (wpriv->bHwRadioOff)
		return 0;

	down(&wpriv->wx_sem);
	rc = rtllib_wx_set_rate(wpriv->rtllib, info, wrqu, extra);
	up(&wpriv->wx_sem);

	return rc;
}
/* SIOCSIWRTS: set the RTS threshold; ignored while the HW RF kill is active. */
static int r8192_wx_set_rts(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);
	int rc;

	if (wpriv->bHwRadioOff)
		return 0;

	down(&wpriv->wx_sem);
	rc = rtllib_wx_set_rts(wpriv->rtllib, info, wrqu, extra);
	up(&wpriv->wx_sem);

	return rc;
}
/* SIOCGIWRTS: report the RTS threshold via rtllib. */
static int r8192_wx_get_rts(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);

	return rtllib_wx_get_rts(wpriv->rtllib, info, wrqu, extra);
}
/* SIOCSIWPOWER: configure power management; refused while RF kill is on. */
static int r8192_wx_set_power(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);
	int rc;

	if (wpriv->bHwRadioOff) {
		RT_TRACE(COMP_ERR,
			 "%s():Hw is Radio Off, we can't set Power,return\n",
			 __func__);
		return 0;
	}

	down(&wpriv->wx_sem);
	rc = rtllib_wx_set_power(wpriv->rtllib, info, wrqu, extra);
	up(&wpriv->wx_sem);

	return rc;
}
/* SIOCGIWPOWER: report the power management settings via rtllib. */
static int r8192_wx_get_power(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);

	return rtllib_wx_get_power(wpriv->rtllib, info, wrqu, extra);
}
/* Private ioctl: toggle raw TX mode in rtllib; no-op while RF kill is on. */
static int r8192_wx_set_rawtx(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);
	int rc;

	if (wpriv->bHwRadioOff)
		return 0;

	down(&wpriv->wx_sem);
	rc = rtllib_wx_set_rawtx(wpriv->rtllib, info, wrqu, extra);
	up(&wpriv->wx_sem);

	return rc;
}
/* Debug ioctl: latch a user-requested force-reset flag into the driver. */
static int r8192_wx_force_reset(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);

	down(&wpriv->wx_sem);
	RT_TRACE(COMP_DBG, "%s(): force reset ! extra is %d\n",
		 __func__, *extra);
	wpriv->force_reset = *extra;
	up(&wpriv->wx_sem);

	return 0;
}
static int r8192_wx_force_mic_error(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
down(&priv->wx_sem);
RT_TRACE(COMP_DBG, "%s(): force mic error !\n", __func__);
ieee->force_mic_error = true;
up(&priv->wx_sem);
return 0;
}
/* Bookkeeping for IBSS (ad-hoc) peers; apparently intended for the stubbed
 * r8192_wx_get_adhoc_peers ioctl below - currently unused. */
#define MAX_ADHOC_PEER_NUM 64
struct adhoc_peer_entry {
	unsigned char MacAddr[ETH_ALEN];	/* peer station MAC address */
	unsigned char WirelessMode;
	unsigned char bCurTxBW40MHz;		/* peer transmits at 40 MHz */
};
struct adhoc_peers_info {
	struct adhoc_peer_entry Entry[MAX_ADHOC_PEER_NUM];
	unsigned char num;			/* number of valid entries */
};
/* Private ioctl: list ad-hoc peers - not implemented, always reports success. */
static int r8192_wx_get_adhoc_peers(struct net_device *dev,
				    struct iw_request_info *info,
				    union iwreq_data *wrqu, char *extra)
{
	return 0;
}
/* Private ioctl: report firmware version - not implemented, returns 0. */
static int r8191se_wx_get_firm_version(struct net_device *dev,
				       struct iw_request_info *info,
				       struct iw_param *wrqu, char *extra)
{
	return 0;
}
/*
 * Private ioctl driven by the platform power supply state: a non-zero
 * *extra (or force_lps) enables leisure power save; zero disables it and
 * forces the rtllib power-save mode to the given value.
 */
static int r8192_wx_adapter_power_status(struct net_device *dev,
					 struct iw_request_info *info,
					 union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *pPSC = (struct rt_pwr_save_ctrl *)
					(&(priv->rtllib->PowerSaveControl));
	struct rtllib_device *ieee = priv->rtllib;

	down(&priv->wx_sem);

	/* NOTE(review): value 6 is interpreted as "DC power" by the caller
	 * convention logged here - confirm against the user space tool. */
	RT_TRACE(COMP_POWER, "%s(): %s\n", __func__, (*extra == 6) ?
		 "DC power" : "AC power");
	if (*extra || priv->force_lps) {
		priv->ps_force = false;
		pPSC->bLeisurePs = true;
	} else {
		/* Leave leisure PS first if we are associated */
		if (priv->rtllib->state == RTLLIB_LINKED)
			LeisurePSLeave(dev);

		priv->ps_force = true;
		pPSC->bLeisurePs = false;
		ieee->ps = *extra;
	}

	up(&priv->wx_sem);
	return 0;
}
/* Private ioctl: software radio switch; only 0 (off) and 1 (on) are valid. */
static int r8192se_wx_set_radio(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);
	int rc = 0;

	down(&wpriv->wx_sem);
	netdev_info(dev, "%s(): set radio ! extra is %d\n", __func__, *extra);
	if (*extra != 0 && *extra != 1) {
		RT_TRACE(COMP_ERR,
			 "%s(): set radio an err value,must 0(radio off) or 1(radio on)\n",
			 __func__);
		rc = -1;
	} else {
		wpriv->sw_radio_on = *extra;
	}
	up(&wpriv->wx_sem);

	return rc;
}
/* Private ioctl: store the user-requested maximum LPS awake interval. */
static int r8192se_wx_set_lps_awake_interval(struct net_device *dev,
					     struct iw_request_info *info,
					     union iwreq_data *wrqu,
					     char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);
	struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
				       (&(wpriv->rtllib->PowerSaveControl));

	down(&wpriv->wx_sem);
	netdev_info(dev, "%s(): set lps awake interval ! extra is %d\n",
		    __func__, *extra);
	psc->RegMaxLPSAwakeIntvl = *extra;
	up(&wpriv->wx_sem);

	return 0;
}
/* Private ioctl: latch the force-LPS flag (1 = open, 0 = close). */
static int r8192se_wx_set_force_lps(struct net_device *dev,
				    struct iw_request_info *info,
				    union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);

	down(&wpriv->wx_sem);
	netdev_info(dev,
		    "%s(): force LPS ! extra is %d (1 is open 0 is close)\n",
		    __func__, *extra);
	wpriv->force_lps = *extra;
	up(&wpriv->wx_sem);

	return 0;
}
/*
 * Private ioctl: adjust the global debug component bitmap.  A positive
 * value sets that bit number; zero masks everything down to BIT31.
 */
static int r8192_wx_set_debugflag(struct net_device *dev,
				  struct iw_request_info *info,
				  union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	u8 c = *extra;

	if (priv->bHwRadioOff)
		return 0;

	netdev_info(dev, "=====>%s(), *extra:%x, debugflag:%x\n", __func__,
		    *extra, rt_global_debug_component);
	if (c > 0)
		rt_global_debug_component |= (1<<c);
	else
		/* NOTE(review): this keeps only BIT31 rather than clearing
		 * all bits - confirm that is the intended "reset" behavior. */
		rt_global_debug_component &= BIT31;
	return 0;
}
/*
 * SIOCSIWMODE handler.  For ad-hoc/monitor (or net-promiscuous) targets the
 * radio must be powered, so inactive power save (IPS) is left first; the
 * request fails with -1 if the RF was turned off for a stronger reason than
 * IPS.  The mode change itself is delegated to rtllib.
 */
static int r8192_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = netdev_priv_rsl(dev);

	enum rt_rf_power_state rtState;
	int ret;

	if (priv->bHwRadioOff)
		return 0;
	rtState = priv->rtllib->eRFPowerState;
	down(&priv->wx_sem);
	if (wrqu->mode == IW_MODE_ADHOC || wrqu->mode == IW_MODE_MONITOR ||
	    ieee->bNetPromiscuousMode) {
		if (priv->rtllib->PowerSaveControl.bInactivePs) {
			if (rtState == eRfOff) {
				if (priv->rtllib->RfOffReason >
				    RF_CHANGE_BY_IPS) {
					/* RF was disabled by something other
					 * than IPS (e.g. HW switch): bail */
					RT_TRACE(COMP_ERR, "%s(): RF is OFF.\n",
						 __func__);
					up(&priv->wx_sem);
					return -1;
				}
				netdev_info(dev, "=========>%s(): IPSLeave\n",
					    __func__);
				down(&priv->rtllib->ips_sem);
				IPSLeave(dev);
				up(&priv->rtllib->ips_sem);
			}
		}
	}
	ret = rtllib_wx_set_mode(priv->rtllib, a, wrqu, b);
	up(&priv->wx_sem);
	return ret;
}
/* Prefix-compatible view of struct iw_range that exposes the scan_capa
 * field; used by rtl8192_wx_get_range() below. */
struct iw_range_with_scan_capa {
	/* Informative stuff (to choose between different interface) */
	__u32 throughput; /* To give an idea... */
	/* In theory this value should be the maximum benchmarked
	 * TCP/IP throughput, because with most of these devices the
	 * bit rate is meaningless (overhead an co) to estimate how
	 * fast the connection will go and pick the fastest one.
	 * I suggest people to play with Netperf or any benchmark...
	 */

	/* NWID (or domain id) */
	__u32 min_nwid; /* Minimal NWID we are able to set */
	__u32 max_nwid; /* Maximal NWID we are able to set */

	/* Old Frequency (backward compat - moved lower ) */
	__u16 old_num_channels;
	__u8 old_num_frequency;

	/* Scan capabilities */
	__u8 scan_capa;
};
/*
 * SIOCGIWRANGE handler: fill in driver capabilities - quality scales, bit
 * rates, power management limits, the active channel list and encryption
 * capabilities.
 */
static int rtl8192_wx_get_range(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct iw_range *range = (struct iw_range *)extra;
	struct r8192_priv *priv = rtllib_priv(dev);
	u16 val;
	int i;

	wrqu->data.length = sizeof(*range);
	memset(range, 0, sizeof(*range));

	/* ~130 Mb/s real (802.11n) */
	range->throughput = 130 * 1000 * 1000;

	if (priv->rf_set_sens != NULL) {
		/* signal level threshold range */
		range->sensitivity = priv->max_sens;
	}

	range->max_qual.qual = 100;
	range->max_qual.level = 0;
	range->max_qual.noise = 0;
	range->max_qual.updated = 7; /* Updated all three */

	range->avg_qual.qual = 70; /* > 8% missed beacons is 'bad' */
	range->avg_qual.level = 0;
	range->avg_qual.noise = 0;
	range->avg_qual.updated = 7; /* Updated all three */

	range->num_bitrates = min(RATE_COUNT, IW_MAX_BITRATES);

	for (i = 0; i < range->num_bitrates; i++)
		range->bitrate[i] = rtl8192_rates[i];

	range->max_rts = DEFAULT_RTS_THRESHOLD;
	range->min_frag = MIN_FRAG_THRESHOLD;
	range->max_frag = MAX_FRAG_THRESHOLD;

	range->min_pmp = 0;
	range->max_pmp = 5000000;
	range->min_pmt = 0;
	range->max_pmt = 65535*1000;
	range->pmp_flags = IW_POWER_PERIOD;
	range->pmt_flags = IW_POWER_TIMEOUT;
	range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
	range->we_version_compiled = WIRELESS_EXT;
	range->we_version_source = 18;

	/* Export only channels enabled in the regulatory channel map */
	for (i = 0, val = 0; i < 14; i++) {
		if ((priv->rtllib->active_channel_map)[i+1]) {
			range->freq[val].i = i + 1;
			range->freq[val].m = rtllib_wlan_frequencies[i] *
					     100000;
			range->freq[val].e = 1;
			val++;
		}

		if (val == IW_MAX_FREQUENCIES)
			break;
	}
	range->num_frequency = val;
	range->num_channels = val;
	range->enc_capa = IW_ENC_CAPA_WPA|IW_ENC_CAPA_WPA2|
			  IW_ENC_CAPA_CIPHER_TKIP|IW_ENC_CAPA_CIPHER_CCMP;
	range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;

	/* Event capability (kernel + driver) */

	return 0;
}
/*
 * SIOCSIWSCAN handler.  Rejects scans while associating or during heavy
 * traffic, leaves inactive power save if needed, and runs either a driver
 * driven synchronous scan (not yet linked) or the rtllib scan (linked).
 */
static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;
	enum rt_rf_power_state rtState;
	int ret;

	/* Without softmac scan support, refuse mid-association scans and
	 * scans right after linking */
	if (!(ieee->softmac_features & IEEE_SOFTMAC_SCAN)) {
		if ((ieee->state >= RTLLIB_ASSOCIATING) &&
		    (ieee->state <= RTLLIB_ASSOCIATING_AUTHENTICATED))
			return 0;
		if ((priv->rtllib->state == RTLLIB_LINKED) &&
		    (priv->rtllib->CntAfterLink < 2))
			return 0;
	}

	if (priv->bHwRadioOff) {
		netdev_info(dev, "================>%s(): hwradio off\n",
			    __func__);
		return 0;
	}
	rtState = priv->rtllib->eRFPowerState;
	if (!priv->up)
		return -ENETDOWN;
	if (priv->rtllib->LinkDetectInfo.bBusyTraffic == true)
		return -EAGAIN;

	/* Directed scan: remember the requested ESSID */
	if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
		struct iw_scan_req *req = (struct iw_scan_req *)b;

		if (req->essid_len) {
			ieee->current_network.ssid_len = req->essid_len;
			memcpy(ieee->current_network.ssid, req->essid,
			       req->essid_len);
		}
	}

	down(&priv->wx_sem);

	priv->rtllib->FirstIe_InScan = true;

	if (priv->rtllib->state != RTLLIB_LINKED) {
		if (priv->rtllib->PowerSaveControl.bInactivePs) {
			if (rtState == eRfOff) {
				if (priv->rtllib->RfOffReason >
				    RF_CHANGE_BY_IPS) {
					/* RF off for a non-IPS reason: fail */
					RT_TRACE(COMP_ERR,
						 "%s(): RF is OFF.\n",
						 __func__);
					up(&priv->wx_sem);
					return -1;
				}
				RT_TRACE(COMP_PS, "=========>%s(): IPSLeave\n",
					 __func__);
				down(&priv->rtllib->ips_sem);
				IPSLeave(dev);
				up(&priv->rtllib->ips_sem);
			}
		}
		rtllib_stop_scan(priv->rtllib);
		if (priv->rtllib->LedControlHandler)
			priv->rtllib->LedControlHandler(dev,
							LED_CTL_SITE_SURVEY);

		if (priv->rtllib->eRFPowerState != eRfOff) {
			priv->rtllib->actscanning = true;

			/* Save RF state around the synchronous scan */
			if (ieee->ScanOperationBackupHandler)
				ieee->ScanOperationBackupHandler(ieee->dev,
							 SCAN_OPT_BACKUP);

			rtllib_start_scan_syncro(priv->rtllib, 0);

			if (ieee->ScanOperationBackupHandler)
				ieee->ScanOperationBackupHandler(ieee->dev,
							 SCAN_OPT_RESTORE);
		}
		ret = 0;
	} else {
		priv->rtllib->actscanning = true;
		ret = rtllib_wx_set_scan(priv->rtllib, a, wrqu, b);
	}

	up(&priv->wx_sem);
	return ret;
}
/* SIOCGIWSCAN: return scan results; requires the interface to be up. */
static int r8192_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);
	int rc;

	if (!wpriv->up)
		return -ENETDOWN;

	if (wpriv->bHwRadioOff)
		return 0;

	down(&wpriv->wx_sem);
	rc = rtllib_wx_get_scan(wpriv->rtllib, a, wrqu, b);
	up(&wpriv->wx_sem);

	return rc;
}
/* SIOCSIWESSID: set the desired ESSID; ignored while RF kill is active. */
static int r8192_wx_set_essid(struct net_device *dev,
			      struct iw_request_info *a,
			      union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);
	int rc;

	if (wpriv->bHwRadioOff) {
		netdev_info(dev,
			    "=========>%s():hw radio off,or Rf state is eRfOff, return\n",
			    __func__);
		return 0;
	}

	down(&wpriv->wx_sem);
	rc = rtllib_wx_set_essid(wpriv->rtllib, a, wrqu, b);
	up(&wpriv->wx_sem);

	return rc;
}
/* SIOCGIWESSID: report the current ESSID via rtllib. */
static int r8192_wx_get_essid(struct net_device *dev,
			      struct iw_request_info *a,
			      union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);
	int rc;

	down(&wpriv->wx_sem);
	rc = rtllib_wx_get_essid(wpriv->rtllib, a, wrqu, b);
	up(&wpriv->wx_sem);

	return rc;
}
/*
 * SIOCSIWNICKN handler: store the station nickname, clamped to the size of
 * priv->nick, rejecting input longer than IW_ESSID_MAX_SIZE.
 */
static int r8192_wx_set_nick(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
		return -E2BIG;
	down(&priv->wx_sem);
	wrqu->data.length = min_t(size_t, wrqu->data.length,
				  sizeof(priv->nick));
	memset(priv->nick, 0, sizeof(priv->nick));
	memcpy(priv->nick, extra, wrqu->data.length);
	up(&priv->wx_sem);
	return 0;
}
/* SIOCGIWNICKN handler: copy the stored nickname back to user space. */
static int r8192_wx_get_nick(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	down(&priv->wx_sem);
	wrqu->data.length = strlen(priv->nick);
	memcpy(extra, priv->nick, wrqu->data.length);
	wrqu->data.flags = 1;   /* active */
	up(&priv->wx_sem);
	return 0;
}
/* SIOCSIWFREQ: set channel/frequency; ignored while RF kill is active. */
static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);
	int rc;

	if (wpriv->bHwRadioOff)
		return 0;

	down(&wpriv->wx_sem);
	rc = rtllib_wx_set_freq(wpriv->rtllib, a, wrqu, b);
	up(&wpriv->wx_sem);

	return rc;
}
/* SIOCGIWNAME: report the protocol name via rtllib. */
static int r8192_wx_get_name(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);

	return rtllib_wx_get_name(wpriv->rtllib, info, wrqu, extra);
}
/*
 * SIOCSIWFRAG: set the fragmentation threshold.  Disabled maps to the
 * default threshold; otherwise the value is range-checked and forced even.
 */
static int r8192_wx_set_frag(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);

	if (wpriv->bHwRadioOff)
		return 0;

	if (wrqu->frag.disabled) {
		wpriv->rtllib->fts = DEFAULT_FRAG_THRESHOLD;
		return 0;
	}

	if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
	    wrqu->frag.value > MAX_FRAG_THRESHOLD)
		return -EINVAL;

	/* keep the threshold even */
	wpriv->rtllib->fts = wrqu->frag.value & ~0x1;

	return 0;
}
/* SIOCGIWFRAG handler: report the fragmentation threshold. */
static int r8192_wx_get_frag(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	wrqu->frag.value = priv->rtllib->fts;
	wrqu->frag.fixed = 0;	/* no auto select */
	/* Report "disabled" when the threshold is still the default */
	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FRAG_THRESHOLD);

	return 0;
}
/* SIOCSIWAP: set the desired AP BSSID; ignored while RF kill is active. */
static int r8192_wx_set_wap(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *awrq,
			    char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);
	int rc;

	if (wpriv->bHwRadioOff)
		return 0;

	down(&wpriv->wx_sem);
	rc = rtllib_wx_set_wap(wpriv->rtllib, info, awrq, extra);
	up(&wpriv->wx_sem);

	return rc;
}
/* SIOCGIWAP: report the current AP BSSID via rtllib. */
static int r8192_wx_get_wap(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);

	return rtllib_wx_get_wap(wpriv->rtllib, info, wrqu, extra);
}
/* SIOCGIWENCODE: report the current WEP configuration via rtllib. */
static int r8192_wx_get_enc(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *key)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);

	return rtllib_wx_get_encode(wpriv->rtllib, info, wrqu, key);
}
/*
 * SIOCSIWENCODE handler: hand the WEP key to the rtllib software path, then
 * (for WEP104) program it into the hardware CAM as well.
 */
static int r8192_wx_set_enc(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *key)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int ret;

	struct rtllib_device *ieee = priv->rtllib;
	u32 hwkey[4] = {0, 0, 0, 0};
	u8 mask = 0xff;
	u32 key_idx = 0;
	/* Per-index pseudo MAC addresses for the default-key CAM entries */
	u8 zero_addr[4][6] = {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
			      {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
			      {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
			      {0x00, 0x00, 0x00, 0x00, 0x00, 0x03} };
	int i;

	if (priv->bHwRadioOff)
		return 0;

	if (!priv->up)
		return -ENETDOWN;

	/* Leave inactive power save before touching key hardware */
	priv->rtllib->wx_set_enc = 1;
	down(&priv->rtllib->ips_sem);
	IPSLeave(dev);
	up(&priv->rtllib->ips_sem);
	down(&priv->wx_sem);

	RT_TRACE(COMP_SEC, "Setting SW wep key");
	ret = rtllib_wx_set_encode(priv->rtllib, info, wrqu, key);
	up(&priv->wx_sem);

	if (wrqu->encoding.flags & IW_ENCODE_DISABLED) {
		/* Encryption disabled: wipe the HW CAM and its SW shadow */
		ieee->pairwise_key_type = ieee->group_key_type = KEY_TYPE_NA;
		CamResetAllEntry(dev);
		memset(priv->rtllib->swcamtable, 0,
		       sizeof(struct sw_cam_table) * 32);
		goto end_hw_sec;
	}
	if (wrqu->encoding.length != 0) {
		/* Pack key bytes into 32-bit words; mask drops to 0 past the
		 * key length so short keys are zero padded */
		for (i = 0; i < 4; i++) {
			hwkey[i] |= key[4*i+0]&mask;
			if (i == 1 && (4 * i + 1) == wrqu->encoding.length)
				mask = 0x00;
			if (i == 3 && (4 * i + 1) == wrqu->encoding.length)
				mask = 0x00;
			hwkey[i] |= (key[4 * i + 1] & mask) << 8;
			hwkey[i] |= (key[4 * i + 2] & mask) << 16;
			hwkey[i] |= (key[4 * i + 3] & mask) << 24;
		}

#define CONF_WEP40  0x4
#define CONF_WEP104 0x14

		/* Map WEXT key index (1-4; 0 means "current TX key") */
		switch (wrqu->encoding.flags & IW_ENCODE_INDEX) {
		case 0:
			key_idx = ieee->crypt_info.tx_keyidx;
			break;
		case 1:
			key_idx = 0;
			break;
		case 2:
			key_idx = 1;
			break;
		case 3:
			key_idx = 2;
			break;
		case 4:
			key_idx = 3;
			break;
		default:
			break;
		}
		if (wrqu->encoding.length == 0x5) {
			/* 40-bit WEP.  NOTE(review): unlike the WEP104 branch
			 * below, no setKey()/set_swcam() call is made here, so
			 * the key never reaches the CAM - confirm whether
			 * that is intentional. */
			ieee->pairwise_key_type = KEY_TYPE_WEP40;
			EnableHWSecurityConfig8192(dev);
		} else if (wrqu->encoding.length == 0xd) {
			/* 104-bit WEP: program the HW CAM and its SW shadow */
			ieee->pairwise_key_type = KEY_TYPE_WEP104;
			EnableHWSecurityConfig8192(dev);
			setKey(dev, key_idx, key_idx, KEY_TYPE_WEP104,
			       zero_addr[key_idx], 0, hwkey);
			set_swcam(dev, key_idx, key_idx, KEY_TYPE_WEP104,
				  zero_addr[key_idx], 0, hwkey, 0);
		} else {
			netdev_info(dev,
				    "wrong type in WEP, not WEP40 and WEP104\n");
		}
	}

end_hw_sec:
	priv->rtllib->wx_set_enc = 0;
	return ret;
}
/*
 * Private ioctl: select active (1) vs passive (0) scanning.
 * NOTE(review): returns 1, not 0, on success - callers appear to rely on
 * this legacy behavior, so it is left as-is.
 */
static int r8192_wx_set_scan_type(struct net_device *dev,
				  struct iw_request_info *aa,
				  union iwreq_data *wrqu, char *p)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int *parms = (int *)p;
	int mode = parms[0];

	if (priv->bHwRadioOff)
		return 0;

	priv->rtllib->active_scan = mode;

	return 1;
}
#define R8192_MAX_RETRY 255
/*
 * SIOCSIWRETRY: set the retry limit.  Only a plain limit (no lifetime, not
 * disabled) up to R8192_MAX_RETRY is supported; IW_RETRY_MAX selects the
 * RTS/CTS retry count, otherwise the data retry count is set.
 */
static int r8192_wx_set_retry(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);
	int rc = 0;

	if (wpriv->bHwRadioOff)
		return 0;

	down(&wpriv->wx_sem);

	if ((wrqu->retry.flags & IW_RETRY_LIFETIME) ||
	    wrqu->retry.disabled ||
	    !(wrqu->retry.flags & IW_RETRY_LIMIT) ||
	    wrqu->retry.value > R8192_MAX_RETRY) {
		rc = -EINVAL;
		goto exit;
	}

	if (wrqu->retry.flags & IW_RETRY_MAX) {
		wpriv->retry_rts = wrqu->retry.value;
		DMESG("Setting retry for RTS/CTS data to %d",
		      wrqu->retry.value);
	} else {
		wpriv->retry_data = wrqu->retry.value;
		DMESG("Setting retry for non RTS/CTS data to %d",
		      wrqu->retry.value);
	}

	/* Push the new settings to the hardware */
	rtl8192_commit(dev);
exit:
	up(&wpriv->wx_sem);

	return rc;
}
/*
 * SIOCGIWRETRY handler: report the RTS/CTS or data retry limit depending on
 * the requested flags; lifetime queries are not supported.
 */
static int r8192_wx_get_retry(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);

	wrqu->retry.disabled = 0; /* can't be disabled */

	if ((wrqu->retry.flags & IW_RETRY_TYPE) ==
	    IW_RETRY_LIFETIME)
		return -EINVAL;

	if (wrqu->retry.flags & IW_RETRY_MAX) {
		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
		wrqu->retry.value = priv->retry_rts;
	} else {
		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
		wrqu->retry.value = priv->retry_data;
	}

	return 0;
}
/* SIOCGIWSENS: report the sensitivity; -1 if the radio has no such knob. */
static int r8192_wx_get_sens(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *wpriv = rtllib_priv(dev);

	if (!wpriv->rf_set_sens)
		return -1; /* we have not this support for this radio */

	wrqu->sens.value = wpriv->sens;
	return 0;
}
/*
 * SIOCSIWSENS handler: forward the sensitivity value to the RF backend and
 * cache it on success; -1 when the radio has no sensitivity support.
 */
static int r8192_wx_set_sens(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	short err = 0;

	if (priv->bHwRadioOff)
		return 0;

	down(&priv->wx_sem);
	if (priv->rf_set_sens == NULL) {
		err = -1; /* we have not this support for this radio */
		goto exit;
	}
	/* Only cache the value if the hardware accepted it */
	if (priv->rf_set_sens(dev, wrqu->sens.value) == 0)
		priv->sens = wrqu->sens.value;
	else
		err = -EINVAL;

exit:
	up(&priv->wx_sem);

	return err;
}
/*
 * SIOCSIWENCODEEXT handler (WPA/WPA2 keys): configure the software crypto
 * path via rtllib, then mirror pairwise/group keys into the hardware CAM.
 */
static int r8192_wx_set_enc_ext(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	int ret = 0;

	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;

	if (priv->bHwRadioOff)
		return 0;

	down(&priv->wx_sem);

	/* Leave inactive power save before touching key hardware */
	priv->rtllib->wx_set_enc = 1;
	down(&priv->rtllib->ips_sem);
	IPSLeave(dev);
	up(&priv->rtllib->ips_sem);
	ret = rtllib_wx_set_encode_ext(ieee, info, wrqu, extra);
	{
		u8 broadcast_addr[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
		u8 zero[6] = {0};
		u32 key[4] = {0};
		struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
		struct iw_point *encoding = &wrqu->encoding;
		u8 idx = 0, alg = 0, group = 0;

		if ((encoding->flags & IW_ENCODE_DISABLED) ||
		     ext->alg == IW_ENCODE_ALG_NONE) {
			/* Encryption disabled: clear CAM and its SW shadow */
			ieee->pairwise_key_type = ieee->group_key_type
						= KEY_TYPE_NA;
			CamResetAllEntry(dev);
			memset(priv->rtllib->swcamtable, 0,
			       sizeof(struct sw_cam_table) * 32);
			goto end_hw_sec;
		}
		alg = (ext->alg == IW_ENCODE_ALG_CCMP) ? KEY_TYPE_CCMP :
		      ext->alg;
		idx = encoding->flags & IW_ENCODE_INDEX;
		if (idx)
			idx--;
		group = ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY;

		if ((!group) || (IW_MODE_ADHOC == ieee->iw_mode) ||
		    (alg == KEY_TYPE_WEP40)) {
			if ((ext->key_len == 13) && (alg == KEY_TYPE_WEP40))
				alg = KEY_TYPE_WEP104;
			ieee->pairwise_key_type = alg;
			EnableHWSecurityConfig8192(dev);
		}
		/* NOTE(review): always copies 16 bytes regardless of
		 * ext->key_len - confirm ext->key is always large enough. */
		memcpy((u8 *)key, ext->key, 16);

		if ((alg & KEY_TYPE_WEP40) && (ieee->auth_mode != 2)) {
			if (ext->key_len == 13)
				ieee->pairwise_key_type = alg = KEY_TYPE_WEP104;
			setKey(dev, idx, idx, alg, zero, 0, key);
			set_swcam(dev, idx, idx, alg, zero, 0, key, 0);
		} else if (group) {
			ieee->group_key_type = alg;
			setKey(dev, idx, idx, alg, broadcast_addr, 0, key);
			set_swcam(dev, idx, idx, alg, broadcast_addr, 0,
				  key, 0);
		} else {
			if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) &&
			     ieee->pHTInfo->bCurrentHTSupport)
				write_nic_byte(dev, 0x173, 1);
			/* Pairwise key goes into CAM entry 4, keyed by the
			 * AP's MAC address */
			setKey(dev, 4, idx, alg, (u8 *)ieee->ap_mac_addr,
			       0, key);
			set_swcam(dev, 4, idx, alg, (u8 *)ieee->ap_mac_addr,
				  0, key, 0);
		}
	}

end_hw_sec:
	priv->rtllib->wx_set_enc = 0;
	up(&priv->wx_sem);
	return ret;
}
/*
 * WEXT SIOCSIWAUTH handler: forward authentication parameter updates
 * to the rtllib layer under the driver's wx semaphore.
 */
static int r8192_wx_set_auth(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *data, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int rc;

	/* No configuration changes while the hardware RF switch is off. */
	if (priv->bHwRadioOff)
		return 0;

	down(&priv->wx_sem);
	rc = rtllib_wx_set_auth(priv->rtllib, info, &data->param, extra);
	up(&priv->wx_sem);

	return rc;
}
/*
 * WEXT SIOCSIWMLME handler: pass MLME requests (deauth/disassoc) down
 * to the rtllib layer, serialized by the wx semaphore.
 */
static int r8192_wx_set_mlme(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int rc;

	/* Ignore the request entirely while the RF kill switch is active. */
	if (priv->bHwRadioOff)
		return 0;

	down(&priv->wx_sem);
	rc = rtllib_wx_set_mlme(priv->rtllib, info, wrqu, extra);
	up(&priv->wx_sem);

	return rc;
}
/*
 * WEXT SIOCSIWGENIE handler: hand the user-supplied generic information
 * element (e.g. WPA/RSN IE) to the rtllib layer.
 */
static int r8192_wx_set_gen_ie(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *data, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	int rc;

	/* Radio is hard-killed: silently accept and do nothing. */
	if (priv->bHwRadioOff)
		return 0;

	down(&priv->wx_sem);
	rc = rtllib_wx_set_gen_ie(priv->rtllib, extra, data->data.length);
	up(&priv->wx_sem);

	return rc;
}
/*
 * WEXT SIOCGIWGENIE handler: copy the stored WPA IE back to user space.
 * Reports length 0 when no IE is stored, -E2BIG when the caller's
 * buffer is too small.
 */
static int r8192_wx_get_gen_ie(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *data, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;

	/* No IE stored yet: report an empty element. */
	if (ieee->wpa_ie == NULL || ieee->wpa_ie_len == 0) {
		data->data.length = 0;
		return 0;
	}

	/* Caller's buffer cannot hold the stored IE. */
	if (data->data.length < ieee->wpa_ie_len)
		return -E2BIG;

	data->data.length = ieee->wpa_ie_len;
	memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
	return 0;
}
#define OID_RT_INTEL_PROMISCUOUS_MODE 0xFF0101F6
static int r8192_wx_set_PromiscuousMode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
u32 info_buf[3];
u32 oid;
u32 bPromiscuousOn;
u32 bFilterSourceStationFrame;
if (copy_from_user(info_buf, wrqu->data.pointer, sizeof(info_buf)))
return -EFAULT;
oid = info_buf[0];
bPromiscuousOn = info_buf[1];
bFilterSourceStationFrame = info_buf[2];
if (OID_RT_INTEL_PROMISCUOUS_MODE == oid) {
ieee->IntelPromiscuousModeInfo.bPromiscuousOn =
(bPromiscuousOn) ? (true) : (false);
ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame =
(bFilterSourceStationFrame) ? (true) : (false);
(bPromiscuousOn) ?
(rtllib_EnableIntelPromiscuousMode(dev, false)) :
(rtllib_DisableIntelPromiscuousMode(dev, false));
netdev_info(dev,
"=======>%s(), on = %d, filter src sta = %d\n",
__func__, bPromiscuousOn,
bFilterSourceStationFrame);
} else {
return -1;
}
return 0;
}
/*
 * Private ioctl "getpromisc": format the current Intel promiscuous-mode
 * state into the caller's buffer as a human-readable string.
 * The 45-byte bound matches the IW_PRIV_SIZE_FIXED|45 declaration for
 * this ioctl in r8192_private_args.
 */
static int r8192_wx_get_PromiscuousMode(struct net_device *dev,
					struct iw_request_info *info,
					union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;
	int on, filter;

	down(&priv->wx_sem);
	/* Snapshot both flags while holding the lock. */
	on = ieee->IntelPromiscuousModeInfo.bPromiscuousOn;
	filter = ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame;
	snprintf(extra, 45, "PromiscuousMode:%d, FilterSrcSTAFrame:%d",
		 on, filter);
	wrqu->data.length = strlen(extra) + 1;
	up(&priv->wx_sem);

	return 0;
}
/* Designated-initializer index: maps a WEXT ioctl number to its slot in
 * the handler array (slot 0 corresponds to SIOCSIWCOMMIT). */
#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
/* Standard wireless-extensions handlers; unlisted ioctls stay NULL and
 * are rejected by the WEXT core. */
static iw_handler r8192_wx_handlers[] = {
	IW_IOCTL(SIOCGIWNAME) = r8192_wx_get_name,
	IW_IOCTL(SIOCSIWFREQ) = r8192_wx_set_freq,
	IW_IOCTL(SIOCGIWFREQ) = r8192_wx_get_freq,
	IW_IOCTL(SIOCSIWMODE) = r8192_wx_set_mode,
	IW_IOCTL(SIOCGIWMODE) = r8192_wx_get_mode,
	IW_IOCTL(SIOCSIWSENS) = r8192_wx_set_sens,
	IW_IOCTL(SIOCGIWSENS) = r8192_wx_get_sens,
	IW_IOCTL(SIOCGIWRANGE) = rtl8192_wx_get_range,
	IW_IOCTL(SIOCSIWAP) = r8192_wx_set_wap,
	IW_IOCTL(SIOCGIWAP) = r8192_wx_get_wap,
	IW_IOCTL(SIOCSIWSCAN) = r8192_wx_set_scan,
	IW_IOCTL(SIOCGIWSCAN) = r8192_wx_get_scan,
	IW_IOCTL(SIOCSIWESSID) = r8192_wx_set_essid,
	IW_IOCTL(SIOCGIWESSID) = r8192_wx_get_essid,
	IW_IOCTL(SIOCSIWNICKN) = r8192_wx_set_nick,
	IW_IOCTL(SIOCGIWNICKN) = r8192_wx_get_nick,
	IW_IOCTL(SIOCSIWRATE) = r8192_wx_set_rate,
	IW_IOCTL(SIOCGIWRATE) = r8192_wx_get_rate,
	IW_IOCTL(SIOCSIWRTS) = r8192_wx_set_rts,
	IW_IOCTL(SIOCGIWRTS) = r8192_wx_get_rts,
	IW_IOCTL(SIOCSIWFRAG) = r8192_wx_set_frag,
	IW_IOCTL(SIOCGIWFRAG) = r8192_wx_get_frag,
	IW_IOCTL(SIOCSIWRETRY) = r8192_wx_set_retry,
	IW_IOCTL(SIOCGIWRETRY) = r8192_wx_get_retry,
	IW_IOCTL(SIOCSIWENCODE) = r8192_wx_set_enc,
	IW_IOCTL(SIOCGIWENCODE) = r8192_wx_get_enc,
	IW_IOCTL(SIOCSIWPOWER) = r8192_wx_set_power,
	IW_IOCTL(SIOCGIWPOWER) = r8192_wx_get_power,
	IW_IOCTL(SIOCSIWGENIE) = r8192_wx_set_gen_ie,
	IW_IOCTL(SIOCGIWGENIE) = r8192_wx_get_gen_ie,
	IW_IOCTL(SIOCSIWMLME) = r8192_wx_set_mlme,
	IW_IOCTL(SIOCSIWAUTH) = r8192_wx_set_auth,
	IW_IOCTL(SIOCSIWENCODEEXT) = r8192_wx_set_enc_ext,
};
/* The following rules must be followed when adding entries:
 * odd-numbered ioctls  : get (world access),
 * even-numbered ioctls : set (root access)
 */
/* Argument descriptors for the driver-private ioctls, starting at
 * SIOCIWFIRSTPRIV.  The offsets 0x7-0x8 and 0xd-0x15 are intentionally
 * unused; the corresponding r8192_private_handler slots are NULL. */
static const struct iw_priv_args r8192_private_args[] = {
	{
		SIOCIWFIRSTPRIV + 0x0,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_debugflag"
	}, {
		SIOCIWFIRSTPRIV + 0x1,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan"
	}, {
		SIOCIWFIRSTPRIV + 0x2,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rawtx"
	}, {
		SIOCIWFIRSTPRIV + 0x3,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "forcereset"
	}, {
		SIOCIWFIRSTPRIV + 0x4,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "force_mic_error"
	}, {
		SIOCIWFIRSTPRIV + 0x5,
		IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_INT|IW_PRIV_SIZE_FIXED|1,
		"firm_ver"
	}, {
		SIOCIWFIRSTPRIV + 0x6,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED|1, IW_PRIV_TYPE_NONE,
		"set_power"
	}, {
		SIOCIWFIRSTPRIV + 0x9,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED|1, IW_PRIV_TYPE_NONE,
		"radio"
	}, {
		SIOCIWFIRSTPRIV + 0xa,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED|1, IW_PRIV_TYPE_NONE,
		"lps_interv"
	}, {
		SIOCIWFIRSTPRIV + 0xb,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED|1, IW_PRIV_TYPE_NONE,
		"lps_force"
	}, {
		SIOCIWFIRSTPRIV + 0xc,
		0, IW_PRIV_TYPE_CHAR|2047, "adhoc_peer_list"
	}, {
		/* three u32s: OID, promiscuous on/off, filter-src-sta flag */
		SIOCIWFIRSTPRIV + 0x16,
		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0, "setpromisc"
	}, {
		/* the 45-byte bound matches the snprintf() in the handler */
		SIOCIWFIRSTPRIV + 0x17,
		0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 45, "getpromisc"
	}
};
/* Private ioctl dispatch table: slot N handles SIOCIWFIRSTPRIV + N.
 * NULL slots (0x7-0x8 and 0xd-0x15) are ioctl numbers with no handler,
 * matching the gaps in r8192_private_args above. */
static iw_handler r8192_private_handler[] = {
	(iw_handler)r8192_wx_set_debugflag, /*SIOCIWSECONDPRIV*/
	(iw_handler)r8192_wx_set_scan_type,
	(iw_handler)r8192_wx_set_rawtx,
	(iw_handler)r8192_wx_force_reset,
	(iw_handler)r8192_wx_force_mic_error,
	(iw_handler)r8191se_wx_get_firm_version,
	(iw_handler)r8192_wx_adapter_power_status,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)r8192se_wx_set_radio,
	(iw_handler)r8192se_wx_set_lps_awake_interval,
	(iw_handler)r8192se_wx_set_force_lps,
	(iw_handler)r8192_wx_get_adhoc_peers,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)NULL,
	(iw_handler)r8192_wx_set_PromiscuousMode,	/* 0x16 */
	(iw_handler)r8192_wx_get_PromiscuousMode,	/* 0x17 */
};
/*
 * Fill in and return the wireless statistics for this interface.
 *
 * While not associated, placeholder values are reported (qual 10,
 * level 0, noise -100 dBm); once linked, the values come from the
 * current network's running signal statistics.
 *
 * Cleanup: the old code took the address of a struct member only to
 * dereference it again ((&ieee->current_network)->stats.rssi) and
 * copied each value through a needless temporary; both obscured a
 * simple member read without changing behavior.
 */
static struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev)
{
	struct r8192_priv *priv = rtllib_priv(dev);
	struct rtllib_device *ieee = priv->rtllib;
	struct iw_statistics *wstats = &priv->wstats;

	if (ieee->state < RTLLIB_LINKED) {
		/* Not associated: report fixed placeholder values. */
		wstats->qual.qual = 10;
		wstats->qual.level = 0;
		wstats->qual.noise = -100;
		wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
		return wstats;
	}

	wstats->qual.level = ieee->current_network.stats.rssi;
	wstats->qual.qual = ieee->current_network.stats.signal;
	wstats->qual.noise = ieee->current_network.stats.noise;
	wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
	return wstats;
}
/*
 * Wireless-extensions handler registration for this driver.
 * Cleanup: num_private_args now uses ARRAY_SIZE() like the two
 * num_* fields above it, instead of a hand-rolled sizeof division
 * (same value, one idiom, immune to element-type drift).
 */
const struct iw_handler_def r8192_wx_handlers_def = {
	.standard = r8192_wx_handlers,
	.num_standard = ARRAY_SIZE(r8192_wx_handlers),
	.private = r8192_private_handler,
	.num_private = ARRAY_SIZE(r8192_private_handler),
	.num_private_args = ARRAY_SIZE(r8192_private_args),
	.get_wireless_stats = r8192_get_wireless_stats,
	.private_args = (struct iw_priv_args *)r8192_private_args,
};
| gpl-2.0 |
tinyclub/linux-loongson-community | arch/arm64/crypto/aes-ce-ccm-glue.c | 483 | 7656 | /*
* aes-ccm-glue.c - AES-CCM transform for ARMv8 with Crypto Extensions
*
* Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/neon.h>
#include <asm/unaligned.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include "aes-ce-setkey.h"
/*
 * Number of AES rounds for the expanded key in @ctx.
 *
 * AES uses 10/12/14 rounds for 128/192/256-bit keys, which is exactly
 * key-length-in-bytes / 4 + 6.
 */
static int num_rounds(struct crypto_aes_ctx *ctx)
{
	return (ctx->key_length / 4) + 6;
}
/*
 * Assembler routines (ARMv8 Crypto Extensions) — implemented in the
 * accompanying .S file.  NOTE(review): parameter descriptions below are
 * inferred from the call sites in this file; confirm against the asm.
 */
/* Fold 'abytes' bytes of data into the CBC-MAC state 'mac'; '*macp'
 * carries the partial-block offset across calls. */
asmlinkage void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
				     u32 *macp, u32 const rk[], u32 rounds);
/* CTR-encrypt 'cbytes' bytes while updating the CBC-MAC and counter. */
asmlinkage void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[]);
/* CTR-decrypt 'cbytes' bytes while updating the CBC-MAC and counter. */
asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
				   u32 const rk[], u32 rounds, u8 mac[],
				   u8 ctr[]);
/* Finalize the authentication tag in 'mac' using the saved counter IV. */
asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
				 u32 rounds);
/*
 * Expand the AES key for this AEAD transform.  Any failure from the
 * key-expansion helper is reported to the caller as -EINVAL with the
 * BAD_KEY_LEN flag set on the tfm.
 */
static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
		      unsigned int key_len)
{
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(tfm);

	if (ce_aes_expandkey(ctx, in_key, key_len)) {
		tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}
	return 0;
}
/*
 * Validate the requested CCM tag length: it must be even and at least
 * 4 bytes (the upper bound is enforced by the crypto core via
 * maxauthsize).
 */
static int ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	if (authsize < 4 || (authsize & 1))
		return -EINVAL;
	return 0;
}
/*
 * Build the CCM B0 block (initial CBC-MAC IV) in 'maciv' from the
 * request IV and the message length, and turn req->iv into the initial
 * CTR counter block by zeroing its trailing L bytes.
 * Returns 0, -EINVAL for a bad L dimension, or -EOVERFLOW when msglen
 * does not fit in L bytes.
 */
static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	__be32 *n = (__be32 *)&maciv[AES_BLOCK_SIZE - 8];
	/* iv[0] low bits encode L-1, so L itself is iv[0] + 1 */
	u32 l = req->iv[0] + 1;

	/* verify that CCM dimension 'L' is set correctly in the IV */
	if (l < 2 || l > 8)
		return -EINVAL;

	/* verify that msglen can in fact be represented in L bytes */
	if (l < 4 && msglen >> (8 * l))
		return -EOVERFLOW;

	/*
	 * Even if the CCM spec allows L values of up to 8, the Linux cryptoapi
	 * uses a u32 type to represent msglen so the top 4 bytes are always 0.
	 */
	n[0] = 0;
	n[1] = cpu_to_be32(msglen);

	/* flags byte + nonce; the last l bytes hold the big-endian msglen */
	memcpy(maciv, req->iv, AES_BLOCK_SIZE - l);

	/*
	 * Meaning of byte 0 according to CCM spec (RFC 3610/NIST 800-38C)
	 * - bits 0..2	: max # of bytes required to represent msglen, minus 1
	 *		  (already set by caller)
	 * - bits 3..5	: size of auth tag (1 => 4 bytes, 2 => 6 bytes, etc)
	 * - bit 6	: indicates presence of authenticate-only data
	 */
	maciv[0] |= (crypto_aead_authsize(aead) - 2) << 2;
	if (req->assoclen)
		maciv[0] |= 0x40;

	/* counter starts at zero: clear the low L bytes of the CTR IV */
	memset(&req->iv[AES_BLOCK_SIZE - l], 0, l);
	return 0;
}
/*
 * Fold the associated data into the CBC-MAC state 'mac'.
 * Per RFC 3610, the AAD is prefixed with its length: 2 bytes for
 * lengths below 0xff00, otherwise the 0xfffe marker followed by a
 * 4-byte big-endian length.  The AAD itself is then walked through
 * the scatterlist one mapped chunk at a time.
 */
static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	/* packed so the 2- or 6-byte prefix is contiguous in memory */
	struct __packed { __be16 l; __be32 h; u16 len; } ltag;
	struct scatter_walk walk;
	u32 len = req->assoclen;
	u32 macp = 0;

	/* prepend the AAD with a length tag */
	if (len < 0xff00) {
		ltag.l = cpu_to_be16(len);
		ltag.len = 2;
	} else {
		ltag.l = cpu_to_be16(0xfffe);
		put_unaligned_be32(len, &ltag.h);
		ltag.len = 6;
	}

	ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, &macp, ctx->key_enc,
			     num_rounds(ctx));
	scatterwalk_start(&walk, req->assoc);

	do {
		u32 n = scatterwalk_clamp(&walk, len);
		u8 *p;

		/* current sg entry exhausted: advance to the next one */
		if (!n) {
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		p = scatterwalk_map(&walk);
		ce_aes_ccm_auth_data(mac, p, n, &macp, ctx->key_enc,
				     num_rounds(ctx));
		len -= n;

		scatterwalk_unmap(p);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, 0, len);
	} while (len);
}
/*
 * AEAD encrypt: authenticate the AAD, CTR-encrypt the plaintext while
 * accumulating the CBC-MAC, finalize the tag, and append it to the
 * destination scatterlist after the ciphertext.  All crypto work runs
 * inside a kernel_neon_begin_partial/kernel_neon_end section.
 */
static int ccm_encrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	struct blkcipher_desc desc = { .info = req->iv };
	struct blkcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	kernel_neon_begin_partial(6);

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	blkcipher_walk_init(&walk, req->dst, req->src, len);
	err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
					     AES_BLOCK_SIZE);

	while (walk.nbytes) {
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;

		/* last chunk: process the partial block too */
		if (walk.nbytes == len)
			tail = 0;

		ce_aes_ccm_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   walk.nbytes - tail, ctx->key_enc,
				   num_rounds(ctx), mac, walk.iv);

		len -= walk.nbytes - tail;
		err = blkcipher_walk_done(&desc, &walk, tail);
	}
	if (!err)
		ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));

	kernel_neon_end();

	if (err)
		return err;

	/* copy authtag to end of dst */
	scatterwalk_map_and_copy(mac, req->dst, req->cryptlen,
				 crypto_aead_authsize(aead), 1);

	return 0;
}
/*
 * AEAD decrypt: authenticate the AAD, CTR-decrypt the ciphertext while
 * accumulating the CBC-MAC, finalize the tag and compare it against the
 * tag stored at the end of the source scatterlist.
 *
 * Fix: the tag comparison now uses crypto_memneq() instead of memcmp().
 * memcmp() bails out at the first differing byte, so its timing leaks
 * how many leading tag bytes an attacker got right — a classic oracle
 * for forging MACs.  crypto_memneq() (crypto/algapi.h, already
 * included) runs in time independent of the data.
 */
static int ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct blkcipher_desc desc = { .info = req->iv };
	struct blkcipher_walk walk;
	u8 __aligned(8) mac[AES_BLOCK_SIZE];
	u8 buf[AES_BLOCK_SIZE];
	u32 len = req->cryptlen - authsize;
	int err;

	err = ccm_init_mac(req, mac, len);
	if (err)
		return err;

	kernel_neon_begin_partial(6);

	if (req->assoclen)
		ccm_calculate_auth_mac(req, mac);

	/* preserve the original iv for the final round */
	memcpy(buf, req->iv, AES_BLOCK_SIZE);

	blkcipher_walk_init(&walk, req->dst, req->src, len);
	err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
					     AES_BLOCK_SIZE);

	while (walk.nbytes) {
		u32 tail = walk.nbytes % AES_BLOCK_SIZE;

		/* last chunk: process the partial block too */
		if (walk.nbytes == len)
			tail = 0;

		ce_aes_ccm_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
				   walk.nbytes - tail, ctx->key_enc,
				   num_rounds(ctx), mac, walk.iv);

		len -= walk.nbytes - tail;
		err = blkcipher_walk_done(&desc, &walk, tail);
	}
	if (!err)
		ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));

	kernel_neon_end();

	if (err)
		return err;

	/* compare calculated auth tag with the stored one */
	scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize,
				 authsize, 0);

	if (crypto_memneq(mac, buf, authsize))
		return -EBADMSG;
	return 0;
}
/* Registration record for the "ccm(aes)" AEAD, implemented with the
 * ARMv8 Crypto Extensions.  Priority 300 lets it win over generic
 * software implementations. */
static struct crypto_alg ccm_aes_alg = {
	.cra_name		= "ccm(aes)",
	.cra_driver_name	= "ccm-aes-ce",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
	.cra_blocksize		= 1,	/* CTR mode: acts as a stream cipher */
	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
	.cra_alignmask		= 7,
	.cra_type		= &crypto_aead_type,
	.cra_module		= THIS_MODULE,
	.cra_aead = {
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= AES_BLOCK_SIZE,
		.setkey		= ccm_setkey,
		.setauthsize	= ccm_setauthsize,
		.encrypt	= ccm_encrypt,
		.decrypt	= ccm_decrypt,
	}
};
/*
 * Register the CCM(AES) algorithm, but only on CPUs that advertise the
 * ARMv8 AES instructions the assembler routines depend on.
 */
static int __init aes_mod_init(void)
{
	if (elf_hwcap & HWCAP_AES)
		return crypto_register_alg(&ccm_aes_alg);
	return -ENODEV;
}
/* Unregister the CCM(AES) algorithm on module unload. */
static void __exit aes_mod_exit(void)
{
	crypto_unregister_alg(&ccm_aes_alg);
}
module_init(aes_mod_init);
module_exit(aes_mod_exit);
MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("ccm(aes)");
| gpl-2.0 |
Kcilorak/s2110_3.0.8_kernel | drivers/gpu/drm/i915/intel_hdmi.c | 739 | 12643 | /*
* Copyright 2006 Dave Airlie <airlied@linux.ie>
* Copyright © 2006-2009 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
/* Per-port HDMI encoder state. */
struct intel_hdmi {
	struct intel_encoder base;	/* must stay first: container_of users */
	u32 sdvox_reg;			/* SDVO/HDMI control register for this port */
	int ddc_bus;			/* GMBUS pin pair used for DDC/EDID reads */
	uint32_t color_range;		/* 0 or SDVO_COLOR_RANGE_16_235 */
	bool has_hdmi_sink;		/* sink speaks HDMI (vs. DVI), from EDID */
	bool has_audio;			/* sink supports audio, from EDID */
	int force_audio;		/* user override: >0 on, 0 auto-detect, <0 off */
};
/* Recover the intel_hdmi wrapper from its embedded drm_encoder. */
static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
	return container_of(encoder, struct intel_hdmi, base.base);
}
/* Recover the intel_hdmi wrapper from the encoder attached to a connector. */
static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
{
	return container_of(intel_attached_encoder(connector),
			    struct intel_hdmi, base);
}
/*
 * Compute the DIP infoframe checksum in place: with the checksum byte
 * filled in, all bytes of the frame must sum to zero modulo 256.
 */
void intel_dip_infoframe_csum(struct dip_infoframe *avi_if)
{
	uint8_t *bytes = (uint8_t *)avi_if;
	uint8_t sum = 0;
	unsigned i;

	/* checksum and ecc are inside the summed region; clear them first */
	avi_if->checksum = 0;
	avi_if->ecc = 0;

	for (i = 0; i < sizeof(*avi_if); i++)
		sum += bytes[i];

	avi_if->checksum = 0x100 - sum;
}
/*
 * Program a default AVI infoframe into the hardware DIP registers and
 * enable its transmission once per vsync.  Only done for true HDMI
 * sinks and only on ports B/C that this (pre-Ironlake) register layout
 * covers.
 */
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
{
	struct dip_infoframe avi_if = {
		.type = DIP_TYPE_AVI,
		.ver = DIP_VERSION_AVI,
		.len = DIP_LEN_AVI,
	};
	uint32_t *data = (uint32_t *)&avi_if;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
	u32 port;
	unsigned i;

	/* DVI sinks must not receive infoframes */
	if (!intel_hdmi->has_hdmi_sink)
		return;

	/* XXX first guess at handling video port, is this correct? */
	if (intel_hdmi->sdvox_reg == SDVOB)
		port = VIDEO_DIP_PORT_B;
	else if (intel_hdmi->sdvox_reg == SDVOC)
		port = VIDEO_DIP_PORT_C;
	else
		return;

	/* select the AVI buffer before writing the payload */
	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
		   VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC);

	intel_dip_infoframe_csum(&avi_if);
	/* payload is pushed 32 bits at a time through VIDEO_DIP_DATA */
	for (i = 0; i < sizeof(avi_if); i += 4) {
		I915_WRITE(VIDEO_DIP_DATA, *data);
		data++;
	}

	/* finally enable periodic transmission of the AVI frame */
	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
		   VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC |
		   VIDEO_DIP_ENABLE_AVI);
}
/*
 * Program the SDVO/HDMI port register for the given mode: sync
 * polarities, color range, HDMI-vs-DVI encoding, audio enable and pipe
 * selection, then upload the AVI infoframe.
 */
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
				struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
	u32 sdvox;

	sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
	sdvox |= intel_hdmi->color_range;
	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
		sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
		sdvox |= SDVO_HSYNC_ACTIVE_HIGH;

	/* Required on CPT */
	if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
		sdvox |= HDMI_MODE_SELECT;

	if (intel_hdmi->has_audio) {
		sdvox |= SDVO_AUDIO_ENABLE;
		/* audio packets ride in the vblank; NULL packets keep timing */
		sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
	}

	/* route the port to pipe B when needed (CPT uses a transcoder bit) */
	if (intel_crtc->pipe == 1) {
		if (HAS_PCH_CPT(dev))
			sdvox |= PORT_TRANS_B_SEL_CPT;
		else
			sdvox |= SDVO_PIPE_B_SELECT;
	}

	I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
	POSTING_READ(intel_hdmi->sdvox_reg);

	intel_hdmi_set_avi_infoframe(encoder);
}
/*
 * DPMS: toggle SDVO_ENABLE on the port register.  The extra disable
 * before and repeated write after are deliberate PCH-split hardware
 * workarounds — do not reorder or "simplify" them.
 */
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
	u32 temp;

	temp = I915_READ(intel_hdmi->sdvox_reg);

	/* HW workaround, need to toggle enable bit off and on for 12bpc, but
	 * we do this anyway which shows more stable in testing.
	 */
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
		POSTING_READ(intel_hdmi->sdvox_reg);
	}

	if (mode != DRM_MODE_DPMS_ON) {
		temp &= ~SDVO_ENABLE;
	} else {
		temp |= SDVO_ENABLE;
	}

	I915_WRITE(intel_hdmi->sdvox_reg, temp);
	POSTING_READ(intel_hdmi->sdvox_reg);

	/* HW workaround, need to write this twice for issue that may result
	 * in first write getting masked.
	 */
	if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(intel_hdmi->sdvox_reg, temp);
		POSTING_READ(intel_hdmi->sdvox_reg);
	}
}
/*
 * Reject modes this HDMI port cannot drive: pixel clocks above
 * 165 MHz or below 20 MHz (presumably the port's dotclock limits —
 * confirm against the hardware docs), and doublescan modes.
 */
static int intel_hdmi_mode_valid(struct drm_connector *connector,
				 struct drm_display_mode *mode)
{
	int clock = mode->clock;

	if (clock > 165000)
		return MODE_CLOCK_HIGH;
	if (clock < 20000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	return MODE_OK;
}
/* HDMI needs no mode adjustments; accept every mode unchanged. */
static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	return true;
}
/*
 * Hot-plug detect: fetch the EDID over the port's DDC bus.  A digital
 * EDID means connected; HDMI-sink and audio capability are re-derived
 * from it on every probe, then the user's force_audio override (if any)
 * is applied on top.
 */
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct edid *edid;
	enum drm_connector_status status = connector_status_disconnected;

	/* reset cached sink capabilities before re-probing */
	intel_hdmi->has_hdmi_sink = false;
	intel_hdmi->has_audio = false;
	edid = drm_get_edid(connector,
			    &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);

	if (edid) {
		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
			status = connector_status_connected;
			intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
			intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
		}
		connector->display_info.raw_edid = NULL;
		kfree(edid);
	}

	if (status == connector_status_connected) {
		/* non-zero force_audio overrides what the EDID said */
		if (intel_hdmi->force_audio)
			intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
	}

	return status;
}
/*
 * Populate the connector's mode list from the EDID read over this
 * port's DDC bus; returns the number of modes added.
 */
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct i2c_adapter *ddc =
		&dev_priv->gmbus[intel_hdmi->ddc_bus].adapter;

	/* We should parse the EDID data and find out if it's an HDMI sink so
	 * we can send audio to it.
	 */
	return intel_ddc_get_modes(connector, ddc);
}
/*
 * Re-read the EDID and report whether the attached digital sink
 * advertises audio support.  Used when force_audio returns to "auto".
 */
static bool
intel_hdmi_detect_audio(struct drm_connector *connector)
{
	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	bool has_audio = false;
	struct edid *edid;

	edid = drm_get_edid(connector,
			    &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
	if (!edid)
		return false;

	if (edid->input & DRM_EDID_INPUT_DIGITAL)
		has_audio = drm_detect_monitor_audio(edid);

	/* drop the cached raw-EDID pointer before freeing the block */
	connector->display_info.raw_edid = NULL;
	kfree(edid);

	return has_audio;
}
/*
 * Connector property setter for "audio" (force on/auto/off) and
 * "Broadcast RGB" (full vs. 16-235 color range).  A real change to
 * either triggers a full modeset on the attached crtc so the new
 * state reaches the hardware.
 */
static int
intel_hdmi_set_property(struct drm_connector *connector,
			struct drm_property *property,
			uint64_t val)
{
	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	int ret;

	ret = drm_connector_property_set_value(connector, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_hdmi->force_audio)
			return 0;

		intel_hdmi->force_audio = i;

		/* 0 means "auto": fall back to what the EDID reports */
		if (i == 0)
			has_audio = intel_hdmi_detect_audio(connector);
		else
			has_audio = i > 0;

		/* no effective change: skip the modeset below */
		if (has_audio == intel_hdmi->has_audio)
			return 0;

		intel_hdmi->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		if (val == !!intel_hdmi->color_range)
			return 0;

		intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
		goto done;
	}

	return -EINVAL;

done:
	/* re-run the modeset so the new setting is programmed */
	if (intel_hdmi->base.base.crtc) {
		struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
		drm_crtc_helper_set_mode(crtc, &crtc->mode,
					 crtc->x, crtc->y,
					 crtc->fb);
	}

	return 0;
}
/* Tear down the connector: sysfs node first, then DRM state, then the
 * allocation itself (order matters). */
static void intel_hdmi_destroy(struct drm_connector *connector)
{
	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
/* Encoder helper vtable: modeset sequencing for the HDMI encoder. */
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
	.dpms = intel_hdmi_dpms,
	.mode_fixup = intel_hdmi_mode_fixup,
	.prepare = intel_encoder_prepare,
	.mode_set = intel_hdmi_mode_set,
	.commit = intel_encoder_commit,
};
/* Connector vtable: detection, property handling and teardown. */
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = intel_hdmi_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_hdmi_set_property,
	.destroy = intel_hdmi_destroy,
};
/* Connector helper vtable: mode enumeration and validation. */
static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
	.get_modes = intel_hdmi_get_modes,
	.mode_valid = intel_hdmi_mode_valid,
	.best_encoder = intel_best_encoder,
};
/* Encoder vtable: only destruction is needed here. */
static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
	.destroy = intel_encoder_destroy,
};
/* Attach the user-visible "audio" and "Broadcast RGB" properties to the
 * HDMI connector; handled in intel_hdmi_set_property(). */
static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
}
/*
 * Create and register an HDMI encoder/connector pair for the port
 * behind @sdvox_reg: allocate the state, wire up the DRM objects,
 * select the DDC bus and hot-plug mask for the port, and apply the
 * G4X band-gap workaround.
 */
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;
	struct intel_encoder *intel_encoder;
	struct intel_connector *intel_connector;
	struct intel_hdmi *intel_hdmi;

	intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
	if (!intel_hdmi)
		return;

	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_hdmi);
		return;
	}

	intel_encoder = &intel_hdmi->base;
	drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	connector = &intel_connector->base;
	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
			   DRM_MODE_CONNECTOR_HDMIA);
	drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);

	intel_encoder->type = INTEL_OUTPUT_HDMI;

	connector->polled = DRM_CONNECTOR_POLL_HPD;
	connector->interlace_allowed = 0;
	connector->doublescan_allowed = 0;
	/* this encoder may drive either pipe */
	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);

	/* Set up the DDC bus. */
	if (sdvox_reg == SDVOB) {
		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
	} else if (sdvox_reg == SDVOC) {
		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
	} else if (sdvox_reg == HDMIB) {
		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
	} else if (sdvox_reg == HDMIC) {
		intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
	} else if (sdvox_reg == HDMID) {
		intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
	}

	intel_hdmi->sdvox_reg = sdvox_reg;

	drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);

	intel_hdmi_add_properties(intel_hdmi, connector);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.