repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
willemwouters/orangepi_kernel | arch/frv/mb93090-mb00/pci-frv.c | 7235 | 6090 | /* pci-frv.c: low-level PCI access routines
*
* Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
* - Derived from the i386 equivalent stuff
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include "pci-frv.h"
/*
* We need to avoid collisions with `mirrored' VGA ports
* and other strange ISA hardware, so we always want the
* addresses to be allocated in the 0x000-0x0ff region
* modulo 0x400.
*
* Why? Because some silly external IO cards only decode
* the low 10 bits of the IO address. The 0x00-0xff region
* is reserved for motherboard devices that decode all 16
* bits, so it's ok to allocate at, say, 0x2800-0x28ff,
* but we want to try to avoid allocating at 0x2900-0x2bff
* which might be mirrored at 0x0100-0x03ff.
*/
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
resource_size_t start = res->start;
if ((res->flags & IORESOURCE_IO) && (start & 0x300))
start = (start + 0x3ff) & ~0x3ff;
return start;
}
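/*
 * Worked example (illustrative): an I/O resource starting at 0x2940
 * has (0x2940 & 0x300) != 0, so it is rounded up to
 * (0x2940 + 0x3ff) & ~0x3ff = 0x2c00, whose low 10 bits decode as
 * 0x000, safely inside the 0x000-0x0ff region described above.
 */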
/*
* Handle resources of PCI devices. If the world were perfect, we could
* just allocate all the resource regions and do nothing more. It isn't.
* On the other hand, we cannot just re-allocate all devices, as it would
* require us to know lots of host bridge internals. So we attempt to
* keep as much of the original configuration as possible, but tweak it
* when it's found to be wrong.
*
* Known BIOS problems we have to work around:
* - I/O or memory regions not configured
* - regions configured, but not enabled in the command register
* - bogus I/O addresses above 64K used
* - expansion ROMs left enabled (this may sound harmless, but given
* that the PCI specs explicitly allow address decoders to be
* shared between expansion ROMs and other resource regions, it's
* at least dangerous)
*
* Our solution:
* (1) Allocate resources for all buses behind PCI-to-PCI bridges.
* This gives us fixed barriers on where we can allocate.
* (2) Allocate resources for all enabled devices. If there is
* a collision, just mark the resource as unallocated. Also
* disable expansion ROMs during this step.
* (3) Try to allocate resources for disabled devices. If the
* resources were assigned correctly, everything goes well,
* if they weren't, they won't disturb allocation of other
* resources.
* (4) Assign new addresses to resources which were either
* not configured at all or misconfigured. If explicitly
* requested by the user, configure expansion ROM address
* as well.
*/
static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
{
struct list_head *ln;
struct pci_bus *bus;
struct pci_dev *dev;
int idx;
struct resource *r;
/* Depth-First Search on bus tree */
for (ln=bus_list->next; ln != bus_list; ln=ln->next) {
bus = pci_bus_b(ln);
if ((dev = bus->self)) {
for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
r = &dev->resource[idx];
if (!r->start)
continue;
pci_claim_resource(dev, idx);
}
}
pcibios_allocate_bus_resources(&bus->children);
}
}
static void __init pcibios_allocate_resources(int pass)
{
struct pci_dev *dev = NULL;
int idx, disabled;
u16 command;
struct resource *r;
for_each_pci_dev(dev) {
pci_read_config_word(dev, PCI_COMMAND, &command);
for(idx = 0; idx < 6; idx++) {
r = &dev->resource[idx];
if (r->parent) /* Already allocated */
continue;
if (!r->start) /* Address not assigned at all */
continue;
if (r->flags & IORESOURCE_IO)
disabled = !(command & PCI_COMMAND_IO);
else
disabled = !(command & PCI_COMMAND_MEMORY);
if (pass == disabled) {
DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
r->start, r->end, r->flags, disabled, pass);
if (pci_claim_resource(dev, idx) < 0) {
/* We'll assign a new address later */
r->end -= r->start;
r->start = 0;
}
}
}
if (!pass) {
r = &dev->resource[PCI_ROM_RESOURCE];
if (r->flags & IORESOURCE_ROM_ENABLE) {
/* Turn the ROM off, leave the resource region, but keep it unregistered. */
u32 reg;
DBG("PCI: Switching off ROM of %s\n", pci_name(dev));
r->flags &= ~IORESOURCE_ROM_ENABLE;
pci_read_config_dword(dev, dev->rom_base_reg, &reg);
pci_write_config_dword(dev, dev->rom_base_reg, reg & ~PCI_ROM_ADDRESS_ENABLE);
}
}
}
}
static void __init pcibios_assign_resources(void)
{
struct pci_dev *dev = NULL;
int idx;
struct resource *r;
for_each_pci_dev(dev) {
int class = dev->class >> 8;
/* Don't touch classless devices and host bridges */
if (!class || class == PCI_CLASS_BRIDGE_HOST)
continue;
for(idx=0; idx<6; idx++) {
r = &dev->resource[idx];
/*
* Don't touch IDE controllers and I/O ports of video cards!
*/
if ((class == PCI_CLASS_STORAGE_IDE && idx < 4) ||
(class == PCI_CLASS_DISPLAY_VGA && (r->flags & IORESOURCE_IO)))
continue;
/*
* We shall assign a new address to this resource, either because
* the BIOS forgot to do so or because we have decided the old
* address was unusable for some reason.
*/
if (!r->start && r->end)
pci_assign_resource(dev, idx);
}
if (pci_probe & PCI_ASSIGN_ROMS) {
r = &dev->resource[PCI_ROM_RESOURCE];
r->end -= r->start;
r->start = 0;
if (r->end)
pci_assign_resource(dev, PCI_ROM_RESOURCE);
}
}
}
void __init pcibios_resource_survey(void)
{
DBG("PCI: Allocating resources\n");
pcibios_allocate_bus_resources(&pci_root_buses);
pcibios_allocate_resources(0);
pcibios_allocate_resources(1);
pcibios_assign_resources();
}
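/*
 * The calls above implement steps (1)-(4) of the strategy described
 * earlier: pcibios_allocate_bus_resources() is step (1), the pass-0
 * and pass-1 allocation calls are steps (2) and (3) for enabled and
 * disabled devices respectively, and pcibios_assign_resources() is
 * step (4).
 */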
| gpl-2.0 |
Docker-J/Sail_STOCK | net/ipv4/sysfs_net_ipv4.c | 8003 | 2305 | /*
* net/ipv4/sysfs_net_ipv4.c
*
* sysfs-based networking knobs (so we can, unlike with sysctl, control perms)
*
* Copyright (C) 2008 Google, Inc.
*
* Robert Love <rlove@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/init.h>
#include <net/tcp.h>
#define CREATE_IPV4_FILE(_name, _var) \
static ssize_t _name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf) \
{ \
return sprintf(buf, "%d\n", _var); \
} \
static ssize_t _name##_store(struct kobject *kobj, \
struct kobj_attribute *attr, \
const char *buf, size_t count) \
{ \
int val, ret; \
ret = sscanf(buf, "%d", &val); \
if (ret != 1) \
return -EINVAL; \
if (val < 0) \
return -EINVAL; \
_var = val; \
return count; \
} \
static struct kobj_attribute _name##_attr = \
__ATTR(_name, 0644, _name##_show, _name##_store)
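/*
 * Illustrative expansion: CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0])
 * defines tcp_wmem_min_show()/tcp_wmem_min_store() reading and writing
 * sysctl_tcp_wmem[0], plus a tcp_wmem_min_attr kobj_attribute with 0644
 * permissions.
 */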
CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]);
CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]);
CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]);
CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]);
CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);
static struct attribute *ipv4_attrs[] = {
&tcp_wmem_min_attr.attr,
&tcp_wmem_def_attr.attr,
&tcp_wmem_max_attr.attr,
&tcp_rmem_min_attr.attr,
&tcp_rmem_def_attr.attr,
&tcp_rmem_max_attr.attr,
NULL
};
static struct attribute_group ipv4_attr_group = {
.attrs = ipv4_attrs,
};
static __init int sysfs_ipv4_init(void)
{
struct kobject *ipv4_kobject;
int ret;
ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj);
if (!ipv4_kobject)
return -ENOMEM;
ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group);
if (ret) {
kobject_put(ipv4_kobject);
return ret;
}
return 0;
}
subsys_initcall(sysfs_ipv4_init);
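/*
 * Usage sketch (paths assume the standard /sys/kernel mount of
 * kernel_kobj):
 *   cat /sys/kernel/ipv4/tcp_wmem_max
 *   echo 4194304 > /sys/kernel/ipv4/tcp_wmem_max
 */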
| gpl-2.0 |
Klozz/XPerience_Kernel_msm8226_mmi_Falcon | arch/mips/kernel/spinlock_test.c | 8771 | 2381 | #include <linux/init.h>
#include <linux/kthread.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/spinlock.h>
static int ss_get(void *data, u64 *val)
{
ktime_t start, finish;
int loops;
int cont;
DEFINE_RAW_SPINLOCK(ss_spin);
loops = 1000000;
cont = 1;
start = ktime_get();
while (cont) {
raw_spin_lock(&ss_spin);
loops--;
if (loops == 0)
cont = 0;
raw_spin_unlock(&ss_spin);
}
finish = ktime_get();
*val = ktime_us_delta(finish, start);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n");
struct spin_multi_state {
raw_spinlock_t lock;
atomic_t start_wait;
atomic_t enter_wait;
atomic_t exit_wait;
int loops;
};
struct spin_multi_per_thread {
struct spin_multi_state *state;
ktime_t start;
};
static int multi_other(void *data)
{
int loops;
int cont;
struct spin_multi_per_thread *pt = data;
struct spin_multi_state *s = pt->state;
loops = s->loops;
cont = 1;
atomic_dec(&s->enter_wait);
while (atomic_read(&s->enter_wait))
; /* spin */
pt->start = ktime_get();
atomic_dec(&s->start_wait);
while (atomic_read(&s->start_wait))
; /* spin */
while (cont) {
raw_spin_lock(&s->lock);
loops--;
if (loops == 0)
cont = 0;
raw_spin_unlock(&s->lock);
}
atomic_dec(&s->exit_wait);
while (atomic_read(&s->exit_wait))
; /* spin */
return 0;
}
static int multi_get(void *data, u64 *val)
{
ktime_t finish;
struct spin_multi_state ms;
struct spin_multi_per_thread t1, t2;
ms.lock = __RAW_SPIN_LOCK_UNLOCKED("multi_get");
ms.loops = 1000000;
atomic_set(&ms.start_wait, 2);
atomic_set(&ms.enter_wait, 2);
atomic_set(&ms.exit_wait, 2);
t1.state = &ms;
t2.state = &ms;
kthread_run(multi_other, &t2, "multi_get");
multi_other(&t1);
finish = ktime_get();
*val = ktime_us_delta(finish, t1.start);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n");
extern struct dentry *mips_debugfs_dir;
static int __init spinlock_test(void)
{
struct dentry *d;
if (!mips_debugfs_dir)
return -ENODEV;
d = debugfs_create_file("spin_single", S_IRUGO,
mips_debugfs_dir, NULL,
&fops_ss);
if (!d)
return -ENOMEM;
d = debugfs_create_file("spin_multi", S_IRUGO,
mips_debugfs_dir, NULL,
&fops_multi);
if (!d)
return -ENOMEM;
return 0;
}
device_initcall(spinlock_test);
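/*
 * Usage sketch (assumes the usual "mips" directory under debugfs):
 *   cat /sys/kernel/debug/mips/spin_single  # uncontended lock/unlock cost, us
 *   cat /sys/kernel/debug/mips/spin_multi   # two-thread contention cost, us
 */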
| gpl-2.0 |
blkredstarV/kernel_asus_moorefield | sound/core/memalloc.c | 68 | 13849 | /*
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Takashi Iwai <tiwai@suse.de>
*
* Generic memory allocators
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/memalloc.h>
MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>, Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Memory allocator for ALSA system.");
MODULE_LICENSE("GPL");
/*
*/
static DEFINE_MUTEX(list_mutex);
static LIST_HEAD(mem_list_head);
/* buffer preservation list */
struct snd_mem_list {
struct snd_dma_buffer buffer;
unsigned int id;
struct list_head list;
};
/* id for pre-allocated buffers */
#define SNDRV_DMA_DEVICE_UNUSED (unsigned int)-1
/*
*
* Generic memory allocators
*
*/
static long snd_allocated_pages; /* holding the number of allocated pages */
static inline void inc_snd_pages(int order)
{
snd_allocated_pages += 1 << order;
}
static inline void dec_snd_pages(int order)
{
snd_allocated_pages -= 1 << order;
}
/**
* snd_malloc_pages - allocate pages with the given size
* @size: the size to allocate in bytes
* @gfp_flags: the allocation conditions, GFP_XXX
*
* Allocates the physically contiguous pages with the given size.
*
* Return: The pointer to the buffer, or %NULL if there is not enough memory.
*/
void *snd_malloc_pages(size_t size, gfp_t gfp_flags)
{
int pg;
void *res;
if (WARN_ON(!size))
return NULL;
if (WARN_ON(!gfp_flags))
return NULL;
gfp_flags |= __GFP_COMP; /* compound page lets parts be mapped */
pg = get_order(size);
if ((res = (void *) __get_free_pages(gfp_flags, pg)) != NULL)
inc_snd_pages(pg);
return res;
}
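/*
 * Typical use (illustrative):
 *   void *buf = snd_malloc_pages(8192, GFP_KERNEL);
 *   if (buf) {
 *           ...
 *           snd_free_pages(buf, 8192);
 *   }
 */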
/**
* snd_free_pages - release the pages
* @ptr: the buffer pointer to release
* @size: the allocated buffer size
*
* Releases the buffer allocated via snd_malloc_pages().
*/
void snd_free_pages(void *ptr, size_t size)
{
int pg;
if (ptr == NULL)
return;
pg = get_order(size);
dec_snd_pages(pg);
free_pages((unsigned long) ptr, pg);
}
/*
*
* Bus-specific memory allocators
*
*/
#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
{
int pg;
void *res;
gfp_t gfp_flags;
if (WARN_ON(!dma))
return NULL;
pg = get_order(size);
gfp_flags = GFP_KERNEL
| __GFP_COMP /* compound page lets parts be mapped */
| __GFP_NORETRY /* don't trigger OOM-killer */
| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
res = dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
if (res != NULL)
inc_snd_pages(pg);
return res;
}
/* free the coherent DMA pages */
static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
dma_addr_t dma)
{
int pg;
if (ptr == NULL)
return;
pg = get_order(size);
dec_snd_pages(pg);
dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
}
#endif /* CONFIG_HAS_DMA */
/*
*
* ALSA generic memory management
*
*/
/**
* snd_dma_alloc_pages - allocate the buffer area according to the given type
* @type: the DMA buffer type
* @device: the device pointer
* @size: the buffer size to allocate
* @dmab: buffer allocation record to store the allocated data
*
* Calls the memory-allocator function for the corresponding
* buffer type.
*
* Return: Zero if the buffer with the given size is allocated successfully,
* otherwise a negative value on error.
*/
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
struct snd_dma_buffer *dmab)
{
if (WARN_ON(!size))
return -ENXIO;
if (WARN_ON(!dmab))
return -ENXIO;
dmab->dev.type = type;
dmab->dev.dev = device;
dmab->bytes = 0;
switch (type) {
case SNDRV_DMA_TYPE_CONTINUOUS:
#ifdef CONFIG_XEN
dmab->area = snd_malloc_pages(size, __GFP_DMA32);
#else
dmab->area = snd_malloc_pages(size,
(__force gfp_t)(unsigned long)device);
#endif
dmab->addr = 0;
break;
#ifdef CONFIG_HAS_DMA
case SNDRV_DMA_TYPE_DEV:
dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
case SNDRV_DMA_TYPE_DEV_SG:
snd_malloc_sgbuf_pages(device, size, dmab, NULL);
break;
#endif
default:
printk(KERN_ERR "snd-malloc: invalid device type %d\n", type);
dmab->area = NULL;
dmab->addr = 0;
return -ENXIO;
}
if (! dmab->area)
return -ENOMEM;
dmab->bytes = size;
return 0;
}
/**
* snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
* @type: the DMA buffer type
* @device: the device pointer
* @size: the buffer size to allocate
* @dmab: buffer allocation record to store the allocated data
*
* Calls the memory-allocator function for the corresponding
* buffer type. When no space is left, this function reduces the size and
* tries to allocate again. The size actually allocated is stored in
* dmab->bytes.
*
* Return: Zero if the buffer with the given size is allocated successfully,
* otherwise a negative value on error.
*/
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
struct snd_dma_buffer *dmab)
{
int err;
while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
size_t aligned_size;
if (err != -ENOMEM)
return err;
if (size <= PAGE_SIZE)
return -ENOMEM;
aligned_size = PAGE_SIZE << get_order(size);
if (size != aligned_size)
size = aligned_size;
else
size >>= 1;
}
if (! dmab->area)
return -ENOMEM;
return 0;
}
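/*
 * Example (illustrative): if a 192 kB request fails with -ENOMEM, the
 * size is first rounded up to the 256 kB page order actually used by
 * the allocator, then halved on each subsequent failure (128 kB,
 * 64 kB, ...) until the allocation succeeds or the size reaches
 * PAGE_SIZE.
 */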
/**
* snd_dma_free_pages - release the allocated buffer
* @dmab: the buffer allocation record to release
*
* Releases the buffer allocated via snd_dma_alloc_pages().
*/
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
switch (dmab->dev.type) {
case SNDRV_DMA_TYPE_CONTINUOUS:
snd_free_pages(dmab->area, dmab->bytes);
break;
#ifdef CONFIG_HAS_DMA
case SNDRV_DMA_TYPE_DEV:
snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
case SNDRV_DMA_TYPE_DEV_SG:
snd_free_sgbuf_pages(dmab);
break;
#endif
default:
printk(KERN_ERR "snd-malloc: invalid device type %d\n", dmab->dev.type);
}
}
/**
* snd_dma_get_reserved_buf - get the reserved buffer for the given device
* @dmab: the buffer allocation record to store
* @id: the buffer id
*
* Looks through the reserved-buffer list and re-uses the buffer if a
* matching one is found. The buffer is then removed from the reserved list.
*
* Return: The size of buffer if the buffer is found, or zero if not found.
*/
size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
struct snd_mem_list *mem;
if (WARN_ON(!dmab))
return 0;
mutex_lock(&list_mutex);
list_for_each_entry(mem, &mem_list_head, list) {
if (mem->id == id &&
(mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
struct device *dev = dmab->dev.dev;
list_del(&mem->list);
*dmab = mem->buffer;
if (dmab->dev.dev == NULL)
dmab->dev.dev = dev;
kfree(mem);
mutex_unlock(&list_mutex);
return dmab->bytes;
}
}
mutex_unlock(&list_mutex);
return 0;
}
/**
* snd_dma_reserve_buf - reserve the buffer
* @dmab: the buffer to reserve
* @id: the buffer id
*
* Reserves the given buffer as a reserved buffer.
*
* Return: Zero if successful, or a negative code on error.
*/
int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
{
struct snd_mem_list *mem;
if (WARN_ON(!dmab))
return -EINVAL;
mem = kmalloc(sizeof(*mem), GFP_KERNEL);
if (! mem)
return -ENOMEM;
mutex_lock(&list_mutex);
mem->buffer = *dmab;
mem->id = id;
list_add_tail(&mem->list, &mem_list_head);
mutex_unlock(&list_mutex);
return 0;
}
/*
* purge all reserved buffers
*/
static void free_all_reserved_pages(void)
{
struct list_head *p;
struct snd_mem_list *mem;
mutex_lock(&list_mutex);
while (! list_empty(&mem_list_head)) {
p = mem_list_head.next;
mem = list_entry(p, struct snd_mem_list, list);
list_del(p);
snd_dma_free_pages(&mem->buffer);
kfree(mem);
}
mutex_unlock(&list_mutex);
}
#ifdef CONFIG_PROC_FS
/*
* proc file interface
*/
#define SND_MEM_PROC_FILE "driver/snd-page-alloc"
static struct proc_dir_entry *snd_mem_proc;
static int snd_mem_proc_read(struct seq_file *seq, void *offset)
{
long pages = snd_allocated_pages >> (PAGE_SHIFT-12);
struct snd_mem_list *mem;
int devno;
static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG" };
mutex_lock(&list_mutex);
seq_printf(seq, "pages : %li bytes (%li pages per %likB)\n",
pages * PAGE_SIZE, pages, PAGE_SIZE / 1024);
devno = 0;
list_for_each_entry(mem, &mem_list_head, list) {
devno++;
seq_printf(seq, "buffer %d : ID %08x : type %s\n",
devno, mem->id, types[mem->buffer.dev.type]);
seq_printf(seq, " addr = 0x%lx, size = %d bytes\n",
(unsigned long)mem->buffer.addr,
(int)mem->buffer.bytes);
}
mutex_unlock(&list_mutex);
return 0;
}
static int snd_mem_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, snd_mem_proc_read, NULL);
}
/* FIXME: for pci only - other bus? */
#ifdef CONFIG_PCI
#define gettoken(bufp) strsep(bufp, " \t\n")
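/*
 * Illustrative usage (device IDs and sizes are hypothetical):
 *   echo "add 0x8086 0x2668 0 512k 2" > /proc/driver/snd-page-alloc
 * reserves two 512 kB buffers for PCI device 8086:2668 (a mask of 0
 * leaves the device DMA mask unchanged), and
 *   echo "erase" > /proc/driver/snd-page-alloc
 * frees all reserved buffers.
 */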
static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
size_t count, loff_t * ppos)
{
char buf[128];
char *token, *p;
if (count > sizeof(buf) - 1)
return -EINVAL;
if (copy_from_user(buf, buffer, count))
return -EFAULT;
buf[count] = '\0';
p = buf;
token = gettoken(&p);
if (! token || *token == '#')
return count;
if (strcmp(token, "add") == 0) {
char *endp;
int vendor, device, size, buffers;
long mask;
int i, alloced;
struct pci_dev *pci;
if ((token = gettoken(&p)) == NULL ||
(vendor = simple_strtol(token, NULL, 0)) <= 0 ||
(token = gettoken(&p)) == NULL ||
(device = simple_strtol(token, NULL, 0)) <= 0 ||
(token = gettoken(&p)) == NULL ||
(mask = simple_strtol(token, NULL, 0)) < 0 ||
(token = gettoken(&p)) == NULL ||
(size = memparse(token, &endp)) < 64*1024 ||
size > 16*1024*1024 /* too big */ ||
(token = gettoken(&p)) == NULL ||
(buffers = simple_strtol(token, NULL, 0)) <= 0 ||
buffers > 4) {
printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
return count;
}
vendor &= 0xffff;
device &= 0xffff;
alloced = 0;
pci = NULL;
while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
if (mask > 0 && mask < 0xffffffff) {
if (pci_set_dma_mask(pci, mask) < 0 ||
pci_set_consistent_dma_mask(pci, mask) < 0) {
printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
pci_dev_put(pci);
return count;
}
}
for (i = 0; i < buffers; i++) {
struct snd_dma_buffer dmab;
memset(&dmab, 0, sizeof(dmab));
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
size, &dmab) < 0) {
printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
pci_dev_put(pci);
return count;
}
snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
}
alloced++;
}
if (! alloced) {
for (i = 0; i < buffers; i++) {
struct snd_dma_buffer dmab;
memset(&dmab, 0, sizeof(dmab));
/* FIXME: We can allocate only in ZONE_DMA
* without a device pointer!
*/
if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
size, &dmab) < 0) {
printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
break;
}
snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
}
}
} else if (strcmp(token, "erase") == 0)
/* FIXME: need for releasing each buffer chunk? */
free_all_reserved_pages();
else
printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
return count;
}
#endif /* CONFIG_PCI */
static const struct file_operations snd_mem_proc_fops = {
.owner = THIS_MODULE,
.open = snd_mem_proc_open,
.read = seq_read,
#ifdef CONFIG_PCI
.write = snd_mem_proc_write,
#endif
.llseek = seq_lseek,
.release = single_release,
};
#endif /* CONFIG_PROC_FS */
/*
* module entry
*/
static int __init snd_mem_init(void)
{
#ifdef CONFIG_PROC_FS
snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
&snd_mem_proc_fops);
#endif
return 0;
}
static void __exit snd_mem_exit(void)
{
remove_proc_entry(SND_MEM_PROC_FILE, NULL);
free_all_reserved_pages();
if (snd_allocated_pages > 0)
printk(KERN_ERR "snd-malloc: Memory leak? pages not freed = %li\n", snd_allocated_pages);
}
module_init(snd_mem_init)
module_exit(snd_mem_exit)
/*
* exports
*/
EXPORT_SYMBOL(snd_dma_alloc_pages);
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
EXPORT_SYMBOL(snd_dma_free_pages);
EXPORT_SYMBOL(snd_dma_get_reserved_buf);
EXPORT_SYMBOL(snd_dma_reserve_buf);
EXPORT_SYMBOL(snd_malloc_pages);
EXPORT_SYMBOL(snd_free_pages);
| gpl-2.0 |
GaloisInc/linux-deadline | sound/soc/s3c24xx/neo1973_gta02_wm8753.c | 68 | 13029 | /*
* neo1973_gta02_wm8753.c -- SoC audio for Openmoko Freerunner(GTA02)
*
* Copyright 2007 Openmoko Inc
* Author: Graeme Gregory <graeme@openmoko.org>
* Copyright 2007 Wolfson Microelectronics PLC.
* Author: Graeme Gregory <linux@wolfsonmicro.com>
* Copyright 2009 Wolfson Microelectronics
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <asm/mach-types.h>
#include <plat/regs-iis.h>
#include <mach/regs-clock.h>
#include <asm/io.h>
#include <mach/gta02.h>
#include "../codecs/wm8753.h"
#include "s3c-dma.h"
#include "s3c24xx-i2s.h"
static struct snd_soc_card neo1973_gta02;
static int neo1973_gta02_hifi_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
unsigned int pll_out = 0, bclk = 0;
int ret = 0;
unsigned long iis_clkrate;
iis_clkrate = s3c24xx_i2s_get_clockrate();
switch (params_rate(params)) {
case 8000:
case 16000:
pll_out = 12288000;
break;
case 48000:
bclk = WM8753_BCLK_DIV_4;
pll_out = 12288000;
break;
case 96000:
bclk = WM8753_BCLK_DIV_2;
pll_out = 12288000;
break;
case 11025:
bclk = WM8753_BCLK_DIV_16;
pll_out = 11289600;
break;
case 22050:
bclk = WM8753_BCLK_DIV_8;
pll_out = 11289600;
break;
case 44100:
bclk = WM8753_BCLK_DIV_4;
pll_out = 11289600;
break;
case 88200:
bclk = WM8753_BCLK_DIV_2;
pll_out = 11289600;
break;
}
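/*
 * Note: pll_out follows the two standard audio master clock families:
 * 12.288 MHz (256 * 48 kHz) for the 8/16/48/96 kHz rates and
 * 11.2896 MHz (256 * 44.1 kHz) for the 11.025/22.05/44.1/88.2 kHz rates.
 */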
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai,
SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM);
if (ret < 0)
return ret;
/* set cpu DAI configuration */
ret = snd_soc_dai_set_fmt(cpu_dai,
SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM);
if (ret < 0)
return ret;
/* set the codec system clock for DAC and ADC */
ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_MCLK, pll_out,
SND_SOC_CLOCK_IN);
if (ret < 0)
return ret;
/* set MCLK division for sample rate */
ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
S3C2410_IISMOD_32FS);
if (ret < 0)
return ret;
/* set codec BCLK division for sample rate */
ret = snd_soc_dai_set_clkdiv(codec_dai,
WM8753_BCLKDIV, bclk);
if (ret < 0)
return ret;
/* set prescaler division for sample rate */
ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
S3C24XX_PRESCALE(4, 4));
if (ret < 0)
return ret;
/* codec PLL input is PCLK/4 */
ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0,
iis_clkrate / 4, pll_out);
if (ret < 0)
return ret;
return 0;
}
static int neo1973_gta02_hifi_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
/* disable the PLL */
return snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0, 0, 0);
}
/*
* Neo1973 WM8753 HiFi DAI operations.
*/
static struct snd_soc_ops neo1973_gta02_hifi_ops = {
.hw_params = neo1973_gta02_hifi_hw_params,
.hw_free = neo1973_gta02_hifi_hw_free,
};
static int neo1973_gta02_voice_hw_params(
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
unsigned int pcmdiv = 0;
int ret = 0;
unsigned long iis_clkrate;
iis_clkrate = s3c24xx_i2s_get_clockrate();
if (params_rate(params) != 8000)
return -EINVAL;
if (params_channels(params) != 1)
return -EINVAL;
pcmdiv = WM8753_PCM_DIV_6; /* 2.048 MHz */
/* todo: gg check mode (DSP_B) against CSR datasheet */
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B |
SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
/* set the codec system clock for DAC and ADC */
ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_PCMCLK,
12288000, SND_SOC_CLOCK_IN);
if (ret < 0)
return ret;
/* set codec PCM division for sample rate */
ret = snd_soc_dai_set_clkdiv(codec_dai, WM8753_PCMDIV,
pcmdiv);
if (ret < 0)
return ret;
/* configure and enable PLL for 12.288MHz output */
ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0,
iis_clkrate / 4, 12288000);
if (ret < 0)
return ret;
return 0;
}
static int neo1973_gta02_voice_hw_free(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
/* disable the PLL */
return snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0, 0, 0);
}
static struct snd_soc_ops neo1973_gta02_voice_ops = {
.hw_params = neo1973_gta02_voice_hw_params,
.hw_free = neo1973_gta02_voice_hw_free,
};
#define LM4853_AMP 1
#define LM4853_SPK 2
static u8 lm4853_state;
/* This has no effect, it exists only to maintain compatibility with
* existing ALSA state files.
*/
static int lm4853_set_state(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int val = ucontrol->value.integer.value[0];
if (val)
lm4853_state |= LM4853_AMP;
else
lm4853_state &= ~LM4853_AMP;
return 0;
}
static int lm4853_get_state(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.integer.value[0] = lm4853_state & LM4853_AMP;
return 0;
}
static int lm4853_set_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int val = ucontrol->value.integer.value[0];
if (val) {
lm4853_state |= LM4853_SPK;
gpio_set_value(GTA02_GPIO_HP_IN, 0);
} else {
lm4853_state &= ~LM4853_SPK;
gpio_set_value(GTA02_GPIO_HP_IN, 1);
}
return 0;
}
static int lm4853_get_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.integer.value[0] = (lm4853_state & LM4853_SPK) >> 1;
return 0;
}
static int lm4853_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k,
int event)
{
gpio_set_value(GTA02_GPIO_AMP_SHUT, SND_SOC_DAPM_EVENT_OFF(event));
return 0;
}
static const struct snd_soc_dapm_widget wm8753_dapm_widgets[] = {
SND_SOC_DAPM_SPK("Stereo Out", lm4853_event),
SND_SOC_DAPM_LINE("GSM Line Out", NULL),
SND_SOC_DAPM_LINE("GSM Line In", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
SND_SOC_DAPM_MIC("Handset Mic", NULL),
SND_SOC_DAPM_SPK("Handset Spk", NULL),
};
/* example machine audio map connections */
static const struct snd_soc_dapm_route audio_map[] = {
/* Connections to the lm4853 amp */
{"Stereo Out", NULL, "LOUT1"},
{"Stereo Out", NULL, "ROUT1"},
/* Connections to the GSM Module */
{"GSM Line Out", NULL, "MONO1"},
{"GSM Line Out", NULL, "MONO2"},
{"RXP", NULL, "GSM Line In"},
{"RXN", NULL, "GSM Line In"},
/* Connections to Headset */
{"MIC1", NULL, "Mic Bias"},
{"Mic Bias", NULL, "Headset Mic"},
/* Call Mic */
{"MIC2", NULL, "Mic Bias"},
{"MIC2N", NULL, "Mic Bias"},
{"Mic Bias", NULL, "Handset Mic"},
/* Call Speaker */
{"Handset Spk", NULL, "LOUT2"},
{"Handset Spk", NULL, "ROUT2"},
/* Connect the ALC pins */
{"ACIN", NULL, "ACOP"},
};
static const struct snd_kcontrol_new wm8753_neo1973_gta02_controls[] = {
SOC_DAPM_PIN_SWITCH("Stereo Out"),
SOC_DAPM_PIN_SWITCH("GSM Line Out"),
SOC_DAPM_PIN_SWITCH("GSM Line In"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
SOC_DAPM_PIN_SWITCH("Handset Mic"),
SOC_DAPM_PIN_SWITCH("Handset Spk"),
/* This has no effect, it exists only to maintain compatibility with
* existing ALSA state files.
*/
SOC_SINGLE_EXT("Amp State Switch", 6, 0, 1, 0,
lm4853_get_state,
lm4853_set_state),
SOC_SINGLE_EXT("Amp Spk Switch", 7, 0, 1, 0,
lm4853_get_spk,
lm4853_set_spk),
};
/*
* This is an example machine initialisation for a wm8753 connected to a
* neo1973 GTA02.
*/
static int neo1973_gta02_wm8753_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
int err;
/* set up NC codec pins */
snd_soc_dapm_nc_pin(codec, "OUT3");
snd_soc_dapm_nc_pin(codec, "OUT4");
snd_soc_dapm_nc_pin(codec, "LINE1");
snd_soc_dapm_nc_pin(codec, "LINE2");
/* Add neo1973 gta02 specific widgets */
snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets,
ARRAY_SIZE(wm8753_dapm_widgets));
/* add neo1973 gta02 specific controls */
err = snd_soc_add_controls(codec, wm8753_neo1973_gta02_controls,
ARRAY_SIZE(wm8753_neo1973_gta02_controls));
if (err < 0)
return err;
/* set up neo1973 gta02 specific audio path audio_map */
snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
/* set endpoints to default off mode */
snd_soc_dapm_disable_pin(codec, "Stereo Out");
snd_soc_dapm_disable_pin(codec, "GSM Line Out");
snd_soc_dapm_disable_pin(codec, "GSM Line In");
snd_soc_dapm_disable_pin(codec, "Headset Mic");
snd_soc_dapm_disable_pin(codec, "Handset Mic");
snd_soc_dapm_disable_pin(codec, "Handset Spk");
/* allow audio paths from the GSM modem to run during suspend */
snd_soc_dapm_ignore_suspend(codec, "Stereo Out");
snd_soc_dapm_ignore_suspend(codec, "GSM Line Out");
snd_soc_dapm_ignore_suspend(codec, "GSM Line In");
snd_soc_dapm_ignore_suspend(codec, "Headset Mic");
snd_soc_dapm_ignore_suspend(codec, "Handset Mic");
snd_soc_dapm_ignore_suspend(codec, "Handset Spk");
snd_soc_dapm_sync(codec);
return 0;
}
/*
* BT Codec DAI
*/
static struct snd_soc_dai_driver bt_dai = {
.name = "bluetooth-dai",
.playback = {
.channels_min = 1,
.channels_max = 1,
.rates = SNDRV_PCM_RATE_8000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,},
.capture = {
.channels_min = 1,
.channels_max = 1,
.rates = SNDRV_PCM_RATE_8000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,},
};
static struct snd_soc_dai_link neo1973_gta02_dai[] = {
{ /* HiFi Playback - for simultaneous use with voice below */
.name = "WM8753",
.stream_name = "WM8753 HiFi",
.cpu_dai_name = "s3c24xx-i2s",
.codec_dai_name = "wm8753-hifi",
.init = neo1973_gta02_wm8753_init,
.platform_name = "s3c24xx-pcm-audio",
.codec_name = "wm8753-codec.0-0x1a",
.ops = &neo1973_gta02_hifi_ops,
},
{ /* Voice via BT */
.name = "Bluetooth",
.stream_name = "Voice",
.cpu_dai_name = "bluetooth-dai",
.codec_dai_name = "wm8753-voice",
.ops = &neo1973_gta02_voice_ops,
.codec_name = "wm8753-codec.0-0x1a",
.platform_name = "s3c24xx-pcm-audio",
},
};
static struct snd_soc_card neo1973_gta02 = {
.name = "neo1973-gta02",
.dai_link = neo1973_gta02_dai,
.num_links = ARRAY_SIZE(neo1973_gta02_dai),
};
static struct platform_device *neo1973_gta02_snd_device;
static int __init neo1973_gta02_init(void)
{
int ret;
if (!machine_is_neo1973_gta02()) {
printk(KERN_INFO
"Only GTA02 is supported by this ASoC driver\n");
return -ENODEV;
}
neo1973_gta02_snd_device = platform_device_alloc("soc-audio", -1);
if (!neo1973_gta02_snd_device)
return -ENOMEM;
/* register bluetooth DAI here */
ret = snd_soc_register_dai(&neo1973_gta02_snd_device->dev, -1, &bt_dai);
if (ret) {
platform_device_put(neo1973_gta02_snd_device);
return ret;
}
platform_set_drvdata(neo1973_gta02_snd_device, &neo1973_gta02);
ret = platform_device_add(neo1973_gta02_snd_device);
if (ret) {
platform_device_put(neo1973_gta02_snd_device);
return ret;
}
/* Initialise GPIOs used by amp */
ret = gpio_request(GTA02_GPIO_HP_IN, "GTA02_HP_IN");
if (ret) {
pr_err("gta02_wm8753: Failed to register GPIO %d\n", GTA02_GPIO_HP_IN);
goto err_unregister_device;
}
ret = gpio_direction_output(GTA02_GPIO_HP_IN, 1);
if (ret) {
pr_err("gta02_wm8753: Failed to configure GPIO %d\n", GTA02_GPIO_HP_IN);
goto err_free_gpio_hp_in;
}
ret = gpio_request(GTA02_GPIO_AMP_SHUT, "GTA02_AMP_SHUT");
if (ret) {
pr_err("gta02_wm8753: Failed to register GPIO %d\n", GTA02_GPIO_AMP_SHUT);
goto err_free_gpio_hp_in;
}
ret = gpio_direction_output(GTA02_GPIO_AMP_SHUT, 1);
if (ret) {
pr_err("gta02_wm8753: Failed to configure GPIO %d\n", GTA02_GPIO_AMP_SHUT);
goto err_free_gpio_amp_shut;
}
return 0;
err_free_gpio_amp_shut:
gpio_free(GTA02_GPIO_AMP_SHUT);
err_free_gpio_hp_in:
gpio_free(GTA02_GPIO_HP_IN);
err_unregister_device:
platform_device_unregister(neo1973_gta02_snd_device);
return ret;
}
module_init(neo1973_gta02_init);
static void __exit neo1973_gta02_exit(void)
{
snd_soc_unregister_dai(&neo1973_gta02_snd_device->dev, -1);
platform_device_unregister(neo1973_gta02_snd_device);
gpio_free(GTA02_GPIO_HP_IN);
gpio_free(GTA02_GPIO_AMP_SHUT);
}
module_exit(neo1973_gta02_exit);
/* Module information */
MODULE_AUTHOR("Graeme Gregory, graeme@openmoko.org");
MODULE_DESCRIPTION("ALSA SoC WM8753 Neo1973 GTA02");
MODULE_LICENSE("GPL");
| gpl-2.0 |
TheBootloader/android_kernel_samsung_msm8930-common | arch/arm/mach-msm/bms-batterydata-cane.c | 68 | 3522 | /* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/mfd/pm8xxx/batterydata-lib.h>
static struct single_row_lut fcc_temp = {
.x = {-20, 0, 25, 40, 60},
.y = {1802, 1816, 1813, 1813, 1800},
.cols = 5
};
static struct single_row_lut fcc_sf = {
.x = {0},
.y = {100},
.cols = 1
};
static struct sf_lut rbatt_sf = {
.rows = 30,
.cols = 5,
.row_entries = {-20, 0, 25, 40, 60},
.percent = {100, 95, 90, 85, 80, 75, 70, 65, 60, 55, 50, 45, 40, 35, 30, 25, 20, 16, 13, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1},
.sf = {
{1161, 282, 100, 80, 71},
{1107, 284, 100, 80, 72},
{1052, 287, 101, 81, 72},
{989, 287, 105, 83, 74},
{968, 295, 108, 85, 76},
{881, 292, 116, 92, 78},
{870, 252, 116, 91, 80},
{861, 255, 125, 97, 82},
{868, 252, 118, 100, 86},
{895, 249, 101, 88, 81},
{933, 248, 97, 80, 73},
{971, 249, 98, 81, 73},
{1011, 256, 99, 83, 77},
{1054, 274, 100, 83, 79},
{1104, 298, 103, 83, 78},
{1161, 324, 107, 83, 72},
{1243, 347, 107, 83, 73},
{1383, 366, 104, 83, 74},
{1496, 390, 102, 80, 72},
{1423, 387, 101, 82, 74},
{1262, 380, 101, 83, 75},
{1284, 387, 104, 84, 77},
{1400, 405, 107, 87, 79},
{1523, 426, 112, 90, 81},
{1666, 446, 115, 92, 82},
{1854, 462, 116, 90, 79},
{2118, 491, 111, 87, 78},
{2512, 546, 116, 92, 82},
{3210, 664, 134, 107, 95},
{7092, 150448, 13159, 13954, 143},
}
};
static struct pc_temp_ocv_lut pc_temp_ocv = {
.rows = 31,
.cols = 5,
.temp = {-20, 0, 25, 40, 60},
.percent = {100, 95, 90, 85, 80, 75, 70, 65, 60, 55, 50, 45, 40, 35, 30, 25, 20, 16, 13, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0},
.ocv = {
{4323, 4320, 4317, 4315, 4307},
{4218, 4240, 4247, 4247, 4243},
{4155, 4184, 4190, 4190, 4187},
{4100, 4132, 4137, 4137, 4133},
{4058, 4084, 4086, 4086, 4082},
{3955, 4026, 4044, 4043, 4035},
{3911, 3946, 3979, 3988, 3991},
{3869, 3913, 3952, 3955, 3952},
{3840, 3882, 3909, 3916, 3915},
{3821, 3851, 3864, 3870, 3871},
{3808, 3825, 3833, 3834, 3835},
{3796, 3804, 3811, 3812, 3812},
{3784, 3789, 3794, 3794, 3794},
{3771, 3780, 3780, 3779, 3779},
{3758, 3771, 3771, 3767, 3763},
{3744, 3758, 3762, 3754, 3740},
{3728, 3736, 3744, 3734, 3720},
{3714, 3714, 3715, 3707, 3694},
{3700, 3703, 3692, 3684, 3673},
{3689, 3698, 3688, 3680, 3669},
{3682, 3695, 3688, 3680, 3669},
{3674, 3692, 3686, 3679, 3668},
{3664, 3689, 3685, 3678, 3666},
{3652, 3684, 3684, 3676, 3664},
{3635, 3672, 3678, 3670, 3658},
{3612, 3646, 3661, 3650, 3634},
{3580, 3600, 3616, 3603, 3589},
{3535, 3528, 3550, 3538, 3526},
{3470, 3415, 3459, 3449, 3442},
{3361, 3248, 3309, 3300, 3302},
{3000, 3000, 3000, 3000, 3000}
}
};
struct bms_battery_data Samsung_8930_Serrano_1900mAh_data = {
.fcc = 1800,
.fcc_temp_lut = &fcc_temp,
.fcc_sf_lut = &fcc_sf,
.pc_temp_ocv_lut = &pc_temp_ocv,
.rbatt_sf_lut = &rbatt_sf,
.default_rbatt_mohm = 206,
};
| gpl-2.0 |
varigit/kernel-VAR-SOM-AMxx | drivers/acpi/acpica/evgpeinit.c | 324 | 13884 | /******************************************************************************
*
* Module Name: evgpeinit - System GPE initialization and update
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evgpeinit")
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/*
* Note: History of _PRW support in ACPICA
*
* Originally (2000 - 2010), the GPE initialization code performed a walk of
* the entire namespace to execute the _PRW methods and detect all GPEs
* capable of waking the system.
*
* As of 10/2010, the _PRW method execution has been removed since it is
* actually unnecessary. The host OS must in fact execute all _PRW methods
* in order to identify the device/power-resource dependencies. We now put
* the onus on the host OS to identify the wake GPEs as part of this process
* and to inform ACPICA of these GPEs via the acpi_setup_gpe_for_wake interface. This
* not only reduces the complexity of the ACPICA initialization code, but in
* some cases (on systems with very large namespaces) it should reduce the
* kernel boot time as well.
*/
/*******************************************************************************
*
* FUNCTION: acpi_ev_gpe_initialize
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Initialize the GPE data structures and the FADT GPE 0/1 blocks
*
******************************************************************************/
acpi_status acpi_ev_gpe_initialize(void)
{
u32 register_count0 = 0;
u32 register_count1 = 0;
u32 gpe_number_max = 0;
acpi_status status;
ACPI_FUNCTION_TRACE(ev_gpe_initialize);
ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
"Initializing General Purpose Events (GPEs):\n"));
status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/*
* Initialize the GPE Block(s) defined in the FADT
*
* Why the GPE register block lengths are divided by 2: From the ACPI
* Spec, section "General-Purpose Event Registers", we have:
*
* "Each register block contains two registers of equal length
* GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
* GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
* The length of the GPE1_STS and GPE1_EN registers is equal to
* half the GPE1_LEN. If a generic register block is not supported
* then its respective block pointer and block length values in the
* FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
* to be the same size."
*/
/*
* Determine the maximum GPE number for this machine.
*
* Note: both GPE0 and GPE1 are optional, and either can exist without
* the other.
*
* If EITHER the register length OR the block address are zero, then that
* particular block is not supported.
*/
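/*
 * Worked example (illustrative): a FADT GPE0_BLK_LEN of 8 bytes yields
 * register_count0 = 8 / 2 = 4 STS/EN register pairs, i.e.
 * 4 * ACPI_GPE_REGISTER_WIDTH (8 bits) = 32 GPEs, numbered 0x00-0x1F.
 */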
if (acpi_gbl_FADT.gpe0_block_length &&
acpi_gbl_FADT.xgpe0_block.address) {
/* GPE block 0 exists (has both length and address > 0) */
register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
gpe_number_max =
(register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
/* Install GPE Block 0 */
status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
&acpi_gbl_FADT.xgpe0_block,
register_count0, 0,
acpi_gbl_FADT.sci_interrupt,
&acpi_gbl_gpe_fadt_blocks[0]);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not create GPE Block 0"));
}
}
if (acpi_gbl_FADT.gpe1_block_length &&
acpi_gbl_FADT.xgpe1_block.address) {
/* GPE block 1 exists (has both length and address > 0) */
register_count1 = (u16)(acpi_gbl_FADT.gpe1_block_length / 2);
/* Check for GPE0/GPE1 overlap (if both banks exist) */
if ((register_count0) &&
(gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
ACPI_ERROR((AE_INFO,
"GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
"(GPE %u to %u) - Ignoring GPE1",
gpe_number_max, acpi_gbl_FADT.gpe1_base,
acpi_gbl_FADT.gpe1_base +
((register_count1 *
ACPI_GPE_REGISTER_WIDTH) - 1)));
/* Ignore GPE1 block by setting the register count to zero */
register_count1 = 0;
} else {
/* Install GPE Block 1 */
status =
acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
&acpi_gbl_FADT.xgpe1_block,
register_count1,
acpi_gbl_FADT.gpe1_base,
acpi_gbl_FADT.
sci_interrupt,
&acpi_gbl_gpe_fadt_blocks
[1]);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not create GPE Block 1"));
}
/*
* GPE0 and GPE1 do not have to be contiguous in the GPE number
* space. However, GPE0 always starts at GPE number zero.
*/
gpe_number_max = acpi_gbl_FADT.gpe1_base +
((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
}
}
/* Exit if there are no GPE registers */
if ((register_count0 + register_count1) == 0) {
/* GPEs are not required by ACPI, this is OK */
ACPI_DEBUG_PRINT((ACPI_DB_INIT,
"There are no GPE blocks defined in the FADT\n"));
status = AE_OK;
goto cleanup;
}
cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_update_gpes
*
* PARAMETERS: table_owner_id - ID of the newly-loaded ACPI table
*
* RETURN: None
*
* DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
* result of a Load() or load_table() operation. If new GPE
* methods have been installed, register the new methods.
*
******************************************************************************/
void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
{
struct acpi_gpe_xrupt_info *gpe_xrupt_info;
struct acpi_gpe_block_info *gpe_block;
struct acpi_gpe_walk_info walk_info;
acpi_status status = AE_OK;
/*
* Find any _Lxx/_Exx GPE methods that have just been loaded.
*
* Any GPEs that correspond to new _Lxx/_Exx methods are immediately
* enabled.
*
* Examine the namespace underneath each gpe_device within the
* gpe_block lists.
*/
status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
if (ACPI_FAILURE(status)) {
return;
}
walk_info.count = 0;
walk_info.owner_id = table_owner_id;
walk_info.execute_by_owner_id = TRUE;
/* Walk the interrupt level descriptor list */
gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
while (gpe_xrupt_info) {
/* Walk all Gpe Blocks attached to this interrupt level */
gpe_block = gpe_xrupt_info->gpe_block_list_head;
while (gpe_block) {
walk_info.gpe_block = gpe_block;
walk_info.gpe_device = gpe_block->node;
status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
walk_info.gpe_device,
ACPI_UINT32_MAX,
ACPI_NS_WALK_NO_UNLOCK,
acpi_ev_match_gpe_method,
NULL, &walk_info, NULL);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"While decoding _Lxx/_Exx methods"));
}
gpe_block = gpe_block->next;
}
gpe_xrupt_info = gpe_xrupt_info->next;
}
if (walk_info.count) {
ACPI_INFO((AE_INFO, "Enabled %u new GPEs", walk_info.count));
}
(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
return;
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_match_gpe_method
*
* PARAMETERS: Callback from walk_namespace
*
* RETURN: Status
*
* DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
* control method under the _GPE portion of the namespace.
* Extract the name and GPE type from the object, saving this
* information for quick lookup during GPE dispatch. Allows a
* per-owner_id evaluation if execute_by_owner_id is TRUE in the
* walk_info parameter block.
*
* The name of each GPE control method is of the form:
* "_Lxx" or "_Exx", where:
* L - means that the GPE is level triggered
* E - means that the GPE is edge triggered
* xx - is the GPE number [in HEX]
*
* If walk_info->execute_by_owner_id is TRUE, we only examine GPE methods
* owned by that owner_id.
*
******************************************************************************/
acpi_status
acpi_ev_match_gpe_method(acpi_handle obj_handle,
u32 level, void *context, void **return_value)
{
struct acpi_namespace_node *method_node =
ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
struct acpi_gpe_walk_info *walk_info =
ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
struct acpi_gpe_event_info *gpe_event_info;
u32 gpe_number;
char name[ACPI_NAME_SIZE + 1];
u8 type;
ACPI_FUNCTION_TRACE(ev_match_gpe_method);
/* Check if requested owner_id matches this owner_id */
if ((walk_info->execute_by_owner_id) &&
(method_node->owner_id != walk_info->owner_id)) {
return_ACPI_STATUS(AE_OK);
}
/*
* Match and decode the _Lxx and _Exx GPE method names
*
* 1) Extract the method name and null terminate it
*/
ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
name[ACPI_NAME_SIZE] = 0;
/* 2) Name must begin with an underscore */
if (name[0] != '_') {
return_ACPI_STATUS(AE_OK); /* Ignore this method */
}
/*
* 3) Edge/Level determination is based on the 2nd character
* of the method name
*/
switch (name[1]) {
case 'L':
type = ACPI_GPE_LEVEL_TRIGGERED;
break;
case 'E':
type = ACPI_GPE_EDGE_TRIGGERED;
break;
default:
/* Unknown method type, just ignore it */
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"Ignoring unknown GPE method type: %s "
"(name not of form _Lxx or _Exx)", name));
return_ACPI_STATUS(AE_OK);
}
/* 4) The last two characters of the name are the hex GPE Number */
gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
if (gpe_number == ACPI_UINT32_MAX) {
/* Conversion failed; invalid method, just ignore it */
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"Could not extract GPE number from name: %s "
"(name is not of form _Lxx or _Exx)", name));
return_ACPI_STATUS(AE_OK);
}
/* Ensure that we have a valid GPE number for this GPE block */
gpe_event_info =
acpi_ev_low_get_gpe_info(gpe_number, walk_info->gpe_block);
if (!gpe_event_info) {
/*
* This gpe_number is not valid for this GPE block, just ignore it.
* However, it may be valid for a different GPE block, since GPE0
* and GPE1 methods both appear under \_GPE.
*/
return_ACPI_STATUS(AE_OK);
}
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_HANDLER) {
/* If there is already a handler, ignore this GPE method */
return_ACPI_STATUS(AE_OK);
}
if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_METHOD) {
/*
* If there is already a method, ignore this method. But check
* for a type mismatch (if both the _Lxx AND _Exx exist)
*/
if (type != (gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
ACPI_ERROR((AE_INFO,
"For GPE 0x%.2X, found both _L%2.2X and _E%2.2X methods",
gpe_number, gpe_number, gpe_number));
}
return_ACPI_STATUS(AE_OK);
}
/* Disable the GPE in case it's been enabled already. */
(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
/*
* Add the GPE information from above to the gpe_event_info block for
* use during dispatch of this GPE.
*/
gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK);
gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
gpe_event_info->dispatch.method_node = method_node;
ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
"Registered GPE method %s as GPE number 0x%.2X\n",
name, gpe_number));
return_ACPI_STATUS(AE_OK);
}
#endif /* !ACPI_REDUCED_HARDWARE */
| gpl-2.0 |
vtss/linux-stable | drivers/acpi/acpica/hwxface.c | 324 | 17850 | /******************************************************************************
*
* Module Name: hwxface - Public ACPICA hardware interfaces
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#define EXPORT_ACPI_INTERFACES
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_HARDWARE
ACPI_MODULE_NAME("hwxface")
/******************************************************************************
*
* FUNCTION: acpi_reset
*
* PARAMETERS: None
*
* RETURN: Status
*
* DESCRIPTION: Set reset register in memory or IO space. Note: Does not
* support reset register in PCI config space, this must be
* handled separately.
*
******************************************************************************/
acpi_status acpi_reset(void)
{
struct acpi_generic_address *reset_reg;
acpi_status status;
ACPI_FUNCTION_TRACE(acpi_reset);
reset_reg = &acpi_gbl_FADT.reset_register;
/* Check if the reset register is supported */
if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) ||
!reset_reg->address) {
return_ACPI_STATUS(AE_NOT_EXIST);
}
if (reset_reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
/*
* For I/O space, write directly to the OSL. This bypasses the port
* validation mechanism, which may block a valid write to the reset
* register.
*
* NOTE:
* The ACPI spec requires the reset register width to be 8, so we
* hardcode it here and ignore the FADT value. This maintains
* compatibility with other ACPI implementations that have allowed
* BIOS code with bad register width values to go unnoticed.
*/
status =
acpi_os_write_port((acpi_io_address) reset_reg->address,
acpi_gbl_FADT.reset_value,
ACPI_RESET_REGISTER_WIDTH);
} else {
/* Write the reset value to the reset register */
status = acpi_hw_write(acpi_gbl_FADT.reset_value, reset_reg);
}
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_reset)
/******************************************************************************
*
* FUNCTION: acpi_read
*
* PARAMETERS: value - Where the value is returned
* reg - GAS register structure
*
* RETURN: Status
*
* DESCRIPTION: Read from either memory or IO space.
*
* LIMITATIONS: <These limitations also apply to acpi_write>
* bit_width must be exactly 8, 16, 32, or 64.
* space_ID must be system_memory or system_IO.
* bit_offset and access_width are currently ignored, as there has
* not been a need to implement these.
*
******************************************************************************/
acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
{
u32 value_lo;
u32 value_hi;
u32 width;
u64 address;
acpi_status status;
ACPI_FUNCTION_NAME(acpi_read);
if (!return_value) {
return (AE_BAD_PARAMETER);
}
/* Validate contents of the GAS register. Allow 64-bit transfers */
status = acpi_hw_validate_register(reg, 64, &address);
if (ACPI_FAILURE(status)) {
return (status);
}
/*
* Two address spaces supported: Memory or I/O. PCI_Config is
* not supported here because the GAS structure is insufficient
*/
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
status = acpi_os_read_memory((acpi_physical_address)
address, return_value,
reg->bit_width);
if (ACPI_FAILURE(status)) {
return (status);
}
} else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
value_lo = 0;
value_hi = 0;
width = reg->bit_width;
if (width == 64) {
width = 32; /* Break into two 32-bit transfers */
}
status = acpi_hw_read_port((acpi_io_address)
address, &value_lo, width);
if (ACPI_FAILURE(status)) {
return (status);
}
if (reg->bit_width == 64) {
/* Read the top 32 bits */
status = acpi_hw_read_port((acpi_io_address)
(address + 4), &value_hi,
32);
if (ACPI_FAILURE(status)) {
return (status);
}
}
/* Set the return value only if status is AE_OK */
*return_value = (value_lo | ((u64)value_hi << 32));
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
"Read: %8.8X%8.8X width %2d from %8.8X%8.8X (%s)\n",
ACPI_FORMAT_UINT64(*return_value), reg->bit_width,
ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
return (AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_read)
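/*
* Worked example (illustrative, hypothetical values; not part of the
* original source): for a GAS register with space_id ==
* ACPI_ADR_SPACE_SYSTEM_IO, bit_width == 64 and address == 0x1000,
* the function above performs:
*
* acpi_hw_read_port(0x1000, &value_lo, 32); // low dword
* acpi_hw_read_port(0x1004, &value_hi, 32); // high dword
* *return_value = value_lo | ((u64)value_hi << 32);
*/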
/******************************************************************************
*
* FUNCTION: acpi_write
*
* PARAMETERS: value - Value to be written
* reg - GAS register structure
*
* RETURN: Status
*
* DESCRIPTION: Write to either memory or IO space.
*
******************************************************************************/
acpi_status acpi_write(u64 value, struct acpi_generic_address *reg)
{
u32 width;
u64 address;
acpi_status status;
ACPI_FUNCTION_NAME(acpi_write);
/* Validate contents of the GAS register. Allow 64-bit transfers */
status = acpi_hw_validate_register(reg, 64, &address);
if (ACPI_FAILURE(status)) {
return (status);
}
/*
* Two address spaces supported: Memory or IO. PCI_Config is
* not supported here because the GAS structure is insufficient
*/
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
status = acpi_os_write_memory((acpi_physical_address)
address, value, reg->bit_width);
if (ACPI_FAILURE(status)) {
return (status);
}
} else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
width = reg->bit_width;
if (width == 64) {
width = 32; /* Break into two 32-bit transfers */
}
status = acpi_hw_write_port((acpi_io_address)
address, ACPI_LODWORD(value),
width);
if (ACPI_FAILURE(status)) {
return (status);
}
if (reg->bit_width == 64) {
status = acpi_hw_write_port((acpi_io_address)
(address + 4),
ACPI_HIDWORD(value), 32);
if (ACPI_FAILURE(status)) {
return (status);
}
}
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
"Wrote: %8.8X%8.8X width %2d to %8.8X%8.8X (%s)\n",
ACPI_FORMAT_UINT64(value), reg->bit_width,
ACPI_FORMAT_UINT64(address),
acpi_ut_get_region_name(reg->space_id)));
return (status);
}
ACPI_EXPORT_SYMBOL(acpi_write)
#if (!ACPI_REDUCED_HARDWARE)
/*******************************************************************************
*
* FUNCTION: acpi_read_bit_register
*
* PARAMETERS: register_id - ID of ACPI Bit Register to access
* return_value - Value that was read from the register,
* normalized to bit position zero.
*
* RETURN: Status and the value read from the specified Register. Value
* returned is normalized to bit0 (is shifted all the way right)
*
* DESCRIPTION: ACPI bit_register read function. Does not acquire the HW lock.
*
* SUPPORTS: Bit fields in PM1 Status, PM1 Enable, PM1 Control, and
* PM2 Control.
*
* Note: The hardware lock is not required when reading the ACPI bit registers
* since almost all of them are single bit and it does not matter that
* the parent hardware register can be split across two physical
* registers. The only multi-bit field is SLP_TYP in the PM1 control
* register, but this field does not cross an 8-bit boundary (nor does
* it make much sense to actually read this field.)
*
******************************************************************************/
acpi_status acpi_read_bit_register(u32 register_id, u32 *return_value)
{
struct acpi_bit_register_info *bit_reg_info;
u32 register_value;
u32 value;
acpi_status status;
ACPI_FUNCTION_TRACE_U32(acpi_read_bit_register, register_id);
/* Get the info structure corresponding to the requested ACPI Register */
bit_reg_info = acpi_hw_get_bit_register_info(register_id);
if (!bit_reg_info) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Read the entire parent register */
status = acpi_hw_register_read(bit_reg_info->parent_register,
&register_value);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Normalize the value that was read, mask off other bits */
value = ((register_value & bit_reg_info->access_bit_mask)
>> bit_reg_info->bit_position);
ACPI_DEBUG_PRINT((ACPI_DB_IO,
"BitReg %X, ParentReg %X, Actual %8.8X, ReturnValue %8.8X\n",
register_id, bit_reg_info->parent_register,
register_value, value));
*return_value = value;
return_ACPI_STATUS(AE_OK);
}
ACPI_EXPORT_SYMBOL(acpi_read_bit_register)
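/*
* Worked example (illustrative): if a bit register occupies bits [12:10]
* of its parent register (access_bit_mask == 0x1C00, bit_position == 10)
* and the parent register reads back 0x1400, the normalization above
* yields (0x1400 & 0x1C00) >> 10 == 0x5, i.e. the field value shifted
* down to bit position zero.
*/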
/*******************************************************************************
*
* FUNCTION: acpi_write_bit_register
*
* PARAMETERS: register_id - ID of ACPI Bit Register to access
* value - Value to write to the register, in bit
* position zero. The bit is automatically
* shifted to the correct position.
*
* RETURN: Status
*
* DESCRIPTION: ACPI Bit Register write function. Acquires the hardware lock
* since most operations require a read/modify/write sequence.
*
* SUPPORTS: Bit fields in PM1 Status, PM1 Enable, PM1 Control, and
* PM2 Control.
*
* Note that at this level, the fact that there may actually be two
* hardware registers (A and B - and B may not exist) is abstracted.
*
******************************************************************************/
acpi_status acpi_write_bit_register(u32 register_id, u32 value)
{
struct acpi_bit_register_info *bit_reg_info;
acpi_cpu_flags lock_flags;
u32 register_value;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_U32(acpi_write_bit_register, register_id);
/* Get the info structure corresponding to the requested ACPI Register */
bit_reg_info = acpi_hw_get_bit_register_info(register_id);
if (!bit_reg_info) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
/*
* At this point, we know that the parent register is one of the
* following: PM1 Status, PM1 Enable, PM1 Control, or PM2 Control
*/
if (bit_reg_info->parent_register != ACPI_REGISTER_PM1_STATUS) {
/*
* 1) Case for PM1 Enable, PM1 Control, and PM2 Control
*
* Perform a register read to preserve the bits that we are not
* interested in
*/
status = acpi_hw_register_read(bit_reg_info->parent_register,
&register_value);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
}
/*
* Insert the input bit into the value that was just read
* and write the register
*/
ACPI_REGISTER_INSERT_VALUE(register_value,
bit_reg_info->bit_position,
bit_reg_info->access_bit_mask,
value);
status = acpi_hw_register_write(bit_reg_info->parent_register,
register_value);
} else {
/*
* 2) Case for PM1 Status
*
* The Status register is different from the rest. Clear an event
* by writing 1; writing 0 has no effect. So, the only relevant
* information is the single bit we're interested in, all others
* should be written as 0 so they will be left unchanged.
*/
register_value = ACPI_REGISTER_PREPARE_BITS(value,
bit_reg_info->
bit_position,
bit_reg_info->
access_bit_mask);
/* No need to write the register if value is all zeros */
if (register_value) {
status =
acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
register_value);
}
}
ACPI_DEBUG_PRINT((ACPI_DB_IO,
"BitReg %X, ParentReg %X, Value %8.8X, Actual %8.8X\n",
register_id, bit_reg_info->parent_register, value,
register_value));
unlock_and_exit:
acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_write_bit_register)
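/*
* Worked example (illustrative): clearing a PM1 Status event bit at
* bit_position 5 with value == 1 prepares register_value == 1 << 5 ==
* 0x20; only that bit is written as 1, and the write-1-to-clear
* semantics leave every other status bit unchanged.
*/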
#endif /* !ACPI_REDUCED_HARDWARE */
/*******************************************************************************
*
* FUNCTION: acpi_get_sleep_type_data
*
* PARAMETERS: sleep_state - Numeric sleep state
* *sleep_type_a - Where SLP_TYPa is returned
* *sleep_type_b - Where SLP_TYPb is returned
*
* RETURN: Status
*
* DESCRIPTION: Obtain the SLP_TYPa and SLP_TYPb values for the requested
* sleep state via the appropriate \_Sx object.
*
* The sleep state package returned from the corresponding \_Sx_ object
* must contain at least one integer.
*
* March 2005:
* Added support for a package that contains two integers. This
* goes against the ACPI specification which defines this object as a
* package with one encoded DWORD integer. However, existing practice
* by many BIOS vendors is to return a package with 2 or more integer
* elements, at least one per sleep type (A/B).
*
* January 2013:
* Therefore, we must be prepared to accept a package with either a
* single integer or multiple integers.
*
* The single integer DWORD format is as follows:
* BYTE 0 - Value for the PM1A SLP_TYP register
* BYTE 1 - Value for the PM1B SLP_TYP register
* BYTE 2-3 - Reserved
*
* The dual integer format is as follows:
* Integer 0 - Value for the PM1A SLP_TYP register
* Integer 1 - Value for the PM1B SLP_TYP register
*
******************************************************************************/
acpi_status
acpi_get_sleep_type_data(u8 sleep_state, u8 *sleep_type_a, u8 *sleep_type_b)
{
acpi_status status;
struct acpi_evaluate_info *info;
union acpi_operand_object **elements;
ACPI_FUNCTION_TRACE(acpi_get_sleep_type_data);
/* Validate parameters */
if ((sleep_state > ACPI_S_STATES_MAX) || !sleep_type_a || !sleep_type_b) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
/* Allocate the evaluation information block */
info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/*
* Evaluate the \_Sx namespace object containing the register values
* for this state
*/
info->relative_pathname =
ACPI_CAST_PTR(char, acpi_gbl_sleep_state_names[sleep_state]);
status = acpi_ns_evaluate(info);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
/* Must have a return object */
if (!info->return_object) {
ACPI_ERROR((AE_INFO, "No Sleep State object returned from [%s]",
info->relative_pathname));
status = AE_AML_NO_RETURN_VALUE;
goto cleanup;
}
/* Return object must be of type Package */
if (info->return_object->common.type != ACPI_TYPE_PACKAGE) {
ACPI_ERROR((AE_INFO,
"Sleep State return object is not a Package"));
status = AE_AML_OPERAND_TYPE;
goto cleanup1;
}
/*
* Any warnings about the package length or the object types have
* already been issued by the predefined name module -- there is no
* need to repeat them here.
*/
elements = info->return_object->package.elements;
switch (info->return_object->package.count) {
case 0:
status = AE_AML_PACKAGE_LIMIT;
break;
case 1:
if (elements[0]->common.type != ACPI_TYPE_INTEGER) {
status = AE_AML_OPERAND_TYPE;
break;
}
/* A valid _Sx_ package with one integer */
*sleep_type_a = (u8)elements[0]->integer.value;
*sleep_type_b = (u8)(elements[0]->integer.value >> 8);
break;
case 2:
default:
if ((elements[0]->common.type != ACPI_TYPE_INTEGER) ||
(elements[1]->common.type != ACPI_TYPE_INTEGER)) {
status = AE_AML_OPERAND_TYPE;
break;
}
/* A valid _Sx_ package with two integers */
*sleep_type_a = (u8)elements[0]->integer.value;
*sleep_type_b = (u8)elements[1]->integer.value;
break;
}
cleanup1:
acpi_ut_remove_reference(info->return_object);
cleanup:
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"While evaluating Sleep State [%s]",
info->relative_pathname));
}
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_get_sleep_type_data)
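/*
* Worked example (illustrative): with the single-integer package format
* described above, a returned value of 0x0705 decodes as
*
* *sleep_type_a = 0x0705 & 0xFF; -> 0x05 (PM1A SLP_TYP)
* *sleep_type_b = (0x0705 >> 8) & 0xFF; -> 0x07 (PM1B SLP_TYP)
*
* With the dual-integer format the two package elements are used
* directly, one per register.
*/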
| gpl-2.0 |
thederekjay/htc-kernel-msm7x30-DARKSIDE | arch/arm/mach-s5pv210/setup-sdhci.c | 580 | 1745 | /* linux/arch/arm/mach-s5pv210/setup-sdhci.c
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* S5PV210 - Helper functions for setting up SDHCI device(s) (HSMMC)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <plat/regs-sdhci.h>
#include <plat/sdhci.h>
/* clock sources for the mmc bus clock, order as for the ctrl2[5..4] */
char *s5pv210_hsmmc_clksrcs[4] = {
[0] = "hsmmc", /* HCLK */
[1] = "hsmmc", /* HCLK */
[2] = "sclk_mmc", /* mmc_bus */
/*[4] = reserved */
};
void s5pv210_setup_sdhci_cfg_card(struct platform_device *dev,
void __iomem *r,
struct mmc_ios *ios,
struct mmc_card *card)
{
u32 ctrl2, ctrl3;
/* don't need to alter anything according to card-type */
writel(S3C64XX_SDHCI_CONTROL4_DRIVE_9mA, r + S3C64XX_SDHCI_CONTROL4);
ctrl2 = readl(r + S3C_SDHCI_CONTROL2);
ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK;
ctrl2 |= (S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR |
S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK |
S3C_SDHCI_CTRL2_ENFBCLKRX |
S3C_SDHCI_CTRL2_DFCNT_NONE |
S3C_SDHCI_CTRL2_ENCLKOUTHOLD);
if (ios->clock < 25 * 1000000)
ctrl3 = (S3C_SDHCI_CTRL3_FCSEL3 |
S3C_SDHCI_CTRL3_FCSEL2 |
S3C_SDHCI_CTRL3_FCSEL1 |
S3C_SDHCI_CTRL3_FCSEL0);
else
ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
writel(ctrl2, r + S3C_SDHCI_CONTROL2);
writel(ctrl3, r + S3C_SDHCI_CONTROL3);
}
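/*
* Example (illustrative): for a 400 kHz identification clock (below
* 25 MHz) all four feedback-clock delay taps FCSEL3..FCSEL0 are
* selected above; for a 50 MHz transfer clock only FCSEL1|FCSEL0 are
* programmed into CONTROL3.
*/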
| gpl-2.0 |
aceofall/linux-kernel | drivers/misc/bh1770glc.c | 580 | 38636 | /*
* This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
* Chip is combined proximity and ambient light sensor.
*
* Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
*
* Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/i2c/bh1770glc.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/slab.h>
#define BH1770_ALS_CONTROL 0x80 /* ALS operation mode control */
#define BH1770_PS_CONTROL 0x81 /* PS operation mode control */
#define BH1770_I_LED 0x82 /* active LED and LED1, LED2 current */
#define BH1770_I_LED3 0x83 /* LED3 current setting */
#define BH1770_ALS_PS_MEAS 0x84 /* Forced mode trigger */
#define BH1770_PS_MEAS_RATE 0x85 /* PS meas. rate at stand alone mode */
#define BH1770_ALS_MEAS_RATE 0x86 /* ALS meas. rate at stand alone mode */
#define BH1770_PART_ID 0x8a /* Part number and revision ID */
#define BH1770_MANUFACT_ID 0x8b /* Manufacturer ID */
#define BH1770_ALS_DATA_0 0x8c /* ALS DATA low byte */
#define BH1770_ALS_DATA_1 0x8d /* ALS DATA high byte */
#define BH1770_ALS_PS_STATUS 0x8e /* Measurement data and int status */
#define BH1770_PS_DATA_LED1 0x8f /* PS data from LED1 */
#define BH1770_PS_DATA_LED2 0x90 /* PS data from LED2 */
#define BH1770_PS_DATA_LED3 0x91 /* PS data from LED3 */
#define BH1770_INTERRUPT 0x92 /* Interrupt setting */
#define BH1770_PS_TH_LED1 0x93 /* PS interrupt threshold for LED1 */
#define BH1770_PS_TH_LED2 0x94 /* PS interrupt threshold for LED2 */
#define BH1770_PS_TH_LED3 0x95 /* PS interrupt threshold for LED3 */
#define BH1770_ALS_TH_UP_0 0x96 /* ALS upper threshold low byte */
#define BH1770_ALS_TH_UP_1 0x97 /* ALS upper threshold high byte */
#define BH1770_ALS_TH_LOW_0 0x98 /* ALS lower threshold low byte */
#define BH1770_ALS_TH_LOW_1 0x99 /* ALS lower threshold high byte */
/* MANUFACT_ID */
#define BH1770_MANUFACT_ROHM 0x01
#define BH1770_MANUFACT_OSRAM 0x03
/* PART_ID */
#define BH1770_PART 0x90
#define BH1770_PART_MASK 0xf0
#define BH1770_REV_MASK 0x0f
#define BH1770_REV_SHIFT 0
#define BH1770_REV_0 0x00
#define BH1770_REV_1 0x01
/* Operating modes for both */
#define BH1770_STANDBY 0x00
#define BH1770_FORCED 0x02
#define BH1770_STANDALONE 0x03
#define BH1770_SWRESET (0x01 << 2)
#define BH1770_PS_TRIG_MEAS (1 << 0)
#define BH1770_ALS_TRIG_MEAS (1 << 1)
/* Interrupt control */
#define BH1770_INT_OUTPUT_MODE (1 << 3) /* 0 = latched */
#define BH1770_INT_POLARITY (1 << 2) /* 1 = active high */
#define BH1770_INT_ALS_ENA (1 << 1)
#define BH1770_INT_PS_ENA (1 << 0)
/* Interrupt status */
#define BH1770_INT_LED1_DATA (1 << 0)
#define BH1770_INT_LED1_INT (1 << 1)
#define BH1770_INT_LED2_DATA (1 << 2)
#define BH1770_INT_LED2_INT (1 << 3)
#define BH1770_INT_LED3_DATA (1 << 4)
#define BH1770_INT_LED3_INT (1 << 5)
#define BH1770_INT_LEDS_INT ((1 << 1) | (1 << 3) | (1 << 5))
#define BH1770_INT_ALS_DATA (1 << 6)
#define BH1770_INT_ALS_INT (1 << 7)
/* LED channels */
#define BH1770_LED1 0x00
#define BH1770_DISABLE 0
#define BH1770_ENABLE 1
#define BH1770_PROX_CHANNELS 1
#define BH1770_LUX_DEFAULT_RATE 1 /* Index to lux rate table */
#define BH1770_PROX_DEFAULT_RATE 1 /* Direct HW value =~ 50Hz */
#define BH1770_PROX_DEF_RATE_THRESH 6 /* Direct HW value =~ 5 Hz */
#define BH1770_STARTUP_DELAY 50
#define BH1770_RESET_TIME 10
#define BH1770_TIMEOUT 2100 /* Timeout in 2.1 seconds */
#define BH1770_LUX_RANGE 65535
#define BH1770_PROX_RANGE 255
#define BH1770_COEF_SCALER 1024
#define BH1770_CALIB_SCALER 8192
#define BH1770_LUX_NEUTRAL_CALIB_VALUE (1 * BH1770_CALIB_SCALER)
#define BH1770_LUX_DEF_THRES 1000
#define BH1770_PROX_DEF_THRES 70
#define BH1770_PROX_DEF_ABS_THRES 100
#define BH1770_DEFAULT_PERSISTENCE 10
#define BH1770_PROX_MAX_PERSISTENCE 50
#define BH1770_LUX_GA_SCALE 16384
#define BH1770_LUX_CF_SCALE 2048 /* CF ChipFactor */
#define BH1770_NEUTRAL_CF BH1770_LUX_CF_SCALE
#define BH1770_NEUTRAL_GA BH1770_LUX_GA_SCALE /* 16384 / 16384 = 1; restored define, used in probe below */
#define BH1770_LUX_CORR_SCALE 4096
#define PROX_ABOVE_THRESHOLD 1
#define PROX_BELOW_THRESHOLD 0
#define PROX_IGNORE_LUX_LIMIT 500
struct bh1770_chip {
struct bh1770_platform_data *pdata;
char chipname[10];
u8 revision;
struct i2c_client *client;
struct regulator_bulk_data regs[2];
struct mutex mutex; /* avoid parallel access */
wait_queue_head_t wait;
bool int_mode_prox;
bool int_mode_lux;
struct delayed_work prox_work;
u32 lux_cf; /* Chip specific factor */
u32 lux_ga;
u32 lux_calib;
int lux_rate_index;
u32 lux_corr;
u16 lux_data_raw;
u16 lux_threshold_hi;
u16 lux_threshold_lo;
u16 lux_thres_hi_onchip;
u16 lux_thres_lo_onchip;
bool lux_wait_result;
int prox_enable_count;
u16 prox_coef;
u16 prox_const;
int prox_rate;
int prox_rate_threshold;
u8 prox_persistence;
u8 prox_persistence_counter;
u8 prox_data;
u8 prox_threshold;
u8 prox_threshold_hw;
bool prox_force_update;
u8 prox_abs_thres;
u8 prox_led;
};
static const char reg_vcc[] = "Vcc";
static const char reg_vleds[] = "Vleds";
/*
* Supported stand alone rates in ms from chip data sheet
* {10, 20, 30, 40, 70, 100, 200, 500, 1000, 2000};
*/
static const s16 prox_rates_hz[] = {100, 50, 33, 25, 14, 10, 5, 2};
static const s16 prox_rates_ms[] = {10, 20, 30, 40, 70, 100, 200, 500};
/* Supported IR-led currents in mA */
static const u8 prox_curr_ma[] = {5, 10, 20, 50, 100, 150, 200};
/*
* Supported stand alone rates in ms from chip data sheet
* {100, 200, 500, 1000, 2000};
*/
static const s16 lux_rates_hz[] = {10, 5, 2, 1, 0};
/*
* interrupt control functions are called while keeping chip->mutex
* excluding module probe / remove
*/
static inline int bh1770_lux_interrupt_control(struct bh1770_chip *chip,
int lux)
{
chip->int_mode_lux = lux;
/* Set interrupt modes, interrupt active low, latched */
return i2c_smbus_write_byte_data(chip->client,
BH1770_INTERRUPT,
(lux << 1) | chip->int_mode_prox);
}
static inline int bh1770_prox_interrupt_control(struct bh1770_chip *chip,
int ps)
{
chip->int_mode_prox = ps;
return i2c_smbus_write_byte_data(chip->client,
BH1770_INTERRUPT,
(chip->int_mode_lux << 1) | (ps << 0));
}
/* chip->mutex is always kept here */
static int bh1770_lux_rate(struct bh1770_chip *chip, int rate_index)
{
/* sysfs may call this when the chip is powered off */
if (pm_runtime_suspended(&chip->client->dev))
return 0;
/* Proper proximity response needs fastest lux rate (100ms) */
if (chip->prox_enable_count)
rate_index = 0;
return i2c_smbus_write_byte_data(chip->client,
BH1770_ALS_MEAS_RATE,
rate_index);
}
static int bh1770_prox_rate(struct bh1770_chip *chip, int mode)
{
int rate;
rate = (mode == PROX_ABOVE_THRESHOLD) ?
chip->prox_rate_threshold : chip->prox_rate;
return i2c_smbus_write_byte_data(chip->client,
BH1770_PS_MEAS_RATE,
rate);
}
/* The infrared LED is controlled by the chip during proximity scanning */
static inline int bh1770_led_cfg(struct bh1770_chip *chip)
{
/* LED cfg, current for leds 1 and 2 */
return i2c_smbus_write_byte_data(chip->client,
BH1770_I_LED,
(BH1770_LED1 << 6) |
(BH1770_LED_5mA << 3) |
chip->prox_led);
}
/*
* The following two functions convert raw PS values from HW to normalized
* values. The purpose is to compensate for differences between sensor
* versions and variants so that the result means about the same across
* versions.
*/
static inline u8 bh1770_psraw_to_adjusted(struct bh1770_chip *chip, u8 psraw)
{
u16 adjusted;
adjusted = (u16)(((u32)(psraw + chip->prox_const) * chip->prox_coef) /
BH1770_COEF_SCALER);
if (adjusted > BH1770_PROX_RANGE)
adjusted = BH1770_PROX_RANGE;
return adjusted;
}
static inline u8 bh1770_psadjusted_to_raw(struct bh1770_chip *chip, u8 ps)
{
u16 raw;
raw = (((u32)ps * BH1770_COEF_SCALER) / chip->prox_coef);
if (raw > chip->prox_const)
raw = raw - chip->prox_const;
else
raw = 0;
return raw;
}
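/*
* Worked example (illustrative): with the SFH7770 coefficients set up
* in bh1770_detect() below (prox_coef == 819, prox_const == 40), a raw
* HW reading of 100 normalizes to (100 + 40) * 819 / 1024 == 111, and
* results are clamped to BH1770_PROX_RANGE (255) on overflow.
*/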
/*
* The following two functions convert raw lux values from HW to normalized
* values. The purpose is to compensate for differences between sensor
* versions and variants so that the result means about the same across
* versions. Chip->mutex is kept when this is called.
*/
static int bh1770_prox_set_threshold(struct bh1770_chip *chip)
{
u8 tmp = 0;
/* sysfs may call this when the chip is powered off */
if (pm_runtime_suspended(&chip->client->dev))
return 0;
tmp = bh1770_psadjusted_to_raw(chip, chip->prox_threshold);
chip->prox_threshold_hw = tmp;
return i2c_smbus_write_byte_data(chip->client, BH1770_PS_TH_LED1,
tmp);
}
static inline u16 bh1770_lux_raw_to_adjusted(struct bh1770_chip *chip, u16 raw)
{
u32 lux;
lux = ((u32)raw * chip->lux_corr) / BH1770_LUX_CORR_SCALE;
return min(lux, (u32)BH1770_LUX_RANGE);
}
static inline u16 bh1770_lux_adjusted_to_raw(struct bh1770_chip *chip,
u16 adjusted)
{
return (u32)adjusted * BH1770_LUX_CORR_SCALE / chip->lux_corr;
}
/* chip->mutex is kept when this is called */
static int bh1770_lux_update_thresholds(struct bh1770_chip *chip,
u16 threshold_hi, u16 threshold_lo)
{
u8 data[4];
int ret;
/* sysfs may call this when the chip is powered off */
if (pm_runtime_suspended(&chip->client->dev))
return 0;
/*
* Compensate threshold values with the correction factors if not
* set to minimum or maximum.
* Min & max values disables interrupts.
*/
if (threshold_hi != BH1770_LUX_RANGE && threshold_hi != 0)
threshold_hi = bh1770_lux_adjusted_to_raw(chip, threshold_hi);
if (threshold_lo != BH1770_LUX_RANGE && threshold_lo != 0)
threshold_lo = bh1770_lux_adjusted_to_raw(chip, threshold_lo);
if (chip->lux_thres_hi_onchip == threshold_hi &&
chip->lux_thres_lo_onchip == threshold_lo)
return 0;
chip->lux_thres_hi_onchip = threshold_hi;
chip->lux_thres_lo_onchip = threshold_lo;
data[0] = threshold_hi;
data[1] = threshold_hi >> 8;
data[2] = threshold_lo;
data[3] = threshold_lo >> 8;
ret = i2c_smbus_write_i2c_block_data(chip->client,
BH1770_ALS_TH_UP_0,
ARRAY_SIZE(data),
data);
return ret;
}
static int bh1770_lux_get_result(struct bh1770_chip *chip)
{
u16 data;
int ret;
ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_0);
if (ret < 0)
return ret;
data = ret & 0xff;
ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_1);
if (ret < 0)
return ret;
chip->lux_data_raw = data | ((ret & 0xff) << 8);
return 0;
}
/* Calculate correction value which contains chip and device specific parts */
static u32 bh1770_get_corr_value(struct bh1770_chip *chip)
{
u32 tmp;
/* Impact of glass attenuation correction */
tmp = (BH1770_LUX_CORR_SCALE * chip->lux_ga) / BH1770_LUX_GA_SCALE;
/* Impact of chip factor correction */
tmp = (tmp * chip->lux_cf) / BH1770_LUX_CF_SCALE;
/* Impact of Device specific calibration correction */
tmp = (tmp * chip->lux_calib) / BH1770_CALIB_SCALER;
return tmp;
}
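/*
* Worked example (illustrative): with neutral settings (lux_ga ==
* BH1770_LUX_GA_SCALE, lux_cf == BH1770_LUX_CF_SCALE, lux_calib ==
* BH1770_CALIB_SCALER) every step above multiplies by one, so the
* result is BH1770_LUX_CORR_SCALE and raw lux values pass through
* bh1770_lux_raw_to_adjusted() unchanged.
*/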
static int bh1770_lux_read_result(struct bh1770_chip *chip)
{
bh1770_lux_get_result(chip);
return bh1770_lux_raw_to_adjusted(chip, chip->lux_data_raw);
}
/*
* Chip on / off functions are called while keeping mutex except probe
* or remove phase
*/
static int bh1770_chip_on(struct bh1770_chip *chip)
{
int ret = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
chip->regs);
if (ret < 0)
return ret;
usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
/* Reset the chip */
i2c_smbus_write_byte_data(chip->client, BH1770_ALS_CONTROL,
BH1770_SWRESET);
usleep_range(BH1770_RESET_TIME, BH1770_RESET_TIME * 2);
/*
* ALS is always started since proximity needs ALS results
* for reliability estimation.
* Let's assume dark until the first ALS measurement is ready.
*/
chip->lux_data_raw = 0;
chip->prox_data = 0;
ret = i2c_smbus_write_byte_data(chip->client,
BH1770_ALS_CONTROL, BH1770_STANDALONE);
/* Assume reset defaults */
chip->lux_thres_hi_onchip = BH1770_LUX_RANGE;
chip->lux_thres_lo_onchip = 0;
return ret;
}
static void bh1770_chip_off(struct bh1770_chip *chip)
{
i2c_smbus_write_byte_data(chip->client,
BH1770_INTERRUPT, BH1770_DISABLE);
i2c_smbus_write_byte_data(chip->client,
BH1770_ALS_CONTROL, BH1770_STANDBY);
i2c_smbus_write_byte_data(chip->client,
BH1770_PS_CONTROL, BH1770_STANDBY);
regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
}
/* chip->mutex is kept when this is called */
static int bh1770_prox_mode_control(struct bh1770_chip *chip)
{
if (chip->prox_enable_count) {
chip->prox_force_update = true; /* Force immediate update */
bh1770_lux_rate(chip, chip->lux_rate_index);
bh1770_prox_set_threshold(chip);
bh1770_led_cfg(chip);
bh1770_prox_rate(chip, PROX_BELOW_THRESHOLD);
bh1770_prox_interrupt_control(chip, BH1770_ENABLE);
i2c_smbus_write_byte_data(chip->client,
BH1770_PS_CONTROL, BH1770_STANDALONE);
} else {
chip->prox_data = 0;
bh1770_lux_rate(chip, chip->lux_rate_index);
bh1770_prox_interrupt_control(chip, BH1770_DISABLE);
i2c_smbus_write_byte_data(chip->client,
BH1770_PS_CONTROL, BH1770_STANDBY);
}
return 0;
}
/* chip->mutex is kept when this is called */
static int bh1770_prox_read_result(struct bh1770_chip *chip)
{
int ret;
bool above;
u8 mode;
ret = i2c_smbus_read_byte_data(chip->client, BH1770_PS_DATA_LED1);
if (ret < 0)
goto out;
if (ret > chip->prox_threshold_hw)
above = true;
else
above = false;
/*
* When the ALS level goes above the limit, the proximity result may be
* a false proximity. Thus ignore the result. With real proximity
* there is a shadow causing low ALS levels.
*/
if (chip->lux_data_raw > PROX_IGNORE_LUX_LIMIT)
ret = 0;
chip->prox_data = bh1770_psraw_to_adjusted(chip, ret);
/* Strong proximity level or force mode requires immediate response */
if (chip->prox_data >= chip->prox_abs_thres ||
chip->prox_force_update)
chip->prox_persistence_counter = chip->prox_persistence;
chip->prox_force_update = false;
/* Persistence filtering to reduce false proximity events */
if (likely(above)) {
if (chip->prox_persistence_counter < chip->prox_persistence) {
chip->prox_persistence_counter++;
ret = -ENODATA;
} else {
mode = PROX_ABOVE_THRESHOLD;
ret = 0;
}
} else {
chip->prox_persistence_counter = 0;
mode = PROX_BELOW_THRESHOLD;
chip->prox_data = 0;
ret = 0;
}
/* Set proximity detection rate based on above or below value */
if (ret == 0) {
bh1770_prox_rate(chip, mode);
sysfs_notify(&chip->client->dev.kobj, NULL, "prox0_raw");
}
out:
return ret;
}
static int bh1770_detect(struct bh1770_chip *chip)
{
struct i2c_client *client = chip->client;
s32 ret;
u8 manu, part;
ret = i2c_smbus_read_byte_data(client, BH1770_MANUFACT_ID);
if (ret < 0)
goto error;
manu = (u8)ret;
ret = i2c_smbus_read_byte_data(client, BH1770_PART_ID);
if (ret < 0)
goto error;
part = (u8)ret;
chip->revision = (part & BH1770_REV_MASK) >> BH1770_REV_SHIFT;
chip->prox_coef = BH1770_COEF_SCALER;
chip->prox_const = 0;
chip->lux_cf = BH1770_NEUTRAL_CF;
if ((manu == BH1770_MANUFACT_ROHM) &&
((part & BH1770_PART_MASK) == BH1770_PART)) {
snprintf(chip->chipname, sizeof(chip->chipname), "BH1770GLC");
return 0;
}
if ((manu == BH1770_MANUFACT_OSRAM) &&
((part & BH1770_PART_MASK) == BH1770_PART)) {
snprintf(chip->chipname, sizeof(chip->chipname), "SFH7770");
/* Values selected by comparing different versions */
chip->prox_coef = 819; /* 0.8 * BH1770_COEF_SCALER */
chip->prox_const = 40;
return 0;
}
ret = -ENODEV;
error:
dev_dbg(&client->dev, "BH1770 or SFH7770 not found\n");
return ret;
}
/*
* This work is re-scheduled at every proximity interrupt.
* If this work is running, it means that there hasn't been any
* proximity interrupt in time. The situation is handled as no-proximity.
* It would be nice to have a low-threshold interrupt or an interrupt
* when measurement and hi-threshold are both 0. But neither of those exists.
* This is a workaround for the missing HW feature.
*/
static void bh1770_prox_work(struct work_struct *work)
{
struct bh1770_chip *chip =
container_of(work, struct bh1770_chip, prox_work.work);
mutex_lock(&chip->mutex);
bh1770_prox_read_result(chip);
mutex_unlock(&chip->mutex);
}
/* This is threaded irq handler */
static irqreturn_t bh1770_irq(int irq, void *data)
{
struct bh1770_chip *chip = data;
int status;
int rate = 0;
mutex_lock(&chip->mutex);
status = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_PS_STATUS);
/* Acknowledge interrupt by reading this register */
i2c_smbus_read_byte_data(chip->client, BH1770_INTERRUPT);
/*
* Check if there is fresh data available for als.
* If this is the very first data, update thresholds after that.
*/
if (status & BH1770_INT_ALS_DATA) {
bh1770_lux_get_result(chip);
if (unlikely(chip->lux_wait_result)) {
chip->lux_wait_result = false;
wake_up(&chip->wait);
bh1770_lux_update_thresholds(chip,
chip->lux_threshold_hi,
chip->lux_threshold_lo);
}
}
/* Disable interrupt logic to guarantee acknowledgement */
i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT,
(0 << 1) | (0 << 0));
if ((status & BH1770_INT_ALS_INT))
sysfs_notify(&chip->client->dev.kobj, NULL, "lux0_input");
if (chip->int_mode_prox && (status & BH1770_INT_LEDS_INT)) {
rate = prox_rates_ms[chip->prox_rate_threshold];
bh1770_prox_read_result(chip);
}
/* Re-enable interrupt logic */
i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT,
(chip->int_mode_lux << 1) |
(chip->int_mode_prox << 0));
mutex_unlock(&chip->mutex);
/*
* Can't cancel work while keeping mutex since the work uses the
* same mutex.
*/
if (rate) {
/*
* Simulate missing no-proximity interrupt 50ms after the
* next expected interrupt time.
*/
cancel_delayed_work_sync(&chip->prox_work);
schedule_delayed_work(&chip->prox_work,
msecs_to_jiffies(rate + 50));
}
return IRQ_HANDLED;
}
static ssize_t bh1770_power_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
ssize_t ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&chip->mutex);
if (value) {
pm_runtime_get_sync(dev);
ret = bh1770_lux_rate(chip, chip->lux_rate_index);
if (ret < 0) {
pm_runtime_put(dev);
goto leave;
}
ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
if (ret < 0) {
pm_runtime_put(dev);
goto leave;
}
/* This causes interrupt after the next measurement cycle */
bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES,
BH1770_LUX_DEF_THRES);
/* Inform that we are waiting for a result from ALS */
chip->lux_wait_result = true;
bh1770_prox_mode_control(chip);
} else if (!pm_runtime_suspended(dev)) {
pm_runtime_put(dev);
}
ret = count;
leave:
mutex_unlock(&chip->mutex);
return ret;
}
static ssize_t bh1770_power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
}
static ssize_t bh1770_lux_result_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
ssize_t ret;
long timeout;
if (pm_runtime_suspended(dev))
return -EIO; /* Chip is not enabled at all */
timeout = wait_event_interruptible_timeout(chip->wait,
!chip->lux_wait_result,
msecs_to_jiffies(BH1770_TIMEOUT));
if (!timeout)
return -EIO;
mutex_lock(&chip->mutex);
ret = sprintf(buf, "%d\n", bh1770_lux_read_result(chip));
mutex_unlock(&chip->mutex);
return ret;
}
static ssize_t bh1770_lux_range_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", BH1770_LUX_RANGE);
}
static ssize_t bh1770_prox_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&chip->mutex);
/* Assume no proximity. Sensor will tell real state soon */
if (!chip->prox_enable_count)
chip->prox_data = 0;
if (value)
chip->prox_enable_count++;
else if (chip->prox_enable_count > 0)
chip->prox_enable_count--;
else
goto leave;
/* Run control only when chip is powered on */
if (!pm_runtime_suspended(dev))
bh1770_prox_mode_control(chip);
leave:
mutex_unlock(&chip->mutex);
return count;
}
static ssize_t bh1770_prox_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
ssize_t len;
mutex_lock(&chip->mutex);
len = sprintf(buf, "%d\n", chip->prox_enable_count);
mutex_unlock(&chip->mutex);
return len;
}
static ssize_t bh1770_prox_result_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
ssize_t ret;
mutex_lock(&chip->mutex);
if (chip->prox_enable_count && !pm_runtime_suspended(dev))
ret = sprintf(buf, "%d\n", chip->prox_data);
else
ret = -EIO;
mutex_unlock(&chip->mutex);
return ret;
}
static ssize_t bh1770_prox_range_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", BH1770_PROX_RANGE);
}
static ssize_t bh1770_get_prox_rate_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
int i;
int pos = 0;
for (i = 0; i < ARRAY_SIZE(prox_rates_hz); i++)
pos += sprintf(buf + pos, "%d ", prox_rates_hz[i]);
sprintf(buf + pos - 1, "\n");
return pos;
}
static ssize_t bh1770_get_prox_rate_above(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate_threshold]);
}
static ssize_t bh1770_get_prox_rate_below(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate]);
}
static int bh1770_prox_rate_validate(int rate)
{
int i;
for (i = 0; i < ARRAY_SIZE(prox_rates_hz) - 1; i++)
if (rate >= prox_rates_hz[i])
break;
return i;
}
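/*
* Example (illustrative): a requested rate of 30 Hz matches the first
* table entry it is greater than or equal to, prox_rates_hz[3] == 25,
* so index 3 is returned; anything below 2 Hz falls through to the
* last entry (index 7, 2 Hz).
*/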
static ssize_t bh1770_set_prox_rate_above(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&chip->mutex);
chip->prox_rate_threshold = bh1770_prox_rate_validate(value);
mutex_unlock(&chip->mutex);
return count;
}
static ssize_t bh1770_set_prox_rate_below(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&chip->mutex);
chip->prox_rate = bh1770_prox_rate_validate(value);
mutex_unlock(&chip->mutex);
return count;
}
static ssize_t bh1770_get_prox_thres(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", chip->prox_threshold);
}
static ssize_t bh1770_set_prox_thres(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
if (value > BH1770_PROX_RANGE)
return -EINVAL;
mutex_lock(&chip->mutex);
chip->prox_threshold = value;
ret = bh1770_prox_set_threshold(chip);
mutex_unlock(&chip->mutex);
if (ret < 0)
return ret;
return count;
}
static ssize_t bh1770_prox_persistence_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", chip->prox_persistence);
}
static ssize_t bh1770_prox_persistence_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
if (value > BH1770_PROX_MAX_PERSISTENCE)
return -EINVAL;
chip->prox_persistence = value;
return len;
}
static ssize_t bh1770_prox_abs_thres_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", chip->prox_abs_thres);
}
static ssize_t bh1770_prox_abs_thres_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
if (value > BH1770_PROX_RANGE)
return -EINVAL;
chip->prox_abs_thres = value;
return len;
}
static ssize_t bh1770_chip_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%s rev %d\n", chip->chipname, chip->revision);
}
static ssize_t bh1770_lux_calib_default_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", BH1770_CALIB_SCALER);
}
static ssize_t bh1770_lux_calib_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
ssize_t len;
mutex_lock(&chip->mutex);
len = sprintf(buf, "%u\n", chip->lux_calib);
mutex_unlock(&chip->mutex);
return len;
}
static ssize_t bh1770_lux_calib_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
u32 old_calib;
u32 new_corr;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&chip->mutex);
old_calib = chip->lux_calib;
chip->lux_calib = value;
new_corr = bh1770_get_corr_value(chip);
if (new_corr == 0) {
chip->lux_calib = old_calib;
mutex_unlock(&chip->mutex);
return -EINVAL;
}
chip->lux_corr = new_corr;
/* Refresh thresholds on HW after changing correction value */
bh1770_lux_update_thresholds(chip, chip->lux_threshold_hi,
chip->lux_threshold_lo);
mutex_unlock(&chip->mutex);
return len;
}
static ssize_t bh1770_get_lux_rate_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
int i;
int pos = 0;
for (i = 0; i < ARRAY_SIZE(lux_rates_hz); i++)
pos += sprintf(buf + pos, "%d ", lux_rates_hz[i]);
sprintf(buf + pos - 1, "\n");
return pos;
}
static ssize_t bh1770_get_lux_rate(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", lux_rates_hz[chip->lux_rate_index]);
}
static ssize_t bh1770_set_lux_rate(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long rate_hz;
int ret, i;
ret = kstrtoul(buf, 0, &rate_hz);
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(lux_rates_hz) - 1; i++)
if (rate_hz >= lux_rates_hz[i])
break;
mutex_lock(&chip->mutex);
chip->lux_rate_index = i;
ret = bh1770_lux_rate(chip, i);
mutex_unlock(&chip->mutex);
if (ret < 0)
return ret;
return count;
}
static ssize_t bh1770_get_lux_thresh_above(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", chip->lux_threshold_hi);
}
static ssize_t bh1770_get_lux_thresh_below(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", chip->lux_threshold_lo);
}
static ssize_t bh1770_set_lux_thresh(struct bh1770_chip *chip, u16 *target,
const char *buf)
{
unsigned long thresh;
int ret;
ret = kstrtoul(buf, 0, &thresh);
if (ret)
return ret;
if (thresh > BH1770_LUX_RANGE)
return -EINVAL;
mutex_lock(&chip->mutex);
*target = thresh;
/*
* Don't update values in HW if we are still waiting for
* first interrupt to come after device handle open call.
*/
if (!chip->lux_wait_result)
ret = bh1770_lux_update_thresholds(chip,
chip->lux_threshold_hi,
chip->lux_threshold_lo);
mutex_unlock(&chip->mutex);
return ret;
}
static ssize_t bh1770_set_lux_thresh_above(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_hi, buf);
if (ret < 0)
return ret;
return len;
}
static ssize_t bh1770_set_lux_thresh_below(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_lo, buf);
if (ret < 0)
return ret;
return len;
}
static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, bh1770_prox_enable_show,
bh1770_prox_enable_store);
static DEVICE_ATTR(prox0_thresh_above1_value, S_IRUGO | S_IWUSR,
bh1770_prox_abs_thres_show,
bh1770_prox_abs_thres_store);
static DEVICE_ATTR(prox0_thresh_above0_value, S_IRUGO | S_IWUSR,
bh1770_get_prox_thres,
bh1770_set_prox_thres);
static DEVICE_ATTR(prox0_raw, S_IRUGO, bh1770_prox_result_show, NULL);
static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, bh1770_prox_range_show, NULL);
static DEVICE_ATTR(prox0_thresh_above_count, S_IRUGO | S_IWUSR,
bh1770_prox_persistence_show,
bh1770_prox_persistence_store);
static DEVICE_ATTR(prox0_rate_above, S_IRUGO | S_IWUSR,
bh1770_get_prox_rate_above,
bh1770_set_prox_rate_above);
static DEVICE_ATTR(prox0_rate_below, S_IRUGO | S_IWUSR,
bh1770_get_prox_rate_below,
bh1770_set_prox_rate_below);
static DEVICE_ATTR(prox0_rate_avail, S_IRUGO, bh1770_get_prox_rate_avail, NULL);
static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, bh1770_lux_calib_show,
bh1770_lux_calib_store);
static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO,
bh1770_lux_calib_default_show,
NULL);
static DEVICE_ATTR(lux0_input, S_IRUGO, bh1770_lux_result_show, NULL);
static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, bh1770_lux_range_show, NULL);
static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, bh1770_get_lux_rate,
bh1770_set_lux_rate);
static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, bh1770_get_lux_rate_avail, NULL);
static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
bh1770_get_lux_thresh_above,
bh1770_set_lux_thresh_above);
static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
bh1770_get_lux_thresh_below,
bh1770_set_lux_thresh_below);
static DEVICE_ATTR(chip_id, S_IRUGO, bh1770_chip_id_show, NULL);
static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, bh1770_power_state_show,
bh1770_power_state_store);
static struct attribute *sysfs_attrs[] = {
&dev_attr_lux0_calibscale.attr,
&dev_attr_lux0_calibscale_default.attr,
&dev_attr_lux0_input.attr,
&dev_attr_lux0_sensor_range.attr,
&dev_attr_lux0_rate.attr,
&dev_attr_lux0_rate_avail.attr,
&dev_attr_lux0_thresh_above_value.attr,
&dev_attr_lux0_thresh_below_value.attr,
&dev_attr_prox0_raw.attr,
&dev_attr_prox0_sensor_range.attr,
&dev_attr_prox0_raw_en.attr,
&dev_attr_prox0_thresh_above_count.attr,
&dev_attr_prox0_rate_above.attr,
&dev_attr_prox0_rate_below.attr,
&dev_attr_prox0_rate_avail.attr,
&dev_attr_prox0_thresh_above0_value.attr,
&dev_attr_prox0_thresh_above1_value.attr,
&dev_attr_chip_id.attr,
&dev_attr_power_state.attr,
NULL
};
static struct attribute_group bh1770_attribute_group = {
.attrs = sysfs_attrs
};
static int bh1770_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct bh1770_chip *chip;
int err;
chip = kzalloc(sizeof *chip, GFP_KERNEL);
if (!chip)
return -ENOMEM;
i2c_set_clientdata(client, chip);
chip->client = client;
mutex_init(&chip->mutex);
init_waitqueue_head(&chip->wait);
INIT_DELAYED_WORK(&chip->prox_work, bh1770_prox_work);
if (client->dev.platform_data == NULL) {
dev_err(&client->dev, "platform data is mandatory\n");
err = -EINVAL;
goto fail1;
}
chip->pdata = client->dev.platform_data;
chip->lux_calib = BH1770_LUX_NEUTRAL_CALIB_VALUE;
chip->lux_rate_index = BH1770_LUX_DEFAULT_RATE;
chip->lux_threshold_lo = BH1770_LUX_DEF_THRES;
chip->lux_threshold_hi = BH1770_LUX_DEF_THRES;
if (chip->pdata->glass_attenuation == 0)
chip->lux_ga = BH1770_NEUTRAL_GA;
else
chip->lux_ga = chip->pdata->glass_attenuation;
chip->prox_threshold = BH1770_PROX_DEF_THRES;
chip->prox_led = chip->pdata->led_def_curr;
chip->prox_abs_thres = BH1770_PROX_DEF_ABS_THRES;
chip->prox_persistence = BH1770_DEFAULT_PERSISTENCE;
chip->prox_rate_threshold = BH1770_PROX_DEF_RATE_THRESH;
chip->prox_rate = BH1770_PROX_DEFAULT_RATE;
chip->prox_data = 0;
chip->regs[0].supply = reg_vcc;
chip->regs[1].supply = reg_vleds;
err = regulator_bulk_get(&client->dev,
ARRAY_SIZE(chip->regs), chip->regs);
if (err < 0) {
dev_err(&client->dev, "Cannot get regulators\n");
goto fail1;
}
err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
chip->regs);
if (err < 0) {
dev_err(&client->dev, "Cannot enable regulators\n");
goto fail2;
}
usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
err = bh1770_detect(chip);
if (err < 0)
goto fail3;
/* Start chip */
bh1770_chip_on(chip);
pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
chip->lux_corr = bh1770_get_corr_value(chip);
if (chip->lux_corr == 0) {
dev_err(&client->dev, "Improper correction values\n");
err = -EINVAL;
goto fail3;
}
if (chip->pdata->setup_resources) {
err = chip->pdata->setup_resources();
if (err) {
err = -EINVAL;
goto fail3;
}
}
err = sysfs_create_group(&chip->client->dev.kobj,
&bh1770_attribute_group);
if (err < 0) {
dev_err(&chip->client->dev, "Sysfs registration failed\n");
goto fail4;
}
/*
* The chip needs a level-triggered interrupt to work. However,
* level triggering doesn't always work correctly with power
* management. Select both edge and level triggering.
*/
err = request_threaded_irq(client->irq, NULL,
bh1770_irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
IRQF_TRIGGER_LOW,
"bh1770", chip);
if (err) {
dev_err(&client->dev, "could not get IRQ %d\n",
client->irq);
goto fail5;
}
regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
return err;
fail5:
sysfs_remove_group(&chip->client->dev.kobj,
&bh1770_attribute_group);
fail4:
if (chip->pdata->release_resources)
chip->pdata->release_resources();
fail3:
regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
fail2:
regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
fail1:
kfree(chip);
return err;
}
static int bh1770_remove(struct i2c_client *client)
{
struct bh1770_chip *chip = i2c_get_clientdata(client);
free_irq(client->irq, chip);
sysfs_remove_group(&chip->client->dev.kobj,
&bh1770_attribute_group);
if (chip->pdata->release_resources)
chip->pdata->release_resources();
cancel_delayed_work_sync(&chip->prox_work);
if (!pm_runtime_suspended(&client->dev))
bh1770_chip_off(chip);
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
kfree(chip);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int bh1770_suspend(struct device *dev)
{
struct i2c_client *client = container_of(dev, struct i2c_client, dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
bh1770_chip_off(chip);
return 0;
}
static int bh1770_resume(struct device *dev)
{
struct i2c_client *client = container_of(dev, struct i2c_client, dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
int ret = 0;
bh1770_chip_on(chip);
if (!pm_runtime_suspended(dev)) {
/*
* If we were enabled at suspend time, it is expected that
* everything works nicely and smoothly.
*/
ret = bh1770_lux_rate(chip, chip->lux_rate_index);
ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
/* This causes interrupt after the next measurement cycle */
bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES,
BH1770_LUX_DEF_THRES);
/* Inform that we are waiting for a result from ALS */
chip->lux_wait_result = true;
bh1770_prox_mode_control(chip);
}
return ret;
}
#endif
#ifdef CONFIG_PM_RUNTIME
static int bh1770_runtime_suspend(struct device *dev)
{
struct i2c_client *client = container_of(dev, struct i2c_client, dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
bh1770_chip_off(chip);
return 0;
}
static int bh1770_runtime_resume(struct device *dev)
{
struct i2c_client *client = container_of(dev, struct i2c_client, dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
bh1770_chip_on(chip);
return 0;
}
#endif
static const struct i2c_device_id bh1770_id[] = {
{"bh1770glc", 0 },
{"sfh7770", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, bh1770_id);
static const struct dev_pm_ops bh1770_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(bh1770_suspend, bh1770_resume)
SET_RUNTIME_PM_OPS(bh1770_runtime_suspend, bh1770_runtime_resume, NULL)
};
static struct i2c_driver bh1770_driver = {
.driver = {
.name = "bh1770glc",
.owner = THIS_MODULE,
.pm = &bh1770_pm_ops,
},
.probe = bh1770_probe,
.remove = bh1770_remove,
.id_table = bh1770_id,
};
module_i2c_driver(bh1770_driver);
MODULE_DESCRIPTION("BH1770GLC / SFH7770 combined ALS and proximity sensor");
MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
jumpstarter-io/linux | drivers/media/platform/exynos4-is/fimc-is-errno.c | 1860 | 10004 | /*
* Samsung Exynos4 SoC series FIMC-IS slave interface driver
*
* Error log interface functions
*
* Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
*
* Authors: Younghwan Joo <yhwan.joo@samsung.com>
* Sylwester Nawrocki <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "fimc-is-errno.h"
const char *fimc_is_param_strerr(unsigned int error)
{
switch (error) {
case ERROR_COMMON_CMD:
return "ERROR_COMMON_CMD: Invalid Command";
case ERROR_COMMON_PARAMETER:
return "ERROR_COMMON_PARAMETER: Invalid Parameter";
case ERROR_COMMON_SETFILE_LOAD:
return "ERROR_COMMON_SETFILE_LOAD: Illegal Setfile Loading";
case ERROR_COMMON_SETFILE_ADJUST:
return "ERROR_COMMON_SETFILE_ADJUST: Setfile isn't adjusted";
case ERROR_COMMON_SETFILE_INDEX:
return "ERROR_COMMON_SETFILE_INDEX: Invalid setfile index";
case ERROR_COMMON_INPUT_PATH:
return "ERROR_COMMON_INPUT_PATH: Input path can be changed in ready state";
case ERROR_COMMON_INPUT_INIT:
return "ERROR_COMMON_INPUT_INIT: IP can not start if input path is not set";
case ERROR_COMMON_OUTPUT_PATH:
return "ERROR_COMMON_OUTPUT_PATH: Output path can be changed in ready state (stop)";
case ERROR_COMMON_OUTPUT_INIT:
return "ERROR_COMMON_OUTPUT_INIT: IP can not start if output path is not set";
case ERROR_CONTROL_BYPASS:
return "ERROR_CONTROL_BYPASS";
case ERROR_OTF_INPUT_FORMAT:
return "ERROR_OTF_INPUT_FORMAT: Invalid format (DRC: YUV444, FD: YUV444, 422, 420)";
case ERROR_OTF_INPUT_WIDTH:
return "ERROR_OTF_INPUT_WIDTH: Invalid width (DRC: 128~8192, FD: 32~8190)";
case ERROR_OTF_INPUT_HEIGHT:
return "ERROR_OTF_INPUT_HEIGHT: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
case ERROR_OTF_INPUT_BIT_WIDTH:
return "ERROR_OTF_INPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
case ERROR_DMA_INPUT_WIDTH:
return "ERROR_DMA_INPUT_WIDTH: Invalid width (DRC: 128~8192, FD: 32~8190)";
case ERROR_DMA_INPUT_HEIGHT:
return "ERROR_DMA_INPUT_HEIGHT: Invalid height (DRC: 64~8192, FD: 16~8190)";
case ERROR_DMA_INPUT_FORMAT:
return "ERROR_DMA_INPUT_FORMAT: Invalid format (DRC: YUV444 or YUV422, FD: YUV444,422,420)";
case ERROR_DMA_INPUT_BIT_WIDTH:
return "ERROR_DMA_INPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
case ERROR_DMA_INPUT_ORDER:
return "ERROR_DMA_INPUT_ORDER: Invalid order(DRC: YYCbCr,YCbYCr,FD:NO,YYCbCr,YCbYCr,CbCr,CrCb)";
case ERROR_DMA_INPUT_PLANE:
return "ERROR_DMA_INPUT_PLANE: Invalid palne (DRC: 3, FD: 1, 2, 3)";
case ERROR_OTF_OUTPUT_WIDTH:
return "ERROR_OTF_OUTPUT_WIDTH: Invalid width (DRC: 128~8192)";
case ERROR_OTF_OUTPUT_HEIGHT:
return "ERROR_OTF_OUTPUT_HEIGHT: Invalid height (DRC: 64~8192)";
case ERROR_OTF_OUTPUT_FORMAT:
return "ERROR_OTF_OUTPUT_FORMAT: Invalid format (DRC: YUV444)";
case ERROR_OTF_OUTPUT_BIT_WIDTH:
return "ERROR_OTF_OUTPUT_BIT_WIDTH: Invalid bit-width (DRC: 8~12bits, FD: 8bit)";
case ERROR_DMA_OUTPUT_WIDTH:
return "ERROR_DMA_OUTPUT_WIDTH";
case ERROR_DMA_OUTPUT_HEIGHT:
return "ERROR_DMA_OUTPUT_HEIGHT";
case ERROR_DMA_OUTPUT_FORMAT:
return "ERROR_DMA_OUTPUT_FORMAT";
case ERROR_DMA_OUTPUT_BIT_WIDTH:
return "ERROR_DMA_OUTPUT_BIT_WIDTH";
case ERROR_DMA_OUTPUT_PLANE:
return "ERROR_DMA_OUTPUT_PLANE";
case ERROR_DMA_OUTPUT_ORDER:
return "ERROR_DMA_OUTPUT_ORDER";
/* Sensor Error(100~199) */
case ERROR_SENSOR_I2C_FAIL:
return "ERROR_SENSOR_I2C_FAIL";
case ERROR_SENSOR_INVALID_FRAMERATE:
return "ERROR_SENSOR_INVALID_FRAMERATE";
case ERROR_SENSOR_INVALID_EXPOSURETIME:
return "ERROR_SENSOR_INVALID_EXPOSURETIME";
case ERROR_SENSOR_INVALID_SIZE:
return "ERROR_SENSOR_INVALID_SIZE";
case ERROR_SENSOR_INVALID_SETTING:
return "ERROR_SENSOR_INVALID_SETTING";
case ERROR_SENSOR_ACTURATOR_INIT_FAIL:
return "ERROR_SENSOR_ACTURATOR_INIT_FAIL";
case ERROR_SENSOR_INVALID_AF_POS:
return "ERROR_SENSOR_INVALID_AF_POS";
case ERROR_SENSOR_UNSUPPORT_FUNC:
return "ERROR_SENSOR_UNSUPPORT_FUNC";
case ERROR_SENSOR_UNSUPPORT_PERI:
return "ERROR_SENSOR_UNSUPPORT_PERI";
case ERROR_SENSOR_UNSUPPORT_AF:
return "ERROR_SENSOR_UNSUPPORT_AF";
/* ISP Error (200~299) */
case ERROR_ISP_AF_BUSY:
return "ERROR_ISP_AF_BUSY";
case ERROR_ISP_AF_INVALID_COMMAND:
return "ERROR_ISP_AF_INVALID_COMMAND";
case ERROR_ISP_AF_INVALID_MODE:
return "ERROR_ISP_AF_INVALID_MODE";
/* DRC Error (300~399) */
/* FD Error (400~499) */
case ERROR_FD_CONFIG_MAX_NUMBER_STATE:
return "ERROR_FD_CONFIG_MAX_NUMBER_STATE";
case ERROR_FD_CONFIG_MAX_NUMBER_INVALID:
return "ERROR_FD_CONFIG_MAX_NUMBER_INVALID";
case ERROR_FD_CONFIG_YAW_ANGLE_STATE:
return "ERROR_FD_CONFIG_YAW_ANGLE_STATE";
case ERROR_FD_CONFIG_YAW_ANGLE_INVALID:
return "ERROR_FD_CONFIG_YAW_ANGLE_INVALID\n";
case ERROR_FD_CONFIG_ROLL_ANGLE_STATE:
return "ERROR_FD_CONFIG_ROLL_ANGLE_STATE";
case ERROR_FD_CONFIG_ROLL_ANGLE_INVALID:
return "ERROR_FD_CONFIG_ROLL_ANGLE_INVALID";
case ERROR_FD_CONFIG_SMILE_MODE_INVALID:
return "ERROR_FD_CONFIG_SMILE_MODE_INVALID";
case ERROR_FD_CONFIG_BLINK_MODE_INVALID:
return "ERROR_FD_CONFIG_BLINK_MODE_INVALID";
case ERROR_FD_CONFIG_EYES_DETECT_INVALID:
return "ERROR_FD_CONFIG_EYES_DETECT_INVALID";
case ERROR_FD_CONFIG_MOUTH_DETECT_INVALID:
return "ERROR_FD_CONFIG_MOUTH_DETECT_INVALID";
case ERROR_FD_CONFIG_ORIENTATION_STATE:
return "ERROR_FD_CONFIG_ORIENTATION_STATE";
case ERROR_FD_CONFIG_ORIENTATION_INVALID:
return "ERROR_FD_CONFIG_ORIENTATION_INVALID";
case ERROR_FD_CONFIG_ORIENTATION_VALUE_INVALID:
return "ERROR_FD_CONFIG_ORIENTATION_VALUE_INVALID";
case ERROR_FD_RESULT:
return "ERROR_FD_RESULT";
case ERROR_FD_MODE:
return "ERROR_FD_MODE";
default:
return "Unknown";
}
}
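/*
* Usage sketch (illustrative, hypothetical call site): a caller in the
* FIMC-IS interface driver would typically decode an error code for
* logging like
*
* dev_err(dev, "FIMC-IS param error: %s\n", fimc_is_param_strerr(error));
*/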
const char *fimc_is_strerr(unsigned int error)
{
error &= ~IS_ERROR_TIME_OUT_FLAG;
switch (error) {
/* General */
case IS_ERROR_INVALID_COMMAND:
return "IS_ERROR_INVALID_COMMAND";
case IS_ERROR_REQUEST_FAIL:
return "IS_ERROR_REQUEST_FAIL";
case IS_ERROR_INVALID_SCENARIO:
return "IS_ERROR_INVALID_SCENARIO";
case IS_ERROR_INVALID_SENSORID:
return "IS_ERROR_INVALID_SENSORID";
case IS_ERROR_INVALID_MODE_CHANGE:
return "IS_ERROR_INVALID_MODE_CHANGE";
case IS_ERROR_INVALID_MAGIC_NUMBER:
return "IS_ERROR_INVALID_MAGIC_NUMBER";
case IS_ERROR_INVALID_SETFILE_HDR:
return "IS_ERROR_INVALID_SETFILE_HDR";
case IS_ERROR_BUSY:
return "IS_ERROR_BUSY";
case IS_ERROR_SET_PARAMETER:
return "IS_ERROR_SET_PARAMETER";
case IS_ERROR_INVALID_PATH:
return "IS_ERROR_INVALID_PATH";
case IS_ERROR_OPEN_SENSOR_FAIL:
return "IS_ERROR_OPEN_SENSOR_FAIL";
case IS_ERROR_ENTRY_MSG_THREAD_DOWN:
return "IS_ERROR_ENTRY_MSG_THREAD_DOWN";
case IS_ERROR_ISP_FRAME_END_NOT_DONE:
return "IS_ERROR_ISP_FRAME_END_NOT_DONE";
case IS_ERROR_DRC_FRAME_END_NOT_DONE:
return "IS_ERROR_DRC_FRAME_END_NOT_DONE";
case IS_ERROR_SCALERC_FRAME_END_NOT_DONE:
return "IS_ERROR_SCALERC_FRAME_END_NOT_DONE";
case IS_ERROR_ODC_FRAME_END_NOT_DONE:
return "IS_ERROR_ODC_FRAME_END_NOT_DONE";
case IS_ERROR_DIS_FRAME_END_NOT_DONE:
return "IS_ERROR_DIS_FRAME_END_NOT_DONE";
case IS_ERROR_TDNR_FRAME_END_NOT_DONE:
return "IS_ERROR_TDNR_FRAME_END_NOT_DONE";
case IS_ERROR_SCALERP_FRAME_END_NOT_DONE:
return "IS_ERROR_SCALERP_FRAME_END_NOT_DONE";
case IS_ERROR_WAIT_STREAM_OFF_NOT_DONE:
return "IS_ERROR_WAIT_STREAM_OFF_NOT_DONE";
case IS_ERROR_NO_MSG_IS_RECEIVED:
return "IS_ERROR_NO_MSG_IS_RECEIVED";
case IS_ERROR_SENSOR_MSG_FAIL:
return "IS_ERROR_SENSOR_MSG_FAIL";
case IS_ERROR_ISP_MSG_FAIL:
return "IS_ERROR_ISP_MSG_FAIL";
case IS_ERROR_DRC_MSG_FAIL:
return "IS_ERROR_DRC_MSG_FAIL";
case IS_ERROR_LHFD_MSG_FAIL:
return "IS_ERROR_LHFD_MSG_FAIL";
case IS_ERROR_UNKNOWN:
return "IS_ERROR_UNKNOWN";
/* Sensor */
case IS_ERROR_SENSOR_PWRDN_FAIL:
return "IS_ERROR_SENSOR_PWRDN_FAIL";
/* ISP */
case IS_ERROR_ISP_PWRDN_FAIL:
return "IS_ERROR_ISP_PWRDN_FAIL";
case IS_ERROR_ISP_MULTIPLE_INPUT:
return "IS_ERROR_ISP_MULTIPLE_INPUT";
case IS_ERROR_ISP_ABSENT_INPUT:
return "IS_ERROR_ISP_ABSENT_INPUT";
case IS_ERROR_ISP_ABSENT_OUTPUT:
return "IS_ERROR_ISP_ABSENT_OUTPUT";
case IS_ERROR_ISP_NONADJACENT_OUTPUT:
return "IS_ERROR_ISP_NONADJACENT_OUTPUT";
case IS_ERROR_ISP_FORMAT_MISMATCH:
return "IS_ERROR_ISP_FORMAT_MISMATCH";
case IS_ERROR_ISP_WIDTH_MISMATCH:
return "IS_ERROR_ISP_WIDTH_MISMATCH";
case IS_ERROR_ISP_HEIGHT_MISMATCH:
return "IS_ERROR_ISP_HEIGHT_MISMATCH";
case IS_ERROR_ISP_BITWIDTH_MISMATCH:
return "IS_ERROR_ISP_BITWIDTH_MISMATCH";
case IS_ERROR_ISP_FRAME_END_TIME_OUT:
return "IS_ERROR_ISP_FRAME_END_TIME_OUT";
/* DRC */
case IS_ERROR_DRC_PWRDN_FAIL:
return "IS_ERROR_DRC_PWRDN_FAIL";
case IS_ERROR_DRC_MULTIPLE_INPUT:
return "IS_ERROR_DRC_MULTIPLE_INPUT";
case IS_ERROR_DRC_ABSENT_INPUT:
return "IS_ERROR_DRC_ABSENT_INPUT";
case IS_ERROR_DRC_NONADJACENT_INPUT:
return "IS_ERROR_DRC_NONADJACENT_INPUT";
case IS_ERROR_DRC_ABSENT_OUTPUT:
return "IS_ERROR_DRC_ABSENT_OUTPUT";
case IS_ERROR_DRC_NONADJACENT_OUTPUT:
return "IS_ERROR_DRC_NONADJACENT_OUTPUT";
case IS_ERROR_DRC_FORMAT_MISMATCH:
return "IS_ERROR_DRC_FORMAT_MISMATCH";
case IS_ERROR_DRC_WIDTH_MISMATCH:
return "IS_ERROR_DRC_WIDTH_MISMATCH";
case IS_ERROR_DRC_HEIGHT_MISMATCH:
return "IS_ERROR_DRC_HEIGHT_MISMATCH";
case IS_ERROR_DRC_BITWIDTH_MISMATCH:
return "IS_ERROR_DRC_BITWIDTH_MISMATCH";
case IS_ERROR_DRC_FRAME_END_TIME_OUT:
return "IS_ERROR_DRC_FRAME_END_TIME_OUT";
/* FD */
case IS_ERROR_FD_PWRDN_FAIL:
return "IS_ERROR_FD_PWRDN_FAIL";
case IS_ERROR_FD_MULTIPLE_INPUT:
return "IS_ERROR_FD_MULTIPLE_INPUT";
case IS_ERROR_FD_ABSENT_INPUT:
return "IS_ERROR_FD_ABSENT_INPUT";
case IS_ERROR_FD_NONADJACENT_INPUT:
return "IS_ERROR_FD_NONADJACENT_INPUT";
case IS_ERROR_LHFD_FRAME_END_TIME_OUT:
return "IS_ERROR_LHFD_FRAME_END_TIME_OUT";
default:
return "Unknown";
}
}
| gpl-2.0 |
YUPlayGodDev/platform_kernel_cyanogen_msm8916 | drivers/acpi/acpi_lpss.c | 1860 | 7918 | /*
* ACPI support for Intel Lynxpoint LPSS.
*
* Copyright (C) 2013, Intel Corporation
* Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
* Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/platform_data/clk-lpss.h>
#include <linux/pm_runtime.h>
#include "internal.h"
ACPI_MODULE_NAME("acpi_lpss");
#define LPSS_CLK_SIZE 0x04
#define LPSS_LTR_SIZE 0x18
/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_GENERAL 0x08
#define LPSS_GENERAL_LTR_MODE_SW BIT(2)
#define LPSS_SW_LTR 0x10
#define LPSS_AUTO_LTR 0x14
struct lpss_device_desc {
bool clk_required;
const char *clkdev_name;
bool ltr_required;
unsigned int prv_offset;
};
static struct lpss_device_desc lpss_dma_desc = {
.clk_required = true,
.clkdev_name = "hclk",
};
struct lpss_private_data {
void __iomem *mmio_base;
resource_size_t mmio_size;
struct clk *clk;
const struct lpss_device_desc *dev_desc;
};
static struct lpss_device_desc lpt_dev_desc = {
.clk_required = true,
.prv_offset = 0x800,
.ltr_required = true,
};
static struct lpss_device_desc lpt_sdio_dev_desc = {
.prv_offset = 0x1000,
.ltr_required = true,
};
static const struct acpi_device_id acpi_lpss_device_ids[] = {
/* Generic LPSS devices */
{ "INTL9C60", (unsigned long)&lpss_dma_desc },
/* Lynxpoint LPSS devices */
{ "INT33C0", (unsigned long)&lpt_dev_desc },
{ "INT33C1", (unsigned long)&lpt_dev_desc },
{ "INT33C2", (unsigned long)&lpt_dev_desc },
{ "INT33C3", (unsigned long)&lpt_dev_desc },
{ "INT33C4", (unsigned long)&lpt_dev_desc },
{ "INT33C5", (unsigned long)&lpt_dev_desc },
{ "INT33C6", (unsigned long)&lpt_sdio_dev_desc },
{ "INT33C7", },
{ }
};
static int is_memory(struct acpi_resource *res, void *not_used)
{
struct resource r;
return !acpi_dev_resource_memory(res, &r);
}
/* LPSS main clock device. */
static struct platform_device *lpss_clk_dev;
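/*
 * Intent inferred from the clk-lpss platform data include above:
 * registering the "clk-lpt" platform device lets the LPT clock driver
 * bind and publish the root clock that the per-device gate clocks
 * created in register_device_clock() are parented to.
 */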
static inline void lpt_register_clock_device(void)
{
lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
}
static int register_device_clock(struct acpi_device *adev,
struct lpss_private_data *pdata)
{
const struct lpss_device_desc *dev_desc = pdata->dev_desc;
struct lpss_clk_data *clk_data;
if (!lpss_clk_dev)
lpt_register_clock_device();
clk_data = platform_get_drvdata(lpss_clk_dev);
if (!clk_data)
return -ENODEV;
if (dev_desc->clkdev_name) {
clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name,
dev_name(&adev->dev));
return 0;
}
if (!pdata->mmio_base
|| pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
return -ENODATA;
pdata->clk = clk_register_gate(NULL, dev_name(&adev->dev),
clk_data->name, 0,
pdata->mmio_base + dev_desc->prv_offset,
0, 0, NULL);
if (IS_ERR(pdata->clk))
return PTR_ERR(pdata->clk);
clk_register_clkdev(pdata->clk, NULL, dev_name(&adev->dev));
return 0;
}
static int acpi_lpss_create_device(struct acpi_device *adev,
const struct acpi_device_id *id)
{
struct lpss_device_desc *dev_desc;
struct lpss_private_data *pdata;
struct resource_list_entry *rentry;
struct list_head resource_list;
int ret;
dev_desc = (struct lpss_device_desc *)id->driver_data;
if (!dev_desc)
return acpi_create_platform_device(adev, id);
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
if (ret < 0)
goto err_out;
list_for_each_entry(rentry, &resource_list, node)
if (resource_type(&rentry->res) == IORESOURCE_MEM) {
pdata->mmio_size = resource_size(&rentry->res);
pdata->mmio_base = ioremap(rentry->res.start,
pdata->mmio_size);
break;
}
acpi_dev_free_resource_list(&resource_list);
pdata->dev_desc = dev_desc;
if (dev_desc->clk_required) {
ret = register_device_clock(adev, pdata);
if (ret) {
/* Skip the device, but continue the namespace scan. */
ret = 0;
goto err_out;
}
}
/*
* This works around a known issue in ACPI tables where LPSS devices
* have _PS0 and _PS3 without _PSC (and no power resources), so
* acpi_bus_init_power() will assume that the BIOS has put them into D0.
*/
ret = acpi_device_fix_up_power(adev);
if (ret) {
/* Skip the device, but continue the namespace scan. */
ret = 0;
goto err_out;
}
adev->driver_data = pdata;
ret = acpi_create_platform_device(adev, id);
if (ret > 0)
return ret;
adev->driver_data = NULL;
err_out:
kfree(pdata);
return ret;
}
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
struct acpi_device *adev;
struct lpss_private_data *pdata;
unsigned long flags;
int ret;
ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
if (WARN_ON(ret))
return ret;
spin_lock_irqsave(&dev->power.lock, flags);
if (pm_runtime_suspended(dev)) {
ret = -EAGAIN;
goto out;
}
pdata = acpi_driver_data(adev);
if (WARN_ON(!pdata || !pdata->mmio_base)) {
ret = -ENODEV;
goto out;
}
*val = readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
return ret;
}
static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
u32 ltr_value = 0;
unsigned int reg;
int ret;
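/*
 * One show routine backs both the auto_ltr and sw_ltr attributes; the
 * attribute name picks which LTR register to read.
 */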
reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
ret = lpss_reg_read(dev, reg, &ltr_value);
if (ret)
return ret;
return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
}
static ssize_t lpss_ltr_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 ltr_mode = 0;
char *outstr;
int ret;
ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
if (ret)
return ret;
outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
return sprintf(buf, "%s\n", outstr);
}
static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);
static struct attribute *lpss_attrs[] = {
&dev_attr_auto_ltr.attr,
&dev_attr_sw_ltr.attr,
&dev_attr_ltr_mode.attr,
NULL,
};
static struct attribute_group lpss_attr_group = {
.attrs = lpss_attrs,
.name = "lpss_ltr",
};
static int acpi_lpss_platform_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct platform_device *pdev = to_platform_device(data);
struct lpss_private_data *pdata;
struct acpi_device *adev;
const struct acpi_device_id *id;
int ret = 0;
id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
if (!id || !id->driver_data)
return 0;
if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
return 0;
pdata = acpi_driver_data(adev);
if (!pdata || !pdata->mmio_base || !pdata->dev_desc->ltr_required)
return 0;
if (pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
return 0;
}
if (action == BUS_NOTIFY_ADD_DEVICE)
ret = sysfs_create_group(&pdev->dev.kobj, &lpss_attr_group);
else if (action == BUS_NOTIFY_DEL_DEVICE)
sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
return ret;
}
static struct notifier_block acpi_lpss_nb = {
.notifier_call = acpi_lpss_platform_notify,
};
static struct acpi_scan_handler lpss_handler = {
.ids = acpi_lpss_device_ids,
.attach = acpi_lpss_create_device,
};
void __init acpi_lpss_init(void)
{
if (!lpt_clk_init()) {
bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
acpi_scan_add_handler(&lpss_handler);
}
}
| gpl-2.0 |
ktd2004/linux-stable | drivers/scsi/gvp11.c | 2116 | 10753 | #include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zorro.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include "scsi.h"
#include "wd33c93.h"
#include "gvp11.h"
#define CHECK_WD33C93
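/*
 * When CHECK_WD33C93 is defined, gvp11_probe() first verifies that a real
 * WD33C93 chip responds at the expected registers (see check_wd33c93()
 * below) before trusting the board's ID.
 */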
struct gvp11_hostdata {
struct WD33C93_hostdata wh;
struct gvp11_scsiregs *regs;
};
static irqreturn_t gvp11_intr(int irq, void *data)
{
struct Scsi_Host *instance = data;
struct gvp11_hostdata *hdata = shost_priv(instance);
unsigned int status = hdata->regs->CNTR;
unsigned long flags;
if (!(status & GVP11_DMAC_INT_PENDING))
return IRQ_NONE;
spin_lock_irqsave(instance->host_lock, flags);
wd33c93_intr(instance);
spin_unlock_irqrestore(instance->host_lock, flags);
return IRQ_HANDLED;
}
static int gvp11_xfer_mask = 0;
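/*
 * Presumably hooked up to a "gvp11=" kernel command line option elsewhere
 * in the build; ints[1] carries a user-supplied DMA transfer mask that
 * overrides the per-board default chosen in gvp11_probe().
 */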
void gvp11_setup(char *str, int *ints)
{
gvp11_xfer_mask = ints[1];
}
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
struct Scsi_Host *instance = cmd->device->host;
struct gvp11_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct gvp11_scsiregs *regs = hdata->regs;
unsigned short cntr = GVP11_DMAC_INT_ENABLE;
unsigned long addr = virt_to_bus(cmd->SCp.ptr);
int bank_mask;
static int scsi_alloc_out_of_range = 0;
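/*
 * scsi_alloc_out_of_range sticks once kmalloc() has returned memory the
 * DMA engine cannot address, so later bounce buffers are taken straight
 * from Chip RAM rather than retrying kmalloc() first.
 */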
/* use bounce buffer if the physical address is bad */
if (addr & wh->dma_xfer_mask) {
wh->dma_bounce_len = (cmd->SCp.this_residual + 511) & ~0x1ff;
if (!scsi_alloc_out_of_range) {
wh->dma_bounce_buffer =
kmalloc(wh->dma_bounce_len, GFP_KERNEL);
wh->dma_buffer_pool = BUF_SCSI_ALLOCED;
}
if (scsi_alloc_out_of_range ||
!wh->dma_bounce_buffer) {
wh->dma_bounce_buffer =
amiga_chip_alloc(wh->dma_bounce_len,
"GVP II SCSI Bounce Buffer");
if (!wh->dma_bounce_buffer) {
wh->dma_bounce_len = 0;
return 1;
}
wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
}
/* check if the address of the bounce buffer is OK */
addr = virt_to_bus(wh->dma_bounce_buffer);
if (addr & wh->dma_xfer_mask) {
/* fall back to Chip RAM if address out of range */
if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED) {
kfree(wh->dma_bounce_buffer);
scsi_alloc_out_of_range = 1;
} else {
amiga_chip_free(wh->dma_bounce_buffer);
}
wh->dma_bounce_buffer =
amiga_chip_alloc(wh->dma_bounce_len,
"GVP II SCSI Bounce Buffer");
if (!wh->dma_bounce_buffer) {
wh->dma_bounce_len = 0;
return 1;
}
addr = virt_to_bus(wh->dma_bounce_buffer);
wh->dma_buffer_pool = BUF_CHIP_ALLOCED;
}
if (!dir_in) {
/* copy to bounce buffer for a write */
memcpy(wh->dma_bounce_buffer, cmd->SCp.ptr,
cmd->SCp.this_residual);
}
}
/* setup dma direction */
if (!dir_in)
cntr |= GVP11_DMAC_DIR_WRITE;
wh->dma_dir = dir_in;
regs->CNTR = cntr;
/* setup DMA *physical* address */
regs->ACR = addr;
if (dir_in) {
/* invalidate any cache */
cache_clear(addr, cmd->SCp.this_residual);
} else {
/* push any dirty cache */
cache_push(addr, cmd->SCp.this_residual);
}
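/*
 * Presumably boards that decode fewer address lines latch the upper DMA
 * address bits in a separate BANK register; which bits those are is
 * derived here from the inverted transfer mask.
 */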
bank_mask = (~wh->dma_xfer_mask >> 18) & 0x01c0;
if (bank_mask)
regs->BANK = bank_mask & (addr >> 18);
/* start DMA */
regs->ST_DMA = 1;
/* return success */
return 0;
}
static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
int status)
{
struct gvp11_hostdata *hdata = shost_priv(instance);
struct WD33C93_hostdata *wh = &hdata->wh;
struct gvp11_scsiregs *regs = hdata->regs;
/* stop DMA */
regs->SP_DMA = 1;
/* remove write bit from CONTROL bits */
regs->CNTR = GVP11_DMAC_INT_ENABLE;
/* copy from a bounce buffer, if necessary */
if (status && wh->dma_bounce_buffer) {
if (wh->dma_dir && SCpnt)
memcpy(SCpnt->SCp.ptr, wh->dma_bounce_buffer,
SCpnt->SCp.this_residual);
if (wh->dma_buffer_pool == BUF_SCSI_ALLOCED)
kfree(wh->dma_bounce_buffer);
else
amiga_chip_free(wh->dma_bounce_buffer);
wh->dma_bounce_buffer = NULL;
wh->dma_bounce_len = 0;
}
}
static int gvp11_bus_reset(struct scsi_cmnd *cmd)
{
struct Scsi_Host *instance = cmd->device->host;
/* FIXME perform bus-specific reset */
/* FIXME 2: shouldn't we no-op this function (return
FAILED), and fall back to host reset function,
wd33c93_host_reset ? */
spin_lock_irq(instance->host_lock);
wd33c93_host_reset(cmd);
spin_unlock_irq(instance->host_lock);
return SUCCESS;
}
static struct scsi_host_template gvp11_scsi_template = {
.module = THIS_MODULE,
.name = "GVP Series II SCSI",
.show_info = wd33c93_show_info,
.write_info = wd33c93_write_info,
.proc_name = "GVP11",
.queuecommand = wd33c93_queuecommand,
.eh_abort_handler = wd33c93_abort,
.eh_bus_reset_handler = gvp11_bus_reset,
.eh_host_reset_handler = wd33c93_host_reset,
.can_queue = CAN_QUEUE,
.this_id = 7,
.sg_tablesize = SG_ALL,
.cmd_per_lun = CMD_PER_LUN,
.use_clustering = DISABLE_CLUSTERING
};
static int check_wd33c93(struct gvp11_scsiregs *regs)
{
#ifdef CHECK_WD33C93
volatile unsigned char *sasr_3393, *scmd_3393;
unsigned char save_sasr;
unsigned char q, qq;
/*
* These darn GVP boards are a problem - it can be tough to tell
* whether or not they include a SCSI controller. This is the
* ultimate Yet-Another-GVP-Detection-Hack in that it actually
* probes for a WD33c93 chip: If we find one, it's extremely
* likely that this card supports SCSI, regardless of Product_
* Code, Board_Size, etc.
*/
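/*
 * The WD33C93 is accessed indirectly: writing a register number to SASR
 * selects a register, after which SCMD reads or writes the selected
 * register. Every probe below is built on that select-then-access
 * pattern.
 */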
/* Get pointers to the presumed register locations and save contents */
sasr_3393 = &regs->SASR;
scmd_3393 = &regs->SCMD;
save_sasr = *sasr_3393;
/* First test the AuxStatus Reg */
q = *sasr_3393; /* read it */
if (q & 0x08) /* bit 3 should always be clear */
return -ENODEV;
*sasr_3393 = WD_AUXILIARY_STATUS; /* setup indirect address */
if (*sasr_3393 == WD_AUXILIARY_STATUS) { /* shouldn't retain the write */
*sasr_3393 = save_sasr; /* Oops - restore this byte */
return -ENODEV;
}
if (*sasr_3393 != q) { /* should still read the same */
*sasr_3393 = save_sasr; /* Oops - restore this byte */
return -ENODEV;
}
if (*scmd_3393 != q) /* and so should the image at 0x1f */
return -ENODEV;
/*
* Ok, we probably have a wd33c93, but let's check a few other places
* for good measure. Make sure that this works for both 'A and 'B
* chip versions.
*/
*sasr_3393 = WD_SCSI_STATUS;
q = *scmd_3393;
*sasr_3393 = WD_SCSI_STATUS;
*scmd_3393 = ~q;
*sasr_3393 = WD_SCSI_STATUS;
qq = *scmd_3393;
*sasr_3393 = WD_SCSI_STATUS;
*scmd_3393 = q;
if (qq != q) /* should be read only */
return -ENODEV;
*sasr_3393 = 0x1e; /* this register is unimplemented */
q = *scmd_3393;
*sasr_3393 = 0x1e;
*scmd_3393 = ~q;
*sasr_3393 = 0x1e;
qq = *scmd_3393;
*sasr_3393 = 0x1e;
*scmd_3393 = q;
if (qq != q || qq != 0xff) /* should be read only, all 1's */
return -ENODEV;
*sasr_3393 = WD_TIMEOUT_PERIOD;
q = *scmd_3393;
*sasr_3393 = WD_TIMEOUT_PERIOD;
*scmd_3393 = ~q;
*sasr_3393 = WD_TIMEOUT_PERIOD;
qq = *scmd_3393;
*sasr_3393 = WD_TIMEOUT_PERIOD;
*scmd_3393 = q;
if (qq != (~q & 0xff)) /* should be read/write */
return -ENODEV;
#endif /* CHECK_WD33C93 */
return 0;
}
static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
{
struct Scsi_Host *instance;
unsigned long address;
int error;
unsigned int epc;
unsigned int default_dma_xfer_mask;
struct gvp11_hostdata *hdata;
struct gvp11_scsiregs *regs;
wd33c93_regs wdregs;
default_dma_xfer_mask = ent->driver_data;
/*
* Rumors state that some GVP ram boards use the same product
* code as the SCSI controllers. Therefore if the board-size
* is not 64KB we assume it is a ram board and bail out.
*/
if (zorro_resource_len(z) != 0x10000)
return -ENODEV;
address = z->resource.start;
if (!request_mem_region(address, 256, "wd33c93"))
return -EBUSY;
regs = ZTWO_VADDR(address);
error = check_wd33c93(regs);
if (error)
goto fail_check_or_alloc;
instance = scsi_host_alloc(&gvp11_scsi_template,
sizeof(struct gvp11_hostdata));
if (!instance) {
error = -ENOMEM;
goto fail_check_or_alloc;
}
instance->irq = IRQ_AMIGA_PORTS;
instance->unique_id = z->slotaddr;
regs->secret2 = 1;
regs->secret1 = 0;
regs->secret3 = 15;
while (regs->CNTR & GVP11_DMAC_BUSY)
;
regs->CNTR = 0;
regs->BANK = 0;
wdregs.SASR = &regs->SASR;
wdregs.SCMD = &regs->SCMD;
hdata = shost_priv(instance);
if (gvp11_xfer_mask)
hdata->wh.dma_xfer_mask = gvp11_xfer_mask;
else
hdata->wh.dma_xfer_mask = default_dma_xfer_mask;
hdata->wh.no_sync = 0xff;
hdata->wh.fast = 0;
hdata->wh.dma_mode = CTRL_DMA;
hdata->regs = regs;
/*
* Check for 14MHz SCSI clock
*/
epc = *(unsigned short *)(ZTWO_VADDR(address) + 0x8000);
wd33c93_init(instance, wdregs, dma_setup, dma_stop,
(epc & GVP_SCSICLKMASK) ? WD33C93_FS_8_10
: WD33C93_FS_12_15);
error = request_irq(IRQ_AMIGA_PORTS, gvp11_intr, IRQF_SHARED,
"GVP11 SCSI", instance);
if (error)
goto fail_irq;
regs->CNTR = GVP11_DMAC_INT_ENABLE;
error = scsi_add_host(instance, NULL);
if (error)
goto fail_host;
zorro_set_drvdata(z, instance);
scsi_scan_host(instance);
return 0;
fail_host:
free_irq(IRQ_AMIGA_PORTS, instance);
fail_irq:
scsi_host_put(instance);
fail_check_or_alloc:
release_mem_region(address, 256);
return error;
}
static void gvp11_remove(struct zorro_dev *z)
{
struct Scsi_Host *instance = zorro_get_drvdata(z);
struct gvp11_hostdata *hdata = shost_priv(instance);
hdata->regs->CNTR = 0;
scsi_remove_host(instance);
free_irq(IRQ_AMIGA_PORTS, instance);
scsi_host_put(instance);
release_mem_region(z->resource.start, 256);
}
/*
* This should (hopefully) be the correct way to identify
* all the different GVP SCSI controllers (except for the
* SERIES I though).
*/
static struct zorro_device_id gvp11_zorro_tbl[] = {
{ ZORRO_PROD_GVP_COMBO_030_R3_SCSI, ~0x00ffffff },
{ ZORRO_PROD_GVP_SERIES_II, ~0x00ffffff },
{ ZORRO_PROD_GVP_GFORCE_030_SCSI, ~0x01ffffff },
{ ZORRO_PROD_GVP_A530_SCSI, ~0x01ffffff },
{ ZORRO_PROD_GVP_COMBO_030_R4_SCSI, ~0x01ffffff },
{ ZORRO_PROD_GVP_A1291, ~0x07ffffff },
{ ZORRO_PROD_GVP_GFORCE_040_SCSI_1, ~0x07ffffff },
{ 0 }
};
MODULE_DEVICE_TABLE(zorro, gvp11_zorro_tbl);
static struct zorro_driver gvp11_driver = {
.name = "gvp11",
.id_table = gvp11_zorro_tbl,
.probe = gvp11_probe,
.remove = gvp11_remove,
};
static int __init gvp11_init(void)
{
return zorro_register_driver(&gvp11_driver);
}
module_init(gvp11_init);
static void __exit gvp11_exit(void)
{
zorro_unregister_driver(&gvp11_driver);
}
module_exit(gvp11_exit);
MODULE_DESCRIPTION("GVP Series II SCSI");
MODULE_LICENSE("GPL");
| gpl-2.0 |
globalgang/linux-at91 | net/tipc/msg.c | 2372 | 3694 | /*
* net/tipc/msg.c: TIPC message header routines
*
* Copyright (c) 2000-2006, Ericsson AB
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "msg.h"
u32 tipc_msg_tot_importance(struct tipc_msg *m)
{
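/*
 * Data messages that originated on another node are bumped four
 * importance levels, presumably so traffic already accepted into the
 * network is preferred over locally generated traffic under congestion.
 */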
if (likely(msg_isdata(m))) {
if (likely(msg_orignode(m) == tipc_own_addr))
return msg_importance(m);
return msg_importance(m) + 4;
}
if ((msg_user(m) == MSG_FRAGMENTER) &&
(msg_type(m) == FIRST_FRAGMENT))
return msg_importance(msg_get_wrapped(m));
return msg_importance(m);
}
void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
u32 hsize, u32 destnode)
{
memset(m, 0, hsize);
msg_set_version(m);
msg_set_user(m, user);
msg_set_hdr_sz(m, hsize);
msg_set_size(m, hsize);
msg_set_prevnode(m, tipc_own_addr);
msg_set_type(m, type);
msg_set_orignode(m, tipc_own_addr);
msg_set_destnode(m, destnode);
}
/**
* tipc_msg_build - create message using specified header and data
*
* Note: Caller must not hold any locks in case copy_from_user() is interrupted!
*
* Returns message data size or errno
*/
int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
u32 num_sect, unsigned int total_len,
int max_size, int usrmem, struct sk_buff **buf)
{
int dsz, sz, hsz, pos, res, cnt;
dsz = total_len;
pos = hsz = msg_hdr_sz(hdr);
sz = hsz + dsz;
msg_set_size(hdr, sz);
if (unlikely(sz > max_size)) {
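/*
 * Too big for a single buffer: report the data size with *buf left
 * NULL, presumably so the caller can fall back to fragmentation.
 */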
*buf = NULL;
return dsz;
}
*buf = tipc_buf_acquire(sz);
if (!(*buf))
return -ENOMEM;
skb_copy_to_linear_data(*buf, hdr, hsz);
for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
if (likely(usrmem))
res = !copy_from_user((*buf)->data + pos,
msg_sect[cnt].iov_base,
msg_sect[cnt].iov_len);
else
skb_copy_to_linear_data_offset(*buf, pos,
msg_sect[cnt].iov_base,
msg_sect[cnt].iov_len);
pos += msg_sect[cnt].iov_len;
}
if (likely(res))
return dsz;
kfree_skb(*buf);
*buf = NULL;
return -EFAULT;
}
| gpl-2.0 |
Evisceration/lge-kernel-sniper | fs/xfs/xfs_alloc.c | 2372 | 84316 | /*
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
#define XFSA_FIXUP_BNO_OK 1
#define XFSA_FIXUP_CNT_OK 2
STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
STATIC void xfs_alloc_busy_trim(struct xfs_alloc_arg *,
xfs_agblock_t, xfs_extlen_t, xfs_agblock_t *, xfs_extlen_t *);
/*
* Lookup the record equal to [bno, len] in the btree given by cur.
*/
STATIC int /* error */
xfs_alloc_lookup_eq(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
/*
* Lookup the first record greater than or equal to [bno, len]
* in the btree given by cur.
*/
STATIC int /* error */
xfs_alloc_lookup_ge(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
/*
* Lookup the first record less than or equal to [bno, len]
* in the btree given by cur.
*/
int /* error */
xfs_alloc_lookup_le(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
xfs_extlen_t len, /* length of extent */
int *stat) /* success/failure */
{
cur->bc_rec.a.ar_startblock = bno;
cur->bc_rec.a.ar_blockcount = len;
return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}
/*
* Update the record referred to by cur to the value given
* by [bno, len].
* This either works (return 0) or gets an EFSCORRUPTED error.
*/
STATIC int /* error */
xfs_alloc_update(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
xfs_extlen_t len) /* length of extent */
{
union xfs_btree_rec rec;
rec.alloc.ar_startblock = cpu_to_be32(bno);
rec.alloc.ar_blockcount = cpu_to_be32(len);
return xfs_btree_update(cur, &rec);
}
/*
* Get the data from the pointed-to record.
*/
int /* error */
xfs_alloc_get_rec(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t *bno, /* output: starting block of extent */
xfs_extlen_t *len, /* output: length of extent */
int *stat) /* output: success/failure */
{
union xfs_btree_rec *rec;
int error;
error = xfs_btree_get_rec(cur, &rec, stat);
if (!error && *stat == 1) {
*bno = be32_to_cpu(rec->alloc.ar_startblock);
*len = be32_to_cpu(rec->alloc.ar_blockcount);
}
return error;
}
/*
* Compute aligned version of the found extent.
* Takes alignment and min length into account.
*/
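/*
 * Worked example (illustrative numbers, assuming len >= minlen): a
 * trimmed extent [7, 17) with alignment 4 rounds up to *resbno = 8 and
 * leaves *reslen = 9; had the round-up consumed the whole extent,
 * *reslen would be 0.
 */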
STATIC void
xfs_alloc_compute_aligned(
xfs_alloc_arg_t *args, /* allocation argument structure */
xfs_agblock_t foundbno, /* starting block in found extent */
xfs_extlen_t foundlen, /* length in found extent */
xfs_agblock_t *resbno, /* result block number */
xfs_extlen_t *reslen) /* result length */
{
xfs_agblock_t bno;
xfs_extlen_t len;
/* Trim busy sections out of found extent */
xfs_alloc_busy_trim(args, foundbno, foundlen, &bno, &len);
if (args->alignment > 1 && len >= args->minlen) {
xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
xfs_extlen_t diff = aligned_bno - bno;
*resbno = aligned_bno;
*reslen = diff >= len ? 0 : len - diff;
} else {
*resbno = bno;
*reslen = len;
}
}
/*
* Compute best start block and diff for "near" allocations.
* freelen >= wantlen already checked by caller.
*/
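/*
 * Worked example (illustrative numbers): wantbno = 100, wantlen = 10,
 * alignment = 4, free extent [102, 130). freebno >= wantbno, so the
 * first branch rounds 102 up to 104; *newbnop = 104 and the returned
 * diff is XFS_ABSDIFF(104, 100) = 4.
 */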
STATIC xfs_extlen_t /* difference value (absolute) */
xfs_alloc_compute_diff(
xfs_agblock_t wantbno, /* target starting block */
xfs_extlen_t wantlen, /* target length */
xfs_extlen_t alignment, /* target alignment */
xfs_agblock_t freebno, /* freespace's starting block */
xfs_extlen_t freelen, /* freespace's length */
xfs_agblock_t *newbnop) /* result: best start block from free */
{
xfs_agblock_t freeend; /* end of freespace extent */
xfs_agblock_t newbno1; /* return block number */
xfs_agblock_t newbno2; /* other new block number */
xfs_extlen_t newlen1=0; /* length with newbno1 */
xfs_extlen_t newlen2=0; /* length with newbno2 */
xfs_agblock_t wantend; /* end of target extent */
ASSERT(freelen >= wantlen);
freeend = freebno + freelen;
wantend = wantbno + wantlen;
if (freebno >= wantbno) {
if ((newbno1 = roundup(freebno, alignment)) >= freeend)
newbno1 = NULLAGBLOCK;
} else if (freeend >= wantend && alignment > 1) {
newbno1 = roundup(wantbno, alignment);
newbno2 = newbno1 - alignment;
if (newbno1 >= freeend)
newbno1 = NULLAGBLOCK;
else
newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
if (newbno2 < freebno)
newbno2 = NULLAGBLOCK;
else
newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
if (newlen1 < newlen2 ||
(newlen1 == newlen2 &&
XFS_ABSDIFF(newbno1, wantbno) >
XFS_ABSDIFF(newbno2, wantbno)))
newbno1 = newbno2;
} else if (newbno2 != NULLAGBLOCK)
newbno1 = newbno2;
} else if (freeend >= wantend) {
newbno1 = wantbno;
} else if (alignment > 1) {
newbno1 = roundup(freeend - wantlen, alignment);
if (newbno1 > freeend - wantlen &&
newbno1 - alignment >= freebno)
newbno1 -= alignment;
else if (newbno1 >= freeend)
newbno1 = NULLAGBLOCK;
} else
newbno1 = freeend - wantlen;
*newbnop = newbno1;
return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
/*
* Fix up the length, based on mod and prod.
* len should be k * prod + mod for some k.
* If len is too small it is returned unchanged.
* If len hits maxlen it is left alone.
*/
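/*
 * Illustration (assumed values): prod = 4, mod = 1, rlen = 11 gives
 * k = 3; the k > mod branch below then yields rlen = 11 - 3 - 1 = 7,
 * which is not of the form k * prod + mod. Mainline XFS later reworked
 * this rounding, so treat the arithmetic here as historical.
 */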
STATIC void
xfs_alloc_fix_len(
xfs_alloc_arg_t *args) /* allocation argument structure */
{
xfs_extlen_t k;
xfs_extlen_t rlen;
ASSERT(args->mod < args->prod);
rlen = args->len;
ASSERT(rlen >= args->minlen);
ASSERT(rlen <= args->maxlen);
if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
(args->mod == 0 && rlen < args->prod))
return;
k = rlen % args->prod;
if (k == args->mod)
return;
if (k > args->mod) {
if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
return;
} else {
if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
(int)args->minlen)
return;
}
ASSERT(rlen >= args->minlen);
ASSERT(rlen <= args->maxlen);
args->len = rlen;
}
/*
* Fix up length if there is too little space left in the a.g.
* Return 1 if ok, 0 if too little, should give up.
*/
STATIC int
xfs_alloc_fix_minleft(
xfs_alloc_arg_t *args) /* allocation argument structure */
{
xfs_agf_t *agf; /* a.g. freelist header */
int diff; /* free space difference */
if (args->minleft == 0)
return 1;
agf = XFS_BUF_TO_AGF(args->agbp);
diff = be32_to_cpu(agf->agf_freeblks)
- args->len - args->minleft;
if (diff >= 0)
return 1;
args->len += diff; /* shrink the allocated space */
if (args->len >= args->minlen)
return 1;
args->agbno = NULLAGBLOCK;
return 0;
}
/*
* Update the two btrees, logically removing from freespace the extent
* starting at rbno, rlen blocks. The extent is contained within the
* actual (current) free extent fbno for flen blocks.
* Flags are passed in indicating whether the cursors are set to the
* relevant records.
*/
STATIC int /* error code */
xfs_alloc_fixup_trees(
xfs_btree_cur_t *cnt_cur, /* cursor for by-size btree */
xfs_btree_cur_t *bno_cur, /* cursor for by-block btree */
xfs_agblock_t fbno, /* starting block of free extent */
xfs_extlen_t flen, /* length of free extent */
xfs_agblock_t rbno, /* starting block of returned extent */
xfs_extlen_t rlen, /* length of returned extent */
int flags) /* flags, XFSA_FIXUP_... */
{
int error; /* error code */
int i; /* operation results */
xfs_agblock_t nfbno1; /* first new free startblock */
xfs_agblock_t nfbno2; /* second new free startblock */
xfs_extlen_t nflen1=0; /* first new free length */
xfs_extlen_t nflen2=0; /* second new free length */
/*
* Look up the record in the by-size tree if necessary.
*/
if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(
i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
} else {
if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
}
/*
* Look up the record in the by-block tree if necessary.
*/
if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(
i == 1 && nfbno1 == fbno && nflen1 == flen);
#endif
} else {
if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
}
#ifdef DEBUG
if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
struct xfs_btree_block *bnoblock;
struct xfs_btree_block *cntblock;
bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
XFS_WANT_CORRUPTED_RETURN(
bnoblock->bb_numrecs == cntblock->bb_numrecs);
}
#endif
/*
* Deal with all four cases: the allocated record is contained
* within the freespace record, so we can have new freespace
* at either (or both) end, or no freespace remaining.
*/
if (rbno == fbno && rlen == flen)
nfbno1 = nfbno2 = NULLAGBLOCK;
else if (rbno == fbno) {
nfbno1 = rbno + rlen;
nflen1 = flen - rlen;
nfbno2 = NULLAGBLOCK;
} else if (rbno + rlen == fbno + flen) {
nfbno1 = fbno;
nflen1 = flen - rlen;
nfbno2 = NULLAGBLOCK;
} else {
nfbno1 = fbno;
nflen1 = rbno - fbno;
nfbno2 = rbno + rlen;
nflen2 = (fbno + flen) - nfbno2;
}
/*
* Delete the entry from the by-size btree.
*/
if ((error = xfs_btree_delete(cnt_cur, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
/*
* Add new by-size btree entry(s).
*/
if (nfbno1 != NULLAGBLOCK) {
if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 0);
if ((error = xfs_btree_insert(cnt_cur, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
}
if (nfbno2 != NULLAGBLOCK) {
if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 0);
if ((error = xfs_btree_insert(cnt_cur, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
}
/*
* Fix up the by-block btree entry(s).
*/
if (nfbno1 == NULLAGBLOCK) {
/*
* No remaining freespace, just delete the by-block tree entry.
*/
if ((error = xfs_btree_delete(bno_cur, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
} else {
/*
* Update the by-block entry to start later|be shorter.
*/
if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
return error;
}
if (nfbno2 != NULLAGBLOCK) {
/*
* 2 resulting free entries, need to add one.
*/
if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 0);
if ((error = xfs_btree_insert(bno_cur, &i)))
return error;
XFS_WANT_CORRUPTED_RETURN(i == 1);
}
return 0;
}
/*
* Read in the allocation group free block array.
*/
STATIC int /* error */
xfs_alloc_read_agfl(
xfs_mount_t *mp, /* mount point structure */
xfs_trans_t *tp, /* transaction pointer */
xfs_agnumber_t agno, /* allocation group number */
xfs_buf_t **bpp) /* buffer for the ag free block array */
{
xfs_buf_t *bp; /* return value */
int error;
ASSERT(agno != NULLAGNUMBER);
error = xfs_trans_read_buf(
mp, tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
XFS_FSS_TO_BB(mp, 1), 0, &bp);
if (error)
return error;
ASSERT(bp);
ASSERT(!XFS_BUF_GETERROR(bp));
XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGFL, XFS_AGFL_REF);
*bpp = bp;
return 0;
}
STATIC int
xfs_alloc_update_counters(
struct xfs_trans *tp,
struct xfs_perag *pag,
struct xfs_buf *agbp,
long len)
{
struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
pag->pagf_freeblks += len;
be32_add_cpu(&agf->agf_freeblks, len);
xfs_trans_agblocks_delta(tp, len);
if (unlikely(be32_to_cpu(agf->agf_freeblks) >
be32_to_cpu(agf->agf_length)))
return EFSCORRUPTED;
xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
return 0;
}
/*
* Allocation group level functions.
*/
/*
* Allocate a variable extent in the allocation group agno.
* Type and bno are used to determine where in the allocation group the
* extent will start.
* Extent's length (returned in *len) will be between minlen and maxlen,
* and of the form k * prod + mod unless there's nothing that large.
* Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
*/
STATIC int /* error */
xfs_alloc_ag_vextent(
xfs_alloc_arg_t *args) /* argument structure for allocation */
{
int error=0;
ASSERT(args->minlen > 0);
ASSERT(args->maxlen > 0);
ASSERT(args->minlen <= args->maxlen);
ASSERT(args->mod < args->prod);
ASSERT(args->alignment > 0);
/*
* Branch to correct routine based on the type.
*/
args->wasfromfl = 0;
switch (args->type) {
case XFS_ALLOCTYPE_THIS_AG:
error = xfs_alloc_ag_vextent_size(args);
break;
case XFS_ALLOCTYPE_NEAR_BNO:
error = xfs_alloc_ag_vextent_near(args);
break;
case XFS_ALLOCTYPE_THIS_BNO:
error = xfs_alloc_ag_vextent_exact(args);
break;
default:
ASSERT(0);
/* NOTREACHED */
}
if (error || args->agbno == NULLAGBLOCK)
return error;
ASSERT(args->len >= args->minlen);
ASSERT(args->len <= args->maxlen);
ASSERT(!args->wasfromfl || !args->isfl);
ASSERT(args->agbno % args->alignment == 0);
if (!args->wasfromfl) {
error = xfs_alloc_update_counters(args->tp, args->pag,
args->agbp,
-((long)(args->len)));
if (error)
return error;
ASSERT(!xfs_alloc_busy_search(args->mp, args->agno,
args->agbno, args->len));
}
if (!args->isfl) {
xfs_trans_mod_sb(args->tp, args->wasdel ?
XFS_TRANS_SB_RES_FDBLOCKS :
XFS_TRANS_SB_FDBLOCKS,
-((long)(args->len)));
}
XFS_STATS_INC(xs_allocx);
XFS_STATS_ADD(xs_allocb, args->len);
return error;
}
/*
* Allocate a variable extent at exactly agno/bno.
* Extent's length (returned in *len) will be between minlen and maxlen,
* and of the form k * prod + mod unless there's nothing that large.
* Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
*/
STATIC int /* error */
xfs_alloc_ag_vextent_exact(
xfs_alloc_arg_t *args) /* allocation argument structure */
{
xfs_btree_cur_t *bno_cur;/* by block-number btree cursor */
xfs_btree_cur_t *cnt_cur;/* by count btree cursor */
int error;
xfs_agblock_t fbno; /* start block of found extent */
xfs_extlen_t flen; /* length of found extent */
xfs_agblock_t tbno; /* start block of trimmed extent */
xfs_extlen_t tlen; /* length of trimmed extent */
xfs_agblock_t tend; /* end block of trimmed extent */
xfs_agblock_t end; /* end of allocated extent */
int i; /* success/failure of operation */
xfs_extlen_t rlen; /* length of returned extent */
ASSERT(args->alignment == 1);
/*
* Allocate/initialize a cursor for the by-number freespace btree.
*/
bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
args->agno, XFS_BTNUM_BNO);
/*
* Lookup bno and minlen in the btree (minlen is irrelevant, really).
* Look for the closest free block <= bno, it must contain bno
* if any free block does.
*/
error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
if (error)
goto error0;
if (!i)
goto not_found;
/*
* Grab the freespace record.
*/
error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
if (error)
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
ASSERT(fbno <= args->agbno);
/*
* Check for overlapping busy extents.
*/
xfs_alloc_busy_trim(args, fbno, flen, &tbno, &tlen);
/*
* Give up if the start of the extent is busy, or the freespace isn't
* long enough for the minimum request.
*/
if (tbno > args->agbno)
goto not_found;
if (tlen < args->minlen)
goto not_found;
tend = tbno + tlen;
if (tend < args->agbno + args->minlen)
goto not_found;
/*
* End of extent will be smaller of the freespace end and the
* maximal requested end.
*
* Fix the length according to mod and prod if given.
*/
end = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen);
args->len = end - args->agbno;
xfs_alloc_fix_len(args);
if (!xfs_alloc_fix_minleft(args))
goto not_found;
rlen = args->len;
ASSERT(args->agbno + rlen <= tend);
end = args->agbno + rlen;
/*
* We are allocating agbno for rlen [agbno .. end]
* Allocate/initialize a cursor for the by-size btree.
*/
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
args->agno, XFS_BTNUM_CNT);
ASSERT(args->agbno + args->len <=
be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
args->len, XFSA_FIXUP_BNO_OK);
if (error) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
goto error0;
}
xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
args->wasfromfl = 0;
trace_xfs_alloc_exact_done(args);
return 0;
not_found:
/* Didn't find it, return null. */
xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
args->agbno = NULLAGBLOCK;
trace_xfs_alloc_exact_notfound(args);
return 0;
error0:
xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
trace_xfs_alloc_exact_error(args);
return error;
}
/*
* Search the btree in a given direction via the search cursor and compare
* the records found against the good extent we've already found.
*/
STATIC int
xfs_alloc_find_best_extent(
struct xfs_alloc_arg *args, /* allocation argument structure */
struct xfs_btree_cur **gcur, /* good cursor */
struct xfs_btree_cur **scur, /* searching cursor */
xfs_agblock_t gdiff, /* difference for search comparison */
xfs_agblock_t *sbno, /* extent found by search */
xfs_extlen_t *slen, /* extent length */
xfs_agblock_t *sbnoa, /* aligned extent found by search */
xfs_extlen_t *slena, /* aligned extent length */
int dir) /* 0 = search right, 1 = search left */
{
xfs_agblock_t new;
xfs_agblock_t sdiff;
int error;
int i;
/* The good extent is perfect, no need to search. */
if (!gdiff)
goto out_use_good;
/*
* Look until we find a better one, run out of space or run off the end.
*/
do {
error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
if (error)
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);
/*
* The good extent is closer than this one.
*/
if (!dir) {
if (*sbnoa >= args->agbno + gdiff)
goto out_use_good;
} else {
if (*sbnoa <= args->agbno - gdiff)
goto out_use_good;
}
/*
* Same distance, compare length and pick the best.
*/
if (*slena >= args->minlen) {
args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
xfs_alloc_fix_len(args);
sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
args->alignment, *sbnoa,
*slena, &new);
/*
* Choose closer size and invalidate other cursor.
*/
if (sdiff < gdiff)
goto out_use_search;
goto out_use_good;
}
if (!dir)
error = xfs_btree_increment(*scur, 0, &i);
else
error = xfs_btree_decrement(*scur, 0, &i);
if (error)
goto error0;
} while (i);
out_use_good:
xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
*scur = NULL;
return 0;
out_use_search:
xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
*gcur = NULL;
return 0;
error0:
/* caller invalidates cursors */
return error;
}
/*
* Allocate a variable extent near bno in the allocation group agno.
* Extent's length (returned in len) will be between minlen and maxlen,
* and of the form k * prod + mod unless there's nothing that large.
* Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
*/
STATIC int /* error */
xfs_alloc_ag_vextent_near(
xfs_alloc_arg_t *args) /* allocation argument structure */
{
xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */
xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */
xfs_btree_cur_t *cnt_cur; /* cursor for count btree */
xfs_agblock_t gtbno; /* start bno of right side entry */
xfs_agblock_t gtbnoa; /* aligned ... */
xfs_extlen_t gtdiff; /* difference to right side entry */
xfs_extlen_t gtlen; /* length of right side entry */
xfs_extlen_t gtlena; /* aligned ... */
xfs_agblock_t gtnew; /* useful start bno of right side */
int error; /* error code */
int i; /* result code, temporary */
int j; /* result code, temporary */
xfs_agblock_t ltbno; /* start bno of left side entry */
xfs_agblock_t ltbnoa; /* aligned ... */
xfs_extlen_t ltdiff; /* difference to left side entry */
xfs_extlen_t ltlen; /* length of left side entry */
xfs_extlen_t ltlena; /* aligned ... */
xfs_agblock_t ltnew; /* useful start bno of left side */
xfs_extlen_t rlen; /* length of returned extent */
int forced = 0;
#if defined(DEBUG) && defined(__KERNEL__)
/*
* Randomly don't execute the first algorithm.
*/
int dofirst; /* set to do first algorithm */
dofirst = random32() & 1;
#endif
restart:
bno_cur_lt = NULL;
bno_cur_gt = NULL;
ltlen = 0;
gtlena = 0;
ltlena = 0;
/*
* Get a cursor for the by-size btree.
*/
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
args->agno, XFS_BTNUM_CNT);
/*
* See if there are any free extents as big as maxlen.
*/
if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
goto error0;
/*
* If none, then pick up the last entry in the tree unless the
* tree is empty.
*/
if (!i) {
if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
&ltlen, &i)))
goto error0;
if (i == 0 || ltlen == 0) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
trace_xfs_alloc_near_noentry(args);
return 0;
}
ASSERT(i == 1);
}
args->wasfromfl = 0;
/*
* First algorithm.
* If the requested extent is large wrt the freespaces available
* in this a.g., then the cursor will be pointing to a btree entry
* near the right edge of the tree. If it's in the last btree leaf
* block, then we just examine all the entries in that block
* that are big enough, and pick the best one.
* This is written as a while loop so we can break out of it,
* but we never loop back to the top.
*/
while (xfs_btree_islastblock(cnt_cur, 0)) {
xfs_extlen_t bdiff;
int besti=0;
xfs_extlen_t blen=0;
xfs_agblock_t bnew=0;
#if defined(DEBUG) && defined(__KERNEL__)
if (!dofirst)
break;
#endif
/*
* Start from the entry that lookup found, sequence through
* all larger free blocks. If we're actually pointing at a
* record smaller than maxlen, go to the start of this block,
* and skip all those smaller than minlen.
*/
if (ltlen || args->alignment > 1) {
cnt_cur->bc_ptrs[0] = 1;
do {
if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
&ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
if (ltlen >= args->minlen)
break;
if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
goto error0;
} while (i);
ASSERT(ltlen >= args->minlen);
if (!i)
break;
}
i = cnt_cur->bc_ptrs[0];
for (j = 1, blen = 0, bdiff = 0;
!error && j && (blen < args->maxlen || bdiff > 0);
error = xfs_btree_increment(cnt_cur, 0, &j)) {
/*
* For each entry, decide if it's better than
* the previous best entry.
*/
if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
xfs_alloc_compute_aligned(args, ltbno, ltlen,
&ltbnoa, &ltlena);
if (ltlena < args->minlen)
continue;
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
xfs_alloc_fix_len(args);
ASSERT(args->len >= args->minlen);
if (args->len < blen)
continue;
ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
args->alignment, ltbnoa, ltlena, &ltnew);
if (ltnew != NULLAGBLOCK &&
(args->len > blen || ltdiff < bdiff)) {
bdiff = ltdiff;
bnew = ltnew;
blen = args->len;
besti = cnt_cur->bc_ptrs[0];
}
}
/*
* It didn't work. We COULD be in a case where
* there's a good record somewhere, so try again.
*/
if (blen == 0)
break;
/*
* Point at the best entry, and retrieve it again.
*/
cnt_cur->bc_ptrs[0] = besti;
if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
args->len = blen;
if (!xfs_alloc_fix_minleft(args)) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
trace_xfs_alloc_near_nominleft(args);
return 0;
}
blen = args->len;
/*
* We are allocating starting at bnew for blen blocks.
*/
args->agbno = bnew;
ASSERT(bnew >= ltbno);
ASSERT(bnew + blen <= ltbno + ltlen);
/*
* Set up a cursor for the by-bno tree.
*/
bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
args->agbp, args->agno, XFS_BTNUM_BNO);
/*
* Fix up the btree entries.
*/
if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
goto error0;
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
trace_xfs_alloc_near_first(args);
return 0;
}
/*
* Second algorithm.
* Search in the by-bno tree to the left and to the right
* simultaneously, until in each case we find a space big enough,
* or run into the edge of the tree. When we run into the edge,
* we deallocate that cursor.
* If both searches succeed, we compare the two spaces and pick
* the better one.
* With alignment, it's possible for both to fail; the upper
* level algorithm that picks allocation groups for allocations
* is not supposed to do this.
*/
/*
* Allocate and initialize the cursor for the leftward search.
*/
bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
args->agno, XFS_BTNUM_BNO);
/*
* Lookup <= bno to find the leftward search's starting point.
*/
if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
goto error0;
if (!i) {
/*
* Didn't find anything; use this cursor for the rightward
* search.
*/
bno_cur_gt = bno_cur_lt;
bno_cur_lt = NULL;
}
/*
* Found something. Duplicate the cursor for the rightward search.
*/
else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
goto error0;
/*
* Increment the cursor, so we will point at the entry just right
* of the leftward entry if any, or to the leftmost entry.
*/
if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
goto error0;
if (!i) {
/*
* It failed, there are no rightward entries.
*/
xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
bno_cur_gt = NULL;
}
/*
* Loop going left with the leftward cursor, right with the
* rightward cursor, until either both directions give up or
* we find an entry at least as big as minlen.
*/
do {
if (bno_cur_lt) {
if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
xfs_alloc_compute_aligned(args, ltbno, ltlen,
&ltbnoa, &ltlena);
if (ltlena >= args->minlen)
break;
if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
goto error0;
if (!i) {
xfs_btree_del_cursor(bno_cur_lt,
XFS_BTREE_NOERROR);
bno_cur_lt = NULL;
}
}
if (bno_cur_gt) {
if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
xfs_alloc_compute_aligned(args, gtbno, gtlen,
&gtbnoa, &gtlena);
if (gtlena >= args->minlen)
break;
if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
goto error0;
if (!i) {
xfs_btree_del_cursor(bno_cur_gt,
XFS_BTREE_NOERROR);
bno_cur_gt = NULL;
}
}
} while (bno_cur_lt || bno_cur_gt);
/*
* Got both cursors still active, need to find better entry.
*/
if (bno_cur_lt && bno_cur_gt) {
if (ltlena >= args->minlen) {
/*
* Left side is good, look for a right side entry.
*/
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
xfs_alloc_fix_len(args);
ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
args->alignment, ltbnoa, ltlena, &ltnew);
error = xfs_alloc_find_best_extent(args,
&bno_cur_lt, &bno_cur_gt,
ltdiff, &gtbno, &gtlen,
&gtbnoa, &gtlena,
0 /* search right */);
} else {
ASSERT(gtlena >= args->minlen);
/*
* Right side is good, look for a left side entry.
*/
args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
xfs_alloc_fix_len(args);
gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
args->alignment, gtbnoa, gtlena, &gtnew);
error = xfs_alloc_find_best_extent(args,
&bno_cur_gt, &bno_cur_lt,
gtdiff, &ltbno, &ltlen,
&ltbnoa, &ltlena,
1 /* search left */);
}
if (error)
goto error0;
}
/*
* If we couldn't get anything, give up.
*/
if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
if (!forced++) {
trace_xfs_alloc_near_busy(args);
xfs_log_force(args->mp, XFS_LOG_SYNC);
goto restart;
}
trace_xfs_alloc_size_neither(args);
args->agbno = NULLAGBLOCK;
return 0;
}
/*
* At this point we have selected a freespace entry, either to the
* left or to the right. If it's on the right, copy all the
* useful variables to the "left" set so we only have one
* copy of this code.
*/
if (bno_cur_gt) {
bno_cur_lt = bno_cur_gt;
bno_cur_gt = NULL;
ltbno = gtbno;
ltbnoa = gtbnoa;
ltlen = gtlen;
ltlena = gtlena;
j = 1;
} else
j = 0;
/*
* Fix up the length and compute the useful address.
*/
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
xfs_alloc_fix_len(args);
if (!xfs_alloc_fix_minleft(args)) {
trace_xfs_alloc_near_nominleft(args);
xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
return 0;
}
rlen = args->len;
(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
ltbnoa, ltlena, &ltnew);
ASSERT(ltnew >= ltbno);
ASSERT(ltnew + rlen <= ltbnoa + ltlena);
ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
args->agbno = ltnew;
if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
ltnew, rlen, XFSA_FIXUP_BNO_OK)))
goto error0;
if (j)
trace_xfs_alloc_near_greater(args);
else
trace_xfs_alloc_near_lesser(args);
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
return 0;
error0:
trace_xfs_alloc_near_error(args);
if (cnt_cur != NULL)
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
if (bno_cur_lt != NULL)
xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
if (bno_cur_gt != NULL)
xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
return error;
}
/*
* Allocate a variable extent anywhere in the allocation group agno.
* Extent's length (returned in len) will be between minlen and maxlen,
* and of the form k * prod + mod unless there's nothing that large.
* Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
*/
STATIC int /* error */
xfs_alloc_ag_vextent_size(
xfs_alloc_arg_t *args) /* allocation argument structure */
{
xfs_btree_cur_t *bno_cur; /* cursor for bno btree */
xfs_btree_cur_t *cnt_cur; /* cursor for cnt btree */
int error; /* error result */
xfs_agblock_t fbno; /* start of found freespace */
xfs_extlen_t flen; /* length of found freespace */
int i; /* temp status variable */
xfs_agblock_t rbno; /* returned block number */
xfs_extlen_t rlen; /* length of returned extent */
int forced = 0;
restart:
/*
* Allocate and initialize a cursor for the by-size btree.
*/
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
args->agno, XFS_BTNUM_CNT);
bno_cur = NULL;
/*
* Look for an entry >= maxlen+alignment-1 blocks.
*/
if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
args->maxlen + args->alignment - 1, &i)))
goto error0;
/*
* If none or we have busy extents that we cannot allocate from, then
* we have to settle for a smaller extent. In the case that there are
* no large extents, this will return the last entry in the tree unless
* the tree is empty. In the case that there are only busy large
* extents, this will return the largest small extent unless there
* are no smaller extents available.
*/
if (!i || forced > 1) {
error = xfs_alloc_ag_vextent_small(args, cnt_cur,
&fbno, &flen, &i);
if (error)
goto error0;
if (i == 0 || flen == 0) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
trace_xfs_alloc_size_noentry(args);
return 0;
}
ASSERT(i == 1);
xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
} else {
/*
* Search for a non-busy extent that is large enough.
* If we are at low space, don't check, or if we fall off
* the end of the btree, turn off the busy check and
* restart.
*/
for (;;) {
error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
if (error)
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
xfs_alloc_compute_aligned(args, fbno, flen,
&rbno, &rlen);
if (rlen >= args->maxlen)
break;
error = xfs_btree_increment(cnt_cur, 0, &i);
if (error)
goto error0;
if (i == 0) {
/*
* Our only valid extents must have been busy.
* Make it unbusy by forcing the log out and
* retrying. If we've been here before, forcing
* the log isn't making the extents available,
* which means they have probably been freed in
* this transaction. In that case, we have to
* give up on them and we'll attempt a minlen
* allocation the next time around.
*/
xfs_btree_del_cursor(cnt_cur,
XFS_BTREE_NOERROR);
trace_xfs_alloc_size_busy(args);
if (!forced++)
xfs_log_force(args->mp, XFS_LOG_SYNC);
goto restart;
}
}
}
/*
* In the first case above, we got the last entry in the
* by-size btree. Now we check to see if the space hits maxlen
* once aligned; if not, we search left for something better.
* This can't happen in the second case above.
*/
rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
(rlen <= flen && rbno + rlen <= fbno + flen), error0);
if (rlen < args->maxlen) {
xfs_agblock_t bestfbno;
xfs_extlen_t bestflen;
xfs_agblock_t bestrbno;
xfs_extlen_t bestrlen;
bestrlen = rlen;
bestrbno = rbno;
bestflen = flen;
bestfbno = fbno;
for (;;) {
if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
goto error0;
if (i == 0)
break;
if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
&i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
if (flen < bestrlen)
break;
xfs_alloc_compute_aligned(args, fbno, flen,
&rbno, &rlen);
rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
(rlen <= flen && rbno + rlen <= fbno + flen),
error0);
if (rlen > bestrlen) {
bestrlen = rlen;
bestrbno = rbno;
bestflen = flen;
bestfbno = fbno;
if (rlen == args->maxlen)
break;
}
}
if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
&i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
rlen = bestrlen;
rbno = bestrbno;
flen = bestflen;
fbno = bestfbno;
}
args->wasfromfl = 0;
/*
* Fix up the length.
*/
args->len = rlen;
if (rlen < args->minlen) {
if (!forced++) {
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
trace_xfs_alloc_size_busy(args);
xfs_log_force(args->mp, XFS_LOG_SYNC);
goto restart;
}
goto out_nominleft;
}
xfs_alloc_fix_len(args);
if (!xfs_alloc_fix_minleft(args))
goto out_nominleft;
rlen = args->len;
XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
/*
* Allocate and initialize a cursor for the by-block tree.
*/
bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
args->agno, XFS_BTNUM_BNO);
if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
rbno, rlen, XFSA_FIXUP_CNT_OK)))
goto error0;
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
cnt_cur = bno_cur = NULL;
args->len = rlen;
args->agbno = rbno;
XFS_WANT_CORRUPTED_GOTO(
args->agbno + args->len <=
be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
error0);
trace_xfs_alloc_size_done(args);
return 0;
error0:
trace_xfs_alloc_size_error(args);
if (cnt_cur)
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
if (bno_cur)
xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
return error;
out_nominleft:
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
trace_xfs_alloc_size_nominleft(args);
args->agbno = NULLAGBLOCK;
return 0;
}
/*
* Deal with the case where only small freespaces remain.
* Either return the contents of the last freespace record,
* or allocate space from the freelist if there is nothing in the tree.
*/
STATIC int /* error */
xfs_alloc_ag_vextent_small(
xfs_alloc_arg_t *args, /* allocation argument structure */
xfs_btree_cur_t *ccur, /* by-size cursor */
xfs_agblock_t *fbnop, /* result block number */
xfs_extlen_t *flenp, /* result length */
int *stat) /* status: 0-freelist, 1-normal/none */
{
int error;
xfs_agblock_t fbno;
xfs_extlen_t flen;
int i;
if ((error = xfs_btree_decrement(ccur, 0, &i)))
goto error0;
if (i) {
if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
}
/*
* Nothing in the btree, try the freelist. Make sure
* to respect minleft even when pulling from the
* freelist.
*/
else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
(be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
> args->minleft)) {
error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
if (error)
goto error0;
if (fbno != NULLAGBLOCK) {
xfs_alloc_busy_reuse(args->mp, args->agno, fbno, 1,
args->userdata);
if (args->userdata) {
xfs_buf_t *bp;
bp = xfs_btree_get_bufs(args->mp, args->tp,
args->agno, fbno, 0);
xfs_trans_binval(args->tp, bp);
}
args->len = 1;
args->agbno = fbno;
XFS_WANT_CORRUPTED_GOTO(
args->agbno + args->len <=
be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
error0);
args->wasfromfl = 1;
trace_xfs_alloc_small_freelist(args);
*stat = 0;
return 0;
}
/*
* Nothing in the freelist.
*/
else
flen = 0;
}
/*
* Can't allocate from the freelist for some reason.
*/
else {
fbno = NULLAGBLOCK;
flen = 0;
}
/*
* Can't do the allocation, give up.
*/
if (flen < args->minlen) {
args->agbno = NULLAGBLOCK;
trace_xfs_alloc_small_notenough(args);
flen = 0;
}
*fbnop = fbno;
*flenp = flen;
*stat = 1;
trace_xfs_alloc_small_done(args);
return 0;
error0:
trace_xfs_alloc_small_error(args);
return error;
}
/*
* Free the extent starting at agno/bno for length.
*/
STATIC int /* error */
xfs_free_ag_extent(
xfs_trans_t *tp, /* transaction pointer */
xfs_buf_t *agbp, /* buffer for a.g. freelist header */
xfs_agnumber_t agno, /* allocation group number */
xfs_agblock_t bno, /* starting block number */
xfs_extlen_t len, /* length of extent */
int isfl) /* set if is freelist blocks - no sb acctg */
{
xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
int error; /* error return value */
xfs_agblock_t gtbno; /* start of right neighbor block */
xfs_extlen_t gtlen; /* length of right neighbor block */
int haveleft; /* have a left neighbor block */
int haveright; /* have a right neighbor block */
int i; /* temp, result code */
xfs_agblock_t ltbno; /* start of left neighbor block */
xfs_extlen_t ltlen; /* length of left neighbor block */
xfs_mount_t *mp; /* mount point struct for filesystem */
xfs_agblock_t nbno; /* new starting block of freespace */
xfs_extlen_t nlen; /* new length of freespace */
xfs_perag_t *pag; /* per allocation group data */
mp = tp->t_mountp;
/*
* Allocate and initialize a cursor for the by-block btree.
*/
bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
cnt_cur = NULL;
/*
* Look for a neighboring block on the left (lower block numbers)
* that is contiguous with this space.
*/
if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
goto error0;
if (haveleft) {
/*
* There is a block to our left.
*/
if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/*
* It's not contiguous, though.
*/
if (ltbno + ltlen < bno)
haveleft = 0;
else {
/*
* If this failure happens the request to free this
* space was invalid, it's (partly) already free.
* Very bad.
*/
XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
}
}
/*
* Look for a neighboring block on the right (higher block numbers)
* that is contiguous with this space.
*/
if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
goto error0;
if (haveright) {
/*
* There is a block to our right.
*/
if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/*
* It's not contiguous, though.
*/
if (bno + len < gtbno)
haveright = 0;
else {
/*
* If this failure happens the request to free this
* space was invalid, it's (partly) already free.
* Very bad.
*/
XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
}
}
/*
* Now allocate and initialize a cursor for the by-size tree.
*/
cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
/*
* Have both left and right contiguous neighbors.
* Merge all three into a single free block.
*/
if (haveleft && haveright) {
/*
* Delete the old by-size entry on the left.
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/*
* Delete the old by-size entry on the right.
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/*
* Delete the old by-block entry for the right block.
*/
if ((error = xfs_btree_delete(bno_cur, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/*
* Move the by-block cursor back to the left neighbor.
*/
if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
#ifdef DEBUG
/*
* Check that this is the right record: delete didn't
* mangle the cursor.
*/
{
xfs_agblock_t xxbno;
xfs_extlen_t xxlen;
if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
&i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(
i == 1 && xxbno == ltbno && xxlen == ltlen,
error0);
}
#endif
/*
* Update remaining by-block entry to the new, joined block.
*/
nbno = ltbno;
nlen = len + ltlen + gtlen;
if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
goto error0;
}
/*
* Have only a left contiguous neighbor.
* Merge it together with the new freespace.
*/
else if (haveleft) {
/*
* Delete the old by-size entry on the left.
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/*
* Back up the by-block cursor to the left neighbor, and
* update its length.
*/
if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
nbno = ltbno;
nlen = len + ltlen;
if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
goto error0;
}
/*
* Have only a right contiguous neighbor.
* Merge it together with the new freespace.
*/
else if (haveright) {
/*
* Delete the old by-size entry on the right.
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
if ((error = xfs_btree_delete(cnt_cur, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
/*
* Update the starting block and length of the right
* neighbor in the by-block tree.
*/
nbno = bno;
nlen = len + gtlen;
if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
goto error0;
}
/*
* No contiguous neighbors.
* Insert the new freespace into the by-block tree.
*/
else {
nbno = bno;
nlen = len;
if ((error = xfs_btree_insert(bno_cur, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
}
xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
bno_cur = NULL;
/*
* In all cases we need to insert the new freespace in the by-size tree.
*/
if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
if ((error = xfs_btree_insert(cnt_cur, &i)))
goto error0;
XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
cnt_cur = NULL;
/*
* Update the freespace totals in the ag and superblock.
*/
pag = xfs_perag_get(mp, agno);
error = xfs_alloc_update_counters(tp, pag, agbp, len);
xfs_perag_put(pag);
if (error)
goto error0;
if (!isfl)
xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
XFS_STATS_INC(xs_freex);
XFS_STATS_ADD(xs_freeb, len);
trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
return 0;
error0:
trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
if (bno_cur)
xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
if (cnt_cur)
xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
return error;
}
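/*
 * (Worked example of the merge logic above, illustrative only: freeing
 * [bno = 100, len = 10] when the by-block tree already holds [90, 10] and
 * [110, 20] takes the haveleft && haveright branch, deletes both
 * neighbours' by-size entries plus the right neighbour's by-block entry,
 * and rewrites the left by-block entry as the single merged record
 * [90, 40].)
 */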
/*
* Visible (exported) allocation/free functions.
* Some of these are used just by xfs_alloc_btree.c and this file.
*/
/*
* Compute and fill in value of m_ag_maxlevels.
*/
void
xfs_alloc_compute_maxlevels(
xfs_mount_t *mp) /* file system mount structure */
{
int level;
uint maxblocks;
uint maxleafents;
int minleafrecs;
int minnoderecs;
maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
minleafrecs = mp->m_alloc_mnr[0];
minnoderecs = mp->m_alloc_mnr[1];
maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
for (level = 1; maxblocks > 1; level++)
maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
mp->m_ag_maxlevels = level;
}
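/*
 * (Worked example, values assumed for illustration: with sb_agblocks =
 * 1048576, minleafrecs = 255 and minnoderecs = 170, maxleafents = 524288;
 * the leaf level needs ceil(524288 / 255) = 2057 blocks, the next level
 * ceil(2057 / 170) = 13, then ceil(13 / 170) = 1, so the loop exits with
 * m_ag_maxlevels = 3.)
 */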
/*
* Find the length of the longest extent in an AG.
*/
xfs_extlen_t
xfs_alloc_longest_free_extent(
struct xfs_mount *mp,
struct xfs_perag *pag)
{
xfs_extlen_t need, delta = 0;
need = XFS_MIN_FREELIST_PAG(pag, mp);
if (need > pag->pagf_flcount)
delta = need - pag->pagf_flcount;
if (pag->pagf_longest > delta)
return pag->pagf_longest - delta;
return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
}
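/*
 * (Added commentary, not in the original: when the freelist reservation
 * swallows the whole longest extent, the final boolean return above
 * evaluates to 1 if the AG has any free space at all - so a minlen == 1
 * allocation may still be attempted - and to 0 otherwise.)
 */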
/*
* Decide whether to use this allocation group for this allocation.
* If so, fix up the btree freelist's size.
*/
STATIC int /* error */
xfs_alloc_fix_freelist(
xfs_alloc_arg_t *args, /* allocation argument structure */
int flags) /* XFS_ALLOC_FLAG_... */
{
xfs_buf_t *agbp; /* agf buffer pointer */
xfs_agf_t *agf; /* a.g. freespace structure pointer */
xfs_buf_t *agflbp;/* agfl buffer pointer */
xfs_agblock_t bno; /* freelist block */
xfs_extlen_t delta; /* new blocks needed in freelist */
int error; /* error result code */
xfs_extlen_t longest;/* longest extent in allocation group */
xfs_mount_t *mp; /* file system mount point structure */
xfs_extlen_t need; /* total blocks needed in freelist */
xfs_perag_t *pag; /* per-ag information structure */
xfs_alloc_arg_t targs; /* local allocation arguments */
xfs_trans_t *tp; /* transaction pointer */
mp = args->mp;
pag = args->pag;
tp = args->tp;
if (!pag->pagf_init) {
if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
&agbp)))
return error;
if (!pag->pagf_init) {
ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
args->agbp = NULL;
return 0;
}
} else
agbp = NULL;
/*
* If this is a metadata-preferred pag and we are allocating user
* data, try somewhere else, unless we are being asked to try
* harder at this point.
*/
if (pag->pagf_metadata && args->userdata &&
(flags & XFS_ALLOC_FLAG_TRYLOCK)) {
ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
args->agbp = NULL;
return 0;
}
if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
/*
* If it looks like there isn't a long enough extent, or enough
* total blocks, reject it.
*/
need = XFS_MIN_FREELIST_PAG(pag, mp);
longest = xfs_alloc_longest_free_extent(mp, pag);
if ((args->minlen + args->alignment + args->minalignslop - 1) >
longest ||
((int)(pag->pagf_freeblks + pag->pagf_flcount -
need - args->total) < (int)args->minleft)) {
if (agbp)
xfs_trans_brelse(tp, agbp);
args->agbp = NULL;
return 0;
}
}
/*
* Get the a.g. freespace buffer.
* Can fail if we're not blocking on locks, and it's held.
*/
if (agbp == NULL) {
if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
&agbp)))
return error;
if (agbp == NULL) {
ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
args->agbp = NULL;
return 0;
}
}
/*
* Figure out how many blocks we should have in the freelist.
*/
agf = XFS_BUF_TO_AGF(agbp);
need = XFS_MIN_FREELIST(agf, mp);
/*
* If there isn't a long enough extent or enough total blocks, reject it.
*/
if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
delta = need > be32_to_cpu(agf->agf_flcount) ?
(need - be32_to_cpu(agf->agf_flcount)) : 0;
longest = be32_to_cpu(agf->agf_longest);
longest = (longest > delta) ? (longest - delta) :
(be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
if ((args->minlen + args->alignment + args->minalignslop - 1) >
longest ||
((int)(be32_to_cpu(agf->agf_freeblks) +
be32_to_cpu(agf->agf_flcount) - need - args->total) <
(int)args->minleft)) {
xfs_trans_brelse(tp, agbp);
args->agbp = NULL;
return 0;
}
}
/*
* Make the freelist shorter if it's too long.
*/
while (be32_to_cpu(agf->agf_flcount) > need) {
xfs_buf_t *bp;
error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
if (error)
return error;
if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
return error;
bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
xfs_trans_binval(tp, bp);
}
/*
* Initialize the args structure.
*/
targs.tp = tp;
targs.mp = mp;
targs.agbp = agbp;
targs.agno = args->agno;
targs.mod = targs.minleft = targs.wasdel = targs.userdata =
targs.minalignslop = 0;
targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
targs.type = XFS_ALLOCTYPE_THIS_AG;
targs.pag = pag;
if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp)))
return error;
/*
* Make the freelist longer if it's too short.
*/
while (be32_to_cpu(agf->agf_flcount) < need) {
targs.agbno = 0;
targs.maxlen = need - be32_to_cpu(agf->agf_flcount);
/*
* Allocate as many blocks as possible at once.
*/
if ((error = xfs_alloc_ag_vextent(&targs))) {
xfs_trans_brelse(tp, agflbp);
return error;
}
/*
* Stop if we run out. Won't happen if callers are obeying
* the restrictions correctly. Can happen for free calls
* on a completely full ag.
*/
if (targs.agbno == NULLAGBLOCK) {
if (flags & XFS_ALLOC_FLAG_FREEING)
break;
xfs_trans_brelse(tp, agflbp);
args->agbp = NULL;
return 0;
}
/*
* Put each allocated block on the list.
*/
for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
error = xfs_alloc_put_freelist(tp, agbp,
agflbp, bno, 0);
if (error)
return error;
}
}
xfs_trans_brelse(tp, agflbp);
args->agbp = agbp;
return 0;
}
/*
* Get a block from the freelist.
* The block number is returned via *bnop; NULLAGBLOCK is returned if
* the freelist is empty.
*/
int /* error */
xfs_alloc_get_freelist(
xfs_trans_t *tp, /* transaction pointer */
xfs_buf_t *agbp, /* buffer containing the agf structure */
xfs_agblock_t *bnop, /* block address retrieved from freelist */
int btreeblk) /* destination is a AGF btree */
{
xfs_agf_t *agf; /* a.g. freespace structure */
xfs_agfl_t *agfl; /* a.g. freelist structure */
xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
xfs_agblock_t bno; /* block number returned */
int error;
int logflags;
xfs_mount_t *mp; /* mount structure */
xfs_perag_t *pag; /* per allocation group data */
agf = XFS_BUF_TO_AGF(agbp);
/*
* Freelist is empty, give up.
*/
if (!agf->agf_flcount) {
*bnop = NULLAGBLOCK;
return 0;
}
/*
* Read the array of free blocks.
*/
mp = tp->t_mountp;
if ((error = xfs_alloc_read_agfl(mp, tp,
be32_to_cpu(agf->agf_seqno), &agflbp)))
return error;
agfl = XFS_BUF_TO_AGFL(agflbp);
/*
* Get the block number and update the data structures.
*/
bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
be32_add_cpu(&agf->agf_flfirst, 1);
xfs_trans_brelse(tp, agflbp);
if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
agf->agf_flfirst = 0;
pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
be32_add_cpu(&agf->agf_flcount, -1);
xfs_trans_agflist_delta(tp, -1);
pag->pagf_flcount--;
xfs_perag_put(pag);
logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
if (btreeblk) {
be32_add_cpu(&agf->agf_btreeblks, 1);
pag->pagf_btreeblks++;
logflags |= XFS_AGF_BTREEBLKS;
}
xfs_alloc_log_agf(tp, agbp, logflags);
*bnop = bno;
return 0;
}
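/*
 * (Illustrative note, not in the original source: the AGFL is a circular
 * array of XFS_AGFL_SIZE(mp) block numbers. With agf_flfirst == 2 and
 * agf_fllast == 5, the freelist holds the entries at indices 2..5; the
 * get above returns agfl_bno[2] and advances agf_flfirst to 3, wrapping
 * back to 0 once it reaches XFS_AGFL_SIZE(mp).)
 */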
/*
* Log the given fields from the agf structure.
*/
void
xfs_alloc_log_agf(
xfs_trans_t *tp, /* transaction pointer */
xfs_buf_t *bp, /* buffer for a.g. freelist header */
int fields) /* mask of fields to be logged (XFS_AGF_...) */
{
int first; /* first byte offset */
int last; /* last byte offset */
static const short offsets[] = {
offsetof(xfs_agf_t, agf_magicnum),
offsetof(xfs_agf_t, agf_versionnum),
offsetof(xfs_agf_t, agf_seqno),
offsetof(xfs_agf_t, agf_length),
offsetof(xfs_agf_t, agf_roots[0]),
offsetof(xfs_agf_t, agf_levels[0]),
offsetof(xfs_agf_t, agf_flfirst),
offsetof(xfs_agf_t, agf_fllast),
offsetof(xfs_agf_t, agf_flcount),
offsetof(xfs_agf_t, agf_freeblks),
offsetof(xfs_agf_t, agf_longest),
offsetof(xfs_agf_t, agf_btreeblks),
sizeof(xfs_agf_t)
};
trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
}
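/*
 * (Added commentary, not in the original: xfs_btree_offsets() converts the
 * field bitmask into one contiguous byte range via the offsets[] table, so
 * logging XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT dirties every byte from
 * offsetof(xfs_agf_t, agf_flfirst) through offsetof(xfs_agf_t,
 * agf_freeblks) - 1, which includes agf_fllast lying in between.)
 */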
/*
* Interface for inode allocation to force the pag data to be initialized.
*/
int /* error */
xfs_alloc_pagf_init(
xfs_mount_t *mp, /* file system mount structure */
xfs_trans_t *tp, /* transaction pointer */
xfs_agnumber_t agno, /* allocation group number */
int flags) /* XFS_ALLOC_FLAGS_... */
{
xfs_buf_t *bp;
int error;
if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
return error;
if (bp)
xfs_trans_brelse(tp, bp);
return 0;
}
/*
* Put the block on the freelist for the allocation group.
*/
int /* error */
xfs_alloc_put_freelist(
xfs_trans_t *tp, /* transaction pointer */
xfs_buf_t *agbp, /* buffer for a.g. freelist header */
xfs_buf_t *agflbp,/* buffer for a.g. free block array */
xfs_agblock_t bno, /* block being freed */
int btreeblk) /* block came from a AGF btree */
{
xfs_agf_t *agf; /* a.g. freespace structure */
xfs_agfl_t *agfl; /* a.g. free block array */
__be32 *blockp;/* pointer to array entry */
int error;
int logflags;
xfs_mount_t *mp; /* mount structure */
xfs_perag_t *pag; /* per allocation group data */
agf = XFS_BUF_TO_AGF(agbp);
mp = tp->t_mountp;
if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
be32_to_cpu(agf->agf_seqno), &agflbp)))
return error;
agfl = XFS_BUF_TO_AGFL(agflbp);
be32_add_cpu(&agf->agf_fllast, 1);
if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
agf->agf_fllast = 0;
pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
be32_add_cpu(&agf->agf_flcount, 1);
xfs_trans_agflist_delta(tp, 1);
pag->pagf_flcount++;
logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
if (btreeblk) {
be32_add_cpu(&agf->agf_btreeblks, -1);
pag->pagf_btreeblks--;
logflags |= XFS_AGF_BTREEBLKS;
}
xfs_perag_put(pag);
xfs_alloc_log_agf(tp, agbp, logflags);
ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
*blockp = cpu_to_be32(bno);
xfs_alloc_log_agf(tp, agbp, logflags);
xfs_trans_log_buf(tp, agflbp,
(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
sizeof(xfs_agblock_t) - 1));
return 0;
}
/*
* Read in the allocation group header (free/alloc section).
*/
int /* error */
xfs_read_agf(
struct xfs_mount *mp, /* mount point structure */
struct xfs_trans *tp, /* transaction pointer */
xfs_agnumber_t agno, /* allocation group number */
int flags, /* XFS_BUF_ */
struct xfs_buf **bpp) /* buffer for the ag freelist header */
{
struct xfs_agf *agf; /* ag freelist header */
int agf_ok; /* set if agf is consistent */
int error;
ASSERT(agno != NULLAGNUMBER);
error = xfs_trans_read_buf(
mp, tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
XFS_FSS_TO_BB(mp, 1), flags, bpp);
if (error)
return error;
if (!*bpp)
return 0;
ASSERT(!XFS_BUF_GETERROR(*bpp));
agf = XFS_BUF_TO_AGF(*bpp);
/*
* Validate the magic number of the agf block.
*/
agf_ok =
be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC &&
XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp) &&
be32_to_cpu(agf->agf_seqno) == agno;
if (xfs_sb_version_haslazysbcount(&mp->m_sb))
agf_ok = agf_ok && be32_to_cpu(agf->agf_btreeblks) <=
be32_to_cpu(agf->agf_length);
if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
XFS_RANDOM_ALLOC_READ_AGF))) {
XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
XFS_ERRLEVEL_LOW, mp, agf);
xfs_trans_brelse(tp, *bpp);
return XFS_ERROR(EFSCORRUPTED);
}
XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_AGF, XFS_AGF_REF);
return 0;
}
/*
* Read in the allocation group header (free/alloc section).
*/
int /* error */
xfs_alloc_read_agf(
struct xfs_mount *mp, /* mount point structure */
struct xfs_trans *tp, /* transaction pointer */
xfs_agnumber_t agno, /* allocation group number */
int flags, /* XFS_ALLOC_FLAG_... */
struct xfs_buf **bpp) /* buffer for the ag freelist header */
{
struct xfs_agf *agf; /* ag freelist header */
struct xfs_perag *pag; /* per allocation group data */
int error;
ASSERT(agno != NULLAGNUMBER);
error = xfs_read_agf(mp, tp, agno,
(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
bpp);
if (error)
return error;
if (!*bpp)
return 0;
ASSERT(!XFS_BUF_GETERROR(*bpp));
agf = XFS_BUF_TO_AGF(*bpp);
pag = xfs_perag_get(mp, agno);
if (!pag->pagf_init) {
pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
pag->pagf_longest = be32_to_cpu(agf->agf_longest);
pag->pagf_levels[XFS_BTNUM_BNOi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
pag->pagf_levels[XFS_BTNUM_CNTi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
spin_lock_init(&pag->pagb_lock);
pag->pagb_count = 0;
pag->pagb_tree = RB_ROOT;
pag->pagf_init = 1;
}
#ifdef DEBUG
else if (!XFS_FORCED_SHUTDOWN(mp)) {
ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
}
#endif
xfs_perag_put(pag);
return 0;
}
/*
* Allocate an extent (variable-size).
* Depending on the allocation type, we either look in a single allocation
* group or loop over the allocation groups to find the result.
*/
int /* error */
xfs_alloc_vextent(
xfs_alloc_arg_t *args) /* allocation argument structure */
{
xfs_agblock_t agsize; /* allocation group size */
int error;
int flags; /* XFS_ALLOC_FLAG_... locking flags */
xfs_extlen_t minleft;/* minimum left value, temp copy */
xfs_mount_t *mp; /* mount structure pointer */
xfs_agnumber_t sagno; /* starting allocation group number */
xfs_alloctype_t type; /* input allocation type */
int bump_rotor = 0;
int no_min = 0;
xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
mp = args->mp;
type = args->otype = args->type;
args->agbno = NULLAGBLOCK;
/*
* Just fix this up, for the case where the last a.g. is shorter
* (or there's only one a.g.) and the caller couldn't easily figure
* that out (xfs_bmap_alloc).
*/
agsize = mp->m_sb.sb_agblocks;
if (args->maxlen > agsize)
args->maxlen = agsize;
if (args->alignment == 0)
args->alignment = 1;
ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
ASSERT(args->minlen <= args->maxlen);
ASSERT(args->minlen <= agsize);
ASSERT(args->mod < args->prod);
if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
args->minlen > args->maxlen || args->minlen > agsize ||
args->mod >= args->prod) {
args->fsbno = NULLFSBLOCK;
trace_xfs_alloc_vextent_badargs(args);
return 0;
}
minleft = args->minleft;
switch (type) {
case XFS_ALLOCTYPE_THIS_AG:
case XFS_ALLOCTYPE_NEAR_BNO:
case XFS_ALLOCTYPE_THIS_BNO:
/*
* These three force us into a single a.g.
*/
args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
args->pag = xfs_perag_get(mp, args->agno);
args->minleft = 0;
error = xfs_alloc_fix_freelist(args, 0);
args->minleft = minleft;
if (error) {
trace_xfs_alloc_vextent_nofix(args);
goto error0;
}
if (!args->agbp) {
trace_xfs_alloc_vextent_noagbp(args);
break;
}
args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
if ((error = xfs_alloc_ag_vextent(args)))
goto error0;
break;
case XFS_ALLOCTYPE_START_BNO:
/*
* Try near allocation first, then anywhere-in-ag after
* the first a.g. fails.
*/
if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) &&
(mp->m_flags & XFS_MOUNT_32BITINODES)) {
args->fsbno = XFS_AGB_TO_FSB(mp,
((mp->m_agfrotor / rotorstep) %
mp->m_sb.sb_agcount), 0);
bump_rotor = 1;
}
args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
args->type = XFS_ALLOCTYPE_NEAR_BNO;
/* FALLTHROUGH */
case XFS_ALLOCTYPE_ANY_AG:
case XFS_ALLOCTYPE_START_AG:
case XFS_ALLOCTYPE_FIRST_AG:
/*
* Rotate through the allocation groups looking for a winner.
*/
if (type == XFS_ALLOCTYPE_ANY_AG) {
/*
* Start with the last place we left off.
*/
args->agno = sagno = (mp->m_agfrotor / rotorstep) %
mp->m_sb.sb_agcount;
args->type = XFS_ALLOCTYPE_THIS_AG;
flags = XFS_ALLOC_FLAG_TRYLOCK;
} else if (type == XFS_ALLOCTYPE_FIRST_AG) {
/*
* Start with allocation group given by bno.
*/
args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
args->type = XFS_ALLOCTYPE_THIS_AG;
sagno = 0;
flags = 0;
} else {
if (type == XFS_ALLOCTYPE_START_AG)
args->type = XFS_ALLOCTYPE_THIS_AG;
/*
* Start with the given allocation group.
*/
args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
flags = XFS_ALLOC_FLAG_TRYLOCK;
}
/*
* Loop over allocation groups twice; first time with
* trylock set, second time without.
*/
for (;;) {
args->pag = xfs_perag_get(mp, args->agno);
if (no_min)
	args->minleft = 0;
error = xfs_alloc_fix_freelist(args, flags);
args->minleft = minleft;
if (error) {
trace_xfs_alloc_vextent_nofix(args);
goto error0;
}
/*
* If we get a buffer back then the allocation will fly.
*/
if (args->agbp) {
if ((error = xfs_alloc_ag_vextent(args)))
goto error0;
break;
}
trace_xfs_alloc_vextent_loopfailed(args);
/*
* Didn't work, figure out the next iteration.
*/
if (args->agno == sagno &&
type == XFS_ALLOCTYPE_START_BNO)
args->type = XFS_ALLOCTYPE_THIS_AG;
/*
* For the first allocation, we can try any AG to get
* space. However, if we already have allocated a
* block, we don't want to try AGs whose number is below
* sagno. Otherwise, we may end up with out-of-order
* locking of AGF, which might cause deadlock.
*/
if (++(args->agno) == mp->m_sb.sb_agcount) {
if (args->firstblock != NULLFSBLOCK)
args->agno = sagno;
else
args->agno = 0;
}
/*
* Reached the starting a.g., must either be done
* or switch to non-trylock mode.
*/
if (args->agno == sagno) {
if (no_min == 1) {
args->agbno = NULLAGBLOCK;
trace_xfs_alloc_vextent_allfailed(args);
break;
}
if (flags == 0) {
no_min = 1;
} else {
flags = 0;
if (type == XFS_ALLOCTYPE_START_BNO) {
args->agbno = XFS_FSB_TO_AGBNO(mp,
args->fsbno);
args->type = XFS_ALLOCTYPE_NEAR_BNO;
}
}
}
xfs_perag_put(args->pag);
}
if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
if (args->agno == sagno)
mp->m_agfrotor = (mp->m_agfrotor + 1) %
(mp->m_sb.sb_agcount * rotorstep);
else
mp->m_agfrotor = (args->agno * rotorstep + 1) %
(mp->m_sb.sb_agcount * rotorstep);
}
break;
default:
ASSERT(0);
/* NOTREACHED */
}
if (args->agbno == NULLAGBLOCK)
args->fsbno = NULLFSBLOCK;
else {
args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
#ifdef DEBUG
ASSERT(args->len >= args->minlen);
ASSERT(args->len <= args->maxlen);
ASSERT(args->agbno % args->alignment == 0);
XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
args->len);
#endif
}
xfs_perag_put(args->pag);
return 0;
error0:
xfs_perag_put(args->pag);
return error;
}
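/*
 * (Added note on the rotor arithmetic above, not in the original: with
 * rotorstep == 1, every ANY_AG allocation that succeeds in the starting AG
 * advances m_agfrotor to the next AG; with rotorstep == 4, m_agfrotor must
 * be bumped four times before m_agfrotor / rotorstep moves on, so roughly
 * four allocations land in each AG before the rotor steps.)
 */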
/*
* Free an extent.
* Just break up the extent address and hand off to xfs_free_ag_extent
* after fixing up the freelist.
*/
int /* error */
xfs_free_extent(
xfs_trans_t *tp, /* transaction pointer */
xfs_fsblock_t bno, /* starting block number of extent */
xfs_extlen_t len) /* length of extent */
{
xfs_alloc_arg_t args;
int error;
ASSERT(len != 0);
memset(&args, 0, sizeof(xfs_alloc_arg_t));
args.tp = tp;
args.mp = tp->t_mountp;
/*
* validate that the block number is legal - this enables us to detect
* and handle silent filesystem corruption rather than crashing.
*/
args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
if (args.agno >= args.mp->m_sb.sb_agcount)
return EFSCORRUPTED;
args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
if (args.agbno >= args.mp->m_sb.sb_agblocks)
return EFSCORRUPTED;
args.pag = xfs_perag_get(args.mp, args.agno);
ASSERT(args.pag);
error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
if (error)
goto error0;
/* validate the extent size is legal now we have the agf locked */
if (args.agbno + len >
be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
error = EFSCORRUPTED;
goto error0;
}
error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
if (!error)
xfs_alloc_busy_insert(tp, args.agno, args.agbno, len, 0);
error0:
xfs_perag_put(args.pag);
return error;
}
void
xfs_alloc_busy_insert(
struct xfs_trans *tp,
xfs_agnumber_t agno,
xfs_agblock_t bno,
xfs_extlen_t len,
unsigned int flags)
{
struct xfs_busy_extent *new;
struct xfs_busy_extent *busyp;
struct xfs_perag *pag;
struct rb_node **rbp;
struct rb_node *parent = NULL;
new = kmem_zalloc(sizeof(struct xfs_busy_extent), KM_MAYFAIL);
if (!new) {
/*
* No memory! Since it is now not possible to track the free
* block, make this a synchronous transaction to ensure that
* the block is not reused before this transaction commits.
*/
trace_xfs_alloc_busy_enomem(tp->t_mountp, agno, bno, len);
xfs_trans_set_sync(tp);
return;
}
new->agno = agno;
new->bno = bno;
new->length = len;
INIT_LIST_HEAD(&new->list);
new->flags = flags;
/* trace before insert to be able to see failed inserts */
trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len);
pag = xfs_perag_get(tp->t_mountp, new->agno);
spin_lock(&pag->pagb_lock);
rbp = &pag->pagb_tree.rb_node;
while (*rbp) {
parent = *rbp;
busyp = rb_entry(parent, struct xfs_busy_extent, rb_node);
if (new->bno < busyp->bno) {
rbp = &(*rbp)->rb_left;
ASSERT(new->bno + new->length <= busyp->bno);
} else if (new->bno > busyp->bno) {
rbp = &(*rbp)->rb_right;
ASSERT(bno >= busyp->bno + busyp->length);
} else {
ASSERT(0);
}
}
rb_link_node(&new->rb_node, parent, rbp);
rb_insert_color(&new->rb_node, &pag->pagb_tree);
list_add(&new->list, &tp->t_busy);
spin_unlock(&pag->pagb_lock);
xfs_perag_put(pag);
}
/*
* Search for a busy extent within the range of the extent we are about to
* allocate. xfs_alloc_busy_search() takes pagb_lock itself, so the caller
* must not already hold it. This function returns 0 for no overlapping
* busy extent, -1 for an overlapping but not exact busy extent, and 1 for
* an exact match. A non-zero return thus indicates an overlap that will
* require a synchronous transaction, while still distinguishing between a
* partial and an exact match.
*/
int
xfs_alloc_busy_search(
struct xfs_mount *mp,
xfs_agnumber_t agno,
xfs_agblock_t bno,
xfs_extlen_t len)
{
struct xfs_perag *pag;
struct rb_node *rbp;
struct xfs_busy_extent *busyp;
int match = 0;
pag = xfs_perag_get(mp, agno);
spin_lock(&pag->pagb_lock);
rbp = pag->pagb_tree.rb_node;
/* find closest start bno overlap */
while (rbp) {
busyp = rb_entry(rbp, struct xfs_busy_extent, rb_node);
if (bno < busyp->bno) {
/* may overlap, but exact start block is lower */
if (bno + len > busyp->bno)
match = -1;
rbp = rbp->rb_left;
} else if (bno > busyp->bno) {
/* may overlap, but exact start block is higher */
if (bno < busyp->bno + busyp->length)
match = -1;
rbp = rbp->rb_right;
} else {
/* bno matches busyp, length determines exact match */
match = (busyp->length == len) ? 1 : -1;
break;
}
}
spin_unlock(&pag->pagb_lock);
xfs_perag_put(pag);
return match;
}
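/*
 * (Hypothetical usage sketch, not taken from this file: a caller that must
 * not hand out busy blocks without a log force could do
 *
 *	if (xfs_alloc_busy_search(mp, agno, bno, len))
 *		xfs_trans_set_sync(tp);
 *
 * making the transaction synchronous whenever the extent overlaps a busy
 * extent, whether partially or exactly.)
 */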
/*
* The found free extent [fbno, fend] overlaps part or all of the given busy
* extent. If the overlap covers the beginning, the end, or all of the busy
* extent, the overlapping portion can be made unbusy and used for the
* allocation. We can't split a busy extent because we can't modify a
* transaction/CIL context busy list, but we can update an entry's block
* number or length.
*
* Returns true if the extent can safely be reused, or false if the search
* needs to be restarted.
*/
STATIC bool
xfs_alloc_busy_update_extent(
struct xfs_mount *mp,
struct xfs_perag *pag,
struct xfs_busy_extent *busyp,
xfs_agblock_t fbno,
xfs_extlen_t flen,
bool userdata)
{
xfs_agblock_t fend = fbno + flen;
xfs_agblock_t bbno = busyp->bno;
xfs_agblock_t bend = bbno + busyp->length;
/*
* This extent is currently being discarded. Give the thread
* performing the discard a chance to mark the extent unbusy
* and retry.
*/
if (busyp->flags & XFS_ALLOC_BUSY_DISCARDED) {
spin_unlock(&pag->pagb_lock);
delay(1);
spin_lock(&pag->pagb_lock);
return false;
}
/*
* If there is a busy extent overlapping a user allocation, we have
* no choice but to force the log and retry the search.
*
* Fortunately this does not happen during normal operation, but
* only if the filesystem is very low on space and has to dip into
* the AGFL for normal allocations.
*/
if (userdata)
goto out_force_log;
if (bbno < fbno && bend > fend) {
/*
* Case 1:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +---------+
* fbno fend
*/
/*
* We would have to split the busy extent to be able to track
* it correctly, which we cannot do because we would have to
* modify the list of busy extents attached to the transaction
* or CIL context, which is immutable.
*
* Force out the log to clear the busy extent and retry the
* search.
*/
goto out_force_log;
} else if (bbno >= fbno && bend <= fend) {
/*
* Case 2:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +-----------------+
* fbno fend
*
* Case 3:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +--------------------------+
* fbno fend
*
* Case 4:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +--------------------------+
* fbno fend
*
* Case 5:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +-----------------------------------+
* fbno fend
*
*/
/*
* The busy extent is fully covered by the extent we are
* allocating, and can simply be removed from the rbtree.
* However we cannot remove it from the immutable list
* tracking busy extents in the transaction or CIL context,
* so set the length to zero to mark it invalid.
*
* We also need to restart the busy extent search from the
* tree root, because erasing the node can rearrange the
* tree topology.
*/
rb_erase(&busyp->rb_node, &pag->pagb_tree);
busyp->length = 0;
return false;
} else if (fend < bend) {
/*
* Case 6:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +---------+
* fbno fend
*
* Case 7:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +------------------+
* fbno fend
*
*/
busyp->bno = fend;
} else if (bbno < fbno) {
/*
* Case 8:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +-------------+
* fbno fend
*
* Case 9:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +----------------------+
* fbno fend
*/
busyp->length = fbno - busyp->bno;
} else {
ASSERT(0);
}
trace_xfs_alloc_busy_reuse(mp, pag->pag_agno, fbno, flen);
return true;
out_force_log:
spin_unlock(&pag->pagb_lock);
xfs_log_force(mp, XFS_LOG_SYNC);
trace_xfs_alloc_busy_force(mp, pag->pag_agno, fbno, flen);
spin_lock(&pag->pagb_lock);
return false;
}
/*
* For a given extent [fbno, flen], make sure we can reuse it safely.
*/
void
xfs_alloc_busy_reuse(
struct xfs_mount *mp,
xfs_agnumber_t agno,
xfs_agblock_t fbno,
xfs_extlen_t flen,
bool userdata)
{
struct xfs_perag *pag;
struct rb_node *rbp;
ASSERT(flen > 0);
pag = xfs_perag_get(mp, agno);
spin_lock(&pag->pagb_lock);
restart:
rbp = pag->pagb_tree.rb_node;
while (rbp) {
struct xfs_busy_extent *busyp =
rb_entry(rbp, struct xfs_busy_extent, rb_node);
xfs_agblock_t bbno = busyp->bno;
xfs_agblock_t bend = bbno + busyp->length;
if (fbno + flen <= bbno) {
rbp = rbp->rb_left;
continue;
} else if (fbno >= bend) {
rbp = rbp->rb_right;
continue;
}
if (!xfs_alloc_busy_update_extent(mp, pag, busyp, fbno, flen,
userdata))
goto restart;
}
spin_unlock(&pag->pagb_lock);
xfs_perag_put(pag);
}
/*
* For a given extent [fbno, flen], search the busy extent list to find a
* subset of the extent that is not busy. If *rlen is smaller than
* args->minlen no suitable extent could be found, and the higher level
* code needs to force out the log and retry the allocation.
*/
STATIC void
xfs_alloc_busy_trim(
struct xfs_alloc_arg *args,
xfs_agblock_t bno,
xfs_extlen_t len,
xfs_agblock_t *rbno,
xfs_extlen_t *rlen)
{
xfs_agblock_t fbno;
xfs_extlen_t flen;
struct rb_node *rbp;
ASSERT(len > 0);
spin_lock(&args->pag->pagb_lock);
restart:
fbno = bno;
flen = len;
rbp = args->pag->pagb_tree.rb_node;
while (rbp && flen >= args->minlen) {
struct xfs_busy_extent *busyp =
rb_entry(rbp, struct xfs_busy_extent, rb_node);
xfs_agblock_t fend = fbno + flen;
xfs_agblock_t bbno = busyp->bno;
xfs_agblock_t bend = bbno + busyp->length;
if (fend <= bbno) {
rbp = rbp->rb_left;
continue;
} else if (fbno >= bend) {
rbp = rbp->rb_right;
continue;
}
/*
* If this is a metadata allocation, try to reuse the busy
* extent instead of trimming the allocation.
*/
if (!args->userdata &&
!(busyp->flags & XFS_ALLOC_BUSY_DISCARDED)) {
if (!xfs_alloc_busy_update_extent(args->mp, args->pag,
busyp, fbno, flen,
false))
goto restart;
continue;
}
if (bbno <= fbno) {
/* start overlap */
/*
* Case 1:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +---------+
* fbno fend
*
* Case 2:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +-------------+
* fbno fend
*
* Case 3:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +-------------+
* fbno fend
*
* Case 4:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +-----------------+
* fbno fend
*
* No unbusy region in extent, return failure.
*/
if (fend <= bend)
goto fail;
/*
* Case 5:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +----------------------+
* fbno fend
*
* Case 6:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +--------------------------+
* fbno fend
*
* Needs to be trimmed to:
* +-------+
* fbno fend
*/
fbno = bend;
} else if (bend >= fend) {
/* end overlap */
/*
* Case 7:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +------------------+
* fbno fend
*
* Case 8:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +--------------------------+
* fbno fend
*
* Needs to be trimmed to:
* +-------+
* fbno fend
*/
fend = bbno;
} else {
/* middle overlap */
/*
* Case 9:
* bbno bend
* +BBBBBBBBBBBBBBBBB+
* +-----------------------------------+
* fbno fend
*
* Can be trimmed to:
* +-------+ OR +-------+
* fbno fend fbno fend
*
* Backward allocation leads to significant
* fragmentation of directories, which degrades
* directory performance, therefore we always want to
* choose the option that produces forward allocation
* patterns.
* Preferring the lower bno extent will make the next
* request use "fend" as the start of the next
* allocation; if the segment is no longer busy at
* that point, we'll get a contiguous allocation, but
* even if it is still busy, we will get a forward
* allocation.
* We try to avoid choosing the segment at "bend",
* because that can lead to the next allocation
* taking the segment at "fbno", which would be a
* backward allocation. We only use the segment at
* "fbno" if it is much larger than the current
* requested size, because in that case there's a
* good chance subsequent allocations will be
* contiguous.
*/
if (bbno - fbno >= args->maxlen) {
/* left candidate fits perfect */
fend = bbno;
} else if (fend - bend >= args->maxlen * 4) {
/* right candidate has enough free space */
fbno = bend;
} else if (bbno - fbno >= args->minlen) {
/* left candidate fits minimum requirement */
fend = bbno;
} else {
goto fail;
}
}
flen = fend - fbno;
}
spin_unlock(&args->pag->pagb_lock);
if (fbno != bno || flen != len) {
trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len,
fbno, flen);
}
*rbno = fbno;
*rlen = flen;
return;
fail:
/*
* Return a zero extent length as the failure indication. All callers
* re-check whether the trimmed extent satisfies the minlen requirement.
*/
spin_unlock(&args->pag->pagb_lock);
trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
*rbno = fbno;
*rlen = 0;
}
static void
xfs_alloc_busy_clear_one(
struct xfs_mount *mp,
struct xfs_perag *pag,
struct xfs_busy_extent *busyp)
{
if (busyp->length) {
trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno,
busyp->length);
rb_erase(&busyp->rb_node, &pag->pagb_tree);
}
list_del_init(&busyp->list);
kmem_free(busyp);
}
/*
* Remove all extents on the passed in list from the busy extents tree.
* If do_discard is set skip extents that need to be discarded, and mark
* these as undergoing a discard operation instead.
*/
void
xfs_alloc_busy_clear(
struct xfs_mount *mp,
struct list_head *list,
bool do_discard)
{
struct xfs_busy_extent *busyp, *n;
struct xfs_perag *pag = NULL;
xfs_agnumber_t agno = NULLAGNUMBER;
list_for_each_entry_safe(busyp, n, list, list) {
if (busyp->agno != agno) {
if (pag) {
spin_unlock(&pag->pagb_lock);
xfs_perag_put(pag);
}
pag = xfs_perag_get(mp, busyp->agno);
spin_lock(&pag->pagb_lock);
agno = busyp->agno;
}
if (do_discard && busyp->length &&
!(busyp->flags & XFS_ALLOC_BUSY_SKIP_DISCARD))
busyp->flags = XFS_ALLOC_BUSY_DISCARDED;
else
xfs_alloc_busy_clear_one(mp, pag, busyp);
}
if (pag) {
spin_unlock(&pag->pagb_lock);
xfs_perag_put(pag);
}
}
/*
* Callback for list_sort to sort busy extents by the AG they reside in.
*/
int
xfs_busy_extent_ag_cmp(
void *priv,
struct list_head *a,
struct list_head *b)
{
return container_of(a, struct xfs_busy_extent, list)->agno -
container_of(b, struct xfs_busy_extent, list)->agno;
}
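/*
 * (Illustrative usage, an assumption rather than something shown in this
 * file: callers that walk a transaction's busy list per AG would sort it
 * first, e.g.
 *
 *	list_sort(NULL, &tp->t_busy, xfs_busy_extent_ag_cmp);
 *
 * so that xfs_alloc_busy_clear() only has to switch pagb_lock/perag
 * references once per allocation group.)
 */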
| gpl-2.0 |
oppo-source/R7plus-5.1-kernel-source | drivers/misc/sgi-xp/xpc_uv.c | 2628 | 47909 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* Cross Partition Communication (XPC) uv-based functions.
*
* Architecture specific implementation of common functions.
*
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/uv/uv_hub.h>
#if defined CONFIG_X86_64
#include <asm/uv/bios.h>
#include <asm/uv/uv_irq.h>
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#endif
#include "../sgi-gru/gru.h"
#include "../sgi-gru/grukservices.h"
#include "xpc.h"
#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
struct uv_IO_APIC_route_entry {
__u64 vector : 8,
delivery_mode : 3,
dest_mode : 1,
delivery_status : 1,
polarity : 1,
__reserved_1 : 1,
trigger : 1,
mask : 1,
__reserved_2 : 15,
dest : 32;
};
#endif
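/*
 * (Added note, not in the original file: the bitfield above mirrors the
 * layout of the x86 structure of the same name so the 64-bit MMR value can
 * be decoded identically on ia64 - the interrupt vector occupies the low
 * 8 bits and the destination id the upper 32 bits.)
 */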
static struct xpc_heartbeat_uv *xpc_heartbeat_uv;
#define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES)
#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
XPC_ACTIVATE_MSG_SIZE_UV)
#define XPC_ACTIVATE_IRQ_NAME "xpc_activate"
#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES)
#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
XPC_NOTIFY_MSG_SIZE_UV)
#define XPC_NOTIFY_IRQ_NAME "xpc_notify"
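/*
 * (Added sizing note, constants assumed for illustration: if
 * GRU_CACHE_LINE_BYTES == 64 and XP_MAX_NPARTITIONS_UV == 256, an activate
 * message is 64 bytes and the activate mq 4 * 256 * 64 = 64KiB, while a
 * notify message is 128 bytes and the notify mq 128KiB.)
 */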
static int xpc_mq_node = -1;
static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
static int
xpc_setup_partitions_uv(void)
{
short partid;
struct xpc_partition_uv *part_uv;
for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
part_uv = &xpc_partitions[partid].sn.uv;
mutex_init(&part_uv->cached_activate_gru_mq_desc_mutex);
spin_lock_init(&part_uv->flags_lock);
part_uv->remote_act_state = XPC_P_AS_INACTIVE;
}
return 0;
}
static void
xpc_teardown_partitions_uv(void)
{
short partid;
struct xpc_partition_uv *part_uv;
unsigned long irq_flags;
for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
part_uv = &xpc_partitions[partid].sn.uv;
if (part_uv->cached_activate_gru_mq_desc != NULL) {
mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
kfree(part_uv->cached_activate_gru_mq_desc);
part_uv->cached_activate_gru_mq_desc = NULL;
mutex_unlock(&part_uv->
cached_activate_gru_mq_desc_mutex);
}
}
}
static int
xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
{
int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
#if defined CONFIG_X86_64
mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
UV_AFFINITY_CPU);
if (mq->irq < 0)
return mq->irq;
mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
mq->irq = SGI_XPC_ACTIVATE;
else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
mq->irq = SGI_XPC_NOTIFY;
else
return -EINVAL;
mq->mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mq->mmr_value);
#else
#error not a supported configuration
#endif
return 0;
}
static void
xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
{
#if defined CONFIG_X86_64
uv_teardown_irq(mq->irq);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
int mmr_pnode;
unsigned long mmr_value;
mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
mmr_value = 1UL << 16;
uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
#else
#error not a supported configuration
#endif
}
static int
xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
{
int ret;
#if defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
ret = sn_mq_watchlist_alloc(mmr_pnode, (void *)uv_gpa(mq->address),
mq->order, &mq->mmr_offset);
if (ret < 0) {
dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
ret);
return -EBUSY;
}
#elif defined CONFIG_X86_64
ret = uv_bios_mq_watchlist_alloc(uv_gpa(mq->address),
mq->order, &mq->mmr_offset);
if (ret < 0) {
dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
"ret=%d\n", ret);
return ret;
}
#else
#error not a supported configuration
#endif
mq->watchlist_num = ret;
return 0;
}
static void
xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
{
int ret;
int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
#if defined CONFIG_X86_64
ret = uv_bios_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != BIOS_STATUS_SUCCESS);
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
ret = sn_mq_watchlist_free(mmr_pnode, mq->watchlist_num);
BUG_ON(ret != SALRET_OK);
#else
#error not a supported configuration
#endif
}
static struct xpc_gru_mq_uv *
xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
irq_handler_t irq_handler)
{
enum xp_retval xp_ret;
int ret;
int nid;
int nasid;
int pg_order;
struct page *page;
struct xpc_gru_mq_uv *mq;
struct uv_IO_APIC_route_entry *mmr_value;
mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
if (mq == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
"a xpc_gru_mq_uv structure\n");
ret = -ENOMEM;
goto out_0;
}
mq->gru_mq_desc = kzalloc(sizeof(struct gru_message_queue_desc),
GFP_KERNEL);
if (mq->gru_mq_desc == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
"a gru_message_queue_desc structure\n");
ret = -ENOMEM;
goto out_1;
}
pg_order = get_order(mq_size);
mq->order = pg_order + PAGE_SHIFT;
mq_size = 1UL << mq->order;
mq->mmr_blade = uv_cpu_to_blade_id(cpu);
nid = cpu_to_node(cpu);
page = alloc_pages_exact_node(nid,
GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
pg_order);
if (page == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
ret = -ENOMEM;
goto out_2;
}
mq->address = page_address(page);
/* enable generation of irq when GRU mq operation occurs to this mq */
ret = xpc_gru_mq_watchlist_alloc_uv(mq);
if (ret != 0)
goto out_3;
ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
if (ret != 0)
goto out_4;
ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
if (ret != 0) {
dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
mq->irq, -ret);
goto out_5;
}
nasid = UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpu));
mmr_value = (struct uv_IO_APIC_route_entry *)&mq->mmr_value;
ret = gru_create_message_queue(mq->gru_mq_desc, mq->address, mq_size,
nasid, mmr_value->vector, mmr_value->dest);
if (ret != 0) {
dev_err(xpc_part, "gru_create_message_queue() returned "
"error=%d\n", ret);
ret = -EINVAL;
goto out_6;
}
/* allow other partitions to access this GRU mq */
xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
if (xp_ret != xpSuccess) {
ret = -EACCES;
goto out_6;
}
return mq;
/* something went wrong */
out_6:
free_irq(mq->irq, NULL);
out_5:
xpc_release_gru_mq_irq_uv(mq);
out_4:
xpc_gru_mq_watchlist_free_uv(mq);
out_3:
free_pages((unsigned long)mq->address, pg_order);
out_2:
kfree(mq->gru_mq_desc);
out_1:
kfree(mq);
out_0:
return ERR_PTR(ret);
}
static void
xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
{
unsigned int mq_size;
int pg_order;
int ret;
/* disallow other partitions to access GRU mq */
mq_size = 1UL << mq->order;
ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
BUG_ON(ret != xpSuccess);
/* unregister irq handler and release mq irq/vector mapping */
free_irq(mq->irq, NULL);
xpc_release_gru_mq_irq_uv(mq);
/* disable generation of irq when GRU mq op occurs to this mq */
xpc_gru_mq_watchlist_free_uv(mq);
pg_order = mq->order - PAGE_SHIFT;
free_pages((unsigned long)mq->address, pg_order);
kfree(mq);
}
static enum xp_retval
xpc_send_gru_msg(struct gru_message_queue_desc *gru_mq_desc, void *msg,
size_t msg_size)
{
enum xp_retval xp_ret;
int ret;
while (1) {
ret = gru_send_message_gpa(gru_mq_desc, msg, msg_size);
if (ret == MQE_OK) {
xp_ret = xpSuccess;
break;
}
if (ret == MQE_QUEUE_FULL) {
dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
"error=MQE_QUEUE_FULL\n");
/* !!! handle QLimit reached; delay & try again */
/* ??? Do we add a limit to the number of retries? */
(void)msleep_interruptible(10);
} else if (ret == MQE_CONGESTION) {
dev_dbg(xpc_chan, "gru_send_message_gpa() returned "
"error=MQE_CONGESTION\n");
/* !!! handle LB Overflow; simply try again */
/* ??? Do we add a limit to the number of retries? */
} else {
/* !!! Currently this is MQE_UNEXPECTED_CB_ERR */
dev_err(xpc_chan, "gru_send_message_gpa() returned "
"error=%d\n", ret);
xp_ret = xpGruSendMqError;
break;
}
}
return xp_ret;
}
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
unsigned long irq_flags;
short partid;
struct xpc_partition *part;
u8 act_state_req;
DBUG_ON(xpc_activate_IRQ_rcvd == 0);
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
part = &xpc_partitions[partid];
if (part->sn.uv.act_state_req == 0)
continue;
xpc_activate_IRQ_rcvd--;
BUG_ON(xpc_activate_IRQ_rcvd < 0);
act_state_req = part->sn.uv.act_state_req;
part->sn.uv.act_state_req = 0;
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
if (part->act_state == XPC_P_AS_INACTIVE)
xpc_activate_partition(part);
else if (part->act_state == XPC_P_AS_DEACTIVATING)
XPC_DEACTIVATE_PARTITION(part, xpReactivating);
} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
if (part->act_state == XPC_P_AS_INACTIVE)
xpc_activate_partition(part);
else
XPC_DEACTIVATE_PARTITION(part, xpReactivating);
} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);
} else {
BUG();
}
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (xpc_activate_IRQ_rcvd == 0)
break;
}
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}
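/*
 * (Added commentary, not in the original: the loop above snapshots and
 * clears each act_state_req under xpc_activate_IRQ_rcvd_lock, drops the
 * lock while the (de)activation work runs, then re-takes it before
 * checking whether any requests remain; a request raised for a partid the
 * scan has already passed simply leaves xpc_activate_IRQ_rcvd non-zero
 * for a later invocation.)
 */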
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
struct xpc_activate_mq_msghdr_uv *msg_hdr,
int part_setup,
int *wakeup_hb_checker)
{
unsigned long irq_flags;
struct xpc_partition_uv *part_uv = &part->sn.uv;
struct xpc_openclose_args *args;
part_uv->remote_act_state = msg_hdr->act_state;
switch (msg_hdr->type) {
case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
/* syncing of remote_act_state was just done above */
break;
case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
struct xpc_activate_mq_msg_activate_req_uv *msg;
/*
* ??? Do we deal here with ts_jiffies being different
* ??? if act_state != XPC_P_AS_INACTIVE instead of
* ??? below?
*/
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_activate_req_uv, hdr);
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (part_uv->act_state_req == 0)
xpc_activate_IRQ_rcvd++;
part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
part_uv->heartbeat_gpa = msg->heartbeat_gpa;
if (msg->activate_gru_mq_desc_gpa !=
part_uv->activate_gru_mq_desc_gpa) {
spin_lock(&part_uv->flags_lock);
part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
spin_unlock(&part_uv->flags_lock);
part_uv->activate_gru_mq_desc_gpa =
msg->activate_gru_mq_desc_gpa;
}
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
(*wakeup_hb_checker)++;
break;
}
case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
struct xpc_activate_mq_msg_deactivate_req_uv *msg;
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_deactivate_req_uv, hdr);
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (part_uv->act_state_req == 0)
xpc_activate_IRQ_rcvd++;
part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
part_uv->reason = msg->reason;
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
(*wakeup_hb_checker)++;
return;
}
case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;
if (!part_setup)
break;
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_closerequest_uv,
hdr);
args = &part->remote_openclose_args[msg->ch_number];
args->reason = msg->reason;
spin_lock_irqsave(&part->chctl_lock, irq_flags);
part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
xpc_wakeup_channel_mgr(part);
break;
}
case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
struct xpc_activate_mq_msg_chctl_closereply_uv *msg;
if (!part_setup)
break;
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_closereply_uv,
hdr);
spin_lock_irqsave(&part->chctl_lock, irq_flags);
part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
xpc_wakeup_channel_mgr(part);
break;
}
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;
if (!part_setup)
break;
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_openrequest_uv,
hdr);
args = &part->remote_openclose_args[msg->ch_number];
args->entry_size = msg->entry_size;
args->local_nentries = msg->local_nentries;
spin_lock_irqsave(&part->chctl_lock, irq_flags);
part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
xpc_wakeup_channel_mgr(part);
break;
}
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
struct xpc_activate_mq_msg_chctl_openreply_uv *msg;
if (!part_setup)
break;
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_openreply_uv, hdr);
args = &part->remote_openclose_args[msg->ch_number];
args->remote_nentries = msg->remote_nentries;
args->local_nentries = msg->local_nentries;
args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;
spin_lock_irqsave(&part->chctl_lock, irq_flags);
part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
xpc_wakeup_channel_mgr(part);
break;
}
case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;
if (!part_setup)
break;
msg = container_of(msg_hdr, struct
xpc_activate_mq_msg_chctl_opencomplete_uv, hdr);
spin_lock_irqsave(&part->chctl_lock, irq_flags);
part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
xpc_wakeup_channel_mgr(part);
break;
}
case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags |= XPC_P_ENGAGED_UV;
spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
break;
case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags &= ~XPC_P_ENGAGED_UV;
spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
break;
default:
dev_err(xpc_part, "received unknown activate_mq msg type=%d "
"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));
/* get hb checker to deactivate from the remote partition */
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (part_uv->act_state_req == 0)
xpc_activate_IRQ_rcvd++;
part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
part_uv->reason = xpBadMsgType;
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
(*wakeup_hb_checker)++;
return;
}
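/*
* A change in rp_ts_jiffies indicates the remote partition has rebooted
* since we last recorded its reserved page timestamp, so ask the
* heartbeat checker to reactivate it.
*/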
if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
part->remote_rp_ts_jiffies != 0) {
/*
* ??? Does what we do here need to be sensitive to
* ??? act_state or remote_act_state?
*/
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (part_uv->act_state_req == 0)
xpc_activate_IRQ_rcvd++;
part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
(*wakeup_hb_checker)++;
}
}
static irqreturn_t
xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
{
struct xpc_activate_mq_msghdr_uv *msg_hdr;
short partid;
struct xpc_partition *part;
int wakeup_hb_checker = 0;
int part_referenced;
while (1) {
msg_hdr = gru_get_next_message(xpc_activate_mq_uv->gru_mq_desc);
if (msg_hdr == NULL)
break;
partid = msg_hdr->partid;
if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() "
"received invalid partid=0x%x in message\n",
partid);
} else {
part = &xpc_partitions[partid];
part_referenced = xpc_part_ref(part);
xpc_handle_activate_mq_msg_uv(part, msg_hdr,
part_referenced,
&wakeup_hb_checker);
if (part_referenced)
xpc_part_deref(part);
}
gru_free_message(xpc_activate_mq_uv->gru_mq_desc, msg_hdr);
}
if (wakeup_hb_checker)
wake_up_interruptible(&xpc_activate_IRQ_wq);
return IRQ_HANDLED;
}
static enum xp_retval
xpc_cache_remote_gru_mq_desc_uv(struct gru_message_queue_desc *gru_mq_desc,
unsigned long gru_mq_desc_gpa)
{
enum xp_retval ret;
ret = xp_remote_memcpy(uv_gpa(gru_mq_desc), gru_mq_desc_gpa,
sizeof(struct gru_message_queue_desc));
if (ret == xpSuccess)
gru_mq_desc->mq = NULL;
return ret;
}
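/*
* Send a message on a partition's activate message queue, (re)caching a
* local copy of the remote GRU mq descriptor on demand. If the cached
* descriptor has been invalidated (e.g. by an activate request carrying a
* new descriptor gpa), a failed send loops back to refresh the cached
* copy before giving up.
*/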
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
int msg_type)
{
struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
struct xpc_partition_uv *part_uv = &part->sn.uv;
struct gru_message_queue_desc *gru_mq_desc;
unsigned long irq_flags;
enum xp_retval ret;
DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);
msg_hdr->type = msg_type;
msg_hdr->partid = xp_partition_id;
msg_hdr->act_state = part->act_state;
msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;
mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
if (gru_mq_desc == NULL) {
gru_mq_desc = kmalloc(sizeof(struct
gru_message_queue_desc),
GFP_KERNEL);
if (gru_mq_desc == NULL) {
ret = xpNoMemory;
goto done;
}
part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
}
ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
part_uv->
activate_gru_mq_desc_gpa);
if (ret != xpSuccess)
goto done;
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}
/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
msg_size);
if (ret != xpSuccess) {
smp_rmb(); /* ensure a fresh copy of part_uv->flags */
if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
goto again;
}
done:
mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
return ret;
}
static void
xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg,
size_t msg_size, int msg_type)
{
enum xp_retval ret;
ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
if (unlikely(ret != xpSuccess))
XPC_DEACTIVATE_PARTITION(part, ret);
}
static void
xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags,
void *msg, size_t msg_size, int msg_type)
{
struct xpc_partition *part = &xpc_partitions[ch->partid];
enum xp_retval ret;
ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type);
if (unlikely(ret != xpSuccess)) {
if (irq_flags != NULL)
spin_unlock_irqrestore(&ch->lock, *irq_flags);
XPC_DEACTIVATE_PARTITION(part, ret);
if (irq_flags != NULL)
spin_lock_irqsave(&ch->lock, *irq_flags);
}
}
static void
xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req)
{
unsigned long irq_flags;
struct xpc_partition_uv *part_uv = &part->sn.uv;
/*
* !!! Make our side think that the remote partition sent an activate
* !!! mq message our way by doing what the activate IRQ handler would
* !!! do had one really been sent.
*/
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (part_uv->act_state_req == 0)
xpc_activate_IRQ_rcvd++;
part_uv->act_state_req = act_state_req;
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
wake_up_interruptible(&xpc_activate_IRQ_wq);
}
static enum xp_retval
xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
size_t *len)
{
s64 status;
enum xp_retval ret;
#if defined CONFIG_X86_64
status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
(u64 *)len);
if (status == BIOS_STATUS_SUCCESS)
ret = xpSuccess;
else if (status == BIOS_STATUS_MORE_PASSES)
ret = xpNeedMoreInfo;
else
ret = xpBiosError;
#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
if (status == SALRET_OK)
ret = xpSuccess;
else if (status == SALRET_MORE_PASSES)
ret = xpNeedMoreInfo;
else
ret = xpSalError;
#else
#error not a supported configuration
#endif
return ret;
}
static int
xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp)
{
xpc_heartbeat_uv =
&xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat;
rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv);
rp->sn.uv.activate_gru_mq_desc_gpa =
uv_gpa(xpc_activate_mq_uv->gru_mq_desc);
return 0;
}
static void
xpc_allow_hb_uv(short partid)
{
}
static void
xpc_disallow_hb_uv(short partid)
{
}
static void
xpc_disallow_all_hbs_uv(void)
{
}
static void
xpc_increment_heartbeat_uv(void)
{
xpc_heartbeat_uv->value++;
}
static void
xpc_offline_heartbeat_uv(void)
{
xpc_increment_heartbeat_uv();
xpc_heartbeat_uv->offline = 1;
}
static void
xpc_online_heartbeat_uv(void)
{
xpc_increment_heartbeat_uv();
xpc_heartbeat_uv->offline = 0;
}
static void
xpc_heartbeat_init_uv(void)
{
xpc_heartbeat_uv->value = 1;
xpc_heartbeat_uv->offline = 0;
}
static void
xpc_heartbeat_exit_uv(void)
{
xpc_offline_heartbeat_uv();
}
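/*
* Fetch the remote partition's heartbeat and decide whether it is still
* alive: the heartbeat is considered missing only when the counter has
* not advanced since the last check and the remote side has not marked
* itself offline.
*/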
static enum xp_retval
xpc_get_remote_heartbeat_uv(struct xpc_partition *part)
{
struct xpc_partition_uv *part_uv = &part->sn.uv;
enum xp_retval ret;
ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat),
part_uv->heartbeat_gpa,
sizeof(struct xpc_heartbeat_uv));
if (ret != xpSuccess)
return ret;
if (part_uv->cached_heartbeat.value == part->last_heartbeat &&
!part_uv->cached_heartbeat.offline) {
ret = xpNoHeartbeat;
} else {
part->last_heartbeat = part_uv->cached_heartbeat.value;
}
return ret;
}
static void
xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
unsigned long remote_rp_gpa, int nasid)
{
short partid = remote_rp->SAL_partid;
struct xpc_partition *part = &xpc_partitions[partid];
struct xpc_activate_mq_msg_activate_req_uv msg;
part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */
part->remote_rp_ts_jiffies = remote_rp->ts_jiffies;
part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa;
part->sn.uv.activate_gru_mq_desc_gpa =
remote_rp->sn.uv.activate_gru_mq_desc_gpa;
/*
* ??? Is it a good idea to make this conditional on what is
* ??? potentially stale state information?
*/
if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) {
msg.rp_gpa = uv_gpa(xpc_rsvd_page);
msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa;
msg.activate_gru_mq_desc_gpa =
xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa;
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV);
}
if (part->act_state == XPC_P_AS_INACTIVE)
xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}
static void
xpc_request_partition_reactivation_uv(struct xpc_partition *part)
{
xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV);
}
static void
xpc_request_partition_deactivation_uv(struct xpc_partition *part)
{
struct xpc_activate_mq_msg_deactivate_req_uv msg;
/*
* ??? Is it a good idea to make this conditional on what is
* ??? potentially stale state information?
*/
if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING &&
part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) {
msg.reason = part->reason;
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV);
}
}
static void
xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part)
{
/* nothing needs to be done */
return;
}
static void
xpc_init_fifo_uv(struct xpc_fifo_head_uv *head)
{
head->first = NULL;
head->last = NULL;
spin_lock_init(&head->lock);
head->n_entries = 0;
}
static void *
xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head)
{
unsigned long irq_flags;
struct xpc_fifo_entry_uv *first;
spin_lock_irqsave(&head->lock, irq_flags);
first = head->first;
if (head->first != NULL) {
head->first = first->next;
if (head->first == NULL)
head->last = NULL;
head->n_entries--;
BUG_ON(head->n_entries < 0);
first->next = NULL;
}
spin_unlock_irqrestore(&head->lock, irq_flags);
return first;
}
static void
xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head,
struct xpc_fifo_entry_uv *last)
{
unsigned long irq_flags;
last->next = NULL;
spin_lock_irqsave(&head->lock, irq_flags);
if (head->last != NULL)
head->last->next = last;
else
head->first = last;
head->last = last;
head->n_entries++;
spin_unlock_irqrestore(&head->lock, irq_flags);
}
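/*
* Illustrative use of the FIFO primitives above (a sketch, not driver
* code): entries are embedded in a containing structure and recovered
* with container_of(), e.g.
*
* struct xpc_fifo_head_uv head;
* struct xpc_fifo_entry_uv *entry;
*
* xpc_init_fifo_uv(&head);
* xpc_put_fifo_entry_uv(&head, &msg_slot->next);
* entry = xpc_get_fifo_entry_uv(&head);
* if (entry != NULL)
* msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
*/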
static int
xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head)
{
return head->n_entries;
}
/*
* Set up the channel structures that are UV specific.
*/
static enum xp_retval
xpc_setup_ch_structures_uv(struct xpc_partition *part)
{
struct xpc_channel_uv *ch_uv;
int ch_number;
for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
ch_uv = &part->channels[ch_number].sn.uv;
xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
xpc_init_fifo_uv(&ch_uv->recv_msg_list);
}
return xpSuccess;
}
/*
* Tear down the channel structures that are UV specific.
*/
static void
xpc_teardown_ch_structures_uv(struct xpc_partition *part)
{
/* nothing needs to be done */
return;
}
static enum xp_retval
xpc_make_first_contact_uv(struct xpc_partition *part)
{
struct xpc_activate_mq_msg_uv msg;
/*
* We send a sync msg to get the remote partition's remote_act_state
* updated to our current act_state which at this point should
* be XPC_P_AS_ACTIVATING.
*/
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV);
while (!((part->sn.uv.remote_act_state == XPC_P_AS_ACTIVATING) ||
(part->sn.uv.remote_act_state == XPC_P_AS_ACTIVE))) {
dev_dbg(xpc_part, "waiting to make first contact with "
"partition %d\n", XPC_PARTID(part));
/* wait a quarter of a second or so */
(void)msleep_interruptible(250);
if (part->act_state == XPC_P_AS_DEACTIVATING)
return part->reason;
}
return xpSuccess;
}
static u64
xpc_get_chctl_all_flags_uv(struct xpc_partition *part)
{
unsigned long irq_flags;
union xpc_channel_ctl_flags chctl;
spin_lock_irqsave(&part->chctl_lock, irq_flags);
chctl = part->chctl;
if (chctl.all_flags != 0)
part->chctl.all_flags = 0;
spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
return chctl.all_flags;
}
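/*
* The two allocators below first try the full number of entries and, if
* kzalloc() fails, retry with progressively fewer entries, shrinking
* ch->local_nentries (or ch->remote_nentries) to match whatever
* allocation finally succeeded.
*/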
static enum xp_retval
xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch)
{
struct xpc_channel_uv *ch_uv = &ch->sn.uv;
struct xpc_send_msg_slot_uv *msg_slot;
unsigned long irq_flags;
int nentries;
int entry;
size_t nbytes;
for (nentries = ch->local_nentries; nentries > 0; nentries--) {
nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv);
ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL);
if (ch_uv->send_msg_slots == NULL)
continue;
for (entry = 0; entry < nentries; entry++) {
msg_slot = &ch_uv->send_msg_slots[entry];
msg_slot->msg_slot_number = entry;
xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list,
&msg_slot->next);
}
spin_lock_irqsave(&ch->lock, irq_flags);
if (nentries < ch->local_nentries)
ch->local_nentries = nentries;
spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpSuccess;
}
return xpNoMemory;
}
static enum xp_retval
xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch)
{
struct xpc_channel_uv *ch_uv = &ch->sn.uv;
struct xpc_notify_mq_msg_uv *msg_slot;
unsigned long irq_flags;
int nentries;
int entry;
size_t nbytes;
for (nentries = ch->remote_nentries; nentries > 0; nentries--) {
nbytes = nentries * ch->entry_size;
ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL);
if (ch_uv->recv_msg_slots == NULL)
continue;
for (entry = 0; entry < nentries; entry++) {
msg_slot = ch_uv->recv_msg_slots +
entry * ch->entry_size;
msg_slot->hdr.msg_slot_number = entry;
}
spin_lock_irqsave(&ch->lock, irq_flags);
if (nentries < ch->remote_nentries)
ch->remote_nentries = nentries;
spin_unlock_irqrestore(&ch->lock, irq_flags);
return xpSuccess;
}
return xpNoMemory;
}
/*
* Allocate msg_slots associated with the channel.
*/
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
enum xp_retval ret;
struct xpc_channel_uv *ch_uv = &ch->sn.uv;
DBUG_ON(ch->flags & XPC_C_SETUP);
ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
gru_message_queue_desc),
GFP_KERNEL);
if (ch_uv->cached_notify_gru_mq_desc == NULL)
return xpNoMemory;
ret = xpc_allocate_send_msg_slot_uv(ch);
if (ret == xpSuccess) {
ret = xpc_allocate_recv_msg_slot_uv(ch);
if (ret != xpSuccess) {
kfree(ch_uv->send_msg_slots);
xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
}
}
return ret;
}
/*
* Free up msg_slots and clear other stuff that was set up for the specified
* channel.
*/
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
struct xpc_channel_uv *ch_uv = &ch->sn.uv;
DBUG_ON(!spin_is_locked(&ch->lock));
kfree(ch_uv->cached_notify_gru_mq_desc);
ch_uv->cached_notify_gru_mq_desc = NULL;
if (ch->flags & XPC_C_SETUP) {
xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
kfree(ch_uv->send_msg_slots);
xpc_init_fifo_uv(&ch_uv->recv_msg_list);
kfree(ch_uv->recv_msg_slots);
}
}
static void
xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_activate_mq_msg_chctl_closerequest_uv msg;
msg.ch_number = ch->number;
msg.reason = ch->reason;
xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV);
}
static void
xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_activate_mq_msg_chctl_closereply_uv msg;
msg.ch_number = ch->number;
xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV);
}
static void
xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_activate_mq_msg_chctl_openrequest_uv msg;
msg.ch_number = ch->number;
msg.entry_size = ch->entry_size;
msg.local_nentries = ch->local_nentries;
xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV);
}
static void
xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_activate_mq_msg_chctl_openreply_uv msg;
msg.ch_number = ch->number;
msg.local_nentries = ch->local_nentries;
msg.remote_nentries = ch->remote_nentries;
msg.notify_gru_mq_desc_gpa = uv_gpa(xpc_notify_mq_uv->gru_mq_desc);
xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV);
}
static void
xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags)
{
struct xpc_activate_mq_msg_chctl_opencomplete_uv msg;
msg.ch_number = ch->number;
xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV);
}
static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
unsigned long irq_flags;
spin_lock_irqsave(&part->chctl_lock, irq_flags);
part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
spin_unlock_irqrestore(&part->chctl_lock, irq_flags);
xpc_wakeup_channel_mgr(part);
}
static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
unsigned long gru_mq_desc_gpa)
{
struct xpc_channel_uv *ch_uv = &ch->sn.uv;
DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
gru_mq_desc_gpa);
}
static void
xpc_indicate_partition_engaged_uv(struct xpc_partition *part)
{
struct xpc_activate_mq_msg_uv msg;
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV);
}
static void
xpc_indicate_partition_disengaged_uv(struct xpc_partition *part)
{
struct xpc_activate_mq_msg_uv msg;
xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg),
XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV);
}
static void
xpc_assume_partition_disengaged_uv(short partid)
{
struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv;
unsigned long irq_flags;
spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
part_uv->flags &= ~XPC_P_ENGAGED_UV;
spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
}
static int
xpc_partition_engaged_uv(short partid)
{
return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0;
}
static int
xpc_any_partition_engaged_uv(void)
{
struct xpc_partition_uv *part_uv;
short partid;
for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
part_uv = &xpc_partitions[partid].sn.uv;
if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0)
return 1;
}
return 0;
}
static enum xp_retval
xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags,
struct xpc_send_msg_slot_uv **address_of_msg_slot)
{
enum xp_retval ret;
struct xpc_send_msg_slot_uv *msg_slot;
struct xpc_fifo_entry_uv *entry;
while (1) {
entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list);
if (entry != NULL)
break;
if (flags & XPC_NOWAIT)
return xpNoWait;
ret = xpc_allocate_msg_wait(ch);
if (ret != xpInterrupted && ret != xpTimeout)
return ret;
}
msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next);
*address_of_msg_slot = msg_slot;
return xpSuccess;
}
static void
xpc_free_msg_slot_uv(struct xpc_channel *ch,
struct xpc_send_msg_slot_uv *msg_slot)
{
xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next);
/* wakeup anyone waiting for a free msg slot */
if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
wake_up(&ch->msg_allocate_wq);
}
static void
xpc_notify_sender_uv(struct xpc_channel *ch,
struct xpc_send_msg_slot_uv *msg_slot,
enum xp_retval reason)
{
xpc_notify_func func = msg_slot->func;
if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) {
atomic_dec(&ch->n_to_notify);
dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p "
"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
msg_slot->msg_slot_number, ch->partid, ch->number);
func(reason, ch->partid, ch->number, msg_slot->key);
dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p "
"msg_slot_number=%d partid=%d channel=%d\n", msg_slot,
msg_slot->msg_slot_number, ch->partid, ch->number);
}
}
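/*
* Handle an ACK for a previously sent message. Message slot numbers
* increase monotonically and map to a slot via
* (msg_slot_number % local_nentries); bumping msg_slot_number by
* local_nentries here pre-assigns the number the slot will carry on its
* next use, which is what the BUG_ON() consistency check below relies on.
*/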
static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
struct xpc_notify_mq_msg_uv *msg)
{
struct xpc_send_msg_slot_uv *msg_slot;
int entry = msg->hdr.msg_slot_number % ch->local_nentries;
msg_slot = &ch->sn.uv.send_msg_slots[entry];
BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
msg_slot->msg_slot_number += ch->local_nentries;
if (msg_slot->func != NULL)
xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);
xpc_free_msg_slot_uv(ch, msg_slot);
}
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
struct xpc_notify_mq_msg_uv *msg)
{
struct xpc_partition_uv *part_uv = &part->sn.uv;
struct xpc_channel *ch;
struct xpc_channel_uv *ch_uv;
struct xpc_notify_mq_msg_uv *msg_slot;
unsigned long irq_flags;
int ch_number = msg->hdr.ch_number;
if (unlikely(ch_number >= part->nchannels)) {
dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
"channel number=0x%x in message from partid=%d\n",
ch_number, XPC_PARTID(part));
/* get hb checker to deactivate from the remote partition */
spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
if (part_uv->act_state_req == 0)
xpc_activate_IRQ_rcvd++;
part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
part_uv->reason = xpBadChannelNumber;
spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
wake_up_interruptible(&xpc_activate_IRQ_wq);
return;
}
ch = &part->channels[ch_number];
xpc_msgqueue_ref(ch);
if (!(ch->flags & XPC_C_CONNECTED)) {
xpc_msgqueue_deref(ch);
return;
}
/* see if we're really dealing with an ACK for a previously sent msg */
if (msg->hdr.size == 0) {
xpc_handle_notify_mq_ack_uv(ch, msg);
xpc_msgqueue_deref(ch);
return;
}
/* we're dealing with a normal message sent via the notify_mq */
ch_uv = &ch->sn.uv;
msg_slot = ch_uv->recv_msg_slots +
(msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;
BUG_ON(msg_slot->hdr.size != 0);
memcpy(msg_slot, msg, msg->hdr.size);
xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);
if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
/*
* If there is an existing idle kthread get it to deliver
* the payload, otherwise we'll have to get the channel mgr
* for this partition to create a kthread to do the delivery.
*/
if (atomic_read(&ch->kthreads_idle) > 0)
wake_up_nr(&ch->idle_wq, 1);
else
xpc_send_chctl_local_msgrequest_uv(part, ch->number);
}
xpc_msgqueue_deref(ch);
}
static irqreturn_t
xpc_handle_notify_IRQ_uv(int irq, void *dev_id)
{
struct xpc_notify_mq_msg_uv *msg;
short partid;
struct xpc_partition *part;
while ((msg = gru_get_next_message(xpc_notify_mq_uv->gru_mq_desc)) !=
NULL) {
partid = msg->hdr.partid;
if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received "
"invalid partid=0x%x in message\n", partid);
} else {
part = &xpc_partitions[partid];
if (xpc_part_ref(part)) {
xpc_handle_notify_mq_msg_uv(part, msg);
xpc_part_deref(part);
}
}
gru_free_message(xpc_notify_mq_uv->gru_mq_desc, msg);
}
return IRQ_HANDLED;
}
static int
xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch)
{
return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list);
}
static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
struct xpc_channel *ch = &part->channels[ch_number];
int ndeliverable_payloads;
xpc_msgqueue_ref(ch);
ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);
if (ndeliverable_payloads > 0 &&
(ch->flags & XPC_C_CONNECTED) &&
(ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {
xpc_activate_kthreads(ch, ndeliverable_payloads);
}
xpc_msgqueue_deref(ch);
}
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
u16 payload_size, u8 notify_type, xpc_notify_func func,
void *key)
{
enum xp_retval ret = xpSuccess;
struct xpc_send_msg_slot_uv *msg_slot = NULL;
struct xpc_notify_mq_msg_uv *msg;
u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
size_t msg_size;
DBUG_ON(notify_type != XPC_N_CALL);
msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
if (msg_size > ch->entry_size)
return xpPayloadTooBig;
xpc_msgqueue_ref(ch);
if (ch->flags & XPC_C_DISCONNECTING) {
ret = ch->reason;
goto out_1;
}
if (!(ch->flags & XPC_C_CONNECTED)) {
ret = xpNotConnected;
goto out_1;
}
ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
if (ret != xpSuccess)
goto out_1;
if (func != NULL) {
atomic_inc(&ch->n_to_notify);
msg_slot->key = key;
smp_wmb(); /* a non-NULL func must hit memory after the key */
msg_slot->func = func;
if (ch->flags & XPC_C_DISCONNECTING) {
ret = ch->reason;
goto out_2;
}
}
msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
msg->hdr.partid = xp_partition_id;
msg->hdr.ch_number = ch->number;
msg->hdr.size = msg_size;
msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
memcpy(&msg->payload, payload, payload_size);
ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
msg_size);
if (ret == xpSuccess)
goto out_1;
XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
if (func != NULL) {
/*
* Try to NULL the msg_slot's func field. If we fail, then
* xpc_notify_senders_of_disconnect_uv() beat us to it, in which
* case we need to pretend we succeeded to send the message
* since the user will get a callout with the disconnect error
* from xpc_notify_senders_of_disconnect_uv(), and also getting
* an error returned here would confuse them. Additionally, since
* in this case the channel is being disconnected we don't need
* to put the msg_slot back on the free list.
*/
if (cmpxchg(&msg_slot->func, func, NULL) != func) {
ret = xpSuccess;
goto out_1;
}
msg_slot->key = NULL;
atomic_dec(&ch->n_to_notify);
}
xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
xpc_msgqueue_deref(ch);
return ret;
}
/*
* Tell the callers of xpc_send_notify() that the status of their payloads
* is unknown because the channel is now disconnecting.
*
* We don't worry about putting these msg_slots on the free list since the
* msg_slots themselves are about to be kfree'd.
*/
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
struct xpc_send_msg_slot_uv *msg_slot;
int entry;
DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));
for (entry = 0; entry < ch->local_nentries; entry++) {
if (atomic_read(&ch->n_to_notify) == 0)
break;
msg_slot = &ch->sn.uv.send_msg_slots[entry];
if (msg_slot->func != NULL)
xpc_notify_sender_uv(ch, msg_slot, ch->reason);
}
}
/*
* Get the next deliverable message's payload.
*/
static void *
xpc_get_deliverable_payload_uv(struct xpc_channel *ch)
{
struct xpc_fifo_entry_uv *entry;
struct xpc_notify_mq_msg_uv *msg;
void *payload = NULL;
if (!(ch->flags & XPC_C_DISCONNECTING)) {
entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list);
if (entry != NULL) {
msg = container_of(entry, struct xpc_notify_mq_msg_uv,
hdr.u.next);
payload = &msg->payload;
}
}
return payload;
}
static void
xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
{
struct xpc_notify_mq_msg_uv *msg;
enum xp_retval ret;
msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload);
/* return an ACK to the sender of this message */
msg->hdr.partid = xp_partition_id;
msg->hdr.size = 0; /* size of zero indicates this is an ACK */
ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
sizeof(struct xpc_notify_mq_msghdr_uv));
if (ret != xpSuccess)
XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
}
static struct xpc_arch_operations xpc_arch_ops_uv = {
.setup_partitions = xpc_setup_partitions_uv,
.teardown_partitions = xpc_teardown_partitions_uv,
.process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
.get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv,
.setup_rsvd_page = xpc_setup_rsvd_page_uv,
.allow_hb = xpc_allow_hb_uv,
.disallow_hb = xpc_disallow_hb_uv,
.disallow_all_hbs = xpc_disallow_all_hbs_uv,
.increment_heartbeat = xpc_increment_heartbeat_uv,
.offline_heartbeat = xpc_offline_heartbeat_uv,
.online_heartbeat = xpc_online_heartbeat_uv,
.heartbeat_init = xpc_heartbeat_init_uv,
.heartbeat_exit = xpc_heartbeat_exit_uv,
.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,
.request_partition_activation =
xpc_request_partition_activation_uv,
.request_partition_reactivation =
xpc_request_partition_reactivation_uv,
.request_partition_deactivation =
xpc_request_partition_deactivation_uv,
.cancel_partition_deactivation_request =
xpc_cancel_partition_deactivation_request_uv,
.setup_ch_structures = xpc_setup_ch_structures_uv,
.teardown_ch_structures = xpc_teardown_ch_structures_uv,
.make_first_contact = xpc_make_first_contact_uv,
.get_chctl_all_flags = xpc_get_chctl_all_flags_uv,
.send_chctl_closerequest = xpc_send_chctl_closerequest_uv,
.send_chctl_closereply = xpc_send_chctl_closereply_uv,
.send_chctl_openrequest = xpc_send_chctl_openrequest_uv,
.send_chctl_openreply = xpc_send_chctl_openreply_uv,
.send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv,
.process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv,
.save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv,
.setup_msg_structures = xpc_setup_msg_structures_uv,
.teardown_msg_structures = xpc_teardown_msg_structures_uv,
.indicate_partition_engaged = xpc_indicate_partition_engaged_uv,
.indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv,
.assume_partition_disengaged = xpc_assume_partition_disengaged_uv,
.partition_engaged = xpc_partition_engaged_uv,
.any_partition_engaged = xpc_any_partition_engaged_uv,
.n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv,
.send_payload = xpc_send_payload_uv,
.get_deliverable_payload = xpc_get_deliverable_payload_uv,
.received_payload = xpc_received_payload_uv,
.notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv,
};
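/*
* xpc_init_uv() installs this ops vector in xpc_arch_ops so the generic
* XPC code dispatches to the UV-specific implementations above.
*/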
static int
xpc_init_mq_node(int nid)
{
int cpu;
get_online_cpus();
for_each_cpu(cpu, cpumask_of_node(nid)) {
xpc_activate_mq_uv =
xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid,
XPC_ACTIVATE_IRQ_NAME,
xpc_handle_activate_IRQ_uv);
if (!IS_ERR(xpc_activate_mq_uv))
break;
}
if (IS_ERR(xpc_activate_mq_uv)) {
put_online_cpus();
return PTR_ERR(xpc_activate_mq_uv);
}
for_each_cpu(cpu, cpumask_of_node(nid)) {
xpc_notify_mq_uv =
xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid,
XPC_NOTIFY_IRQ_NAME,
xpc_handle_notify_IRQ_uv);
if (!IS_ERR(xpc_notify_mq_uv))
break;
}
if (IS_ERR(xpc_notify_mq_uv)) {
xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
put_online_cpus();
return PTR_ERR(xpc_notify_mq_uv);
}
put_online_cpus();
return 0;
}
int
xpc_init_uv(void)
{
int nid;
int ret = 0;
xpc_arch_ops = xpc_arch_ops_uv;
if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
XPC_MSG_HDR_MAX_SIZE);
return -E2BIG;
}
if (xpc_mq_node < 0)
for_each_online_node(nid) {
ret = xpc_init_mq_node(nid);
if (!ret)
break;
}
else
ret = xpc_init_mq_node(xpc_mq_node);
if (ret < 0)
dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n",
-ret);
return ret;
}
void
xpc_exit_uv(void)
{
xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
}
module_param(xpc_mq_node, int, 0);
MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues.");
| gpl-2.0 |
losfair/MiracleKernel | drivers/watchdog/booke_wdt.c | 3140 | 7504 | /*
* Watchdog timer for PowerPC Book-E systems
*
* Author: Matthew McClintock
* Maintainer: Kumar Gala <galak@kernel.crashing.org>
*
* Copyright 2005, 2008, 2010-2011 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/miscdevice.h>
#include <linux/notifier.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
#include <asm/reg_booke.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/div64.h>
/* If the kernel parameter wdt=1, the watchdog will be enabled at boot.
* Also, the wdt_period sets the watchdog timer period timeout.
* For E500 cpus the wdt_period sets which bit changing from 0->1 will
* trigger a watchdog timeout. This watchdog timeout will occur 3 times: the
* first time nothing will happen, the second time a watchdog exception will
* occur, and the final time the board will reset.
*/
u32 booke_wdt_enabled;
u32 booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
#ifdef CONFIG_FSL_BOOKE
#define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15))
#define WDTP_MASK (WDTP(0x3f))
#else
#define WDTP(x) (TCR_WP(x))
#define WDTP_MASK (TCR_WP_MASK)
#endif
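/*
* On Freescale Book-E (e500) parts the 6-bit period does not fit in the
* classic 2-bit TCR[WP] field, so WDTP(x) above splits it: the low 2 bits
* land at bit 30 (TCR[WP]) and bits 2-5 at bits 17-20 (TCR[WPEXT]).
*/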
static DEFINE_SPINLOCK(booke_wdt_lock);
/* For the specified period, determine the number of seconds
* corresponding to the reset time. There will be a watchdog
* exception at approximately 3/5 of this time.
*
* The formula to calculate this is given by:
* 2.5 * (2^(63-period+1)) / timebase_freq
*
* In order to simplify things, we assume that period is
* at least 1. This will still result in a very long timeout.
*/
static unsigned long long period_to_sec(unsigned int period)
{
unsigned long long tmp = 1ULL << (64 - period);
unsigned long tmp2 = ppc_tb_freq;
/* tmp may be a very large number and we don't want to overflow,
* so divide the timebase freq instead of multiplying tmp
*/
tmp2 = tmp2 / 5 * 2;
do_div(tmp, tmp2);
return tmp;
}
/*
* This procedure will find the highest period which will give a timeout
* greater than the one required. E.g., for a bus speed of 66666666 and a
* parameter of 2 secs, this procedure will return a value of 38.
*/
static unsigned int sec_to_period(unsigned int secs)
{
unsigned int period;
for (period = 63; period > 0; period--) {
if (period_to_sec(period) >= secs)
return period;
}
return 0;
}
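/*
* Worked example (assuming ppc_tb_freq == 66666666): period_to_sec(38)
* computes (1ULL << 26) / (66666666 / 5 * 2), i.e. 67108864 / 26666666
* == 2, while period_to_sec(39) == 33554432 / 26666666 == 1 after
* integer division, so sec_to_period(2) returns 38 as in the example
* above.
*/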
static void __booke_wdt_set(void *data)
{
u32 val;
val = mfspr(SPRN_TCR);
val &= ~WDTP_MASK;
val |= WDTP(booke_wdt_period);
mtspr(SPRN_TCR, val);
}
static void booke_wdt_set(void)
{
on_each_cpu(__booke_wdt_set, NULL, 0);
}
static void __booke_wdt_ping(void *data)
{
mtspr(SPRN_TSR, TSR_ENW|TSR_WIS);
}
static void booke_wdt_ping(void)
{
on_each_cpu(__booke_wdt_ping, NULL, 0);
}
static void __booke_wdt_enable(void *data)
{
u32 val;
/* clear status before enabling watchdog */
__booke_wdt_ping(NULL);
val = mfspr(SPRN_TCR);
val &= ~WDTP_MASK;
val |= (TCR_WIE|TCR_WRC(WRC_CHIP)|WDTP(booke_wdt_period));
mtspr(SPRN_TCR, val);
}
/**
* booke_wdt_disable - disable the watchdog on the given CPU
*
* This function is called on each CPU. It disables the watchdog on that CPU.
*
* TCR[WRC] cannot be changed once it has been set to non-zero, but we can
* effectively disable the watchdog by setting its period to the maximum value.
*/
static void __booke_wdt_disable(void *data)
{
u32 val;
val = mfspr(SPRN_TCR);
val &= ~(TCR_WIE | WDTP_MASK);
mtspr(SPRN_TCR, val);
/* clear status to make sure nothing is pending */
__booke_wdt_ping(NULL);
}
static ssize_t booke_wdt_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
booke_wdt_ping();
return count;
}
static struct watchdog_info ident = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
.identity = "PowerPC Book-E Watchdog",
};
static long booke_wdt_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
u32 tmp = 0;
u32 __user *p = (u32 __user *)arg;
switch (cmd) {
case WDIOC_GETSUPPORT:
if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
return -EFAULT;
return 0;
case WDIOC_GETSTATUS:
return put_user(0, p);
case WDIOC_GETBOOTSTATUS:
/* XXX: something is clearing TSR */
tmp = mfspr(SPRN_TSR) & TSR_WRS(3);
/* returns CARDRESET if last reset was caused by the WDT */
return (tmp ? WDIOF_CARDRESET : 0);
case WDIOC_SETOPTIONS:
if (get_user(tmp, p))
return -EFAULT;
if (tmp != WDIOS_ENABLECARD)
return -EINVAL;
booke_wdt_ping();
return 0;
case WDIOC_KEEPALIVE:
booke_wdt_ping();
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(tmp, p))
return -EFAULT;
#ifdef CONFIG_FSL_BOOKE
/* period of 1 gives the largest possible timeout */
if (tmp > period_to_sec(1))
return -EINVAL;
booke_wdt_period = sec_to_period(tmp);
#else
booke_wdt_period = tmp;
#endif
booke_wdt_set();
return 0;
case WDIOC_GETTIMEOUT:
return put_user(booke_wdt_period, p);
default:
return -ENOTTY;
}
return 0;
}
/* wdt_is_active stores whether or not the /dev/watchdog device is opened */
static unsigned long wdt_is_active;
static int booke_wdt_open(struct inode *inode, struct file *file)
{
/* /dev/watchdog can only be opened once */
if (test_and_set_bit(0, &wdt_is_active))
return -EBUSY;
spin_lock(&booke_wdt_lock);
if (booke_wdt_enabled == 0) {
booke_wdt_enabled = 1;
on_each_cpu(__booke_wdt_enable, NULL, 0);
pr_debug("booke_wdt: watchdog enabled (timeout = %llu sec)\n",
period_to_sec(booke_wdt_period));
}
spin_unlock(&booke_wdt_lock);
return nonseekable_open(inode, file);
}
static int booke_wdt_release(struct inode *inode, struct file *file)
{
#ifndef CONFIG_WATCHDOG_NOWAYOUT
/* Normally, the watchdog is disabled when /dev/watchdog is closed, but
* if CONFIG_WATCHDOG_NOWAYOUT is defined, then it means that the
* watchdog should remain enabled. So we disable it only if
* CONFIG_WATCHDOG_NOWAYOUT is not defined.
*/
on_each_cpu(__booke_wdt_disable, NULL, 0);
booke_wdt_enabled = 0;
pr_debug("booke_wdt: watchdog disabled\n");
#endif
clear_bit(0, &wdt_is_active);
return 0;
}
static const struct file_operations booke_wdt_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.write = booke_wdt_write,
.unlocked_ioctl = booke_wdt_ioctl,
.open = booke_wdt_open,
.release = booke_wdt_release,
};
static struct miscdevice booke_wdt_miscdev = {
.minor = WATCHDOG_MINOR,
.name = "watchdog",
.fops = &booke_wdt_fops,
};
static void __exit booke_wdt_exit(void)
{
misc_deregister(&booke_wdt_miscdev);
}
static int __init booke_wdt_init(void)
{
int ret = 0;
pr_info("booke_wdt: powerpc book-e watchdog driver loaded\n");
ident.firmware_version = cur_cpu_spec->pvr_value;
ret = misc_register(&booke_wdt_miscdev);
if (ret) {
pr_err("booke_wdt: cannot register device (minor=%u, ret=%i)\n",
WATCHDOG_MINOR, ret);
return ret;
}
spin_lock(&booke_wdt_lock);
if (booke_wdt_enabled == 1) {
pr_info("booke_wdt: watchdog enabled (timeout = %llu sec)\n",
period_to_sec(booke_wdt_period));
on_each_cpu(__booke_wdt_enable, NULL, 0);
}
spin_unlock(&booke_wdt_lock);
return ret;
}
module_init(booke_wdt_init);
module_exit(booke_wdt_exit);
MODULE_DESCRIPTION("PowerPC Book-E watchdog driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
guilhem/LGE975_G_Kitkat_Android_V20a_Kernel | drivers/hwmon/adm1021.c | 3652 | 14486 | /*
* adm1021.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
* Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and
* Philip Edelbrock <phil@netroedge.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
enum chips {
adm1021, adm1023, max1617, max1617a, thmc10, lm84, gl523sm, mc1066 };
/* adm1021 constants specified below */
/* The adm1021 registers */
/* Read-only */
/* For nr in 0-1 */
#define ADM1021_REG_TEMP(nr) (nr)
#define ADM1021_REG_STATUS 0x02
/* 0x41 = AD, 0x49 = TI, 0x4D = Maxim, 0x23 = Genesys , 0x54 = Onsemi */
#define ADM1021_REG_MAN_ID 0xFE
/* ADM1021 = 0x0X, ADM1023 = 0x3X */
#define ADM1021_REG_DEV_ID 0xFF
/* These use different addresses for reading/writing */
#define ADM1021_REG_CONFIG_R 0x03
#define ADM1021_REG_CONFIG_W 0x09
#define ADM1021_REG_CONV_RATE_R 0x04
#define ADM1021_REG_CONV_RATE_W 0x0A
/* These are for the ADM1023's additional precision on the remote temp sensor */
#define ADM1023_REG_REM_TEMP_PREC 0x10
#define ADM1023_REG_REM_OFFSET 0x11
#define ADM1023_REG_REM_OFFSET_PREC 0x12
#define ADM1023_REG_REM_TOS_PREC 0x13
#define ADM1023_REG_REM_THYST_PREC 0x14
/* limits */
/* For nr in 0-1 */
#define ADM1021_REG_TOS_R(nr) (0x05 + 2 * (nr))
#define ADM1021_REG_TOS_W(nr) (0x0B + 2 * (nr))
#define ADM1021_REG_THYST_R(nr) (0x06 + 2 * (nr))
#define ADM1021_REG_THYST_W(nr) (0x0C + 2 * (nr))
/* write-only */
#define ADM1021_REG_ONESHOT 0x0F
/* Initial values */
/*
* Note: Even though I left the low and high limits named os and hyst,
* they don't quite work like a thermostat the way the LM75 does. I.e.,
* a lower temp than THYST actually triggers an alarm instead of
* clearing it. Weird, ey? --Phil
*/
/* Each client has this additional data */
struct adm1021_data {
struct device *hwmon_dev;
enum chips type;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
char low_power; /* !=0 if device in low power mode */
unsigned long last_updated; /* In jiffies */
int temp_max[2]; /* Register values */
int temp_min[2];
int temp[2];
u8 alarms;
/* Special values for ADM1023 only */
u8 remote_temp_offset;
u8 remote_temp_offset_prec;
};
static int adm1021_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int adm1021_detect(struct i2c_client *client,
struct i2c_board_info *info);
static void adm1021_init_client(struct i2c_client *client);
static int adm1021_remove(struct i2c_client *client);
static struct adm1021_data *adm1021_update_device(struct device *dev);
/* (amalysh) read-only mode; otherwise writing any limit confuses the BIOS */
static bool read_only;
static const struct i2c_device_id adm1021_id[] = {
{ "adm1021", adm1021 },
{ "adm1023", adm1023 },
{ "max1617", max1617 },
{ "max1617a", max1617a },
{ "thmc10", thmc10 },
{ "lm84", lm84 },
{ "gl523sm", gl523sm },
{ "mc1066", mc1066 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adm1021_id);
/* This is the driver that will be inserted */
static struct i2c_driver adm1021_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "adm1021",
},
.probe = adm1021_probe,
.remove = adm1021_remove,
.id_table = adm1021_id,
.detect = adm1021_detect,
.address_list = normal_i2c,
};
static ssize_t show_temp(struct device *dev,
struct device_attribute *devattr, char *buf)
{
int index = to_sensor_dev_attr(devattr)->index;
struct adm1021_data *data = adm1021_update_device(dev);
return sprintf(buf, "%d\n", data->temp[index]);
}
static ssize_t show_temp_max(struct device *dev,
struct device_attribute *devattr, char *buf)
{
int index = to_sensor_dev_attr(devattr)->index;
struct adm1021_data *data = adm1021_update_device(dev);
return sprintf(buf, "%d\n", data->temp_max[index]);
}
static ssize_t show_temp_min(struct device *dev,
struct device_attribute *devattr, char *buf)
{
int index = to_sensor_dev_attr(devattr)->index;
struct adm1021_data *data = adm1021_update_device(dev);
return sprintf(buf, "%d\n", data->temp_min[index]);
}
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
{
int index = to_sensor_dev_attr(attr)->index;
struct adm1021_data *data = adm1021_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms >> index) & 1);
}
static ssize_t show_alarms(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct adm1021_data *data = adm1021_update_device(dev);
return sprintf(buf, "%u\n", data->alarms);
}
static ssize_t set_temp_max(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
{
int index = to_sensor_dev_attr(devattr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct adm1021_data *data = i2c_get_clientdata(client);
long temp;
int err;
err = kstrtol(buf, 10, &temp);
if (err)
return err;
temp /= 1000;
mutex_lock(&data->update_lock);
data->temp_max[index] = SENSORS_LIMIT(temp, -128, 127);
if (!read_only)
i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index),
data->temp_max[index]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_temp_min(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
{
int index = to_sensor_dev_attr(devattr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct adm1021_data *data = i2c_get_clientdata(client);
long temp;
int err;
err = kstrtol(buf, 10, &temp);
if (err)
return err;
temp /= 1000;
mutex_lock(&data->update_lock);
data->temp_min[index] = SENSORS_LIMIT(temp, -128, 127);
if (!read_only)
i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index),
data->temp_min[index]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_low_power(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct adm1021_data *data = adm1021_update_device(dev);
return sprintf(buf, "%d\n", data->low_power);
}
static ssize_t set_low_power(struct device *dev,
struct device_attribute *devattr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct adm1021_data *data = i2c_get_clientdata(client);
char low_power;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
low_power = val != 0;
mutex_lock(&data->update_lock);
if (low_power != data->low_power) {
int config = i2c_smbus_read_byte_data(
client, ADM1021_REG_CONFIG_R);
data->low_power = low_power;
i2c_smbus_write_byte_data(client, ADM1021_REG_CONFIG_W,
(config & 0xBF) | (low_power << 6));
}
mutex_unlock(&data->update_lock);
return count;
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
set_temp_max, 0);
static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp_min,
set_temp_min, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
set_temp_max, 1);
static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_min,
set_temp_min, 1);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static DEVICE_ATTR(low_power, S_IWUSR | S_IRUGO, show_low_power, set_low_power);
static struct attribute *adm1021_attributes[] = {
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_fault.dev_attr.attr,
&dev_attr_alarms.attr,
&dev_attr_low_power.attr,
NULL
};
static const struct attribute_group adm1021_group = {
.attrs = adm1021_attributes,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
static int adm1021_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
const char *type_name;
int conv_rate, status, config, man_id, dev_id;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
pr_debug("adm1021: detect failed, "
"smbus byte data not supported!\n");
return -ENODEV;
}
status = i2c_smbus_read_byte_data(client, ADM1021_REG_STATUS);
conv_rate = i2c_smbus_read_byte_data(client,
ADM1021_REG_CONV_RATE_R);
config = i2c_smbus_read_byte_data(client, ADM1021_REG_CONFIG_R);
/* Check unused bits */
if ((status & 0x03) || (config & 0x3F) || (conv_rate & 0xF8)) {
pr_debug("adm1021: detect failed, chip not detected!\n");
return -ENODEV;
}
/* Determine the chip type. */
man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID);
dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID);
if (man_id == 0x4d && dev_id == 0x01)
type_name = "max1617a";
else if (man_id == 0x41) {
if ((dev_id & 0xF0) == 0x30)
type_name = "adm1023";
else
type_name = "adm1021";
} else if (man_id == 0x49)
type_name = "thmc10";
else if (man_id == 0x23)
type_name = "gl523sm";
else if (man_id == 0x54)
type_name = "mc1066";
/* LM84 Mfr ID in a different place, and it has more unused bits */
else if (conv_rate == 0x00
&& (config & 0x7F) == 0x00
&& (status & 0xAB) == 0x00)
type_name = "lm84";
else
type_name = "max1617";
pr_debug("adm1021: Detected chip %s at adapter %d, address 0x%02x.\n",
type_name, i2c_adapter_id(adapter), client->addr);
strlcpy(info->type, type_name, I2C_NAME_SIZE);
return 0;
}
static int adm1021_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adm1021_data *data;
int err;
data = kzalloc(sizeof(struct adm1021_data), GFP_KERNEL);
if (!data) {
pr_debug("adm1021: detect failed, kzalloc failed!\n");
err = -ENOMEM;
goto error0;
}
i2c_set_clientdata(client, data);
data->type = id->driver_data;
mutex_init(&data->update_lock);
/* Initialize the ADM1021 chip */
if (data->type != lm84 && !read_only)
adm1021_init_client(client);
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &adm1021_group);
if (err)
goto error1;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto error3;
}
return 0;
error3:
sysfs_remove_group(&client->dev.kobj, &adm1021_group);
error1:
kfree(data);
error0:
return err;
}
static void adm1021_init_client(struct i2c_client *client)
{
/* Enable ADC and disable suspend mode */
i2c_smbus_write_byte_data(client, ADM1021_REG_CONFIG_W,
i2c_smbus_read_byte_data(client, ADM1021_REG_CONFIG_R) & 0xBF);
/* Set Conversion rate to 1/sec (this can be tinkered with) */
i2c_smbus_write_byte_data(client, ADM1021_REG_CONV_RATE_W, 0x04);
}
static int adm1021_remove(struct i2c_client *client)
{
struct adm1021_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &adm1021_group);
kfree(data);
return 0;
}
static struct adm1021_data *adm1021_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct adm1021_data *data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
|| !data->valid) {
int i;
dev_dbg(&client->dev, "Starting adm1021 update\n");
for (i = 0; i < 2; i++) {
data->temp[i] = 1000 *
(s8) i2c_smbus_read_byte_data(
client, ADM1021_REG_TEMP(i));
data->temp_max[i] = 1000 *
(s8) i2c_smbus_read_byte_data(
client, ADM1021_REG_TOS_R(i));
data->temp_min[i] = 1000 *
(s8) i2c_smbus_read_byte_data(
client, ADM1021_REG_THYST_R(i));
}
data->alarms = i2c_smbus_read_byte_data(client,
ADM1021_REG_STATUS) & 0x7c;
if (data->type == adm1023) {
/*
* The ADM1023 provides 3 extra bits of precision for
* the remote sensor in extra registers.
*/
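/* Each extra LSB is 0.125 degrees C, hence 125 * (reg >> 5) millidegrees. */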
data->temp[1] += 125 * (i2c_smbus_read_byte_data(
client, ADM1023_REG_REM_TEMP_PREC) >> 5);
data->temp_max[1] += 125 * (i2c_smbus_read_byte_data(
client, ADM1023_REG_REM_TOS_PREC) >> 5);
data->temp_min[1] += 125 * (i2c_smbus_read_byte_data(
client, ADM1023_REG_REM_THYST_PREC) >> 5);
data->remote_temp_offset =
i2c_smbus_read_byte_data(client,
ADM1023_REG_REM_OFFSET);
data->remote_temp_offset_prec =
i2c_smbus_read_byte_data(client,
ADM1023_REG_REM_OFFSET_PREC);
}
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
module_i2c_driver(adm1021_driver);
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
"Philip Edelbrock <phil@netroedge.com>");
MODULE_DESCRIPTION("adm1021 driver");
MODULE_LICENSE("GPL");
module_param(read_only, bool, 0);
MODULE_PARM_DESC(read_only, "Don't set any values, read only mode");
| gpl-2.0 |
civato/V30B-SithLord | drivers/tty/serial/suncore.c | 4164 | 5326 | /* suncore.c
*
* Common SUN serial routines. Based entirely
* upon drivers/sbus/char/sunserial.c which is:
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
*
* Adaptation to new UART layer is:
*
* Copyright (C) 2002 David S. Miller (davem@redhat.com)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/serial_core.h>
#include <linux/init.h>
#include <asm/prom.h>
#include "suncore.h"
static int sunserial_current_minor = 64;
int sunserial_register_minors(struct uart_driver *drv, int count)
{
int err = 0;
drv->minor = sunserial_current_minor;
drv->nr += count;
/* Register the driver on the first call */
if (drv->nr == count)
err = uart_register_driver(drv);
if (err == 0) {
sunserial_current_minor += count;
drv->tty_driver->name_base = drv->minor - 64;
}
return err;
}
EXPORT_SYMBOL(sunserial_register_minors);
void sunserial_unregister_minors(struct uart_driver *drv, int count)
{
drv->nr -= count;
sunserial_current_minor -= count;
if (drv->nr == 0)
uart_unregister_driver(drv);
}
EXPORT_SYMBOL(sunserial_unregister_minors);
int sunserial_console_match(struct console *con, struct device_node *dp,
struct uart_driver *drv, int line, bool ignore_line)
{
if (!con)
return 0;
drv->cons = con;
if (of_console_device != dp)
return 0;
if (!ignore_line) {
int off = 0;
if (of_console_options &&
*of_console_options == 'b')
off = 1;
if ((line & 1) != off)
return 0;
}
if (!console_set_on_cmdline) {
con->index = line;
add_preferred_console(con->name, line, NULL);
}
return 1;
}
EXPORT_SYMBOL(sunserial_console_match);
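/*
* Derive the console's initial termios settings from an OpenPROM mode
* string of the form "<baud>,<bits>,<parity>,<stop>,<handshake>", e.g.
* "9600,8,n,1,-" (the trailing handshake field is not handled, per the
* XXX note below).
*/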
void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
{
const char *mode, *s;
char mode_prop[] = "ttyX-mode";
int baud, bits, stop, cflag;
char parity;
if (!strcmp(uart_dp->name, "rsc") ||
!strcmp(uart_dp->name, "rsc-console") ||
!strcmp(uart_dp->name, "rsc-control")) {
mode = of_get_property(uart_dp,
"ssp-console-modes", NULL);
if (!mode)
mode = "115200,8,n,1,-";
} else if (!strcmp(uart_dp->name, "lom-console")) {
mode = "9600,8,n,1,-";
} else {
struct device_node *dp;
char c;
c = 'a';
if (of_console_options)
c = *of_console_options;
mode_prop[3] = c;
dp = of_find_node_by_path("/options");
mode = of_get_property(dp, mode_prop, NULL);
if (!mode)
mode = "9600,8,n,1,-";
}
cflag = CREAD | HUPCL | CLOCAL;
s = mode;
baud = simple_strtoul(s, NULL, 0);
s = strchr(s, ',');
bits = simple_strtoul(++s, NULL, 0);
s = strchr(s, ',');
parity = *(++s);
s = strchr(s, ',');
stop = simple_strtoul(++s, NULL, 0);
s = strchr(s, ',');
/* XXX handshake is not handled here. */
switch (baud) {
case 150: cflag |= B150; break;
case 300: cflag |= B300; break;
case 600: cflag |= B600; break;
case 1200: cflag |= B1200; break;
case 2400: cflag |= B2400; break;
case 4800: cflag |= B4800; break;
case 9600: cflag |= B9600; break;
case 19200: cflag |= B19200; break;
case 38400: cflag |= B38400; break;
case 57600: cflag |= B57600; break;
case 115200: cflag |= B115200; break;
case 230400: cflag |= B230400; break;
case 460800: cflag |= B460800; break;
default: baud = 9600; cflag |= B9600; break;
}
switch (bits) {
case 5: cflag |= CS5; break;
case 6: cflag |= CS6; break;
case 7: cflag |= CS7; break;
case 8: cflag |= CS8; break;
default: cflag |= CS8; break;
}
switch (parity) {
case 'o': cflag |= (PARENB | PARODD); break;
case 'e': cflag |= PARENB; break;
case 'n': default: break;
}
switch (stop) {
case 2: cflag |= CSTOPB; break;
case 1: default: break;
}
con->cflag = cflag;
}
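/*
* Worked example (illustrative only, not driver code): a mode property
* of "115200,8,n,1,-" is decoded by the parser above into
*
* cflag = CREAD | HUPCL | CLOCAL (fixed base)
* | B115200 (baud field)
* | CS8 (8 data bits)
*
* with PARENB left clear for parity 'n' and CSTOPB left clear for one
* stop bit; the trailing handshake field is ignored, as noted above.
*/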
/* Sun serial MOUSE auto baud rate detection. */
static struct mouse_baud_cflag {
int baud;
unsigned int cflag;
} mouse_baud_table[] = {
{ 1200, B1200 },
{ 2400, B2400 },
{ 4800, B4800 },
{ 9600, B9600 },
{ -1, ~0 },
{ -1, ~0 },
};
unsigned int suncore_mouse_baud_cflag_next(unsigned int cflag, int *new_baud)
{
int i;
for (i = 0; mouse_baud_table[i].baud != -1; i++)
if (mouse_baud_table[i].cflag == (cflag & CBAUD))
break;
i += 1;
if (mouse_baud_table[i].baud == -1)
i = 0;
*new_baud = mouse_baud_table[i].baud;
return mouse_baud_table[i].cflag;
}
EXPORT_SYMBOL(suncore_mouse_baud_cflag_next);
/* Basically, when the baud rate is wrong the mouse spits out
* breaks to us.
*/
int suncore_mouse_baud_detection(unsigned char ch, int is_break)
{
static int mouse_got_break = 0;
static int ctr = 0;
if (is_break) {
/* Let a few normal bytes go by before we jump the gun
* and say we need to try another baud rate.
*/
if (mouse_got_break && ctr < 8)
return 1;
/* Ok, we need to try another baud. */
ctr = 0;
mouse_got_break = 1;
return 2;
}
if (mouse_got_break) {
ctr++;
if (ch == 0x87) {
/* Correct baud rate determined. */
mouse_got_break = 0;
}
return 1;
}
return 0;
}
EXPORT_SYMBOL(suncore_mouse_baud_detection);
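/*
* Usage sketch (an assumed caller such as a Sun serial mouse driver,
* not taken from this file). A return of 2 from the detector means
* "wrong speed", and the next candidate baud is fetched from the
* table helper above.
*/
static unsigned int example_mouse_cflag = B4800;
static void example_mouse_rx(unsigned char ch, int is_break)
{
int new_baud;
if (suncore_mouse_baud_detection(ch, is_break) == 2) {
example_mouse_cflag = suncore_mouse_baud_cflag_next(
example_mouse_cflag, &new_baud);
/* ...reprogram the UART for new_baud here... */
}
}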
static int __init suncore_init(void)
{
return 0;
}
static void __exit suncore_exit(void)
{
}
module_init(suncore_init);
module_exit(suncore_exit);
MODULE_AUTHOR("Eddie C. Dost, David S. Miller");
MODULE_DESCRIPTION("Sun serial common layer");
MODULE_LICENSE("GPL");
| gpl-2.0 |
thicklizard/Sprint-M9 | drivers/media/i2c/sr030pc30.c | 7236 | 22130 | /*
* Driver for SiliconFile SR030PC30 VGA (1/10-Inch) Image Sensor with ISP
*
* Copyright (C) 2010 Samsung Electronics Co., Ltd
* Author: Sylwester Nawrocki, s.nawrocki@samsung.com
*
* Based on original driver authored by Dongsoo Nathaniel Kim
* and HeungJun Kim <riverful.kim@samsung.com>.
*
* Based on mt9v011 Micron Digital Image Sensor driver
* Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-mediabus.h>
#include <media/sr030pc30.h>
static int debug;
module_param(debug, int, 0644);
#define MODULE_NAME "SR030PC30"
/*
* Register offsets within a page
* b15..b8 - page id, b7..b0 - register address
*/
#define POWER_CTRL_REG 0x0001
#define PAGEMODE_REG 0x03
#define DEVICE_ID_REG 0x0004
#define NOON010PC30_ID 0x86
#define SR030PC30_ID 0x8C
#define VDO_CTL1_REG 0x0010
#define SUBSAMPL_NONE_VGA 0
#define SUBSAMPL_QVGA 0x10
#define SUBSAMPL_QQVGA 0x20
#define VDO_CTL2_REG 0x0011
#define SYNC_CTL_REG 0x0012
#define WIN_ROWH_REG 0x0020
#define WIN_ROWL_REG 0x0021
#define WIN_COLH_REG 0x0022
#define WIN_COLL_REG 0x0023
#define WIN_HEIGHTH_REG 0x0024
#define WIN_HEIGHTL_REG 0x0025
#define WIN_WIDTHH_REG 0x0026
#define WIN_WIDTHL_REG 0x0027
#define HBLANKH_REG 0x0040
#define HBLANKL_REG 0x0041
#define VSYNCH_REG 0x0042
#define VSYNCL_REG 0x0043
/* page 10 */
#define ISP_CTL_REG(n) (0x1010 + (n))
#define YOFS_REG 0x1040
#define DARK_YOFS_REG 0x1041
#define AG_ABRTH_REG 0x1050
#define SAT_CTL_REG 0x1060
#define BSAT_REG 0x1061
#define RSAT_REG 0x1062
#define AG_SAT_TH_REG 0x1063
/* page 11 */
#define ZLPF_CTRL_REG 0x1110
#define ZLPF_CTRL2_REG 0x1112
#define ZLPF_AGH_THR_REG 0x1121
#define ZLPF_THR_REG 0x1160
#define ZLPF_DYN_THR_REG 0x1160
/* page 12 */
#define YCLPF_CTL1_REG 0x1240
#define YCLPF_CTL2_REG 0x1241
#define YCLPF_THR_REG 0x1250
#define BLPF_CTL_REG 0x1270
#define BLPF_THR1_REG 0x1274
#define BLPF_THR2_REG 0x1275
/* page 14 - Lens Shading Compensation */
#define LENS_CTRL_REG 0x1410
#define LENS_XCEN_REG 0x1420
#define LENS_YCEN_REG 0x1421
#define LENS_R_COMP_REG 0x1422
#define LENS_G_COMP_REG 0x1423
#define LENS_B_COMP_REG 0x1424
/* page 15 - Color correction */
#define CMC_CTL_REG 0x1510
#define CMC_OFSGH_REG 0x1514
#define CMC_OFSGL_REG 0x1516
#define CMC_SIGN_REG 0x1517
/* Color correction coefficients */
#define CMC_COEF_REG(n) (0x1530 + (n))
/* Color correction offset coefficients */
#define CMC_OFS_REG(n) (0x1540 + (n))
/* page 16 - Gamma correction */
#define GMA_CTL_REG 0x1610
/* Gamma correction coefficients 0.14 */
#define GMA_COEF_REG(n) (0x1630 + (n))
/* page 20 - Auto Exposure */
#define AE_CTL1_REG 0x2010
#define AE_CTL2_REG 0x2011
#define AE_FRM_CTL_REG 0x2020
#define AE_FINE_CTL_REG(n) (0x2028 + (n))
#define EXP_TIMEH_REG 0x2083
#define EXP_TIMEM_REG 0x2084
#define EXP_TIMEL_REG 0x2085
#define EXP_MMINH_REG 0x2086
#define EXP_MMINL_REG 0x2087
#define EXP_MMAXH_REG 0x2088
#define EXP_MMAXM_REG 0x2089
#define EXP_MMAXL_REG 0x208A
/* page 22 - Auto White Balance */
#define AWB_CTL1_REG 0x2210
#define AWB_ENABLE 0x80
#define AWB_CTL2_REG 0x2211
#define MWB_ENABLE 0x01
/* RGB gain control (manual WB) when AWB_CTL1[7]=0 */
#define AWB_RGAIN_REG 0x2280
#define AWB_GGAIN_REG 0x2281
#define AWB_BGAIN_REG 0x2282
#define AWB_RMAX_REG 0x2283
#define AWB_RMIN_REG 0x2284
#define AWB_BMAX_REG 0x2285
#define AWB_BMIN_REG 0x2286
/* R, B gain range in bright light conditions */
#define AWB_RMAXB_REG 0x2287
#define AWB_RMINB_REG 0x2288
#define AWB_BMAXB_REG 0x2289
#define AWB_BMINB_REG 0x228A
/* manual white balance, when AWB_CTL2[0]=1 */
#define MWB_RGAIN_REG 0x22B2
#define MWB_BGAIN_REG 0x22B3
/* the token to mark an array end */
#define REG_TERM 0xFFFF
/* Minimum and maximum exposure time in ms */
#define EXPOS_MIN_MS 1
#define EXPOS_MAX_MS 125
struct sr030pc30_info {
struct v4l2_subdev sd;
const struct sr030pc30_platform_data *pdata;
const struct sr030pc30_format *curr_fmt;
const struct sr030pc30_frmsize *curr_win;
unsigned int auto_wb:1;
unsigned int auto_exp:1;
unsigned int hflip:1;
unsigned int vflip:1;
unsigned int sleep:1;
unsigned int exposure;
u8 blue_balance;
u8 red_balance;
u8 i2c_reg_page;
};
struct sr030pc30_format {
enum v4l2_mbus_pixelcode code;
enum v4l2_colorspace colorspace;
u16 ispctl1_reg;
};
struct sr030pc30_frmsize {
u16 width;
u16 height;
int vid_ctl1;
};
struct i2c_regval {
u16 addr;
u16 val;
};
static const struct v4l2_queryctrl sr030pc30_ctrl[] = {
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Auto White Balance",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 1,
}, {
.id = V4L2_CID_RED_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Red Balance",
.minimum = 0,
.maximum = 127,
.step = 1,
.default_value = 64,
.flags = 0,
}, {
.id = V4L2_CID_BLUE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Blue Balance",
.minimum = 0,
.maximum = 127,
.step = 1,
.default_value = 64,
}, {
.id = V4L2_CID_EXPOSURE_AUTO,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Auto Exposure",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 1,
}, {
.id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Exposure",
.minimum = EXPOS_MIN_MS,
.maximum = EXPOS_MAX_MS,
.step = 1,
.default_value = 1,
}, {
}
};
/* supported resolutions */
static const struct sr030pc30_frmsize sr030pc30_sizes[] = {
{
.width = 640,
.height = 480,
.vid_ctl1 = SUBSAMPL_NONE_VGA,
}, {
.width = 320,
.height = 240,
.vid_ctl1 = SUBSAMPL_QVGA,
}, {
.width = 160,
.height = 120,
.vid_ctl1 = SUBSAMPL_QQVGA,
},
};
/* supported pixel formats */
static const struct sr030pc30_format sr030pc30_formats[] = {
{
.code = V4L2_MBUS_FMT_YUYV8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x03,
}, {
.code = V4L2_MBUS_FMT_YVYU8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x02,
}, {
.code = V4L2_MBUS_FMT_VYUY8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0,
}, {
.code = V4L2_MBUS_FMT_UYVY8_2X8,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x01,
}, {
.code = V4L2_MBUS_FMT_RGB565_2X8_BE,
.colorspace = V4L2_COLORSPACE_JPEG,
.ispctl1_reg = 0x40,
},
};
static const struct i2c_regval sr030pc30_base_regs[] = {
/* Window size and position within pixel matrix */
{ WIN_ROWH_REG, 0x00 }, { WIN_ROWL_REG, 0x06 },
{ WIN_COLH_REG, 0x00 }, { WIN_COLL_REG, 0x06 },
{ WIN_HEIGHTH_REG, 0x01 }, { WIN_HEIGHTL_REG, 0xE0 },
{ WIN_WIDTHH_REG, 0x02 }, { WIN_WIDTHL_REG, 0x80 },
{ HBLANKH_REG, 0x01 }, { HBLANKL_REG, 0x50 },
{ VSYNCH_REG, 0x00 }, { VSYNCL_REG, 0x14 },
{ SYNC_CTL_REG, 0 },
/* Color correction and saturation */
{ ISP_CTL_REG(0), 0x30 }, { YOFS_REG, 0x80 },
{ DARK_YOFS_REG, 0x04 }, { AG_ABRTH_REG, 0x78 },
{ SAT_CTL_REG, 0x1F }, { BSAT_REG, 0x90 },
{ AG_SAT_TH_REG, 0xF0 }, { 0x1064, 0x80 },
{ CMC_CTL_REG, 0x03 }, { CMC_OFSGH_REG, 0x3C },
{ CMC_OFSGL_REG, 0x2C }, { CMC_SIGN_REG, 0x2F },
{ CMC_COEF_REG(0), 0xCB }, { CMC_OFS_REG(0), 0x87 },
{ CMC_COEF_REG(1), 0x61 }, { CMC_OFS_REG(1), 0x18 },
{ CMC_COEF_REG(2), 0x16 }, { CMC_OFS_REG(2), 0x91 },
{ CMC_COEF_REG(3), 0x23 }, { CMC_OFS_REG(3), 0x94 },
{ CMC_COEF_REG(4), 0xCE }, { CMC_OFS_REG(4), 0x9f },
{ CMC_COEF_REG(5), 0x2B }, { CMC_OFS_REG(5), 0x33 },
{ CMC_COEF_REG(6), 0x01 }, { CMC_OFS_REG(6), 0x00 },
{ CMC_COEF_REG(7), 0x34 }, { CMC_OFS_REG(7), 0x94 },
{ CMC_COEF_REG(8), 0x75 }, { CMC_OFS_REG(8), 0x14 },
/* Gamma correction coefficients */
{ GMA_CTL_REG, 0x03 }, { GMA_COEF_REG(0), 0x00 },
{ GMA_COEF_REG(1), 0x19 }, { GMA_COEF_REG(2), 0x26 },
{ GMA_COEF_REG(3), 0x3B }, { GMA_COEF_REG(4), 0x5D },
{ GMA_COEF_REG(5), 0x79 }, { GMA_COEF_REG(6), 0x8E },
{ GMA_COEF_REG(7), 0x9F }, { GMA_COEF_REG(8), 0xAF },
{ GMA_COEF_REG(9), 0xBD }, { GMA_COEF_REG(10), 0xCA },
{ GMA_COEF_REG(11), 0xDD }, { GMA_COEF_REG(12), 0xEC },
{ GMA_COEF_REG(13), 0xF7 }, { GMA_COEF_REG(14), 0xFF },
/* Noise reduction, Z-LPF, YC-LPF and BLPF filters setup */
{ ZLPF_CTRL_REG, 0x99 }, { ZLPF_CTRL2_REG, 0x0E },
{ ZLPF_AGH_THR_REG, 0x29 }, { ZLPF_THR_REG, 0x0F },
{ ZLPF_DYN_THR_REG, 0x63 }, { YCLPF_CTL1_REG, 0x23 },
{ YCLPF_CTL2_REG, 0x3B }, { YCLPF_THR_REG, 0x05 },
{ BLPF_CTL_REG, 0x1D }, { BLPF_THR1_REG, 0x05 },
{ BLPF_THR2_REG, 0x04 },
/* Automatic white balance */
{ AWB_CTL1_REG, 0xFB }, { AWB_CTL2_REG, 0x26 },
{ AWB_RMAX_REG, 0x54 }, { AWB_RMIN_REG, 0x2B },
{ AWB_BMAX_REG, 0x57 }, { AWB_BMIN_REG, 0x29 },
{ AWB_RMAXB_REG, 0x50 }, { AWB_RMINB_REG, 0x43 },
{ AWB_BMAXB_REG, 0x30 }, { AWB_BMINB_REG, 0x22 },
/* Auto exposure */
{ AE_CTL1_REG, 0x8C }, { AE_CTL2_REG, 0x04 },
{ AE_FRM_CTL_REG, 0x01 }, { AE_FINE_CTL_REG(0), 0x3F },
{ AE_FINE_CTL_REG(1), 0xA3 }, { AE_FINE_CTL_REG(3), 0x34 },
/* Lens shading compensation */
{ LENS_CTRL_REG, 0x01 }, { LENS_XCEN_REG, 0x80 },
{ LENS_YCEN_REG, 0x70 }, { LENS_R_COMP_REG, 0x53 },
{ LENS_G_COMP_REG, 0x40 }, { LENS_B_COMP_REG, 0x3e },
{ REG_TERM, 0 },
};
static inline struct sr030pc30_info *to_sr030pc30(struct v4l2_subdev *sd)
{
return container_of(sd, struct sr030pc30_info, sd);
}
static inline int set_i2c_page(struct sr030pc30_info *info,
struct i2c_client *client, unsigned int reg)
{
int ret = 0;
u32 page = reg >> 8 & 0xFF;
if (info->i2c_reg_page != page && (reg & 0xFF) != 0x03) {
ret = i2c_smbus_write_byte_data(client, PAGEMODE_REG, page);
if (!ret)
info->i2c_reg_page = page;
}
return ret;
}
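/*
* Illustration (hypothetical helpers, not driver code): with the 16-bit
* encoding described above, ZLPF_CTRL_REG (0x1110) denotes register 0x10
* on page 0x11. set_i2c_page() writes the page id to PAGEMODE_REG only
* when it changes; the subsequent SMBus access then uses just the low
* address byte.
*/
static inline u8 example_reg_page(u32 reg) { return reg >> 8 & 0xFF; }
static inline u8 example_reg_addr(u32 reg) { return reg & 0xFF; }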
static int cam_i2c_read(struct v4l2_subdev *sd, u32 reg_addr)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct sr030pc30_info *info = to_sr030pc30(sd);
int ret = set_i2c_page(info, client, reg_addr);
if (!ret)
ret = i2c_smbus_read_byte_data(client, reg_addr & 0xFF);
return ret;
}
static int cam_i2c_write(struct v4l2_subdev *sd, u32 reg_addr, u32 val)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct sr030pc30_info *info = to_sr030pc30(sd);
int ret = set_i2c_page(info, client, reg_addr);
if (!ret)
ret = i2c_smbus_write_byte_data(
client, reg_addr & 0xFF, val);
return ret;
}
static inline int sr030pc30_bulk_write_reg(struct v4l2_subdev *sd,
const struct i2c_regval *msg)
{
while (msg->addr != REG_TERM) {
int ret = cam_i2c_write(sd, msg->addr, msg->val);
if (ret)
return ret;
msg++;
}
return 0;
}
/* Device reset and sleep mode control */
static int sr030pc30_pwr_ctrl(struct v4l2_subdev *sd,
bool reset, bool sleep)
{
struct sr030pc30_info *info = to_sr030pc30(sd);
u8 reg = sleep ? 0xF1 : 0xF0;
int ret = 0;
if (reset)
ret = cam_i2c_write(sd, POWER_CTRL_REG, reg | 0x02);
if (!ret) {
ret = cam_i2c_write(sd, POWER_CTRL_REG, reg);
if (!ret) {
info->sleep = sleep;
if (reset)
info->i2c_reg_page = -1;
}
}
return ret;
}
static inline int sr030pc30_enable_autoexposure(struct v4l2_subdev *sd, int on)
{
struct sr030pc30_info *info = to_sr030pc30(sd);
/* auto anti-flicker is also enabled here */
int ret = cam_i2c_write(sd, AE_CTL1_REG, on ? 0xDC : 0x0C);
if (!ret)
info->auto_exp = on;
return ret;
}
static int sr030pc30_set_exposure(struct v4l2_subdev *sd, int value)
{
struct sr030pc30_info *info = to_sr030pc30(sd);
unsigned long expos = value * info->pdata->clk_rate / (8 * 1000);
int ret = cam_i2c_write(sd, EXP_TIMEH_REG, expos >> 16 & 0xFF);
if (!ret)
ret = cam_i2c_write(sd, EXP_TIMEM_REG, expos >> 8 & 0xFF);
if (!ret)
ret = cam_i2c_write(sd, EXP_TIMEL_REG, expos & 0xFF);
if (!ret) { /* Turn off AE */
info->exposure = value;
ret = sr030pc30_enable_autoexposure(sd, 0);
}
return ret;
}
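/*
* Worked example (illustrative; the 24 MHz clock is an assumption, as
* pdata->clk_rate is board-specific): a requested exposure of 10 ms gives
*
* expos = 10 * 24000000 / (8 * 1000) = 30000 = 0x007530,
*
* written as 0x00 / 0x75 / 0x30 to EXP_TIMEH/EXP_TIMEM/EXP_TIMEL above.
*/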
/* Automatic white balance control */
static int sr030pc30_enable_autowhitebalance(struct v4l2_subdev *sd, int on)
{
struct sr030pc30_info *info = to_sr030pc30(sd);
int ret = cam_i2c_write(sd, AWB_CTL2_REG, on ? 0x2E : 0x2F);
if (!ret)
ret = cam_i2c_write(sd, AWB_CTL1_REG, on ? 0xFB : 0x7B);
if (!ret)
info->auto_wb = on;
return ret;
}
static int sr030pc30_set_flip(struct v4l2_subdev *sd)
{
struct sr030pc30_info *info = to_sr030pc30(sd);
s32 reg = cam_i2c_read(sd, VDO_CTL2_REG);
if (reg < 0)
return reg;
reg &= 0x7C;
if (info->hflip)
reg |= 0x01;
if (info->vflip)
reg |= 0x02;
return cam_i2c_write(sd, VDO_CTL2_REG, reg | 0x80);
}
/* Configure resolution, color format and image flip */
static int sr030pc30_set_params(struct v4l2_subdev *sd)
{
struct sr030pc30_info *info = to_sr030pc30(sd);
int ret;
if (!info->curr_win)
return -EINVAL;
/* Configure the resolution through subsampling */
ret = cam_i2c_write(sd, VDO_CTL1_REG,
info->curr_win->vid_ctl1);
if (!ret && info->curr_fmt)
ret = cam_i2c_write(sd, ISP_CTL_REG(0),
info->curr_fmt->ispctl1_reg);
if (!ret)
ret = sr030pc30_set_flip(sd);
return ret;
}
/* Find nearest matching image pixel size. */
static int sr030pc30_try_frame_size(struct v4l2_mbus_framefmt *mf)
{
unsigned int min_err = ~0;
int i = ARRAY_SIZE(sr030pc30_sizes);
const struct sr030pc30_frmsize *fsize = &sr030pc30_sizes[0],
*match = NULL;
while (i--) {
int err = abs(fsize->width - mf->width)
+ abs(fsize->height - mf->height);
if (err < min_err) {
min_err = err;
match = fsize;
}
fsize++;
}
if (match) {
mf->width = match->width;
mf->height = match->height;
return 0;
}
return -EINVAL;
}
static int sr030pc30_queryctrl(struct v4l2_subdev *sd,
struct v4l2_queryctrl *qc)
{
int i;
for (i = 0; i < ARRAY_SIZE(sr030pc30_ctrl); i++)
if (qc->id == sr030pc30_ctrl[i].id) {
*qc = sr030pc30_ctrl[i];
v4l2_dbg(1, debug, sd, "%s id: %d\n",
__func__, qc->id);
return 0;
}
return -EINVAL;
}
static inline int sr030pc30_set_bluebalance(struct v4l2_subdev *sd, int value)
{
int ret = cam_i2c_write(sd, MWB_BGAIN_REG, value);
if (!ret)
to_sr030pc30(sd)->blue_balance = value;
return ret;
}
static inline int sr030pc30_set_redbalance(struct v4l2_subdev *sd, int value)
{
int ret = cam_i2c_write(sd, MWB_RGAIN_REG, value);
if (!ret)
to_sr030pc30(sd)->red_balance = value;
return ret;
}
static int sr030pc30_s_ctrl(struct v4l2_subdev *sd,
struct v4l2_control *ctrl)
{
int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(sr030pc30_ctrl); i++)
if (ctrl->id == sr030pc30_ctrl[i].id)
break;
if (i == ARRAY_SIZE(sr030pc30_ctrl))
return -EINVAL;
if (ctrl->value < sr030pc30_ctrl[i].minimum ||
ctrl->value > sr030pc30_ctrl[i].maximum)
return -ERANGE;
v4l2_dbg(1, debug, sd, "%s: ctrl_id: %d, value: %d\n",
__func__, ctrl->id, ctrl->value);
switch (ctrl->id) {
case V4L2_CID_AUTO_WHITE_BALANCE:
sr030pc30_enable_autowhitebalance(sd, ctrl->value);
break;
case V4L2_CID_BLUE_BALANCE:
ret = sr030pc30_set_bluebalance(sd, ctrl->value);
break;
case V4L2_CID_RED_BALANCE:
ret = sr030pc30_set_redbalance(sd, ctrl->value);
break;
case V4L2_CID_EXPOSURE_AUTO:
sr030pc30_enable_autoexposure(sd,
ctrl->value == V4L2_EXPOSURE_AUTO);
break;
case V4L2_CID_EXPOSURE:
ret = sr030pc30_set_exposure(sd, ctrl->value);
break;
default:
return -EINVAL;
}
return ret;
}
static int sr030pc30_g_ctrl(struct v4l2_subdev *sd,
struct v4l2_control *ctrl)
{
struct sr030pc30_info *info = to_sr030pc30(sd);
v4l2_dbg(1, debug, sd, "%s: id: %d\n", __func__, ctrl->id);
switch (ctrl->id) {
case V4L2_CID_AUTO_WHITE_BALANCE:
ctrl->value = info->auto_wb;
break;
case V4L2_CID_BLUE_BALANCE:
ctrl->value = info->blue_balance;
break;
case V4L2_CID_RED_BALANCE:
ctrl->value = info->red_balance;
break;
case V4L2_CID_EXPOSURE_AUTO:
ctrl->value = info->auto_exp;
break;
case V4L2_CID_EXPOSURE:
ctrl->value = info->exposure;
break;
default:
return -EINVAL;
}
return 0;
}
static int sr030pc30_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
enum v4l2_mbus_pixelcode *code)
{
if (!code || index >= ARRAY_SIZE(sr030pc30_formats))
return -EINVAL;
*code = sr030pc30_formats[index].code;
return 0;
}
static int sr030pc30_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
struct sr030pc30_info *info = to_sr030pc30(sd);
int ret;
if (!mf)
return -EINVAL;
if (!info->curr_win || !info->curr_fmt) {
ret = sr030pc30_set_params(sd);
if (ret)
return ret;
}
mf->width = info->curr_win->width;
mf->height = info->curr_win->height;
mf->code = info->curr_fmt->code;
mf->colorspace = info->curr_fmt->colorspace;
mf->field = V4L2_FIELD_NONE;
return 0;
}
/* Return nearest media bus frame format. */
static const struct sr030pc30_format *try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
int i = ARRAY_SIZE(sr030pc30_formats);
sr030pc30_try_frame_size(mf);
while (i--)
if (mf->code == sr030pc30_formats[i].code)
break;
mf->code = sr030pc30_formats[i].code;
return &sr030pc30_formats[i];
}
/* Return nearest media bus frame format. */
static int sr030pc30_try_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
if (!sd || !mf)
return -EINVAL;
try_fmt(sd, mf);
return 0;
}
static int sr030pc30_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
struct sr030pc30_info *info = to_sr030pc30(sd);
if (!sd || !mf)
return -EINVAL;
info->curr_fmt = try_fmt(sd, mf);
return sr030pc30_set_params(sd);
}
static int sr030pc30_base_config(struct v4l2_subdev *sd)
{
struct sr030pc30_info *info = to_sr030pc30(sd);
int ret;
unsigned long expmin, expmax;
ret = sr030pc30_bulk_write_reg(sd, sr030pc30_base_regs);
if (!ret) {
info->curr_fmt = &sr030pc30_formats[0];
info->curr_win = &sr030pc30_sizes[0];
ret = sr030pc30_set_params(sd);
}
if (!ret)
ret = sr030pc30_pwr_ctrl(sd, false, false);
if (!ret && !info->pdata)
return ret;
expmin = EXPOS_MIN_MS * info->pdata->clk_rate / (8 * 1000);
expmax = EXPOS_MAX_MS * info->pdata->clk_rate / (8 * 1000);
v4l2_dbg(1, debug, sd, "%s: expmin= %lx, expmax= %lx", __func__,
expmin, expmax);
/* Setting up manual exposure time range */
ret = cam_i2c_write(sd, EXP_MMINH_REG, expmin >> 8 & 0xFF);
if (!ret)
ret = cam_i2c_write(sd, EXP_MMINL_REG, expmin & 0xFF);
if (!ret)
ret = cam_i2c_write(sd, EXP_MMAXH_REG, expmax >> 16 & 0xFF);
if (!ret)
ret = cam_i2c_write(sd, EXP_MMAXM_REG, expmax >> 8 & 0xFF);
if (!ret)
ret = cam_i2c_write(sd, EXP_MMAXL_REG, expmax & 0xFF);
return ret;
}
static int sr030pc30_s_power(struct v4l2_subdev *sd, int on)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct sr030pc30_info *info = to_sr030pc30(sd);
const struct sr030pc30_platform_data *pdata = info->pdata;
int ret;
if (pdata == NULL) {
WARN(1, "No platform data!\n");
return -EINVAL;
}
/*
* Put sensor into power sleep mode before switching off
* power and disabling MCLK.
*/
if (!on)
sr030pc30_pwr_ctrl(sd, false, true);
/* set_power controls sensor's power and clock */
if (pdata->set_power) {
ret = pdata->set_power(&client->dev, on);
if (ret)
return ret;
}
if (on) {
ret = sr030pc30_base_config(sd);
} else {
ret = 0;
info->curr_win = NULL;
info->curr_fmt = NULL;
}
return ret;
}
static const struct v4l2_subdev_core_ops sr030pc30_core_ops = {
.s_power = sr030pc30_s_power,
.queryctrl = sr030pc30_queryctrl,
.s_ctrl = sr030pc30_s_ctrl,
.g_ctrl = sr030pc30_g_ctrl,
};
static const struct v4l2_subdev_video_ops sr030pc30_video_ops = {
.g_mbus_fmt = sr030pc30_g_fmt,
.s_mbus_fmt = sr030pc30_s_fmt,
.try_mbus_fmt = sr030pc30_try_fmt,
.enum_mbus_fmt = sr030pc30_enum_fmt,
};
static const struct v4l2_subdev_ops sr030pc30_ops = {
.core = &sr030pc30_core_ops,
.video = &sr030pc30_video_ops,
};
/*
* Detect sensor type. Return 0 if SR030PC30 was detected
* or -ENODEV otherwise.
*/
static int sr030pc30_detect(struct i2c_client *client)
{
const struct sr030pc30_platform_data *pdata
= client->dev.platform_data;
int ret;
/* Enable sensor's power and clock */
if (pdata->set_power) {
ret = pdata->set_power(&client->dev, 1);
if (ret)
return ret;
}
ret = i2c_smbus_read_byte_data(client, DEVICE_ID_REG);
if (pdata->set_power)
pdata->set_power(&client->dev, 0);
if (ret < 0) {
dev_err(&client->dev, "%s: I2C read failed\n", __func__);
return ret;
}
return ret == SR030PC30_ID ? 0 : -ENODEV;
}
static int sr030pc30_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct sr030pc30_info *info;
struct v4l2_subdev *sd;
const struct sr030pc30_platform_data *pdata
= client->dev.platform_data;
int ret;
if (!pdata) {
dev_err(&client->dev, "No platform data!");
return -EIO;
}
ret = sr030pc30_detect(client);
if (ret)
return ret;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
sd = &info->sd;
strcpy(sd->name, MODULE_NAME);
info->pdata = client->dev.platform_data;
v4l2_i2c_subdev_init(sd, client, &sr030pc30_ops);
info->i2c_reg_page = -1;
info->hflip = 1;
info->auto_exp = 1;
info->exposure = 30;
return 0;
}
static int sr030pc30_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct sr030pc30_info *info = to_sr030pc30(sd);
v4l2_device_unregister_subdev(sd);
kfree(info);
return 0;
}
static const struct i2c_device_id sr030pc30_id[] = {
{ MODULE_NAME, 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, sr030pc30_id);
static struct i2c_driver sr030pc30_i2c_driver = {
.driver = {
.name = MODULE_NAME
},
.probe = sr030pc30_probe,
.remove = sr030pc30_remove,
.id_table = sr030pc30_id,
};
module_i2c_driver(sr030pc30_i2c_driver);
MODULE_DESCRIPTION("Siliconfile SR030PC30 camera driver");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
emceethemouth/kernel_cancro | drivers/media/video/saa7164/saa7164-cards.c | 8772 | 21639 | /*
* Driver for the NXP SAA7164 PCIe bridge
*
* Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "saa7164.h"
/* The Bridge API needs to understand register widths (in bytes) for the
* attached I2C devices, so we can simplify the virtual i2c mechanisms
* and keep the -i2c.c implementation clean.
*/
#define REGLEN_8bit 1
#define REGLEN_16bit 2
struct saa7164_board saa7164_boards[] = {
[SAA7164_BOARD_UNKNOWN] = {
/* Bridge will not load any firmware, without knowing
* the rev this would be fatal. */
.name = "Unknown",
},
[SAA7164_BOARD_UNKNOWN_REV2] = {
/* Bridge will load the v2 f/w and dump descriptors */
/* Required during new board bringup */
.name = "Generic Rev2",
.chiprev = SAA7164_CHIP_REV2,
},
[SAA7164_BOARD_UNKNOWN_REV3] = {
/* Bridge will load the v2 f/w and dump descriptors */
/* Required during new board bringup */
.name = "Generic Rev3",
.chiprev = SAA7164_CHIP_REV3,
},
[SAA7164_BOARD_HAUPPAUGE_HVR2200] = {
.name = "Hauppauge WinTV-HVR2200",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
.portc = SAA7164_MPEG_ENCODER,
.portd = SAA7164_MPEG_ENCODER,
.porte = SAA7164_MPEG_VBI,
.portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x1d,
.type = SAA7164_UNIT_EEPROM,
.name = "4K EEPROM",
.i2c_bus_nr = SAA7164_I2C_BUS_0,
.i2c_bus_addr = 0xa0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x04,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x1b,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x1e,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "TDA10048-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0x10 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x1f,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "TDA10048-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0x12 >> 1,
.i2c_reg_len = REGLEN_8bit,
} },
},
[SAA7164_BOARD_HAUPPAUGE_HVR2200_2] = {
.name = "Hauppauge WinTV-HVR2200",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
.portc = SAA7164_MPEG_ENCODER,
.portd = SAA7164_MPEG_ENCODER,
.porte = SAA7164_MPEG_VBI,
.portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV2,
.unit = {{
.id = 0x06,
.type = SAA7164_UNIT_EEPROM,
.name = "4K EEPROM",
.i2c_bus_nr = SAA7164_I2C_BUS_0,
.i2c_bus_addr = 0xa0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x04,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x05,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "TDA10048-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0x10 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x1e,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x1f,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "TDA10048-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0x12 >> 1,
.i2c_reg_len = REGLEN_8bit,
} },
},
[SAA7164_BOARD_HAUPPAUGE_HVR2200_3] = {
.name = "Hauppauge WinTV-HVR2200",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
.portc = SAA7164_MPEG_ENCODER,
.portd = SAA7164_MPEG_ENCODER,
.porte = SAA7164_MPEG_VBI,
.portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV2,
.unit = {{
.id = 0x1d,
.type = SAA7164_UNIT_EEPROM,
.name = "4K EEPROM",
.i2c_bus_nr = SAA7164_I2C_BUS_0,
.i2c_bus_addr = 0xa0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x04,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x05,
.type = SAA7164_UNIT_ANALOG_DEMODULATOR,
.name = "TDA8290-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0x84 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x1b,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x1c,
.type = SAA7164_UNIT_ANALOG_DEMODULATOR,
.name = "TDA8290-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0x84 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x1e,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "TDA10048-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0x10 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x1f,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "TDA10048-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0x12 >> 1,
.i2c_reg_len = REGLEN_8bit,
} },
},
[SAA7164_BOARD_HAUPPAUGE_HVR2200_4] = {
.name = "Hauppauge WinTV-HVR2200",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
.portc = SAA7164_MPEG_ENCODER,
.portd = SAA7164_MPEG_ENCODER,
.porte = SAA7164_MPEG_VBI,
.portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x1d,
.type = SAA7164_UNIT_EEPROM,
.name = "4K EEPROM",
.i2c_bus_nr = SAA7164_I2C_BUS_0,
.i2c_bus_addr = 0xa0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x04,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x05,
.type = SAA7164_UNIT_ANALOG_DEMODULATOR,
.name = "TDA8290-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0x84 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x1b,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x1c,
.type = SAA7164_UNIT_ANALOG_DEMODULATOR,
.name = "TDA8290-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0x84 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x1e,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "TDA10048-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0x10 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x1f,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "TDA10048-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0x12 >> 1,
.i2c_reg_len = REGLEN_8bit,
} },
},
[SAA7164_BOARD_HAUPPAUGE_HVR2250] = {
.name = "Hauppauge WinTV-HVR2250",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
.portc = SAA7164_MPEG_ENCODER,
.portd = SAA7164_MPEG_ENCODER,
.porte = SAA7164_MPEG_VBI,
.portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x22,
.type = SAA7164_UNIT_EEPROM,
.name = "4K EEPROM",
.i2c_bus_nr = SAA7164_I2C_BUS_0,
.i2c_bus_addr = 0xa0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x04,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x07,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "CX24228/S5H1411-1 (TOP)",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0x32 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x08,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "CX24228/S5H1411-1 (QAM)",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0x34 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x1e,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x20,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "CX24228/S5H1411-2 (TOP)",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0x32 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x23,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "CX24228/S5H1411-2 (QAM)",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0x34 >> 1,
.i2c_reg_len = REGLEN_8bit,
} },
},
[SAA7164_BOARD_HAUPPAUGE_HVR2250_2] = {
.name = "Hauppauge WinTV-HVR2250",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
.portc = SAA7164_MPEG_ENCODER,
.portd = SAA7164_MPEG_ENCODER,
.porte = SAA7164_MPEG_VBI,
.portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x28,
.type = SAA7164_UNIT_EEPROM,
.name = "4K EEPROM",
.i2c_bus_nr = SAA7164_I2C_BUS_0,
.i2c_bus_addr = 0xa0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x04,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x07,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "CX24228/S5H1411-1 (TOP)",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0x32 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x08,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "CX24228/S5H1411-1 (QAM)",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0x34 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x24,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x26,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "CX24228/S5H1411-2 (TOP)",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0x32 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x29,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "CX24228/S5H1411-2 (QAM)",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0x34 >> 1,
.i2c_reg_len = REGLEN_8bit,
} },
},
[SAA7164_BOARD_HAUPPAUGE_HVR2250_3] = {
.name = "Hauppauge WinTV-HVR2250",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
.portc = SAA7164_MPEG_ENCODER,
.portd = SAA7164_MPEG_ENCODER,
.porte = SAA7164_MPEG_VBI,
.portf = SAA7164_MPEG_VBI,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x26,
.type = SAA7164_UNIT_EEPROM,
.name = "4K EEPROM",
.i2c_bus_nr = SAA7164_I2C_BUS_0,
.i2c_bus_addr = 0xa0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x04,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x07,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "CX24228/S5H1411-1 (TOP)",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0x32 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x08,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "CX24228/S5H1411-1 (QAM)",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0x34 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x22,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x24,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "CX24228/S5H1411-2 (TOP)",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0x32 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x27,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "CX24228/S5H1411-2 (QAM)",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0x34 >> 1,
.i2c_reg_len = REGLEN_8bit,
} },
},
[SAA7164_BOARD_HAUPPAUGE_HVR2200_5] = {
.name = "Hauppauge WinTV-HVR2200",
.porta = SAA7164_MPEG_DVB,
.portb = SAA7164_MPEG_DVB,
.chiprev = SAA7164_CHIP_REV3,
.unit = {{
.id = 0x23,
.type = SAA7164_UNIT_EEPROM,
.name = "4K EEPROM",
.i2c_bus_nr = SAA7164_I2C_BUS_0,
.i2c_bus_addr = 0xa0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x04,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x05,
.type = SAA7164_UNIT_ANALOG_DEMODULATOR,
.name = "TDA8290-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0x84 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x21,
.type = SAA7164_UNIT_TUNER,
.name = "TDA18271-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0xc0 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x22,
.type = SAA7164_UNIT_ANALOG_DEMODULATOR,
.name = "TDA8290-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0x84 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x24,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "TDA10048-1",
.i2c_bus_nr = SAA7164_I2C_BUS_1,
.i2c_bus_addr = 0x10 >> 1,
.i2c_reg_len = REGLEN_8bit,
}, {
.id = 0x25,
.type = SAA7164_UNIT_DIGITAL_DEMODULATOR,
.name = "TDA10048-2",
.i2c_bus_nr = SAA7164_I2C_BUS_2,
.i2c_bus_addr = 0x12 >> 1,
.i2c_reg_len = REGLEN_8bit,
} },
},
};
const unsigned int saa7164_bcount = ARRAY_SIZE(saa7164_boards);
/* ------------------------------------------------------------------ */
/* PCI subsystem IDs */
struct saa7164_subid saa7164_subids[] = {
{
.subvendor = 0x0070,
.subdevice = 0x8880,
.card = SAA7164_BOARD_HAUPPAUGE_HVR2250,
}, {
.subvendor = 0x0070,
.subdevice = 0x8810,
.card = SAA7164_BOARD_HAUPPAUGE_HVR2250,
}, {
.subvendor = 0x0070,
.subdevice = 0x8980,
.card = SAA7164_BOARD_HAUPPAUGE_HVR2200,
}, {
.subvendor = 0x0070,
.subdevice = 0x8900,
.card = SAA7164_BOARD_HAUPPAUGE_HVR2200_2,
}, {
.subvendor = 0x0070,
.subdevice = 0x8901,
.card = SAA7164_BOARD_HAUPPAUGE_HVR2200_3,
}, {
.subvendor = 0x0070,
.subdevice = 0x88A1,
.card = SAA7164_BOARD_HAUPPAUGE_HVR2250_3,
}, {
.subvendor = 0x0070,
.subdevice = 0x8891,
.card = SAA7164_BOARD_HAUPPAUGE_HVR2250_2,
}, {
.subvendor = 0x0070,
.subdevice = 0x8851,
.card = SAA7164_BOARD_HAUPPAUGE_HVR2250_2,
}, {
.subvendor = 0x0070,
.subdevice = 0x8940,
.card = SAA7164_BOARD_HAUPPAUGE_HVR2200_4,
}, {
.subvendor = 0x0070,
.subdevice = 0x8953,
.card = SAA7164_BOARD_HAUPPAUGE_HVR2200_5,
},
};
const unsigned int saa7164_idcount = ARRAY_SIZE(saa7164_subids);
void saa7164_card_list(struct saa7164_dev *dev)
{
int i;
if (0 == dev->pci->subsystem_vendor &&
0 == dev->pci->subsystem_device) {
printk(KERN_ERR
"%s: Board has no valid PCIe Subsystem ID and can't\n"
"%s: be autodetected. Pass card=<n> insmod option to\n"
"%s: workaround that. Send complaints to the vendor\n"
"%s: of the TV card. Best regards,\n"
"%s: -- tux\n",
dev->name, dev->name, dev->name, dev->name, dev->name);
} else {
printk(KERN_ERR
"%s: Your board isn't known (yet) to the driver.\n"
"%s: Try to pick one of the existing card configs via\n"
"%s: card=<n> insmod option. Updating to the latest\n"
"%s: version might help as well.\n",
dev->name, dev->name, dev->name, dev->name);
}
printk(KERN_ERR "%s: Here are valid choices for the card=<n> insmod "
"option:\n", dev->name);
for (i = 0; i < saa7164_bcount; i++)
printk(KERN_ERR "%s: card=%d -> %s\n",
dev->name, i, saa7164_boards[i].name);
}
/* TODO: clean this define up into the -cards.c structs */
#define PCIEBRIDGE_UNITID 2
void saa7164_gpio_setup(struct saa7164_dev *dev)
{
switch (dev->board) {
case SAA7164_BOARD_HAUPPAUGE_HVR2200:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_2:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_3:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_4:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_5:
case SAA7164_BOARD_HAUPPAUGE_HVR2250:
case SAA7164_BOARD_HAUPPAUGE_HVR2250_2:
case SAA7164_BOARD_HAUPPAUGE_HVR2250_3:
/*
GPIO 2: s5h1411 / tda10048-1 demod reset
GPIO 3: s5h1411 / tda10048-2 demod reset
GPIO 7: IRBlaster Zilog reset
*/
/* Reset parts by going in and out of reset */
saa7164_api_clear_gpiobit(dev, PCIEBRIDGE_UNITID, 2);
saa7164_api_clear_gpiobit(dev, PCIEBRIDGE_UNITID, 3);
msleep(20);
saa7164_api_set_gpiobit(dev, PCIEBRIDGE_UNITID, 2);
saa7164_api_set_gpiobit(dev, PCIEBRIDGE_UNITID, 3);
break;
}
}
static void hauppauge_eeprom(struct saa7164_dev *dev, u8 *eeprom_data)
{
struct tveeprom tv;
/* TODO: Assumption: eeprom on bus 0 */
tveeprom_hauppauge_analog(&dev->i2c_bus[0].i2c_client, &tv,
eeprom_data);
/* Make sure we support the board model */
switch (tv.model) {
case 88001:
/* Development board - Limit circulation */
/* WinTV-HVR2250 (PCIe, Retail, full-height bracket)
* ATSC/QAM (TDA18271/S5H1411) and basic analog, no IR, FM */
case 88021:
/* WinTV-HVR2250 (PCIe, Retail, full-height bracket)
* ATSC/QAM (TDA18271/S5H1411) and basic analog, MCE CIR, FM */
break;
case 88041:
/* WinTV-HVR2250 (PCIe, Retail, full-height bracket)
* ATSC/QAM (TDA18271/S5H1411) and basic analog, no IR, FM */
break;
case 88061:
/* WinTV-HVR2250 (PCIe, Retail, full-height bracket)
* ATSC/QAM (TDA18271/S5H1411) and basic analog, FM */
break;
case 89519:
case 89609:
/* WinTV-HVR2200 (PCIe, Retail, full-height)
* DVB-T (TDA18271/TDA10048) and basic analog, no IR */
break;
case 89619:
/* WinTV-HVR2200 (PCIe, Retail, half-height)
* DVB-T (TDA18271/TDA10048) and basic analog, no IR */
break;
default:
printk(KERN_ERR "%s: Warning: Unknown Hauppauge model #%d\n",
dev->name, tv.model);
break;
}
printk(KERN_INFO "%s: Hauppauge eeprom: model=%d\n", dev->name,
tv.model);
}
void saa7164_card_setup(struct saa7164_dev *dev)
{
static u8 eeprom[256];
if (dev->i2c_bus[0].i2c_rc == 0) {
if (saa7164_api_read_eeprom(dev, &eeprom[0],
sizeof(eeprom)) < 0)
return;
}
switch (dev->board) {
case SAA7164_BOARD_HAUPPAUGE_HVR2200:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_2:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_3:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_4:
case SAA7164_BOARD_HAUPPAUGE_HVR2200_5:
case SAA7164_BOARD_HAUPPAUGE_HVR2250:
case SAA7164_BOARD_HAUPPAUGE_HVR2250_2:
case SAA7164_BOARD_HAUPPAUGE_HVR2250_3:
hauppauge_eeprom(dev, &eeprom[0]);
break;
}
}
/* With most other drivers, the kernel expects to communicate with subdrivers
* through i2c. This bridge does not allow that; it does not expose any direct
* access to I2C. Instead we have to communicate through the device f/w for
* register access to 'processing units'. Each unit has a unique
* id, regardless of how the physical implementation occurs across
* the three physical i2c busses. That being said, if we want to leverage
* the existing kernel drivers for tuners and demods, we have to 'speak i2c',
* so this bridge implements 3 virtual i2c buses. This is a helper function
* for those.
*
* Description: Translate the kernel's notion of an i2c address and bus into
* the appropriate unitid.
*/
int saa7164_i2caddr_to_unitid(struct saa7164_i2c *bus, int addr)
{
/* For a given bus and i2c device address, return the saa7164 unique
* unitid. < 0 on error */
struct saa7164_dev *dev = bus->dev;
struct saa7164_unit *unit;
int i;
for (i = 0; i < SAA7164_MAX_UNITS; i++) {
unit = &saa7164_boards[dev->board].unit[i];
if (unit->type == SAA7164_UNIT_UNDEFINED)
continue;
if ((bus->nr == unit->i2c_bus_nr) &&
(addr == unit->i2c_bus_addr))
return unit->id;
}
return -1;
}
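/*
* Usage sketch (hypothetical, derived from the board tables above): on a
* WinTV-HVR2200 rev3 the first TDA18271 tuner sits on virtual bus 1 at
* 7-bit address 0x60 (0xc0 >> 1), so this lookup would yield unitid 0x04.
*/
static inline int example_tuner_unitid(struct saa7164_i2c *bus)
{
return saa7164_i2caddr_to_unitid(bus, 0xc0 >> 1);
}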
/* The 7164 API needs to know the i2c register length in advance.
* This is a helper function. Based on a specific chip addr and bus, return the
* reg length.
*/
int saa7164_i2caddr_to_reglen(struct saa7164_i2c *bus, int addr)
{
/* For a given bus and i2c device address, return the
* saa7164 registry address width. < 0 on error
*/
struct saa7164_dev *dev = bus->dev;
struct saa7164_unit *unit;
int i;
for (i = 0; i < SAA7164_MAX_UNITS; i++) {
unit = &saa7164_boards[dev->board].unit[i];
if (unit->type == SAA7164_UNIT_UNDEFINED)
continue;
if ((bus->nr == unit->i2c_bus_nr) &&
(addr == unit->i2c_bus_addr))
return unit->i2c_reg_len;
}
return -1;
}
/* TODO: implement a 'findeeprom' function like the above and fix any other
* eeprom-related TODOs in -api.c.
*/
/* Translate a unitid into a human-readable device name, for display purposes. */
char *saa7164_unitid_name(struct saa7164_dev *dev, u8 unitid)
{
char *undefed = "UNDEFINED";
char *bridge = "BRIDGE";
struct saa7164_unit *unit;
int i;
if (unitid == 0)
return bridge;
for (i = 0; i < SAA7164_MAX_UNITS; i++) {
unit = &saa7164_boards[dev->board].unit[i];
if (unit->type == SAA7164_UNIT_UNDEFINED)
continue;
if (unitid == unit->id)
return unit->name;
}
return undefed;
}
| gpl-2.0 |
drewx2/android_kernel_htc_dlx | virt/drivers/video/cfbcopyarea.c | 9028 | 11287 | /*
* Generic function for frame buffer with packed pixels of any depth.
*
* Copyright (C) 1999-2005 James Simmons <jsimmons@www.infradead.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
* NOTES:
*
* This is for cfb packed pixels. Iplan and such are incorporated in the
* drivers that need them.
*
* FIXME
*
* Also need to add code to deal with cards whose endianness differs from
* the native cpu endianness. I also need to deal with MSB position in the word.
*
* The two functions for copying forward and backward could be split up like
* the ones for filling, i.e. in aligned and unaligned versions. This would
* help moving some redundant computations and branches out of the loop, too.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <asm/types.h>
#include <asm/io.h>
#include "fb_draw.h"
#if BITS_PER_LONG == 32
# define FB_WRITEL fb_writel
# define FB_READL fb_readl
#else
# define FB_WRITEL fb_writeq
# define FB_READL fb_readq
#endif
/*
* Generic bitwise copy algorithm
*/
static void
bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
const unsigned long __iomem *src, int src_idx, int bits,
unsigned n, u32 bswapmask)
{
unsigned long first, last;
int const shift = dst_idx-src_idx;
int left, right;
first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
if (!shift) {
// Same alignment for source and dest
if (dst_idx+n <= bits) {
// Single word
if (last)
first &= last;
FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
} else {
// Multiple destination words
// Leading bits
if (first != ~0UL) {
FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
dst++;
src++;
n -= bits - dst_idx;
}
// Main chunk
n /= bits;
while (n >= 8) {
FB_WRITEL(FB_READL(src++), dst++);
FB_WRITEL(FB_READL(src++), dst++);
FB_WRITEL(FB_READL(src++), dst++);
FB_WRITEL(FB_READL(src++), dst++);
FB_WRITEL(FB_READL(src++), dst++);
FB_WRITEL(FB_READL(src++), dst++);
FB_WRITEL(FB_READL(src++), dst++);
FB_WRITEL(FB_READL(src++), dst++);
n -= 8;
}
while (n--)
FB_WRITEL(FB_READL(src++), dst++);
// Trailing bits
if (last)
FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst);
}
} else {
/* Different alignment for source and dest */
unsigned long d0, d1;
int m;
right = shift & (bits - 1);
left = -shift & (bits - 1);
bswapmask &= shift;
if (dst_idx+n <= bits) {
// Single destination word
if (last)
first &= last;
d0 = FB_READL(src);
d0 = fb_rev_pixels_in_long(d0, bswapmask);
if (shift > 0) {
// Single source word
d0 >>= right;
} else if (src_idx+n <= bits) {
// Single source word
d0 <<= left;
} else {
// 2 source words
d1 = FB_READL(src + 1);
d1 = fb_rev_pixels_in_long(d1, bswapmask);
d0 = d0<<left | d1>>right;
}
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
} else {
// Multiple destination words
/** We must always remember the last value read, because in case
SRC and DST overlap bitwise (e.g. when moving just one pixel in
1bpp), we always collect one full long for DST and that might
overlap with the current long from SRC. We store this value in
'd0'. */
d0 = FB_READL(src++);
d0 = fb_rev_pixels_in_long(d0, bswapmask);
// Leading bits
if (shift > 0) {
// Single source word
d1 = d0;
d0 >>= right;
dst++;
n -= bits - dst_idx;
} else {
// 2 source words
d1 = FB_READL(src++);
d1 = fb_rev_pixels_in_long(d1, bswapmask);
d0 = d0<<left | d1>>right;
dst++;
n -= bits - dst_idx;
}
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
d0 = d1;
// Main chunk
m = n % bits;
n /= bits;
while ((n >= 4) && !bswapmask) {
d1 = FB_READL(src++);
FB_WRITEL(d0 << left | d1 >> right, dst++);
d0 = d1;
d1 = FB_READL(src++);
FB_WRITEL(d0 << left | d1 >> right, dst++);
d0 = d1;
d1 = FB_READL(src++);
FB_WRITEL(d0 << left | d1 >> right, dst++);
d0 = d1;
d1 = FB_READL(src++);
FB_WRITEL(d0 << left | d1 >> right, dst++);
d0 = d1;
n -= 4;
}
while (n--) {
d1 = FB_READL(src++);
d1 = fb_rev_pixels_in_long(d1, bswapmask);
d0 = d0 << left | d1 >> right;
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(d0, dst++);
d0 = d1;
}
// Trailing bits
if (last) {
if (m <= right) {
// Single source word
d0 <<= left;
} else {
// 2 source words
d1 = FB_READL(src);
d1 = fb_rev_pixels_in_long(d1,
bswapmask);
d0 = d0<<left | d1>>right;
}
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
}
}
}
}
/*
* Generic bitwise copy algorithm, operating backward
*/
static void
bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
const unsigned long __iomem *src, int src_idx, int bits,
unsigned n, u32 bswapmask)
{
unsigned long first, last;
int shift;
dst += (n-1)/bits;
src += (n-1)/bits;
if ((n-1) % bits) {
dst_idx += (n-1) % bits;
dst += dst_idx >> (ffs(bits) - 1);
dst_idx &= bits - 1;
src_idx += (n-1) % bits;
src += src_idx >> (ffs(bits) - 1);
src_idx &= bits - 1;
}
shift = dst_idx-src_idx;
first = fb_shifted_pixels_mask_long(p, bits - 1 - dst_idx, bswapmask);
last = ~fb_shifted_pixels_mask_long(p, bits - 1 - ((dst_idx-n) % bits),
bswapmask);
if (!shift) {
// Same alignment for source and dest
if ((unsigned long)dst_idx+1 >= n) {
// Single word
if (last)
first &= last;
FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
} else {
// Multiple destination words
// Leading bits
if (first != ~0UL) {
FB_WRITEL( comp( FB_READL(src), FB_READL(dst), first), dst);
dst--;
src--;
n -= dst_idx+1;
}
// Main chunk
n /= bits;
while (n >= 8) {
FB_WRITEL(FB_READL(src--), dst--);
FB_WRITEL(FB_READL(src--), dst--);
FB_WRITEL(FB_READL(src--), dst--);
FB_WRITEL(FB_READL(src--), dst--);
FB_WRITEL(FB_READL(src--), dst--);
FB_WRITEL(FB_READL(src--), dst--);
FB_WRITEL(FB_READL(src--), dst--);
FB_WRITEL(FB_READL(src--), dst--);
n -= 8;
}
while (n--)
FB_WRITEL(FB_READL(src--), dst--);
// Trailing bits
if (last)
FB_WRITEL( comp( FB_READL(src), FB_READL(dst), last), dst);
}
} else {
// Different alignment for source and dest
unsigned long d0, d1;
int m;
int const left = -shift & (bits-1);
int const right = shift & (bits-1);
bswapmask &= shift;
if ((unsigned long)dst_idx+1 >= n) {
// Single destination word
if (last)
first &= last;
d0 = FB_READL(src);
if (shift < 0) {
// Single source word
d0 <<= left;
} else if (1+(unsigned long)src_idx >= n) {
// Single source word
d0 >>= right;
} else {
// 2 source words
d1 = FB_READL(src - 1);
d1 = fb_rev_pixels_in_long(d1, bswapmask);
d0 = d0>>right | d1<<left;
}
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
} else {
// Multiple destination words
/** We must always remember the last value read, because in case
SRC and DST overlap bitwise (e.g. when moving just one pixel in
1bpp), we always collect one full long for DST and that might
overlap with the current long from SRC. We store this value in
'd0'. */
d0 = FB_READL(src--);
d0 = fb_rev_pixels_in_long(d0, bswapmask);
// Leading bits
if (shift < 0) {
// Single source word
d1 = d0;
d0 <<= left;
} else {
// 2 source words
d1 = FB_READL(src--);
d1 = fb_rev_pixels_in_long(d1, bswapmask);
d0 = d0>>right | d1<<left;
}
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
d0 = d1;
dst--;
n -= dst_idx+1;
// Main chunk
m = n % bits;
n /= bits;
while ((n >= 4) && !bswapmask) {
d1 = FB_READL(src--);
FB_WRITEL(d0 >> right | d1 << left, dst--);
d0 = d1;
d1 = FB_READL(src--);
FB_WRITEL(d0 >> right | d1 << left, dst--);
d0 = d1;
d1 = FB_READL(src--);
FB_WRITEL(d0 >> right | d1 << left, dst--);
d0 = d1;
d1 = FB_READL(src--);
FB_WRITEL(d0 >> right | d1 << left, dst--);
d0 = d1;
n -= 4;
}
while (n--) {
d1 = FB_READL(src--);
d1 = fb_rev_pixels_in_long(d1, bswapmask);
d0 = d0 >> right | d1 << left;
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(d0, dst--);
d0 = d1;
}
// Trailing bits
if (last) {
if (m <= left) {
// Single source word
d0 >>= right;
} else {
// 2 source words
d1 = FB_READL(src);
d1 = fb_rev_pixels_in_long(d1,
bswapmask);
d0 = d0>>right | d1<<left;
}
d0 = fb_rev_pixels_in_long(d0, bswapmask);
FB_WRITEL(comp(d0, FB_READL(dst), last), dst);
}
}
}
}
void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
{
u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
u32 height = area->height, width = area->width;
unsigned long const bits_per_line = p->fix.line_length*8u;
unsigned long __iomem *dst = NULL, *src = NULL;
int bits = BITS_PER_LONG, bytes = bits >> 3;
int dst_idx = 0, src_idx = 0, rev_copy = 0;
u32 bswapmask = fb_compute_bswapmask(p);
if (p->state != FBINFO_STATE_RUNNING)
return;
/* If the beginning of the target area might overlap with the end of
the source area, we have to copy the area in reverse. */
if ((dy == sy && dx > sx) || (dy > sy)) {
dy += height;
sy += height;
rev_copy = 1;
}
// split the base of the framebuffer into a long-aligned address and the
// index of the first bit
dst = src = (unsigned long __iomem *)((unsigned long)p->screen_base & ~(bytes-1));
dst_idx = src_idx = 8*((unsigned long)p->screen_base & (bytes-1));
// add offset of source and target area
dst_idx += dy*bits_per_line + dx*p->var.bits_per_pixel;
src_idx += sy*bits_per_line + sx*p->var.bits_per_pixel;
if (p->fbops->fb_sync)
p->fbops->fb_sync(p);
if (rev_copy) {
while (height--) {
dst_idx -= bits_per_line;
src_idx -= bits_per_line;
dst += dst_idx >> (ffs(bits) - 1);
dst_idx &= (bytes - 1);
src += src_idx >> (ffs(bits) - 1);
src_idx &= (bytes - 1);
bitcpy_rev(p, dst, dst_idx, src, src_idx, bits,
width*p->var.bits_per_pixel, bswapmask);
}
} else {
while (height--) {
dst += dst_idx >> (ffs(bits) - 1);
dst_idx &= (bytes - 1);
src += src_idx >> (ffs(bits) - 1);
src_idx &= (bytes - 1);
bitcpy(p, dst, dst_idx, src, src_idx, bits,
width*p->var.bits_per_pixel, bswapmask);
dst_idx += bits_per_line;
src_idx += bits_per_line;
}
}
}
EXPORT_SYMBOL(cfb_copyarea);
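/*
* Usage sketch (an assumption about typical fbdev wiring, not part of
* this file): drivers without a hardware blitter simply point their
* fb_ops at this generic helper.
*/
static struct fb_ops example_fb_ops = {
.owner = THIS_MODULE,
.fb_copyarea = cfb_copyarea,
/* .fb_fillrect = cfb_fillrect, .fb_imageblit = cfb_imageblit, ... */
};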
MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
MODULE_DESCRIPTION("Generic software accelerated copyarea");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Hashcode/android_kernel_samsung-jf-common | drivers/tty/n_tracesink.c | 12100 | 7253 | /*
* n_tracesink.c - Trace data router and sink path through tty space.
*
* Copyright (C) Intel 2011
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* The trace sink uses the Linux line discipline framework to receive
* trace data coming from the PTI source line discipline driver
* to a user-desired tty port, like USB.
* This is to provide a way to extract modem trace data on
* devices that do not have a PTI HW module, or just need modem
* trace data to come out of a different HW output port.
* This is part of a solution for the P1149.7, compact JTAG, standard.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/tty.h>
#include <linux/tty_ldisc.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <asm-generic/bug.h>
#include "n_tracesink.h"
/*
* Other ldisc drivers use 65536 which basically means,
* 'I can always accept 64k' and flow control is off.
* This number is deemed appropriate for this driver.
*/
#define RECEIVE_ROOM 65536
#define DRIVERNAME "n_tracesink"
/*
* There is a quirk with this ldisc: it can write data
* to a tty from anyone calling its kernel API, which
* meets customer requirements in the drivers/misc/pti.c
* project. So it needs to know when it can and cannot write when
* the API is called. In theory, the API can be called
* after an init() but before a successful open() which
* would crash the system if tty is not checked.
*/
static struct tty_struct *this_tty;
static DEFINE_MUTEX(writelock);
/**
* n_tracesink_open() - Called when a tty is opened by a SW entity.
* @tty: terminal device to the ldisc.
*
* Return:
* 0 for success,
* -EFAULT = couldn't get a kref to the tty that n_tracesink
* will sit on top of
* -EEXIST = open() called successfully once and it cannot
* be called again.
*
* Caveats: open() should only be successful the first time a
* SW entity calls it.
*/
static int n_tracesink_open(struct tty_struct *tty)
{
int retval = -EEXIST;
mutex_lock(&writelock);
if (this_tty == NULL) {
this_tty = tty_kref_get(tty);
if (this_tty == NULL) {
retval = -EFAULT;
} else {
tty->disc_data = this_tty;
tty_driver_flush_buffer(tty);
retval = 0;
}
}
mutex_unlock(&writelock);
return retval;
}
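/*
* Illustrative sketch (assumed usage, not from this driver): open() above
* runs once a userspace entity attaches this line discipline to a tty,
* e.g. via the TIOCSETD ioctl (the tty path below is hypothetical):
*
*	int ldisc = N_TRACESINK;	// from linux/tty.h
*	int fd = open("/dev/ttyPTI0", O_RDWR);
*	ioctl(fd, TIOCSETD, &ldisc);
*/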
/**
* n_tracesink_close() - close connection
* @tty: terminal device to the ldisc.
*
* Called when a software entity wants to close a connection.
*/
static void n_tracesink_close(struct tty_struct *tty)
{
mutex_lock(&writelock);
tty_driver_flush_buffer(tty);
tty_kref_put(this_tty);
this_tty = NULL;
tty->disc_data = NULL;
mutex_unlock(&writelock);
}
/**
* n_tracesink_read() - read request from user space
* @tty: terminal device passed into the ldisc.
* @file: pointer to open file object.
* @buf: pointer to the data buffer that gets eventually returned.
* @nr: number of bytes of the data buffer that is returned.
*
* Function that provides read() functionality to userspace. By default, if
* this is not implemented, the ldisc core returns -EIO. This module acts as
* a router via n_tracesink_receivebuf(), so there is no real requirement
* to implement this function. However, an error return value other than
* -EIO is used deliberately, to show that there was an intent not to
* implement this function. Return value is based on the read() man page.
*
* Return:
* -EINVAL
*/
static ssize_t n_tracesink_read(struct tty_struct *tty, struct file *file,
unsigned char __user *buf, size_t nr)
{
return -EINVAL;
}
/**
* n_tracesink_write() - Function that allows write() in userspace.
* @tty: terminal device passed into the ldisc.
* @file: pointer to open file object.
* @buf: pointer to the data buffer that gets eventually returned.
* @nr: number of bytes of the data buffer that is returned.
*
* By default if this is not implemented, it returns -EIO.
* This should not be implemented, ever, because
* 1. this driver is functioning like a router via
* n_tracesink_receivebuf()
* 2. No writes to HW will ever go through this line discipline driver.
* However, an error return value other than -EIO should be used
* just to show that there was an intent not to have this function
* implemented. Return value based on write() man pages.
*
* Return:
* -EINVAL
*/
static ssize_t n_tracesink_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
return -EINVAL;
}
/**
* n_tracesink_datadrain() - Kernel API function used to route
* trace debugging data to user-defined
* port like USB.
*
* @buf: Trace debugging data buffer to write to the tty target
* port. A NULL value will return with no write occurring.
* @count: Size of buf. A value of 0 or a negative number will
* return with no write occurring.
*
* Caveat: If this line discipline does not set the tty it sits
* on top of via an open() call, this API function will not
* call the tty's write() call because it will have no pointer
* to call the write().
*/
void n_tracesink_datadrain(u8 *buf, int count)
{
mutex_lock(&writelock);
if ((buf != NULL) && (count > 0) && (this_tty != NULL))
this_tty->ops->write(this_tty, buf, count);
mutex_unlock(&writelock);
}
EXPORT_SYMBOL_GPL(n_tracesink_datadrain);
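#if 0	/* illustrative sketch: a hypothetical kernel caller, modelled on the
* drivers/misc/pti.c usage described above; example_route_trace() and its
* arguments are assumed names */
static void example_route_trace(u8 *trace_buf, int trace_len)
{
	/* Safe to call even before a successful open(): datadrain checks
	* this_tty under writelock and silently drops the data if unset. */
	n_tracesink_datadrain(trace_buf, trace_len);
}
#endif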
/*
* Flush buffer is not implemented as the ldisc has no internal buffering,
* so tty_driver_flush_buffer() is sufficient for this driver's needs.
*/
/*
* tty_ldisc function operations for this driver.
*/
static struct tty_ldisc_ops tty_n_tracesink = {
.owner = THIS_MODULE,
.magic = TTY_LDISC_MAGIC,
.name = DRIVERNAME,
.open = n_tracesink_open,
.close = n_tracesink_close,
.read = n_tracesink_read,
.write = n_tracesink_write
};
/**
* n_tracesink_init() - module initialisation
*
* Registers this module as a line discipline driver.
*
* Return:
* 0 for success, any other value error.
*/
static int __init n_tracesink_init(void)
{
/* Note N_TRACESINK is defined in linux/tty.h */
int retval = tty_register_ldisc(N_TRACESINK, &tty_n_tracesink);
if (retval < 0)
pr_err("%s: Registration failed: %d\n", __func__, retval);
return retval;
}
/**
* n_tracesink_exit - module unload
*
* Removes this module as a line discipline driver.
*/
static void __exit n_tracesink_exit(void)
{
int retval = tty_unregister_ldisc(N_TRACESINK);
if (retval < 0)
pr_err("%s: Unregistration failed: %d\n", __func__, retval);
}
module_init(n_tracesink_init);
module_exit(n_tracesink_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jay Freyensee");
MODULE_ALIAS_LDISC(N_TRACESINK);
MODULE_DESCRIPTION("Trace sink ldisc driver");
| gpl-2.0 |
LordNerevar/kernel_htc_pyramid | fs/nfs/callback.c | 325 | 10292 | /*
* linux/fs/nfs/callback.c
*
* Copyright (C) 2004 Trond Myklebust
*
* NFSv4 callback handling
*/
#include <linux/completion.h>
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/nfs_fs.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/bc_xprt.h>
#include <net/inet_sock.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_CALLBACK
struct nfs_callback_data {
unsigned int users;
struct svc_serv *serv;
struct svc_rqst *rqst;
struct task_struct *task;
};
static struct nfs_callback_data nfs_callback_info[NFS4_MAX_MINOR_VERSION + 1];
static DEFINE_MUTEX(nfs_callback_mutex);
static struct svc_program nfs4_callback_program;
unsigned int nfs_callback_set_tcpport;
unsigned short nfs_callback_tcpport;
unsigned short nfs_callback_tcpport6;
#define NFS_CALLBACK_MAXPORTNR (65535U)
static int param_set_portnr(const char *val, const struct kernel_param *kp)
{
unsigned long num;
int ret;
if (!val)
return -EINVAL;
ret = strict_strtoul(val, 0, &num);
if (ret == -EINVAL || num > NFS_CALLBACK_MAXPORTNR)
return -EINVAL;
*((unsigned int *)kp->arg) = num;
return 0;
}
static struct kernel_param_ops param_ops_portnr = {
.set = param_set_portnr,
.get = param_get_uint,
};
#define param_check_portnr(name, p) __param_check(name, p, unsigned int);
module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
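/*
* Example (assumed typical usage): the callback port can be pinned from
* the kernel command line or modprobe configuration, e.g.
*
*	nfs.callback_tcpport=32764
*
* which is parsed by param_set_portnr() above and later consumed by
* nfs4_callback_up().
*/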
/*
* This is the NFSv4 callback kernel thread.
*/
static int
nfs4_callback_svc(void *vrqstp)
{
int err, preverr = 0;
struct svc_rqst *rqstp = vrqstp;
set_freezable();
while (!kthread_should_stop()) {
/*
* Listen for a request on the socket
*/
err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
if (err == -EAGAIN || err == -EINTR) {
preverr = err;
continue;
}
if (err < 0) {
if (err != preverr) {
printk(KERN_WARNING "NFS: %s: unexpected error "
"from svc_recv (%d)\n", __func__, err);
preverr = err;
}
schedule_timeout_uninterruptible(HZ);
continue;
}
preverr = err;
svc_process(rqstp);
}
return 0;
}
/*
* Prepare to bring up the NFSv4 callback service
*/
static struct svc_rqst *
nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
{
int ret;
ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET,
nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
if (ret <= 0)
goto out_err;
nfs_callback_tcpport = ret;
dprintk("NFS: Callback listener port = %u (af %u)\n",
nfs_callback_tcpport, PF_INET);
ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6,
nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
if (ret > 0) {
nfs_callback_tcpport6 = ret;
dprintk("NFS: Callback listener port = %u (af %u)\n",
nfs_callback_tcpport6, PF_INET6);
} else if (ret == -EAFNOSUPPORT)
ret = 0;
else
goto out_err;
return svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
out_err:
if (ret == 0)
ret = -ENOMEM;
return ERR_PTR(ret);
}
#if defined(CONFIG_NFS_V4_1)
/*
* The callback service for NFSv4.1 callbacks
*/
static int
nfs41_callback_svc(void *vrqstp)
{
struct svc_rqst *rqstp = vrqstp;
struct svc_serv *serv = rqstp->rq_server;
struct rpc_rqst *req;
int error;
DEFINE_WAIT(wq);
set_freezable();
while (!kthread_should_stop()) {
prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
spin_lock_bh(&serv->sv_cb_lock);
if (!list_empty(&serv->sv_cb_list)) {
req = list_first_entry(&serv->sv_cb_list,
struct rpc_rqst, rq_bc_list);
list_del(&req->rq_bc_list);
spin_unlock_bh(&serv->sv_cb_lock);
finish_wait(&serv->sv_cb_waitq, &wq);
dprintk("Invoking bc_svc_process()\n");
error = bc_svc_process(serv, req, rqstp);
dprintk("bc_svc_process() returned w/ error code= %d\n",
error);
} else {
spin_unlock_bh(&serv->sv_cb_lock);
schedule();
finish_wait(&serv->sv_cb_waitq, &wq);
}
flush_signals(current);
}
return 0;
}
/*
* Bring up the NFSv4.1 callback service
*/
static struct svc_rqst *
nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
{
struct svc_rqst *rqstp;
int ret;
/*
* Create an svc_sock for the back channel service that shares the
* fore channel connection.
* Returns the input port (0) and sets the svc_serv bc_xprt on success
*/
ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0,
SVC_SOCK_ANONYMOUS);
if (ret < 0) {
rqstp = ERR_PTR(ret);
goto out;
}
/*
* Save the svc_serv in the transport so that it can
* be referenced when the session backchannel is initialized
*/
xprt->bc_serv = serv;
INIT_LIST_HEAD(&serv->sv_cb_list);
spin_lock_init(&serv->sv_cb_lock);
init_waitqueue_head(&serv->sv_cb_waitq);
rqstp = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
if (IS_ERR(rqstp)) {
svc_xprt_put(serv->sv_bc_xprt);
serv->sv_bc_xprt = NULL;
}
out:
dprintk("--> %s return %ld\n", __func__,
IS_ERR(rqstp) ? PTR_ERR(rqstp) : 0);
return rqstp;
}
static inline int nfs_minorversion_callback_svc_setup(u32 minorversion,
struct svc_serv *serv, struct rpc_xprt *xprt,
struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
{
if (minorversion) {
*rqstpp = nfs41_callback_up(serv, xprt);
*callback_svc = nfs41_callback_svc;
}
return minorversion;
}
static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
struct nfs_callback_data *cb_info)
{
if (minorversion)
xprt->bc_serv = cb_info->serv;
}
#else
static inline int nfs_minorversion_callback_svc_setup(u32 minorversion,
struct svc_serv *serv, struct rpc_xprt *xprt,
struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
{
return 0;
}
static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
struct nfs_callback_data *cb_info)
{
}
#endif /* CONFIG_NFS_V4_1 */
/*
* Bring up the callback thread if it is not already up.
*/
int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
{
struct svc_serv *serv = NULL;
struct svc_rqst *rqstp;
int (*callback_svc)(void *vrqstp);
struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
char svc_name[12];
int ret = 0;
int minorversion_setup;
struct net *net = &init_net;
mutex_lock(&nfs_callback_mutex);
if (cb_info->users++ || cb_info->task != NULL) {
nfs_callback_bc_serv(minorversion, xprt, cb_info);
goto out;
}
serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
if (!serv) {
ret = -ENOMEM;
goto out_err;
}
ret = svc_bind(serv, net);
if (ret < 0) {
printk(KERN_WARNING "NFS: bind callback service failed\n");
goto out_err;
}
minorversion_setup = nfs_minorversion_callback_svc_setup(minorversion,
serv, xprt, &rqstp, &callback_svc);
if (!minorversion_setup) {
/* v4.0 callback setup */
rqstp = nfs4_callback_up(serv, xprt);
callback_svc = nfs4_callback_svc;
}
if (IS_ERR(rqstp)) {
ret = PTR_ERR(rqstp);
goto out_err;
}
svc_sock_update_bufs(serv);
sprintf(svc_name, "nfsv4.%u-svc", minorversion);
cb_info->serv = serv;
cb_info->rqst = rqstp;
cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
if (IS_ERR(cb_info->task)) {
ret = PTR_ERR(cb_info->task);
svc_exit_thread(cb_info->rqst);
cb_info->rqst = NULL;
cb_info->task = NULL;
goto out_err;
}
out:
/*
* svc_create creates the svc_serv with sv_nrthreads == 1, and then
* svc_prepare_thread increments that. So we need to call svc_destroy
* on both success and failure so that the refcount is 1 when the
* thread exits.
*/
if (serv)
svc_destroy(serv);
mutex_unlock(&nfs_callback_mutex);
return ret;
out_err:
dprintk("NFS: Couldn't create callback socket or server thread; "
"err = %d\n", ret);
cb_info->users--;
if (serv)
svc_shutdown_net(serv, net);
goto out;
}
/*
* Kill the callback thread if it's no longer being used.
*/
void nfs_callback_down(int minorversion)
{
struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
mutex_lock(&nfs_callback_mutex);
cb_info->users--;
if (cb_info->users == 0 && cb_info->task != NULL) {
kthread_stop(cb_info->task);
svc_shutdown_net(cb_info->serv, &init_net);
svc_exit_thread(cb_info->rqst);
cb_info->serv = NULL;
cb_info->rqst = NULL;
cb_info->task = NULL;
}
mutex_unlock(&nfs_callback_mutex);
}
/* Boolean check of RPC_AUTH_GSS principal */
int
check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp)
{
char *p = svc_gss_principal(rqstp);
if (rqstp->rq_authop->flavour != RPC_AUTH_GSS)
return 1;
/* No RPC_AUTH_GSS on NFSv4.1 back channel yet */
if (clp->cl_minorversion != 0)
return 0;
/*
* It might just be a normal user principal, in which case
* userspace won't bother to tell us the name at all.
*/
if (p == NULL)
return 0;
/* Expect a GSS_C_NT_HOSTBASED_NAME like "nfs@serverhostname" */
if (memcmp(p, "nfs@", 4) != 0)
return 0;
p += 4;
if (strcmp(p, clp->cl_hostname) != 0)
return 0;
return 1;
}
/*
* pg_authenticate method for nfsv4 callback threads.
*
* The authflavor has been negotiated, so an incorrect flavor is a server
* bug. Drop packets with incorrect authflavor.
*
* All other checking done after NFS decoding where the nfs_client can be
* found in nfs4_callback_compound
*/
static int nfs_callback_authenticate(struct svc_rqst *rqstp)
{
switch (rqstp->rq_authop->flavour) {
case RPC_AUTH_NULL:
if (rqstp->rq_proc != CB_NULL)
return SVC_DROP;
break;
case RPC_AUTH_GSS:
/* No RPC_AUTH_GSS support yet in NFSv4.1 */
if (svc_is_backchannel(rqstp))
return SVC_DROP;
}
return SVC_OK;
}
/*
* Define NFS4 callback program
*/
static struct svc_version *nfs4_callback_version[] = {
[1] = &nfs4_callback_version1,
[4] = &nfs4_callback_version4,
};
static struct svc_stat nfs4_callback_stats;
static struct svc_program nfs4_callback_program = {
.pg_prog = NFS4_CALLBACK, /* RPC service number */
.pg_nvers = ARRAY_SIZE(nfs4_callback_version), /* Number of entries */
.pg_vers = nfs4_callback_version, /* version table */
.pg_name = "NFSv4 callback", /* service name */
.pg_class = "nfs", /* authentication class */
.pg_stats = &nfs4_callback_stats,
.pg_authenticate = nfs_callback_authenticate,
};
| gpl-2.0 |
wbrambley/Grace-kernel | drivers/gpu/drm/radeon/rs400.c | 2117 | 15028 | /*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "rs400d.h"
/* This file gathers functions specific to rs400 and rs480 */
static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
void rs400_gart_adjust_size(struct radeon_device *rdev)
{
/* Check gart size */
switch (rdev->mc.gtt_size/(1024*1024)) {
case 32:
case 64:
case 128:
case 256:
case 512:
case 1024:
case 2048:
break;
default:
DRM_ERROR("Unable to use IGP GART size %uM\n",
(unsigned)(rdev->mc.gtt_size >> 20));
DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n");
DRM_ERROR("Forcing to 32M GART size\n");
rdev->mc.gtt_size = 32 * 1024 * 1024;
return;
}
}
void rs400_gart_tlb_flush(struct radeon_device *rdev)
{
uint32_t tmp;
unsigned int timeout = rdev->usec_timeout;
WREG32_MC(RS480_GART_CACHE_CNTRL, RS480_GART_CACHE_INVALIDATE);
do {
tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
if ((tmp & RS480_GART_CACHE_INVALIDATE) == 0)
break;
DRM_UDELAY(1);
timeout--;
} while (timeout > 0);
WREG32_MC(RS480_GART_CACHE_CNTRL, 0);
}
int rs400_gart_init(struct radeon_device *rdev)
{
int r;
if (rdev->gart.table.ram.ptr) {
WARN(1, "RS400 GART already initialized\n");
return 0;
}
/* Check gart size */
switch(rdev->mc.gtt_size / (1024 * 1024)) {
case 32:
case 64:
case 128:
case 256:
case 512:
case 1024:
case 2048:
break;
default:
return -EINVAL;
}
/* Initialize common gart structure */
r = radeon_gart_init(rdev);
if (r)
return r;
if (rs400_debugfs_pcie_gart_info_init(rdev))
DRM_ERROR("Failed to register debugfs file for RS400 GART !\n");
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
return radeon_gart_table_ram_alloc(rdev);
}
int rs400_gart_enable(struct radeon_device *rdev)
{
uint32_t size_reg;
uint32_t tmp;
radeon_gart_restore(rdev);
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
/* Check gart size */
switch(rdev->mc.gtt_size / (1024 * 1024)) {
case 32:
size_reg = RS480_VA_SIZE_32MB;
break;
case 64:
size_reg = RS480_VA_SIZE_64MB;
break;
case 128:
size_reg = RS480_VA_SIZE_128MB;
break;
case 256:
size_reg = RS480_VA_SIZE_256MB;
break;
case 512:
size_reg = RS480_VA_SIZE_512MB;
break;
case 1024:
size_reg = RS480_VA_SIZE_1GB;
break;
case 2048:
size_reg = RS480_VA_SIZE_2GB;
break;
default:
return -EINVAL;
}
/* It should be fine to program it to max value */
if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF);
WREG32_MC(RS690_MCCFG_AGP_BASE_2, 0);
} else {
WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
WREG32(RS480_AGP_BASE_2, 0);
}
tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
WREG32(RADEON_BUS_CNTL, tmp);
} else {
WREG32(RADEON_MC_AGP_LOCATION, tmp);
tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
WREG32(RADEON_BUS_CNTL, tmp);
}
/* The table must lie in the 32-bit address space; address bits
* 32-39 are folded into register bits 4-11 below. */
tmp = (u32)rdev->gart.table_addr & 0xfffff000;
tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4;
WREG32_MC(RS480_GART_BASE, tmp);
/* TODO: more tweaking here */
WREG32_MC(RS480_GART_FEATURE_ID,
(RS480_TLB_ENABLE |
RS480_GTW_LAC_EN | RS480_1LEVEL_GART));
/* Disable snooping */
WREG32_MC(RS480_AGP_MODE_CNTL,
(1 << RS480_REQ_TYPE_SNOOP_SHIFT) | RS480_REQ_TYPE_SNOOP_DIS);
/* Disable AGP mode */
/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
* AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
WREG32_MC(RS480_MC_MISC_CNTL,
(RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
} else {
WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
}
/* Enable gart */
WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
rs400_gart_tlb_flush(rdev);
rdev->gart.ready = true;
return 0;
}
void rs400_gart_disable(struct radeon_device *rdev)
{
uint32_t tmp;
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
}
void rs400_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
rs400_gart_disable(rdev);
radeon_gart_table_ram_free(rdev);
}
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
uint32_t entry;
if (i < 0 || i >= rdev->gart.num_gpu_pages) {
return -EINVAL;
}
entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4) |
RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
entry = cpu_to_le32(entry);
rdev->gart.table.ram.ptr[i] = entry;
return 0;
}
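/*
* Worked example (illustrative values): for addr = 0x123456000 with 4K
* pages, lower_32_bits(addr) & PAGE_MASK gives 0x23456000, address bits
* 32-39 (here 0x1) land in entry bits 4-11 (0x10), and the R/W flags add
* 0xc, so entry = 0x2345601c before the cpu_to_le32() conversion.
*/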
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
uint32_t tmp;
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
tmp = RREG32(RADEON_MC_STATUS);
if (tmp & RADEON_MC_IDLE) {
return 0;
}
DRM_UDELAY(1);
}
return -1;
}
void rs400_gpu_init(struct radeon_device *rdev)
{
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
if (rs400_mc_wait_for_idle(rdev)) {
printk(KERN_WARNING "rs400: Failed to wait MC idle while "
"programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
}
}
void rs400_mc_init(struct radeon_device *rdev)
{
u64 base;
rs400_gart_adjust_size(rdev);
rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
r100_vram_init_sizes(rdev);
base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
radeon_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
}
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
uint32_t r;
WREG32(RS480_NB_MC_INDEX, reg & 0xff);
r = RREG32(RS480_NB_MC_DATA);
WREG32(RS480_NB_MC_INDEX, 0xff);
return r;
}
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
WREG32(RS480_NB_MC_INDEX, ((reg) & 0xff) | RS480_NB_MC_IND_WR_EN);
WREG32(RS480_NB_MC_DATA, (v));
WREG32(RS480_NB_MC_INDEX, 0xff);
}
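#if 0	/* illustrative sketch of the indirect MC accessors above;
* example_mc_rmw() is a hypothetical helper */
static void example_mc_rmw(struct radeon_device *rdev)
{
	/* Read-modify-write an MC register through the index/data pair.
	* Both accessors park the index at 0xff afterwards, so a stray
	* access cannot silently hit an MC register. */
	uint32_t tmp = rs400_mc_rreg(rdev, RS480_GART_FEATURE_ID);
	rs400_mc_wreg(rdev, RS480_GART_FEATURE_ID, tmp | RS480_TLB_ENABLE);
}
#endif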
#if defined(CONFIG_DEBUG_FS)
static int rs400_debugfs_gart_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t tmp;
tmp = RREG32(RADEON_HOST_PATH_CNTL);
seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
tmp = RREG32(RADEON_BUS_CNTL);
seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
seq_printf(m, "AIC_CTRL_SCRATCH 0x%08x\n", tmp);
if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) {
tmp = RREG32_MC(RS690_MCCFG_AGP_BASE);
seq_printf(m, "MCCFG_AGP_BASE 0x%08x\n", tmp);
tmp = RREG32_MC(RS690_MCCFG_AGP_BASE_2);
seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
tmp = RREG32(RS690_HDP_FB_LOCATION);
seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
} else {
tmp = RREG32(RADEON_AGP_BASE);
seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
tmp = RREG32(RS480_AGP_BASE_2);
seq_printf(m, "AGP_BASE_2 0x%08x\n", tmp);
tmp = RREG32(RADEON_MC_AGP_LOCATION);
seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
}
tmp = RREG32_MC(RS480_GART_BASE);
seq_printf(m, "GART_BASE 0x%08x\n", tmp);
tmp = RREG32_MC(RS480_GART_FEATURE_ID);
seq_printf(m, "GART_FEATURE_ID 0x%08x\n", tmp);
tmp = RREG32_MC(RS480_AGP_MODE_CNTL);
seq_printf(m, "AGP_MODE_CONTROL 0x%08x\n", tmp);
tmp = RREG32_MC(RS480_MC_MISC_CNTL);
seq_printf(m, "MC_MISC_CNTL 0x%08x\n", tmp);
tmp = RREG32_MC(0x5F);
seq_printf(m, "MC_MISC_UMA_CNTL 0x%08x\n", tmp);
tmp = RREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE);
seq_printf(m, "AGP_ADDRESS_SPACE_SIZE 0x%08x\n", tmp);
tmp = RREG32_MC(RS480_GART_CACHE_CNTRL);
seq_printf(m, "GART_CACHE_CNTRL 0x%08x\n", tmp);
tmp = RREG32_MC(0x3B);
seq_printf(m, "MC_GART_ERROR_ADDRESS 0x%08x\n", tmp);
tmp = RREG32_MC(0x3C);
seq_printf(m, "MC_GART_ERROR_ADDRESS_HI 0x%08x\n", tmp);
tmp = RREG32_MC(0x30);
seq_printf(m, "GART_ERROR_0 0x%08x\n", tmp);
tmp = RREG32_MC(0x31);
seq_printf(m, "GART_ERROR_1 0x%08x\n", tmp);
tmp = RREG32_MC(0x32);
seq_printf(m, "GART_ERROR_2 0x%08x\n", tmp);
tmp = RREG32_MC(0x33);
seq_printf(m, "GART_ERROR_3 0x%08x\n", tmp);
tmp = RREG32_MC(0x34);
seq_printf(m, "GART_ERROR_4 0x%08x\n", tmp);
tmp = RREG32_MC(0x35);
seq_printf(m, "GART_ERROR_5 0x%08x\n", tmp);
tmp = RREG32_MC(0x36);
seq_printf(m, "GART_ERROR_6 0x%08x\n", tmp);
tmp = RREG32_MC(0x37);
seq_printf(m, "GART_ERROR_7 0x%08x\n", tmp);
return 0;
}
static struct drm_info_list rs400_gart_info_list[] = {
{"rs400_gart_info", rs400_debugfs_gart_info, 0, NULL},
};
#endif
static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1);
#else
return 0;
#endif
}
void rs400_mc_program(struct radeon_device *rdev)
{
struct r100_mc_save save;
/* Stops all mc clients */
r100_mc_stop(rdev, &save);
/* Wait for mc idle */
if (rs400_mc_wait_for_idle(rdev))
dev_warn(rdev->dev, "rs400: Wait MC idle timeout before updating MC.\n");
WREG32(R_000148_MC_FB_LOCATION,
S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
r100_mc_resume(rdev, &save);
}
static int rs400_startup(struct radeon_device *rdev)
{
int r;
r100_set_common_regs(rdev);
rs400_mc_program(rdev);
/* Resume clock */
r300_clock_startup(rdev);
/* Initialize GPU configuration (# pipes, ...) */
rs400_gpu_init(rdev);
r100_enable_bm(rdev);
/* Initialize GART (initialize after TTM so we can allocate
* memory through TTM but finalize after TTM) */
r = rs400_gart_enable(rdev);
if (r)
return r;
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
return r;
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
return 0;
}
int rs400_resume(struct radeon_device *rdev)
{
/* Make sure the GART is not working */
rs400_gart_disable(rdev);
/* Resume clock before doing reset */
r300_clock_startup(rdev);
/* setup MC before calling post tables */
rs400_mc_program(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
}
/* post */
radeon_combios_asic_init(rdev->ddev);
/* Resume clock after posting */
r300_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
return rs400_startup(rdev);
}
int rs400_suspend(struct radeon_device *rdev)
{
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
rs400_gart_disable(rdev);
return 0;
}
void rs400_fini(struct radeon_device *rdev)
{
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
radeon_gem_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
rdev->bios = NULL;
}
int rs400_init(struct radeon_device *rdev)
{
int r;
/* Disable VGA */
r100_vga_render_disable(rdev);
/* Initialize scratch registers */
radeon_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
/* restore some register to sane defaults */
r100_restore_sanity(rdev);
/* BIOS*/
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
return -EINVAL;
}
if (rdev->is_atom_bios) {
dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
return -EINVAL;
} else {
r = radeon_combios_init(rdev);
if (r)
return r;
}
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev,
"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
if (radeon_boot_test_post_card(rdev) == false)
return -EINVAL;
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
/* initialize memory controller */
rs400_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
return r;
r = radeon_irq_kms_init(rdev);
if (r)
return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
return r;
r = rs400_gart_init(rdev);
if (r)
return r;
r300_set_reg_safe(rdev);
rdev->accel_working = true;
r = rs400_startup(rdev);
if (r) {
/* Something went wrong with the accel init, so stop accel */
dev_err(rdev->dev, "Disabling GPU acceleration\n");
r100_cp_fini(rdev);
radeon_wb_fini(rdev);
r100_ib_fini(rdev);
rs400_gart_fini(rdev);
radeon_irq_kms_fini(rdev);
rdev->accel_working = false;
}
return 0;
}
| gpl-2.0 |
airk000/kernel_htc_7x30 | arch/arm/mach-msm/hotplug.c | 2373 | 1876 | /*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
extern volatile int pen_release;
static inline void cpu_enter_lowpower(void)
{
/* Just flush the cache. Changing the coherency is not yet
* available on msm. */
flush_cache_all();
}
static inline void cpu_leave_lowpower(void)
{
}
static inline void platform_do_lowpower(unsigned int cpu)
{
/* Just enter wfi for now. TODO: Properly shut off the cpu. */
for (;;) {
/*
* here's the WFI
*/
asm("wfi"
:
:
: "memory", "cc");
if (pen_release == cpu) {
/*
* OK, proper wakeup, we're done
*/
break;
}
/*
* getting here, means that we have come out of WFI without
* having been woken up - this shouldn't happen
*
* The trouble is, letting people know about this is not really
* possible, since we are currently running incoherently, and
* therefore cannot safely call printk() or anything else
*/
pr_debug("CPU%u: spurious wakeup call\n", cpu);
}
}
int platform_cpu_kill(unsigned int cpu)
{
return 1;
}
/*
* platform-specific code to shutdown a CPU
*
* Called with IRQs disabled
*/
void platform_cpu_die(unsigned int cpu)
{
/*
* we're ready for shutdown now, so do it
*/
cpu_enter_lowpower();
platform_do_lowpower(cpu);
/*
* bring this CPU back into the world of cache
* coherency, and then restore interrupts
*/
cpu_leave_lowpower();
}
int platform_cpu_disable(unsigned int cpu)
{
/*
* we don't allow CPU 0 to be shutdown (it is still too special
* e.g. clock tick interrupts)
*/
return cpu == 0 ? -EPERM : 0;
}
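/*
* Illustrative (assumed) usage: these hooks are driven by the generic ARM
* CPU hotplug path when a core is taken offline from userspace, e.g.
*
*	echo 0 > /sys/devices/system/cpu/cpu1/online
*
* which ultimately runs platform_cpu_die() on the dying CPU.
*/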
| gpl-2.0 |
ARMP/ARM-Project | drivers/char/bsr.c | 2373 | 9026 | /* IBM POWER Barrier Synchronization Register Driver
*
* Copyright IBM Corporation 2008
*
* Author: Sonny Rao <sonnyrao@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/pgtable.h>
#include <asm/io.h>
/*
This driver exposes a special register which can be used for fast
synchronization across a large SMP machine. The hardware is exposed
as an array of bytes where each process will write to one of the bytes to
indicate it has finished the current stage and this update is broadcast to
all processors without having to bounce a cacheline between them. In
POWER5 and POWER6 there is one of these registers per SMP, but it is
presented in two forms; first, it is given as a whole and then as a number
of smaller registers which alias to parts of the single whole register.
This can potentially allow multiple groups of processes to each have their
own private synchronization device.
Note that this hardware *must* be written to using *only* single byte writes.
It may be read using 1, 2, 4, or 8 byte loads, which must be aligned since
this region is treated as cache-inhibited. Processes should also use a
full sync before and after writing to the BSR to ensure all stores and
the BSR update have made it to all chips in the system.
*/
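/*
* Illustrative userspace sketch (assumed usage, not from this file; the
* device name and slot index are hypothetical):
*
*	int fd = open("/dev/bsr64_0", O_RDWR);
*	volatile uint8_t *bsr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
*				     MAP_SHARED, fd, 0);
*	asm volatile("sync");
*	bsr[my_slot] = 1;		// single-byte stores only
*	asm volatile("sync");
*/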
/* This is an arbitrary number; up to POWER6 it's been 17 or fewer */
#define BSR_MAX_DEVS (32)
struct bsr_dev {
u64 bsr_addr; /* Real address */
u64 bsr_len; /* length of mem region we can map */
unsigned bsr_bytes; /* size of the BSR reg itself */
unsigned bsr_stride; /* interval at which BSR repeats in the page */
unsigned bsr_type; /* maps to enum below */
unsigned bsr_num; /* bsr id number for its type */
int bsr_minor;
struct list_head bsr_list;
dev_t bsr_dev;
struct cdev bsr_cdev;
struct device *bsr_device;
char bsr_name[32];
};
static unsigned total_bsr_devs;
static struct list_head bsr_devs = LIST_HEAD_INIT(bsr_devs);
static struct class *bsr_class;
static int bsr_major;
enum {
BSR_8 = 0,
BSR_16 = 1,
BSR_64 = 2,
BSR_128 = 3,
BSR_4096 = 4,
BSR_UNKNOWN = 5,
BSR_MAX = 6,
};
static unsigned bsr_types[BSR_MAX];
static ssize_t
bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", bsr_dev->bsr_bytes);
}
static ssize_t
bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", bsr_dev->bsr_stride);
}
static ssize_t
bsr_len_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
return sprintf(buf, "%llu\n", bsr_dev->bsr_len);
}
static struct device_attribute bsr_dev_attrs[] = {
__ATTR(bsr_size, S_IRUGO, bsr_size_show, NULL),
__ATTR(bsr_stride, S_IRUGO, bsr_stride_show, NULL),
__ATTR(bsr_length, S_IRUGO, bsr_len_show, NULL),
__ATTR_NULL
};
static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
{
unsigned long size = vma->vm_end - vma->vm_start;
struct bsr_dev *dev = filp->private_data;
int ret;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
/* check for the case of a small BSR device and map one 4k page for it */
if (dev->bsr_len < PAGE_SIZE && size == PAGE_SIZE)
ret = remap_4k_pfn(vma, vma->vm_start, dev->bsr_addr >> 12,
vma->vm_page_prot);
else if (size <= dev->bsr_len)
ret = io_remap_pfn_range(vma, vma->vm_start,
dev->bsr_addr >> PAGE_SHIFT,
size, vma->vm_page_prot);
else
return -EINVAL;
if (ret)
return -EAGAIN;
return 0;
}
static int bsr_open(struct inode * inode, struct file * filp)
{
struct cdev *cdev = inode->i_cdev;
struct bsr_dev *dev = container_of(cdev, struct bsr_dev, bsr_cdev);
filp->private_data = dev;
return 0;
}
static const struct file_operations bsr_fops = {
.owner = THIS_MODULE,
.mmap = bsr_mmap,
.open = bsr_open,
.llseek = noop_llseek,
};
static void bsr_cleanup_devs(void)
{
struct bsr_dev *cur, *n;
list_for_each_entry_safe(cur, n, &bsr_devs, bsr_list) {
if (cur->bsr_device) {
cdev_del(&cur->bsr_cdev);
device_del(cur->bsr_device);
}
list_del(&cur->bsr_list);
kfree(cur);
}
}
static int bsr_add_node(struct device_node *bn)
{
int bsr_stride_len, bsr_bytes_len, num_bsr_devs;
const u32 *bsr_stride;
const u32 *bsr_bytes;
unsigned i;
int ret = -ENODEV;
bsr_stride = of_get_property(bn, "ibm,lock-stride", &bsr_stride_len);
bsr_bytes = of_get_property(bn, "ibm,#lock-bytes", &bsr_bytes_len);
if (!bsr_stride || !bsr_bytes ||
(bsr_stride_len != bsr_bytes_len)) {
printk(KERN_ERR "bsr of-node has missing/incorrect property\n");
return ret;
}
num_bsr_devs = bsr_bytes_len / sizeof(u32);
for (i = 0 ; i < num_bsr_devs; i++) {
struct bsr_dev *cur = kzalloc(sizeof(struct bsr_dev),
GFP_KERNEL);
struct resource res;
int result;
if (!cur) {
printk(KERN_ERR "Unable to alloc bsr dev\n");
ret = -ENOMEM;
goto out_err;
}
result = of_address_to_resource(bn, i, &res);
if (result < 0) {
printk(KERN_ERR "bsr of-node has invalid reg property, skipping\n");
kfree(cur);
continue;
}
cur->bsr_minor = i + total_bsr_devs;
cur->bsr_addr = res.start;
cur->bsr_len = res.end - res.start + 1;
cur->bsr_bytes = bsr_bytes[i];
cur->bsr_stride = bsr_stride[i];
cur->bsr_dev = MKDEV(bsr_major, i + total_bsr_devs);
/* if we have a bsr_len of > 4k and less than PAGE_SIZE (64k pages) */
/* we can only map 4k of it, so only advertise the 4k in sysfs */
if (cur->bsr_len > 4096 && cur->bsr_len < PAGE_SIZE)
cur->bsr_len = 4096;
switch(cur->bsr_bytes) {
case 8:
cur->bsr_type = BSR_8;
break;
case 16:
cur->bsr_type = BSR_16;
break;
case 64:
cur->bsr_type = BSR_64;
break;
case 128:
cur->bsr_type = BSR_128;
break;
case 4096:
cur->bsr_type = BSR_4096;
break;
default:
cur->bsr_type = BSR_UNKNOWN;
}
cur->bsr_num = bsr_types[cur->bsr_type];
snprintf(cur->bsr_name, 32, "bsr%d_%d",
cur->bsr_bytes, cur->bsr_num);
cdev_init(&cur->bsr_cdev, &bsr_fops);
result = cdev_add(&cur->bsr_cdev, cur->bsr_dev, 1);
if (result) {
kfree(cur);
goto out_err;
}
cur->bsr_device = device_create(bsr_class, NULL, cur->bsr_dev,
cur, cur->bsr_name);
if (IS_ERR(cur->bsr_device)) {
printk(KERN_ERR "device_create failed for %s\n",
cur->bsr_name);
cdev_del(&cur->bsr_cdev);
kfree(cur);
goto out_err;
}
bsr_types[cur->bsr_type] = cur->bsr_num + 1;
list_add_tail(&cur->bsr_list, &bsr_devs);
}
total_bsr_devs += num_bsr_devs;
return 0;
out_err:
bsr_cleanup_devs();
return ret;
}
static int bsr_create_devs(struct device_node *bn)
{
int ret;
while (bn) {
ret = bsr_add_node(bn);
if (ret) {
of_node_put(bn);
return ret;
}
bn = of_find_compatible_node(bn, NULL, "ibm,bsr");
}
return 0;
}
static int __init bsr_init(void)
{
struct device_node *np;
dev_t bsr_dev;
int ret = -ENODEV;
int result;
np = of_find_compatible_node(NULL, NULL, "ibm,bsr");
if (!np)
goto out_err;
bsr_class = class_create(THIS_MODULE, "bsr");
if (IS_ERR(bsr_class)) {
printk(KERN_ERR "class_create() failed for bsr_class\n");
goto out_err_1;
}
bsr_class->dev_attrs = bsr_dev_attrs;
result = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
bsr_major = MAJOR(bsr_dev);
if (result < 0) {
printk(KERN_ERR "alloc_chrdev_region() failed for bsr\n");
goto out_err_2;
}
if ((ret = bsr_create_devs(np)) < 0) {
np = NULL;
goto out_err_3;
}
return 0;
out_err_3:
unregister_chrdev_region(bsr_dev, BSR_MAX_DEVS);
out_err_2:
class_destroy(bsr_class);
out_err_1:
of_node_put(np);
out_err:
return ret;
}
static void __exit bsr_exit(void)
{
bsr_cleanup_devs();
if (bsr_class)
class_destroy(bsr_class);
if (bsr_major)
unregister_chrdev_region(MKDEV(bsr_major, 0), BSR_MAX_DEVS);
}
module_init(bsr_init);
module_exit(bsr_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sonny Rao <sonnyrao@us.ibm.com>");
| gpl-2.0 |
LiquidSmooth-Devices/kernel_samsung_jf | arch/x86/platform/mrst/mrst.c | 4677 | 27802 | /*
* mrst.c: Intel Moorestown platform specific setup code
*
* (C) Copyright 2008 Intel Corporation
* Author: Jacob Pan (jacob.jun.pan@intel.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*/
#define pr_fmt(fmt) "mrst: " fmt
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/scatterlist.h>
#include <linux/sfi.h>
#include <linux/intel_pmic_gpio.h>
#include <linux/spi/spi.h>
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/mfd/intel_msic.h>
#include <linux/gpio.h>
#include <linux/i2c/tc35876x.h>
#include <asm/setup.h>
#include <asm/mpspec_def.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/mrst.h>
#include <asm/mrst-vrtc.h>
#include <asm/io.h>
#include <asm/i8259.h>
#include <asm/intel_scu_ipc.h>
#include <asm/apb_timer.h>
#include <asm/reboot.h>
/*
* The clockevent devices on Moorestown/Medfield can be APBT or LAPIC clocks;
* the cmdline option x86_mrst_timer can be used to override the configuration
* to prefer one or the other.
* At runtime, there are basically three timer configurations:
* 1. per cpu apbt clock only
* 2. per cpu always-on lapic clocks only, this is Penwell/Medfield only
* 3. per cpu lapic clock (C3STOP) and one apbt clock, with broadcast.
*
* By default (without the cmdline option), platform code first detects the
* cpu type to see if we are on Lincroft or Penwell, then sets up lapic or
* apbt clocks accordingly.
* I.e. by default, Medfield uses configuration #2, Moorestown uses #1.
* Config #3 is supported but not recommended on Medfield.
*
* Rating and feature summary:
* lapic (with C3STOP) --------- 100
* apbt (always-on) ------------ 110
* lapic (always-on,ARAT) ------ 150
*/
__cpuinitdata enum mrst_timer_options mrst_timer_options;
static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
enum mrst_cpu_type __mrst_cpu_chip;
EXPORT_SYMBOL_GPL(__mrst_cpu_chip);
int sfi_mtimer_num;
struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
EXPORT_SYMBOL_GPL(sfi_mrtc_array);
int sfi_mrtc_num;
static void mrst_power_off(void)
{
}
static void mrst_reboot(void)
{
intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
}
/* parse all the mtimer info to a static mtimer array */
static int __init sfi_parse_mtmr(struct sfi_table_header *table)
{
struct sfi_table_simple *sb;
struct sfi_timer_table_entry *pentry;
struct mpc_intsrc mp_irq;
int totallen;
sb = (struct sfi_table_simple *)table;
if (!sfi_mtimer_num) {
sfi_mtimer_num = SFI_GET_NUM_ENTRIES(sb,
struct sfi_timer_table_entry);
pentry = (struct sfi_timer_table_entry *) sb->pentry;
totallen = sfi_mtimer_num * sizeof(*pentry);
memcpy(sfi_mtimer_array, pentry, totallen);
}
pr_debug("SFI MTIMER info (num = %d):\n", sfi_mtimer_num);
pentry = sfi_mtimer_array;
for (totallen = 0; totallen < sfi_mtimer_num; totallen++, pentry++) {
pr_debug("timer[%d]: paddr = 0x%08x, freq = %dHz,"
" irq = %d\n", totallen, (u32)pentry->phys_addr,
pentry->freq_hz, pentry->irq);
if (!pentry->irq)
continue;
mp_irq.type = MP_INTSRC;
mp_irq.irqtype = mp_INT;
/* triggering mode edge bit 2-3, active high polarity bit 0-1 */
mp_irq.irqflag = 5;
mp_irq.srcbus = MP_BUS_ISA;
mp_irq.srcbusirq = pentry->irq; /* IRQ */
mp_irq.dstapic = MP_APIC_ALL;
mp_irq.dstirq = pentry->irq;
mp_save_irq(&mp_irq);
}
return 0;
}
struct sfi_timer_table_entry *sfi_get_mtmr(int hint)
{
int i;
if (hint < sfi_mtimer_num) {
if (!sfi_mtimer_usage[hint]) {
pr_debug("hint taken for timer %d irq %d\n",\
hint, sfi_mtimer_array[hint].irq);
sfi_mtimer_usage[hint] = 1;
return &sfi_mtimer_array[hint];
}
}
/* take the first timer available */
for (i = 0; i < sfi_mtimer_num;) {
if (!sfi_mtimer_usage[i]) {
sfi_mtimer_usage[i] = 1;
return &sfi_mtimer_array[i];
}
i++;
}
return NULL;
}
void sfi_free_mtmr(struct sfi_timer_table_entry *mtmr)
{
int i;
for (i = 0; i < sfi_mtimer_num;) {
if (mtmr->irq == sfi_mtimer_array[i].irq) {
sfi_mtimer_usage[i] = 0;
return;
}
i++;
}
}
/* parse all the mrtc info to a global mrtc array */
int __init sfi_parse_mrtc(struct sfi_table_header *table)
{
struct sfi_table_simple *sb;
struct sfi_rtc_table_entry *pentry;
struct mpc_intsrc mp_irq;
int totallen;
sb = (struct sfi_table_simple *)table;
if (!sfi_mrtc_num) {
sfi_mrtc_num = SFI_GET_NUM_ENTRIES(sb,
struct sfi_rtc_table_entry);
pentry = (struct sfi_rtc_table_entry *)sb->pentry;
totallen = sfi_mrtc_num * sizeof(*pentry);
memcpy(sfi_mrtc_array, pentry, totallen);
}
pr_debug("SFI RTC info (num = %d):\n", sfi_mrtc_num);
pentry = sfi_mrtc_array;
for (totallen = 0; totallen < sfi_mrtc_num; totallen++, pentry++) {
pr_debug("RTC[%d]: paddr = 0x%08x, irq = %d\n",
totallen, (u32)pentry->phys_addr, pentry->irq);
mp_irq.type = MP_INTSRC;
mp_irq.irqtype = mp_INT;
mp_irq.irqflag = 0xf; /* level trigger and active low */
mp_irq.srcbus = MP_BUS_ISA;
mp_irq.srcbusirq = pentry->irq; /* IRQ */
mp_irq.dstapic = MP_APIC_ALL;
mp_irq.dstirq = pentry->irq;
mp_save_irq(&mp_irq);
}
return 0;
}
static unsigned long __init mrst_calibrate_tsc(void)
{
unsigned long fast_calibrate;
u32 lo, hi, ratio, fsb;
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
ratio = (hi >> 8) & 0x1f;
pr_debug("ratio is %d\n", ratio);
if (!ratio) {
pr_err("read a zero ratio, should be incorrect!\n");
pr_err("force tsc ratio to 16 ...\n");
ratio = 16;
}
rdmsr(MSR_FSB_FREQ, lo, hi);
if ((lo & 0x7) == 0x7)
fsb = PENWELL_FSB_FREQ_83SKU;
else
fsb = PENWELL_FSB_FREQ_100SKU;
fast_calibrate = ratio * fsb;
pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
lapic_timer_frequency = fsb * 1000 / HZ;
/* mark tsc clocksource as reliable */
set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
if (fast_calibrate)
return fast_calibrate;
return 0;
}
static void __init mrst_time_init(void)
{
sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
switch (mrst_timer_options) {
case MRST_TIMER_APBT_ONLY:
break;
case MRST_TIMER_LAPIC_APBT:
x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
break;
default:
if (!boot_cpu_has(X86_FEATURE_ARAT))
break;
x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
return;
}
/* we need at least one APB timer */
pre_init_apic_IRQ0();
apbt_time_init();
}
static void __cpuinit mrst_arch_setup(void)
{
if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
__mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
else {
pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n",
boot_cpu_data.x86, boot_cpu_data.x86_model);
__mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
}
}
/* MID systems don't have i8042 controller */
static int mrst_i8042_detect(void)
{
return 0;
}
/*
* Moorestown does not have an external NMI source nor port 0x61 to report
* NMI status. The possible NMI sources are from the PMU as a result of the
* NMI watchdog or lock debug. Reading io port 0x61 results in 0xff, which
* misleads the NMI handler.
*/
static unsigned char mrst_get_nmi_reason(void)
{
return 0;
}
/*
* Moorestown specific x86_init function overrides and early setup
* calls.
*/
void __init x86_mrst_early_setup(void)
{
x86_init.resources.probe_roms = x86_init_noop;
x86_init.resources.reserve_resources = x86_init_noop;
x86_init.timers.timer_init = mrst_time_init;
x86_init.timers.setup_percpu_clockev = x86_init_noop;
x86_init.irqs.pre_vector_init = x86_init_noop;
x86_init.oem.arch_setup = mrst_arch_setup;
x86_cpuinit.setup_percpu_clockev = apbt_setup_secondary_clock;
x86_platform.calibrate_tsc = mrst_calibrate_tsc;
x86_platform.i8042_detect = mrst_i8042_detect;
x86_init.timers.wallclock_init = mrst_rtc_init;
x86_platform.get_nmi_reason = mrst_get_nmi_reason;
x86_init.pci.init = pci_mrst_init;
x86_init.pci.fixup_irqs = x86_init_noop;
legacy_pic = &null_legacy_pic;
/* Moorestown specific power_off/restart method */
pm_power_off = mrst_power_off;
machine_ops.emergency_restart = mrst_reboot;
/* Avoid searching for BIOS MP tables */
x86_init.mpparse.find_smp_config = x86_init_noop;
x86_init.mpparse.get_smp_config = x86_init_uint_noop;
set_bit(MP_BUS_ISA, mp_bus_not_pci);
}
/*
* If the user does not want to use the per-CPU APB timer, just give it a
* lower rating than the local APIC timer and skip the late per-CPU timer init.
*/
static inline int __init setup_x86_mrst_timer(char *arg)
{
if (!arg)
return -EINVAL;
if (strcmp("apbt_only", arg) == 0)
mrst_timer_options = MRST_TIMER_APBT_ONLY;
else if (strcmp("lapic_and_apbt", arg) == 0)
mrst_timer_options = MRST_TIMER_LAPIC_APBT;
else {
pr_warning("X86 MRST timer option %s not recognised"
" use x86_mrst_timer=apbt_only or lapic_and_apbt\n",
arg);
return -EINVAL;
}
return 0;
}
__setup("x86_mrst_timer=", setup_x86_mrst_timer);
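/*
* Example (from the parser above): booting with
*
*	x86_mrst_timer=apbt_only
*
* forces configuration #1, while x86_mrst_timer=lapic_and_apbt selects
* configuration #3 described at the top of this file.
*/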
/*
* Parse the GPIO table first, since the DEVS table will need it
* to map the pin name to the actual pin.
*/
static struct sfi_gpio_table_entry *gpio_table;
static int gpio_num_entry;
static int __init sfi_parse_gpio(struct sfi_table_header *table)
{
struct sfi_table_simple *sb;
struct sfi_gpio_table_entry *pentry;
int num, i;
if (gpio_table)
return 0;
sb = (struct sfi_table_simple *)table;
num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
pentry = (struct sfi_gpio_table_entry *)sb->pentry;
gpio_table = (struct sfi_gpio_table_entry *)
kmalloc(num * sizeof(*pentry), GFP_KERNEL);
if (!gpio_table)
return -1;
memcpy(gpio_table, pentry, num * sizeof(*pentry));
gpio_num_entry = num;
pr_debug("GPIO pin info:\n");
for (i = 0; i < num; i++, pentry++)
pr_debug("info[%2d]: controller = %16.16s, pin_name = %16.16s,"
" pin = %d\n", i,
pentry->controller_name,
pentry->pin_name,
pentry->pin_no);
return 0;
}
static int get_gpio_by_name(const char *name)
{
struct sfi_gpio_table_entry *pentry = gpio_table;
int i;
if (!pentry)
return -1;
for (i = 0; i < gpio_num_entry; i++, pentry++) {
if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN))
return pentry->pin_no;
}
return -1;
}
/*
* Here defines the array of devices platform data that IAFW would export
* through SFI "DEVS" table, we use name and type to match the device and
* its platform data.
*/
struct devs_id {
char name[SFI_NAME_LEN + 1];
u8 type;
u8 delay;
void *(*get_platform_data)(void *info);
};
/* the offset for the mapping of global gpio pin to irq */
#define MRST_IRQ_OFFSET 0x100
static void __init *pmic_gpio_platform_data(void *info)
{
static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
int gpio_base = get_gpio_by_name("pmic_gpio_base");
if (gpio_base == -1)
gpio_base = 64;
pmic_gpio_pdata.gpio_base = gpio_base;
pmic_gpio_pdata.irq_base = gpio_base + MRST_IRQ_OFFSET;
pmic_gpio_pdata.gpiointr = 0xffffeff8;
return &pmic_gpio_pdata;
}
static void __init *max3111_platform_data(void *info)
{
struct spi_board_info *spi_info = info;
int intr = get_gpio_by_name("max3111_int");
spi_info->mode = SPI_MODE_0;
if (intr == -1)
return NULL;
spi_info->irq = intr + MRST_IRQ_OFFSET;
return NULL;
}
/* we have multiple max7315 on the board ... */
#define MAX7315_NUM 2
static void __init *max7315_platform_data(void *info)
{
static struct pca953x_platform_data max7315_pdata[MAX7315_NUM];
static int nr;
struct pca953x_platform_data *max7315 = &max7315_pdata[nr];
struct i2c_board_info *i2c_info = info;
int gpio_base, intr;
char base_pin_name[SFI_NAME_LEN + 1];
char intr_pin_name[SFI_NAME_LEN + 1];
if (nr == MAX7315_NUM) {
pr_err("too many max7315s, we only support %d\n",
MAX7315_NUM);
return NULL;
}
/* we have several max7315s on the board, so we only need to load
* several instances of the same pca953x driver to cover them
*/
strcpy(i2c_info->type, "max7315");
if (nr++) {
sprintf(base_pin_name, "max7315_%d_base", nr);
sprintf(intr_pin_name, "max7315_%d_int", nr);
} else {
strcpy(base_pin_name, "max7315_base");
strcpy(intr_pin_name, "max7315_int");
}
gpio_base = get_gpio_by_name(base_pin_name);
intr = get_gpio_by_name(intr_pin_name);
if (gpio_base == -1)
return NULL;
max7315->gpio_base = gpio_base;
if (intr != -1) {
i2c_info->irq = intr + MRST_IRQ_OFFSET;
max7315->irq_base = gpio_base + MRST_IRQ_OFFSET;
} else {
i2c_info->irq = -1;
max7315->irq_base = -1;
}
return max7315;
}
static void *tca6416_platform_data(void *info)
{
static struct pca953x_platform_data tca6416;
struct i2c_board_info *i2c_info = info;
int gpio_base, intr;
char base_pin_name[SFI_NAME_LEN + 1];
char intr_pin_name[SFI_NAME_LEN + 1];
strcpy(i2c_info->type, "tca6416");
strcpy(base_pin_name, "tca6416_base");
strcpy(intr_pin_name, "tca6416_int");
gpio_base = get_gpio_by_name(base_pin_name);
intr = get_gpio_by_name(intr_pin_name);
if (gpio_base == -1)
return NULL;
tca6416.gpio_base = gpio_base;
if (intr != -1) {
i2c_info->irq = intr + MRST_IRQ_OFFSET;
tca6416.irq_base = gpio_base + MRST_IRQ_OFFSET;
} else {
i2c_info->irq = -1;
tca6416.irq_base = -1;
}
return &tca6416;
}
static void *mpu3050_platform_data(void *info)
{
struct i2c_board_info *i2c_info = info;
int intr = get_gpio_by_name("mpu3050_int");
if (intr == -1)
return NULL;
i2c_info->irq = intr + MRST_IRQ_OFFSET;
return NULL;
}
static void __init *emc1403_platform_data(void *info)
{
static short intr2nd_pdata;
struct i2c_board_info *i2c_info = info;
int intr = get_gpio_by_name("thermal_int");
int intr2nd = get_gpio_by_name("thermal_alert");
if (intr == -1 || intr2nd == -1)
return NULL;
i2c_info->irq = intr + MRST_IRQ_OFFSET;
intr2nd_pdata = intr2nd + MRST_IRQ_OFFSET;
return &intr2nd_pdata;
}
static void __init *lis331dl_platform_data(void *info)
{
static short intr2nd_pdata;
struct i2c_board_info *i2c_info = info;
int intr = get_gpio_by_name("accel_int");
int intr2nd = get_gpio_by_name("accel_2");
if (intr == -1 || intr2nd == -1)
return NULL;
i2c_info->irq = intr + MRST_IRQ_OFFSET;
intr2nd_pdata = intr2nd + MRST_IRQ_OFFSET;
return &intr2nd_pdata;
}
static void __init *no_platform_data(void *info)
{
return NULL;
}
static struct resource msic_resources[] = {
{
.start = INTEL_MSIC_IRQ_PHYS_BASE,
.end = INTEL_MSIC_IRQ_PHYS_BASE + 64 - 1,
.flags = IORESOURCE_MEM,
},
};
static struct intel_msic_platform_data msic_pdata;
static struct platform_device msic_device = {
.name = "intel_msic",
.id = -1,
.dev = {
.platform_data = &msic_pdata,
},
.num_resources = ARRAY_SIZE(msic_resources),
.resource = msic_resources,
};
static inline bool mrst_has_msic(void)
{
return mrst_identify_cpu() == MRST_CPU_CHIP_PENWELL;
}
static int msic_scu_status_change(struct notifier_block *nb,
unsigned long code, void *data)
{
if (code == SCU_DOWN) {
platform_device_unregister(&msic_device);
return 0;
}
return platform_device_register(&msic_device);
}
static int __init msic_init(void)
{
static struct notifier_block msic_scu_notifier = {
.notifier_call = msic_scu_status_change,
};
/*
* We need to be sure that the SCU IPC is ready before the MSIC device
* can be registered.
*/
if (mrst_has_msic())
intel_scu_notifier_add(&msic_scu_notifier);
return 0;
}
arch_initcall(msic_init);
/*
* msic_generic_platform_data - sets generic platform data for the block
* @info: pointer to the SFI device table entry for this block
* @block: MSIC block
*
* Function sets IRQ number from the SFI table entry for given device to
* the MSIC platform data.
*/
static void *msic_generic_platform_data(void *info, enum intel_msic_block block)
{
struct sfi_device_table_entry *entry = info;
BUG_ON(block < 0 || block >= INTEL_MSIC_BLOCK_LAST);
msic_pdata.irq[block] = entry->irq;
return no_platform_data(info);
}
static void *msic_battery_platform_data(void *info)
{
return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_BATTERY);
}
static void *msic_gpio_platform_data(void *info)
{
static struct intel_msic_gpio_pdata pdata;
int gpio = get_gpio_by_name("msic_gpio_base");
if (gpio < 0)
return NULL;
pdata.gpio_base = gpio;
msic_pdata.gpio = &pdata;
return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_GPIO);
}
static void *msic_audio_platform_data(void *info)
{
struct platform_device *pdev;
pdev = platform_device_register_simple("sst-platform", -1, NULL, 0);
if (IS_ERR(pdev)) {
pr_err("failed to create audio platform device\n");
return NULL;
}
return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_AUDIO);
}
static void *msic_power_btn_platform_data(void *info)
{
return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_POWER_BTN);
}
static void *msic_ocd_platform_data(void *info)
{
static struct intel_msic_ocd_pdata pdata;
int gpio = get_gpio_by_name("ocd_gpio");
if (gpio < 0)
return NULL;
pdata.gpio = gpio;
msic_pdata.ocd = &pdata;
return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_OCD);
}
static void *msic_thermal_platform_data(void *info)
{
return msic_generic_platform_data(info, INTEL_MSIC_BLOCK_THERMAL);
}
/* tc35876x DSI-LVDS bridge chip and panel platform data */
static void *tc35876x_platform_data(void *data)
{
static struct tc35876x_platform_data pdata;
/* gpio pins set to -1 will not be used by the driver */
pdata.gpio_bridge_reset = get_gpio_by_name("LCMB_RXEN");
pdata.gpio_panel_bl_en = get_gpio_by_name("6S6P_BL_EN");
pdata.gpio_panel_vadd = get_gpio_by_name("EN_VREG_LCD_V3P3");
return &pdata;
}
static const struct devs_id __initconst device_ids[] = {
{"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
{"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
{"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data},
{"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
{"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
{"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
{"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data},
{"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data},
{"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
{"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
{"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data},
{"i2c_disp_brig", SFI_DEV_TYPE_I2C, 0, &tc35876x_platform_data},
/* MSIC subdevices */
{"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data},
{"msic_gpio", SFI_DEV_TYPE_IPC, 1, &msic_gpio_platform_data},
{"msic_audio", SFI_DEV_TYPE_IPC, 1, &msic_audio_platform_data},
{"msic_power_btn", SFI_DEV_TYPE_IPC, 1, &msic_power_btn_platform_data},
{"msic_ocd", SFI_DEV_TYPE_IPC, 1, &msic_ocd_platform_data},
{"msic_thermal", SFI_DEV_TYPE_IPC, 1, &msic_thermal_platform_data},
{},
};
#define MAX_IPCDEVS 24
static struct platform_device *ipc_devs[MAX_IPCDEVS];
static int ipc_next_dev;
#define MAX_SCU_SPI 24
static struct spi_board_info *spi_devs[MAX_SCU_SPI];
static int spi_next_dev;
#define MAX_SCU_I2C 24
static struct i2c_board_info *i2c_devs[MAX_SCU_I2C];
static int i2c_bus[MAX_SCU_I2C];
static int i2c_next_dev;
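/*
 * Devices that must wait for the SCU are staged in the arrays above
 * (up to 24 of each kind) and registered later by
 * intel_scu_devices_create(); i2c_bus[] remembers which adapter each
 * staged i2c_devs[] entry belongs to.
 */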
static void __init intel_scu_device_register(struct platform_device *pdev)
{
if (ipc_next_dev == MAX_IPCDEVS)
pr_err("too many SCU IPC devices");
else
ipc_devs[ipc_next_dev++] = pdev;
}
static void __init intel_scu_spi_device_register(struct spi_board_info *sdev)
{
struct spi_board_info *new_dev;
if (spi_next_dev == MAX_SCU_SPI) {
pr_err("too many SCU SPI devices");
return;
}
new_dev = kzalloc(sizeof(*sdev), GFP_KERNEL);
if (!new_dev) {
pr_err("failed to alloc mem for delayed spi dev %s\n",
sdev->modalias);
return;
}
memcpy(new_dev, sdev, sizeof(*sdev));
spi_devs[spi_next_dev++] = new_dev;
}
static void __init intel_scu_i2c_device_register(int bus,
struct i2c_board_info *idev)
{
struct i2c_board_info *new_dev;
if (i2c_next_dev == MAX_SCU_I2C) {
pr_err("too many SCU I2C devices");
return;
}
new_dev = kzalloc(sizeof(*idev), GFP_KERNEL);
if (!new_dev) {
pr_err("failed to alloc mem for delayed i2c dev %s\n",
idev->type);
return;
}
memcpy(new_dev, idev, sizeof(*idev));
i2c_bus[i2c_next_dev] = bus;
i2c_devs[i2c_next_dev++] = new_dev;
}
BLOCKING_NOTIFIER_HEAD(intel_scu_notifier);
EXPORT_SYMBOL_GPL(intel_scu_notifier);
/* Called by IPC driver */
void intel_scu_devices_create(void)
{
int i;
for (i = 0; i < ipc_next_dev; i++)
platform_device_add(ipc_devs[i]);
for (i = 0; i < spi_next_dev; i++)
spi_register_board_info(spi_devs[i], 1);
for (i = 0; i < i2c_next_dev; i++) {
struct i2c_adapter *adapter;
struct i2c_client *client;
adapter = i2c_get_adapter(i2c_bus[i]);
if (adapter) {
client = i2c_new_device(adapter, i2c_devs[i]);
if (!client)
pr_err("can't create i2c device %s\n",
i2c_devs[i]->type);
} else
i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1);
}
intel_scu_notifier_post(SCU_AVAILABLE, NULL);
}
EXPORT_SYMBOL_GPL(intel_scu_devices_create);
/* Called by IPC driver */
void intel_scu_devices_destroy(void)
{
int i;
intel_scu_notifier_post(SCU_DOWN, NULL);
for (i = 0; i < ipc_next_dev; i++)
platform_device_del(ipc_devs[i]);
}
EXPORT_SYMBOL_GPL(intel_scu_devices_destroy);
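/*
 * The create/destroy pair above is driven by the SCU IPC driver's
 * probe/remove paths; the SCU_AVAILABLE/SCU_DOWN notifications posted
 * here are what trigger msic_scu_status_change() earlier in this file.
 */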
static void __init install_irq_resource(struct platform_device *pdev, int irq)
{
/* Single threaded */
static struct resource __initdata res = {
.name = "IRQ",
.flags = IORESOURCE_IRQ,
};
res.start = irq;
platform_device_add_resources(pdev, &res, 1);
}
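/*
 * Reusing a single static resource is safe here because
 * platform_device_add_resources() copies the array into the device and
 * SFI parsing (the only caller) runs single-threaded at init time --
 * hence the "Single threaded" note above.
 */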
static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *entry)
{
const struct devs_id *dev = device_ids;
struct platform_device *pdev;
void *pdata = NULL;
while (dev->name[0]) {
if (dev->type == SFI_DEV_TYPE_IPC &&
!strncmp(dev->name, entry->name, SFI_NAME_LEN)) {
pdata = dev->get_platform_data(entry);
break;
}
dev++;
}
/*
* On Medfield the platform device creation is handled by the MSIC
* MFD driver so we don't need to do it here.
*/
if (mrst_has_msic())
return;
pdev = platform_device_alloc(entry->name, 0);
if (pdev == NULL) {
pr_err("out of memory for SFI platform device '%s'.\n",
entry->name);
return;
}
install_irq_resource(pdev, entry->irq);
pdev->dev.platform_data = pdata;
intel_scu_device_register(pdev);
}
static void __init sfi_handle_spi_dev(struct spi_board_info *spi_info)
{
const struct devs_id *dev = device_ids;
void *pdata = NULL;
while (dev->name[0]) {
if (dev->type == SFI_DEV_TYPE_SPI &&
!strncmp(dev->name, spi_info->modalias, SFI_NAME_LEN)) {
pdata = dev->get_platform_data(spi_info);
break;
}
dev++;
}
spi_info->platform_data = pdata;
if (dev->delay)
intel_scu_spi_device_register(spi_info);
else
spi_register_board_info(spi_info, 1);
}
static void __init sfi_handle_i2c_dev(int bus, struct i2c_board_info *i2c_info)
{
const struct devs_id *dev = device_ids;
void *pdata = NULL;
while (dev->name[0]) {
if (dev->type == SFI_DEV_TYPE_I2C &&
!strncmp(dev->name, i2c_info->type, SFI_NAME_LEN)) {
pdata = dev->get_platform_data(i2c_info);
break;
}
dev++;
}
i2c_info->platform_data = pdata;
if (dev->delay)
intel_scu_i2c_device_register(bus, i2c_info);
else
i2c_register_board_info(bus, i2c_info, 1);
}
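/*
 * The three sfi_handle_*_dev() helpers above share one pattern: look the
 * SFI name up in device_ids[], build the platform data via the entry's
 * callback, then register. SPI/I2C devices honour the entry's delay flag,
 * while IPC devices are always staged for the SCU (or skipped entirely on
 * MSIC-based platforms, where the MFD driver creates them).
 */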
static int __init sfi_parse_devs(struct sfi_table_header *table)
{
struct sfi_table_simple *sb;
struct sfi_device_table_entry *pentry;
struct spi_board_info spi_info;
struct i2c_board_info i2c_info;
int num, i, bus;
int ioapic;
struct io_apic_irq_attr irq_attr;
sb = (struct sfi_table_simple *)table;
num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);
pentry = (struct sfi_device_table_entry *)sb->pentry;
for (i = 0; i < num; i++, pentry++) {
int irq = pentry->irq;
if (irq != (u8)0xff) { /* native RTE case */
/* These SPI2 devices are not exposed to the system as PCI
* devices, but they have separate RTE entries in the IOAPIC,
* so we have to enable them one by one here
*/
ioapic = mp_find_ioapic(irq);
irq_attr.ioapic = ioapic;
irq_attr.ioapic_pin = irq;
irq_attr.trigger = 1;
irq_attr.polarity = 1;
io_apic_set_pci_routing(NULL, irq, &irq_attr);
} else
irq = 0; /* No irq */
switch (pentry->type) {
case SFI_DEV_TYPE_IPC:
pr_debug("info[%2d]: IPC bus, name = %16.16s, "
"irq = 0x%2x\n", i, pentry->name, pentry->irq);
sfi_handle_ipc_dev(pentry);
break;
case SFI_DEV_TYPE_SPI:
memset(&spi_info, 0, sizeof(spi_info));
strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
spi_info.irq = irq;
spi_info.bus_num = pentry->host_num;
spi_info.chip_select = pentry->addr;
spi_info.max_speed_hz = pentry->max_freq;
pr_debug("info[%2d]: SPI bus = %d, name = %16.16s, "
"irq = 0x%2x, max_freq = %d, cs = %d\n", i,
spi_info.bus_num,
spi_info.modalias,
spi_info.irq,
spi_info.max_speed_hz,
spi_info.chip_select);
sfi_handle_spi_dev(&spi_info);
break;
case SFI_DEV_TYPE_I2C:
memset(&i2c_info, 0, sizeof(i2c_info));
bus = pentry->host_num;
strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
i2c_info.irq = irq;
i2c_info.addr = pentry->addr;
pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, "
"irq = 0x%2x, addr = 0x%x\n", i, bus,
i2c_info.type,
i2c_info.irq,
i2c_info.addr);
sfi_handle_i2c_dev(bus, &i2c_info);
break;
case SFI_DEV_TYPE_UART:
case SFI_DEV_TYPE_HSI:
default:
;
}
}
return 0;
}
static int __init mrst_platform_init(void)
{
sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, sfi_parse_gpio);
sfi_table_parse(SFI_SIG_DEVS, NULL, NULL, sfi_parse_devs);
return 0;
}
arch_initcall(mrst_platform_init);
/*
* We search for these buttons in the SFI GPIO table (by name)
* and register them dynamically. Please add all possible
* buttons here; the list is shrunk at runtime for any button
* whose GPIO is not found.
*/
static struct gpio_keys_button gpio_button[] = {
{KEY_POWER, -1, 1, "power_btn", EV_KEY, 0, 3000},
{KEY_PROG1, -1, 1, "prog_btn1", EV_KEY, 0, 20},
{KEY_PROG2, -1, 1, "prog_btn2", EV_KEY, 0, 20},
{SW_LID, -1, 1, "lid_switch", EV_SW, 0, 20},
{KEY_VOLUMEUP, -1, 1, "vol_up", EV_KEY, 0, 20},
{KEY_VOLUMEDOWN, -1, 1, "vol_down", EV_KEY, 0, 20},
{KEY_CAMERA, -1, 1, "camera_full", EV_KEY, 0, 20},
{KEY_CAMERA_FOCUS, -1, 1, "camera_half", EV_KEY, 0, 20},
{SW_KEYPAD_SLIDE, -1, 1, "MagSw1", EV_SW, 0, 20},
{SW_KEYPAD_SLIDE, -1, 1, "MagSw2", EV_SW, 0, 20},
};
static struct gpio_keys_platform_data mrst_gpio_keys = {
.buttons = gpio_button,
.rep = 1,
.nbuttons = -1, /* filled in after the search */
};
static struct platform_device pb_device = {
.name = "gpio-keys",
.id = -1,
.dev = {
.platform_data = &mrst_gpio_keys,
},
};
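/*
 * The compaction below moves every button whose GPIO is present in the
 * SFI table to the front of gpio_button[] and sets nbuttons to the
 * surviving count, so the gpio-keys driver never sees an entry without
 * a valid GPIO.
 */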
/*
* Shrink away the non-existent buttons and register the
* gpio-keys device if any remain
*/
static int __init pb_keys_init(void)
{
struct gpio_keys_button *gb = gpio_button;
int i, num, good = 0;
num = ARRAY_SIZE(gpio_button);
for (i = 0; i < num; i++) {
gb[i].gpio = get_gpio_by_name(gb[i].desc);
pr_debug("info[%2d]: name = %s, gpio = %d\n", i, gb[i].desc, gb[i].gpio);
if (gb[i].gpio == -1)
continue;
if (i != good)
gb[good] = gb[i];
good++;
}
if (good) {
mrst_gpio_keys.nbuttons = good;
return platform_device_register(&pb_device);
}
return 0;
}
late_initcall(pb_keys_init);
| gpl-2.0 |
santod/nuk3rn3l_htc_msm8960-revamped | arch/mips/netlogic/xlp/wakeup.c | 4677 | 3377 | /*
* Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
* reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the NetLogic
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/threads.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <asm/string.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/common.h>
#include <asm/netlogic/mips-extns.h>
#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/pic.h>
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
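/*
 * Bringing a core out of reset is a three-step sequence: ungate its
 * clock in SYS_CORE_DFS_DIS_CTRL, clear its bit in SYS_CPU_RESET, then
 * poll SYS_CPU_NONCOHERENT_MODE until the core's boot code marks itself
 * coherent (or the poll budget is exhausted).
 */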
static void xlp_enable_secondary_cores(void)
{
uint32_t core, value, coremask, syscoremask;
int count;
/* read cores in reset from SYS block */
syscoremask = nlm_read_sys_reg(nlm_sys_base, SYS_CPU_RESET);
/* update user specified */
nlm_coremask = nlm_coremask & (syscoremask | 1);
for (core = 1; core < 8; core++) {
coremask = 1 << core;
if ((nlm_coremask & coremask) == 0)
continue;
/* Enable CPU clock */
value = nlm_read_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIS_CTRL);
value &= ~coremask;
nlm_write_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIS_CTRL, value);
/* Remove CPU Reset */
value = nlm_read_sys_reg(nlm_sys_base, SYS_CPU_RESET);
value &= ~coremask;
nlm_write_sys_reg(nlm_sys_base, SYS_CPU_RESET, value);
/* Poll for CPU to mark itself coherent */
count = 100000;
do {
value = nlm_read_sys_reg(nlm_sys_base,
SYS_CPU_NONCOHERENT_MODE);
} while ((value & coremask) != 0 && count-- > 0);
if (count <= 0)
pr_err("Failed to enable core %d\n", core);
}
}
void xlp_wakeup_secondary_cpus(void)
{
/*
* In the case of u-boot, the secondaries are held in reset;
* first wake up the core 0 threads
*/
xlp_boot_core0_siblings();
/* now get other cores out of reset */
xlp_enable_secondary_cores();
}
| gpl-2.0 |
Lolzen/kernel_mako | arch/blackfin/mach-common/ints-priority.c | 4933 | 29656 | /*
* Set up the interrupt priorities
*
* Copyright 2004-2009 Analog Devices Inc.
* 2003 Bas Vermeulen <bas@buyways.nl>
* 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
* 2000-2001 Lineo, Inc. D. Jeff Dionne <jeff@lineo.ca>
* 1999 D. Jeff Dionne <jeff@uclinux.org>
* 1996 Roman Zippel
*
* Licensed under the GPL-2
*/
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/sched.h>
#ifdef CONFIG_IPIPE
#include <linux/ipipe.h>
#endif
#include <asm/traps.h>
#include <asm/blackfin.h>
#include <asm/gpio.h>
#include <asm/irq_handler.h>
#include <asm/dpmc.h>
#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
/*
* NOTES:
* - we have separated the physical Hardware interrupt from the
* levels that the LINUX kernel sees (see the description in irq.h)
* -
*/
#ifndef CONFIG_SMP
/* Initialize this to an actual value to force it into the .data
* section so that we know it is properly initialized at entry into
* the kernel but before bss is initialized to zero (which is where
* it would live otherwise). The 0x1f magic represents the IRQs we
* cannot actually mask out in hardware.
*/
unsigned long bfin_irq_flags = 0x1f;
EXPORT_SYMBOL(bfin_irq_flags);
#endif
#ifdef CONFIG_PM
unsigned long bfin_sic_iwr[3]; /* Up to 3 SIC_IWRx registers */
unsigned vr_wakeup;
#endif
static struct ivgx {
/* irq number for request_irq, available in mach-bf5xx/irq.h */
unsigned int irqno;
/* corresponding bit in the SIC_ISR register */
unsigned int isrflag;
} ivg_table[NR_PERI_INTS];
static struct ivg_slice {
/* position of first irq in ivg_table for given ivg */
struct ivgx *ifirst;
struct ivgx *istop;
} ivg7_13[IVG13 - IVG7 + 1];
/*
* Search SIC_IAR and fill tables with the irqvalues
* and their positions in the SIC_ISR register.
*/
static void __init search_IAR(void)
{
unsigned ivg, irq_pos = 0;
for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
int irqN;
ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];
for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
int irqn;
u32 iar = bfin_read32((unsigned long *)SIC_IAR0 +
#if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
defined(CONFIG_BF538) || defined(CONFIG_BF539)
((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
#else
(irqN >> 3)
#endif
);
for (irqn = irqN; irqn < irqN + 4; ++irqn) {
int iar_shift = (irqn & 7) * 4;
if (ivg == (0xf & (iar >> iar_shift))) {
ivg_table[irq_pos].irqno = IVG7 + irqn;
ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
ivg7_13[ivg].istop++;
irq_pos++;
}
}
}
}
}
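/*
 * Each 32-bit SIC_IAR register packs eight 4-bit IVG assignments, one
 * nibble per peripheral interrupt; iar_shift above extracts the nibble
 * for irqn, so ivg_table[] ends up grouped by IVG priority level with
 * ivg7_13[] marking each group's bounds.
 */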
/*
* This is for core internal IRQs
*/
void bfin_ack_noop(struct irq_data *d)
{
/* Dummy function. */
}
static void bfin_core_mask_irq(struct irq_data *d)
{
bfin_irq_flags &= ~(1 << d->irq);
if (!hard_irqs_disabled())
hard_local_irq_enable();
}
static void bfin_core_unmask_irq(struct irq_data *d)
{
bfin_irq_flags |= 1 << d->irq;
/*
* If interrupts are enabled, IMASK must contain the same value
* as bfin_irq_flags. Make sure that invariant holds. If interrupts
* are currently disabled we need not do anything; one of the
* callers will take care of setting IMASK to the proper value
* when reenabling interrupts.
* local_irq_enable just does "STI bfin_irq_flags", so it's exactly
* what we need.
*/
if (!hard_irqs_disabled())
hard_local_irq_enable();
return;
}
void bfin_internal_mask_irq(unsigned int irq)
{
unsigned long flags = hard_local_irq_save();
#ifdef SIC_IMASK0
unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
~(1 << mask_bit));
# ifdef CONFIG_SMP
bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
~(1 << mask_bit));
# endif
#else
bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
~(1 << SIC_SYSIRQ(irq)));
#endif
hard_local_irq_restore(flags);
}
static void bfin_internal_mask_irq_chip(struct irq_data *d)
{
bfin_internal_mask_irq(d->irq);
}
#ifdef CONFIG_SMP
static void bfin_internal_unmask_irq_affinity(unsigned int irq,
const struct cpumask *affinity)
#else
void bfin_internal_unmask_irq(unsigned int irq)
#endif
{
unsigned long flags = hard_local_irq_save();
#ifdef SIC_IMASK0
unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
# ifdef CONFIG_SMP
if (cpumask_test_cpu(0, affinity))
# endif
bfin_write_SIC_IMASK(mask_bank,
bfin_read_SIC_IMASK(mask_bank) |
(1 << mask_bit));
# ifdef CONFIG_SMP
if (cpumask_test_cpu(1, affinity))
bfin_write_SICB_IMASK(mask_bank,
bfin_read_SICB_IMASK(mask_bank) |
(1 << mask_bit));
# endif
#else
bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
(1 << SIC_SYSIRQ(irq)));
#endif
hard_local_irq_restore(flags);
}
#ifdef CONFIG_SMP
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
}
static int bfin_internal_set_affinity(struct irq_data *d,
const struct cpumask *mask, bool force)
{
bfin_internal_mask_irq(d->irq);
bfin_internal_unmask_irq_affinity(d->irq, mask);
return 0;
}
#else
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
bfin_internal_unmask_irq(d->irq);
}
#endif
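/*
 * On parts with more than one SIC_IMASKx register the peripheral index
 * SIC_SYSIRQ(irq) is split into a bank (index / 32) and a bit
 * (index % 32); in SMP builds the same bank/bit is applied to the second
 * core's SICB registers according to the IRQ's affinity mask.
 */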
#ifdef CONFIG_PM
int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
u32 bank, bit, wakeup = 0;
unsigned long flags;
bank = SIC_SYSIRQ(irq) / 32;
bit = SIC_SYSIRQ(irq) % 32;
switch (irq) {
#ifdef IRQ_RTC
case IRQ_RTC:
wakeup |= WAKE;
break;
#endif
#ifdef IRQ_CAN0_RX
case IRQ_CAN0_RX:
wakeup |= CANWE;
break;
#endif
#ifdef IRQ_CAN1_RX
case IRQ_CAN1_RX:
wakeup |= CANWE;
break;
#endif
#ifdef IRQ_USB_INT0
case IRQ_USB_INT0:
wakeup |= USBWE;
break;
#endif
#ifdef CONFIG_BF54x
case IRQ_CNT:
wakeup |= ROTWE;
break;
#endif
default:
break;
}
flags = hard_local_irq_save();
if (state) {
bfin_sic_iwr[bank] |= (1 << bit);
vr_wakeup |= wakeup;
} else {
bfin_sic_iwr[bank] &= ~(1 << bit);
vr_wakeup &= ~wakeup;
}
hard_local_irq_restore(flags);
return 0;
}
static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
{
return bfin_internal_set_wake(d->irq, state);
}
#else
# define bfin_internal_set_wake_chip NULL
#endif
static struct irq_chip bfin_core_irqchip = {
.name = "CORE",
.irq_ack = bfin_ack_noop,
.irq_mask = bfin_core_mask_irq,
.irq_unmask = bfin_core_unmask_irq,
};
static struct irq_chip bfin_internal_irqchip = {
.name = "INTN",
.irq_ack = bfin_ack_noop,
.irq_mask = bfin_internal_mask_irq_chip,
.irq_unmask = bfin_internal_unmask_irq_chip,
.irq_mask_ack = bfin_internal_mask_irq_chip,
.irq_disable = bfin_internal_mask_irq_chip,
.irq_enable = bfin_internal_unmask_irq_chip,
#ifdef CONFIG_SMP
.irq_set_affinity = bfin_internal_set_affinity,
#endif
.irq_set_wake = bfin_internal_set_wake_chip,
};
void bfin_handle_irq(unsigned irq)
{
#ifdef CONFIG_IPIPE
struct pt_regs regs; /* Contents not used. */
ipipe_trace_irq_entry(irq);
__ipipe_handle_irq(irq, &regs);
ipipe_trace_irq_exit(irq);
#else /* !CONFIG_IPIPE */
generic_handle_irq(irq);
#endif /* !CONFIG_IPIPE */
}
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
static int mac_stat_int_mask;
static void bfin_mac_status_ack_irq(unsigned int irq)
{
switch (irq) {
case IRQ_MAC_MMCINT:
bfin_write_EMAC_MMC_TIRQS(
bfin_read_EMAC_MMC_TIRQE() &
bfin_read_EMAC_MMC_TIRQS());
bfin_write_EMAC_MMC_RIRQS(
bfin_read_EMAC_MMC_RIRQE() &
bfin_read_EMAC_MMC_RIRQS());
break;
case IRQ_MAC_RXFSINT:
bfin_write_EMAC_RX_STKY(
bfin_read_EMAC_RX_IRQE() &
bfin_read_EMAC_RX_STKY());
break;
case IRQ_MAC_TXFSINT:
bfin_write_EMAC_TX_STKY(
bfin_read_EMAC_TX_IRQE() &
bfin_read_EMAC_TX_STKY());
break;
case IRQ_MAC_WAKEDET:
bfin_write_EMAC_WKUP_CTL(
bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
break;
default:
/* These bits are W1C */
bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
break;
}
}
static void bfin_mac_status_mask_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
#ifdef BF537_FAMILY
switch (irq) {
case IRQ_MAC_PHYINT:
bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
break;
default:
break;
}
#else
if (!mac_stat_int_mask)
bfin_internal_mask_irq(IRQ_MAC_ERROR);
#endif
bfin_mac_status_ack_irq(irq);
}
static void bfin_mac_status_unmask_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
#ifdef BF537_FAMILY
switch (irq) {
case IRQ_MAC_PHYINT:
bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
break;
default:
break;
}
#else
if (!mac_stat_int_mask)
bfin_internal_unmask_irq(IRQ_MAC_ERROR);
#endif
mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
}
#ifdef CONFIG_PM
int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
{
#ifdef BF537_FAMILY
return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
#else
return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
#endif
}
#else
# define bfin_mac_status_set_wake NULL
#endif
static struct irq_chip bfin_mac_status_irqchip = {
.name = "MACST",
.irq_ack = bfin_ack_noop,
.irq_mask_ack = bfin_mac_status_mask_irq,
.irq_mask = bfin_mac_status_mask_irq,
.irq_unmask = bfin_mac_status_unmask_irq,
.irq_set_wake = bfin_mac_status_set_wake,
};
void bfin_demux_mac_status_irq(unsigned int int_err_irq,
struct irq_desc *inta_desc)
{
int i, irq = 0;
u32 status = bfin_read_EMAC_SYSTAT();
for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
if (status & (1L << i)) {
irq = IRQ_MAC_PHYINT + i;
break;
}
if (irq) {
if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
bfin_handle_irq(irq);
} else {
bfin_mac_status_ack_irq(irq);
pr_debug("IRQ %d:"
" MASKED MAC ERROR INTERRUPT ASSERTED\n",
irq);
}
} else
printk(KERN_ERR
"%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
" INTERRUPT ASSERTED BUT NO SOURCE FOUND"
"(EMAC_SYSTAT=0x%X)\n",
__func__, __FILE__, __LINE__, status);
}
#endif
static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
handle = handle_level_irq;
#endif
__irq_set_handler_locked(irq, handle);
}
static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
extern void bfin_gpio_irq_prepare(unsigned gpio);
#if !BFIN_GPIO_PINT
static void bfin_gpio_ack_irq(struct irq_data *d)
{
/* AFAIK, when mask_ack is provided, ack_irq
* only gets called for edge-sensitive irqs
*/
set_gpio_data(irq_to_gpio(d->irq), 0);
}
static void bfin_gpio_mask_ack_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
u32 gpionr = irq_to_gpio(irq);
if (!irqd_is_level_type(d))
set_gpio_data(gpionr, 0);
set_gpio_maska(gpionr, 0);
}
static void bfin_gpio_mask_irq(struct irq_data *d)
{
set_gpio_maska(irq_to_gpio(d->irq), 0);
}
static void bfin_gpio_unmask_irq(struct irq_data *d)
{
set_gpio_maska(irq_to_gpio(d->irq), 1);
}
static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
{
u32 gpionr = irq_to_gpio(d->irq);
if (__test_and_set_bit(gpionr, gpio_enabled))
bfin_gpio_irq_prepare(gpionr);
bfin_gpio_unmask_irq(d);
return 0;
}
static void bfin_gpio_irq_shutdown(struct irq_data *d)
{
u32 gpionr = irq_to_gpio(d->irq);
bfin_gpio_mask_irq(d);
__clear_bit(gpionr, gpio_enabled);
bfin_gpio_irq_free(gpionr);
}
static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
{
unsigned int irq = d->irq;
int ret;
char buf[16];
u32 gpionr = irq_to_gpio(irq);
if (type == IRQ_TYPE_PROBE) {
/* only probe unenabled GPIO interrupt lines */
if (test_bit(gpionr, gpio_enabled))
return 0;
type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
}
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
snprintf(buf, 16, "gpio-irq%d", irq);
ret = bfin_gpio_irq_request(gpionr, buf);
if (ret)
return ret;
if (__test_and_set_bit(gpionr, gpio_enabled))
bfin_gpio_irq_prepare(gpionr);
} else {
__clear_bit(gpionr, gpio_enabled);
return 0;
}
set_gpio_inen(gpionr, 0);
set_gpio_dir(gpionr, 0);
if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
== (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
set_gpio_both(gpionr, 1);
else
set_gpio_both(gpionr, 0);
if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
set_gpio_polar(gpionr, 1); /* low or falling edge denoted by one */
else
set_gpio_polar(gpionr, 0); /* high or rising edge denoted by zero */
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
set_gpio_edge(gpionr, 1);
set_gpio_inen(gpionr, 1);
set_gpio_data(gpionr, 0);
} else {
set_gpio_edge(gpionr, 0);
set_gpio_inen(gpionr, 1);
}
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
bfin_set_irq_handler(irq, handle_edge_irq);
else
bfin_set_irq_handler(irq, handle_level_irq);
return 0;
}
#ifdef CONFIG_PM
static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
{
return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
}
#else
# define bfin_gpio_set_wake NULL
#endif
static void bfin_demux_gpio_block(unsigned int irq)
{
unsigned int gpio, mask;
gpio = irq_to_gpio(irq);
mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
while (mask) {
if (mask & 1)
bfin_handle_irq(irq);
irq++;
mask >>= 1;
}
}
void bfin_demux_gpio_irq(unsigned int inta_irq,
struct irq_desc *desc)
{
unsigned int irq;
switch (inta_irq) {
#if defined(BF537_FAMILY)
case IRQ_PF_INTA_PG_INTA:
bfin_demux_gpio_block(IRQ_PF0);
irq = IRQ_PG0;
break;
case IRQ_PH_INTA_MAC_RX:
irq = IRQ_PH0;
break;
#elif defined(BF533_FAMILY)
case IRQ_PROG_INTA:
irq = IRQ_PF0;
break;
#elif defined(BF538_FAMILY)
case IRQ_PORTF_INTA:
irq = IRQ_PF0;
break;
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
case IRQ_PORTF_INTA:
irq = IRQ_PF0;
break;
case IRQ_PORTG_INTA:
irq = IRQ_PG0;
break;
case IRQ_PORTH_INTA:
irq = IRQ_PH0;
break;
#elif defined(CONFIG_BF561)
case IRQ_PROG0_INTA:
irq = IRQ_PF0;
break;
case IRQ_PROG1_INTA:
irq = IRQ_PF16;
break;
case IRQ_PROG2_INTA:
irq = IRQ_PF32;
break;
#endif
default:
BUG();
return;
}
bfin_demux_gpio_block(irq);
}
#else
#define NR_PINT_SYS_IRQS 4
#define NR_PINT_BITS 32
#define NR_PINTS 160
#define IRQ_NOT_AVAIL 0xFF
#define PINT_2_BANK(x) ((x) >> 5)
#define PINT_2_BIT(x) ((x) & 0x1F)
#define PINT_BIT(x) (1 << (PINT_2_BIT(x)))
static unsigned char irq2pint_lut[NR_PINTS];
static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];
static struct bfin_pint_regs * const pint[NR_PINT_SYS_IRQS] = {
(struct bfin_pint_regs *)PINT0_MASK_SET,
(struct bfin_pint_regs *)PINT1_MASK_SET,
(struct bfin_pint_regs *)PINT2_MASK_SET,
(struct bfin_pint_regs *)PINT3_MASK_SET,
};
inline unsigned int get_irq_base(u32 bank, u8 bmap)
{
unsigned int irq_base;
if (bank < 2) { /*PA-PB */
irq_base = IRQ_PA0 + bmap * 16;
} else { /*PC-PJ */
irq_base = IRQ_PC0 + bmap * 16;
}
return irq_base;
}
/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
void init_pint_lut(void)
{
u16 bank, bit, irq_base, bit_pos;
u32 pint_assign;
u8 bmap;
memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut));
for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
pint_assign = pint[bank]->assign;
for (bit = 0; bit < NR_PINT_BITS; bit++) {
bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF;
irq_base = get_irq_base(bank, bmap);
irq_base += (bit % 8) + ((bit / 8) & 1 ? 8 : 0);
bit_pos = bit + bank * NR_PINT_BITS;
pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
}
}
}
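/*
 * Worked example of the LUT math (with a hypothetical assignment value):
 * if byte 0 of pint0's assign register is 0x00, bits 0-7 map to
 * IRQ_PA0..IRQ_PA7; if byte 1 is also 0x00, bits 8-15 map to
 * IRQ_PA8..IRQ_PA15 via the "(bit / 8) & 1 ? 8 : 0" half-port offset.
 * The two LUTs are inverses of each other, keyed by (irq - SYS_IRQS) on
 * one side and bank * NR_PINT_BITS + bit on the other.
 */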
static void bfin_gpio_ack_irq(struct irq_data *d)
{
u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
u32 pintbit = PINT_BIT(pint_val);
u32 bank = PINT_2_BANK(pint_val);
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
if (pint[bank]->invert_set & pintbit)
pint[bank]->invert_clear = pintbit;
else
pint[bank]->invert_set = pintbit;
}
pint[bank]->request = pintbit;
}
static void bfin_gpio_mask_ack_irq(struct irq_data *d)
{
u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
u32 pintbit = PINT_BIT(pint_val);
u32 bank = PINT_2_BANK(pint_val);
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
if (pint[bank]->invert_set & pintbit)
pint[bank]->invert_clear = pintbit;
else
pint[bank]->invert_set = pintbit;
}
pint[bank]->request = pintbit;
pint[bank]->mask_clear = pintbit;
}
static void bfin_gpio_mask_irq(struct irq_data *d)
{
u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
}
static void bfin_gpio_unmask_irq(struct irq_data *d)
{
u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
u32 pintbit = PINT_BIT(pint_val);
u32 bank = PINT_2_BANK(pint_val);
pint[bank]->mask_set = pintbit;
}
static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
{
unsigned int irq = d->irq;
u32 gpionr = irq_to_gpio(irq);
u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
if (pint_val == IRQ_NOT_AVAIL) {
printk(KERN_ERR
"GPIO IRQ %d :Not in PINT Assign table "
"Reconfigure Interrupt to Port Assignemt\n", irq);
return -ENODEV;
}
if (__test_and_set_bit(gpionr, gpio_enabled))
bfin_gpio_irq_prepare(gpionr);
bfin_gpio_unmask_irq(d);
return 0;
}
static void bfin_gpio_irq_shutdown(struct irq_data *d)
{
u32 gpionr = irq_to_gpio(d->irq);
bfin_gpio_mask_irq(d);
__clear_bit(gpionr, gpio_enabled);
bfin_gpio_irq_free(gpionr);
}
static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
{
unsigned int irq = d->irq;
int ret;
char buf[16];
u32 gpionr = irq_to_gpio(irq);
u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
u32 pintbit = PINT_BIT(pint_val);
u32 bank = PINT_2_BANK(pint_val);
if (pint_val == IRQ_NOT_AVAIL)
return -ENODEV;
if (type == IRQ_TYPE_PROBE) {
/* only probe unenabled GPIO interrupt lines */
if (test_bit(gpionr, gpio_enabled))
return 0;
type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
}
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
snprintf(buf, 16, "gpio-irq%d", irq);
ret = bfin_gpio_irq_request(gpionr, buf);
if (ret)
return ret;
if (__test_and_set_bit(gpionr, gpio_enabled))
bfin_gpio_irq_prepare(gpionr);
} else {
__clear_bit(gpionr, gpio_enabled);
return 0;
}
if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
pint[bank]->invert_set = pintbit; /* low or falling edge denoted by one */
else
pint[bank]->invert_clear = pintbit; /* high or rising edge denoted by zero */
if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
== (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
if (gpio_get_value(gpionr))
pint[bank]->invert_set = pintbit;
else
pint[bank]->invert_clear = pintbit;
}
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
pint[bank]->edge_set = pintbit;
bfin_set_irq_handler(irq, handle_edge_irq);
} else {
pint[bank]->edge_clear = pintbit;
bfin_set_irq_handler(irq, handle_level_irq);
}
return 0;
}
#ifdef CONFIG_PM
static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
{
u32 pint_irq;
u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
u32 bank = PINT_2_BANK(pint_val);
switch (bank) {
case 0:
pint_irq = IRQ_PINT0;
break;
case 2:
pint_irq = IRQ_PINT2;
break;
case 3:
pint_irq = IRQ_PINT3;
break;
case 1:
pint_irq = IRQ_PINT1;
break;
default:
return -EINVAL;
}
bfin_internal_set_wake(pint_irq, state);
return 0;
}
#else
# define bfin_gpio_set_wake NULL
#endif
void bfin_demux_gpio_irq(unsigned int inta_irq,
struct irq_desc *desc)
{
u32 bank, pint_val;
u32 request, irq;
switch (inta_irq) {
case IRQ_PINT0:
bank = 0;
break;
case IRQ_PINT2:
bank = 2;
break;
case IRQ_PINT3:
bank = 3;
break;
case IRQ_PINT1:
bank = 1;
break;
default:
return;
}
pint_val = bank * NR_PINT_BITS;
request = pint[bank]->request;
while (request) {
if (request & 1) {
irq = pint2irq_lut[pint_val] + SYS_IRQS;
bfin_handle_irq(irq);
}
pint_val++;
request >>= 1;
}
}
#endif
static struct irq_chip bfin_gpio_irqchip = {
.name = "GPIO",
.irq_ack = bfin_gpio_ack_irq,
.irq_mask = bfin_gpio_mask_irq,
.irq_mask_ack = bfin_gpio_mask_ack_irq,
.irq_unmask = bfin_gpio_unmask_irq,
.irq_disable = bfin_gpio_mask_irq,
.irq_enable = bfin_gpio_unmask_irq,
.irq_set_type = bfin_gpio_irq_type,
.irq_startup = bfin_gpio_irq_startup,
.irq_shutdown = bfin_gpio_irq_shutdown,
.irq_set_wake = bfin_gpio_set_wake,
};
void __cpuinit init_exception_vectors(void)
{
/* cannot program in software:
* evt0 - emulation (jtag)
* evt1 - reset
*/
bfin_write_EVT2(evt_nmi);
bfin_write_EVT3(trap);
bfin_write_EVT5(evt_ivhw);
bfin_write_EVT6(evt_timer);
bfin_write_EVT7(evt_evt7);
bfin_write_EVT8(evt_evt8);
bfin_write_EVT9(evt_evt9);
bfin_write_EVT10(evt_evt10);
bfin_write_EVT11(evt_evt11);
bfin_write_EVT12(evt_evt12);
bfin_write_EVT13(evt_evt13);
bfin_write_EVT14(evt_evt14);
bfin_write_EVT15(evt_system_call);
CSYNC();
}
/*
* This function should be called during kernel startup to initialize
* the BFin IRQ handling routines.
*/
int __init init_arch_irq(void)
{
int irq;
unsigned long ilat = 0;
/* Disable all the peripheral intrs - page 4-29 HW Ref manual */
#ifdef SIC_IMASK0
bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
# ifdef SIC_IMASK2
bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
# endif
# ifdef CONFIG_SMP
bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
# endif
#else
bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
#endif
local_irq_disable();
#if BFIN_GPIO_PINT
# ifdef CONFIG_PINTx_REASSIGN
pint[0]->assign = CONFIG_PINT0_ASSIGN;
pint[1]->assign = CONFIG_PINT1_ASSIGN;
pint[2]->assign = CONFIG_PINT2_ASSIGN;
pint[3]->assign = CONFIG_PINT3_ASSIGN;
# endif
/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
init_pint_lut();
#endif
for (irq = 0; irq <= SYS_IRQS; irq++) {
if (irq <= IRQ_CORETMR)
irq_set_chip(irq, &bfin_core_irqchip);
else
irq_set_chip(irq, &bfin_internal_irqchip);
switch (irq) {
#if BFIN_GPIO_PINT
case IRQ_PINT0:
case IRQ_PINT1:
case IRQ_PINT2:
case IRQ_PINT3:
#elif defined(BF537_FAMILY)
case IRQ_PH_INTA_MAC_RX:
case IRQ_PF_INTA_PG_INTA:
#elif defined(BF533_FAMILY)
case IRQ_PROG_INTA:
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
case IRQ_PORTF_INTA:
case IRQ_PORTG_INTA:
case IRQ_PORTH_INTA:
#elif defined(CONFIG_BF561)
case IRQ_PROG0_INTA:
case IRQ_PROG1_INTA:
case IRQ_PROG2_INTA:
#elif defined(BF538_FAMILY)
case IRQ_PORTF_INTA:
#endif
irq_set_chained_handler(irq, bfin_demux_gpio_irq);
break;
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
case IRQ_MAC_ERROR:
irq_set_chained_handler(irq,
bfin_demux_mac_status_irq);
break;
#endif
#ifdef CONFIG_SMP
case IRQ_SUPPLE_0:
case IRQ_SUPPLE_1:
irq_set_handler(irq, handle_percpu_irq);
break;
#endif
#ifdef CONFIG_TICKSOURCE_CORETMR
case IRQ_CORETMR:
# ifdef CONFIG_SMP
irq_set_handler(irq, handle_percpu_irq);
# else
irq_set_handler(irq, handle_simple_irq);
# endif
break;
#endif
#ifdef CONFIG_TICKSOURCE_GPTMR0
case IRQ_TIMER0:
irq_set_handler(irq, handle_simple_irq);
break;
#endif
default:
#ifdef CONFIG_IPIPE
irq_set_handler(irq, handle_level_irq);
#else
irq_set_handler(irq, handle_simple_irq);
#endif
break;
}
}
init_mach_irq();
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
handle_level_irq);
#endif
/* if configured as edge, then will be changed to do_edge_IRQ */
for (irq = GPIO_IRQ_BASE;
irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
handle_level_irq);
bfin_write_IMASK(0);
CSYNC();
ilat = bfin_read_ILAT();
CSYNC();
bfin_write_ILAT(ilat);
CSYNC();
printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
/* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
* local_irq_enable()
*/
program_IAR();
/* Therefore it's better to set up IARs before interrupts are enabled */
search_IAR();
/* Enable interrupts IVG7-15 */
bfin_irq_flags |= IMASK_IVG15 |
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
/* This implicitly covers ANOMALY_05000171
* Boot-ROM code modifies SICA_IWRx wakeup registers
*/
#ifdef SIC_IWR0
bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
# ifdef SIC_IWR1
/* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
* will screw up the bootrom as it relies on MDMA0/1 waking it
* up from IDLE instructions. See this report for more info:
* http://blackfin.uclinux.org/gf/tracker/4323
*/
if (ANOMALY_05000435)
bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
else
bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
# endif
# ifdef SIC_IWR2
bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif
return 0;
}
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
static int vec_to_irq(int vec)
{
struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
unsigned long sic_status[3];
if (likely(vec == EVT_IVTMR_P))
return IRQ_CORETMR;
#ifdef SIC_ISR
sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
#else
if (smp_processor_id()) {
# ifdef SICB_ISR0
/* This will be optimized out in UP mode. */
sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
# endif
} else {
sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
}
#endif
#ifdef SIC_ISR2
sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
#endif
for (;; ivg++) {
if (ivg >= ivg_stop)
return -1;
#ifdef SIC_ISR
if (sic_status[0] & ivg->isrflag)
#else
if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
#endif
return ivg->irqno;
}
}
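/*
 * vec_to_irq() resolves a core event vector to the peripheral IRQ that
 * raised it: the ivg7_13[] slice for that priority level is scanned in
 * table order and the first entry whose status bit is both asserted and
 * unmasked wins; -1 flags a spurious vector.
 */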
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
void do_irq(int vec, struct pt_regs *fp)
{
int irq = vec_to_irq(vec);
if (irq == -1)
return;
asm_do_IRQ(irq, fp);
}
#ifdef CONFIG_IPIPE
int __ipipe_get_irq_priority(unsigned irq)
{
int ient, prio;
if (irq <= IRQ_CORETMR)
return irq;
for (ient = 0; ient < NR_PERI_INTS; ient++) {
struct ivgx *ivg = ivg_table + ient;
if (ivg->irqno == irq) {
for (prio = 0; prio <= IVG13-IVG7; prio++) {
if (ivg7_13[prio].ifirst <= ivg &&
ivg7_13[prio].istop > ivg)
return IVG7 + prio;
}
}
}
return IVG15;
}
/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
struct ipipe_domain *this_domain = __ipipe_current_domain;
struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
int irq, s = 0;
irq = vec_to_irq(vec);
if (irq == -1)
return 0;
if (irq == IRQ_SYSTMR) {
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
#endif
/* This is basically what we need from the register frame. */
__raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
__raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
if (this_domain != ipipe_root_domain)
__raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
else
__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
}
/*
* We don't want Linux interrupt handlers to run at the
* current core priority level (i.e. < EVT15), since this
* might delay other interrupts handled by a high priority
* domain. Here is what we do instead:
*
* - we raise the SYNCDEFER bit to prevent
* __ipipe_handle_irq() to sync the pipeline for the root
* stage for the incoming interrupt. Upon return, that IRQ is
* pending in the interrupt log.
*
* - we raise the TIF_IRQ_SYNC bit for the current thread, so
* that _schedule_and_signal_from_int will eventually sync the
* pipeline from EVT15.
*/
if (this_domain == ipipe_root_domain) {
s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
barrier();
}
ipipe_trace_irq_entry(irq);
__ipipe_handle_irq(irq, regs);
ipipe_trace_irq_exit(irq);
if (user_mode(regs) &&
!ipipe_test_foreign_stack() &&
(current->ipipe_flags & PF_EVTRET) != 0) {
/*
* Testing for user_regs() does NOT fully eliminate
* foreign stack contexts, because of the forged
* interrupt returns we do through
* __ipipe_call_irqtail. In that case, we might have
* preempted a foreign stack context in a high
* priority domain, with a single interrupt level now
* pending after the irqtail unwinding is done. In
* which case user_mode() is now true, and the event
* gets dispatched spuriously.
*/
current->ipipe_flags &= ~PF_EVTRET;
__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
}
if (this_domain == ipipe_root_domain) {
set_thread_flag(TIF_IRQ_SYNC);
if (!s) {
__clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
return !test_bit(IPIPE_STALL_FLAG, &p->status);
}
}
return 0;
}
#endif /* CONFIG_IPIPE */
| gpl-2.0 |
Dzenik/kernel-source | arch/mips/pnx8550/common/reset.c | 8005 | 1126 | /*
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*
* Reset the PNX8550 board.
*
*/
#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/reboot.h>
#include <glb.h>
void pnx8550_machine_restart(char *command)
{
PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST;
}
void pnx8550_machine_halt(void)
{
while (1) {
if (cpu_wait)
cpu_wait();
}
}
| gpl-2.0 |
Pesach85/PH85-KERNEL | drivers/staging/vt6656/datarate.c | 8517 | 12384 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* File: datarate.c
*
* Purpose: Handles the auto fallback & data rate functions
*
* Author: Lyndon Chen
*
* Date: July 17, 2002
*
* Functions:
* RATEvParseMaxRate - Parse the highest basic & supported rate in the rate field of a frame
* RATEvTxRateFallBack - Rate fallback algorithm implementation
* RATEuSetIE - Set rate IE field.
*
* Revision History:
*
*/
#include "ttype.h"
#include "tmacro.h"
#include "mac.h"
#include "80211mgr.h"
#include "bssdb.h"
#include "datarate.h"
#include "card.h"
#include "baseband.h"
#include "srom.h"
#include "rf.h"
/*--------------------- Static Definitions -------------------------*/
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
//static int msglevel = MSG_LEVEL_DEBUG;
static int msglevel = MSG_LEVEL_INFO;
const BYTE acbyIERate[MAX_RATE] =
{0x02, 0x04, 0x0B, 0x16, 0x0C, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6C};
#define AUTORATE_TXOK_CNT 0x0400
#define AUTORATE_TXFAIL_CNT 0x0064
#define AUTORATE_TIMEOUT 10
/*--------------------- Static Functions --------------------------*/
void s_vResetCounter(PKnownNodeDB psNodeDBTable);
void s_vResetCounter(PKnownNodeDB psNodeDBTable)
{
BYTE ii;
// clear statistic counter for auto_rate
for (ii = 0; ii <= MAX_RATE; ii++) {
psNodeDBTable->uTxOk[ii] = 0;
psNodeDBTable->uTxFail[ii] = 0;
}
}
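/*
 * Note the "<= MAX_RATE" bound above: the per-node counters have
 * MAX_RATE + 1 slots, with the extra MAX_RATE slot serving as a running
 * total across all rates (see the uTxOk[MAX_RATE]/uTxFail[MAX_RATE]
 * tests in RATEvTxRateFallBack()).
 */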
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*+
*
* Description:
* Get RateIdx from the value in SuppRates IE or ExtSuppRates IE
*
* Parameters:
* In:
* BYTE - Rate value in SuppRates IE or ExtSuppRates IE
* Out:
* none
*
* Return Value: RateIdx
*
-*/
BYTE
DATARATEbyGetRateIdx (
BYTE byRate
)
{
BYTE ii;
//Erase basicRate flag.
byRate = byRate & 0x7F;//0111 1111
for (ii = 0; ii < MAX_RATE; ii ++) {
if (acbyIERate[ii] == byRate)
return ii;
}
return 0;
}
/*+
*
* Description:
* Get RateIdx from the value in SuppRates IE or ExtSuppRates IE
*
* Parameters:
* In:
* BYTE - Rate value in SuppRates IE or ExtSuppRates IE
* Out:
* none
*
* Return Value: RateIdx
*
-*/
WORD
RATEwGetRateIdx(
BYTE byRate
)
{
WORD ii;
//Erase basicRate flag.
byRate = byRate & 0x7F;//0111 1111
for (ii = 0; ii < MAX_RATE; ii ++) {
if (acbyIERate[ii] == byRate)
return ii;
}
return 0;
}
/*+
*
* Description:
* Parsing the highest basic & support rate in rate field of frame.
*
* Parameters:
* In:
* pDevice - Pointer to the adapter
* pItemRates - Pointer to Rate field defined in 802.11 spec.
* pItemExtRates - Pointer to Extended Rate field defined in 802.11 spec.
* Out:
* pwMaxBasicRate - Maximum Basic Rate
* pwMaxSuppRate - Maximum Supported Rate
* pbyTopCCKRate - Maximum Basic Rate in CCK mode
* pbyTopOFDMRate - Maximum Basic Rate in OFDM mode
*
* Return Value: none
*
-*/
void RATEvParseMaxRate(
void *pDeviceHandler,
PWLAN_IE_SUPP_RATES pItemRates,
PWLAN_IE_SUPP_RATES pItemExtRates,
BOOL bUpdateBasicRate,
PWORD pwMaxBasicRate,
PWORD pwMaxSuppRate,
PWORD pwSuppRate,
PBYTE pbyTopCCKRate,
PBYTE pbyTopOFDMRate
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
unsigned int ii;
BYTE byHighSuppRate = 0;
BYTE byRate = 0;
WORD wOldBasicRate = pDevice->wBasicRate;
unsigned int uRateLen;
if (pItemRates == NULL)
return;
*pwSuppRate = 0;
uRateLen = pItemRates->len;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate Len: %d\n", uRateLen);
if (pDevice->byBBType != BB_TYPE_11B) {
if (uRateLen > WLAN_RATES_MAXLEN)
uRateLen = WLAN_RATES_MAXLEN;
} else {
if (uRateLen > WLAN_RATES_MAXLEN_11B)
uRateLen = WLAN_RATES_MAXLEN_11B;
}
for (ii = 0; ii < uRateLen; ii++) {
byRate = (BYTE)(pItemRates->abyRates[ii]);
if (WLAN_MGMT_IS_BASICRATE(byRate) &&
(bUpdateBasicRate == TRUE)) {
// Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate
CARDbAddBasicRate((void *)pDevice, RATEwGetRateIdx(byRate));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", RATEwGetRateIdx(byRate));
}
byRate = (BYTE)(pItemRates->abyRates[ii]&0x7F);
if (byHighSuppRate == 0)
byHighSuppRate = byRate;
if (byRate > byHighSuppRate)
byHighSuppRate = byRate;
*pwSuppRate |= (1<<RATEwGetRateIdx(byRate));
}
if ((pItemExtRates != NULL) && (pItemExtRates->byElementID == WLAN_EID_EXTSUPP_RATES) &&
(pDevice->byBBType != BB_TYPE_11B)) {
unsigned int uExtRateLen = pItemExtRates->len;
if (uExtRateLen > WLAN_RATES_MAXLEN)
uExtRateLen = WLAN_RATES_MAXLEN;
for (ii = 0; ii < uExtRateLen ; ii++) {
byRate = (BYTE)(pItemExtRates->abyRates[ii]);
// select highest basic rate
if (WLAN_MGMT_IS_BASICRATE(pItemExtRates->abyRates[ii])) {
// Add to basic rate set, update pDevice->byTopCCKBasicRate and pDevice->byTopOFDMBasicRate
CARDbAddBasicRate((void *)pDevice, RATEwGetRateIdx(byRate));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ParseMaxRate AddBasicRate: %d\n", RATEwGetRateIdx(byRate));
}
byRate = (BYTE)(pItemExtRates->abyRates[ii]&0x7F);
if (byHighSuppRate == 0)
byHighSuppRate = byRate;
if (byRate > byHighSuppRate)
byHighSuppRate = byRate;
*pwSuppRate |= (1<<RATEwGetRateIdx(byRate));
//DBG_PRN_GRP09(("ParseMaxRate : HighSuppRate: %d, %X\n", RATEwGetRateIdx(byRate), byRate));
}
} //if(pItemExtRates != NULL)
if ((pDevice->byPacketType == PK_TYPE_11GB)
&& CARDbIsOFDMinBasicRate((void *)pDevice)) {
pDevice->byPacketType = PK_TYPE_11GA;
}
*pbyTopCCKRate = pDevice->byTopCCKBasicRate;
*pbyTopOFDMRate = pDevice->byTopOFDMBasicRate;
*pwMaxSuppRate = RATEwGetRateIdx(byHighSuppRate);
if ((pDevice->byPacketType==PK_TYPE_11B) || (pDevice->byPacketType==PK_TYPE_11GB))
*pwMaxBasicRate = pDevice->byTopCCKBasicRate;
else
*pwMaxBasicRate = pDevice->byTopOFDMBasicRate;
if (wOldBasicRate != pDevice->wBasicRate)
CARDvSetRSPINF((void *)pDevice, pDevice->byBBType);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Exit ParseMaxRate\n");
}
/*+
*
* Routine Description:
* Rate fallback algorithm implementation
*
* Parameters:
* In:
* pDevice - Pointer to the adapter
* psNodeDBTable - Pointer to Node Data Base
* Out:
* none
*
* Return Value: none
*
-*/
#define AUTORATE_TXCNT_THRESHOLD 20
#define AUTORATE_INC_THRESHOLD 30
void
RATEvTxRateFallBack(
void *pDeviceHandler,
PKnownNodeDB psNodeDBTable
)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
WORD wIdxDownRate = 0;
unsigned int ii;
BOOL bAutoRate[MAX_RATE] = {TRUE,TRUE,TRUE,TRUE,FALSE,FALSE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE};
DWORD dwThroughputTbl[MAX_RATE] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
DWORD dwThroughput = 0;
WORD wIdxUpRate = 0;
DWORD dwTxDiff = 0;
if (pMgmt->eScanState != WMAC_NO_SCANNING) {
// Don't do Fallback when scanning Channel
return;
}
psNodeDBTable->uTimeCount ++;
if (psNodeDBTable->uTxFail[MAX_RATE] > psNodeDBTable->uTxOk[MAX_RATE])
dwTxDiff = psNodeDBTable->uTxFail[MAX_RATE] - psNodeDBTable->uTxOk[MAX_RATE];
if ((psNodeDBTable->uTxOk[MAX_RATE] < AUTORATE_TXOK_CNT) &&
(dwTxDiff < AUTORATE_TXFAIL_CNT) &&
(psNodeDBTable->uTimeCount < AUTORATE_TIMEOUT)) {
return;
}
if (psNodeDBTable->uTimeCount >= AUTORATE_TIMEOUT) {
psNodeDBTable->uTimeCount = 0;
}
for (ii = 0; ii < MAX_RATE; ii++) {
if (psNodeDBTable->wSuppRate & (0x0001<<ii)) {
if (bAutoRate[ii] == TRUE) {
wIdxUpRate = (WORD) ii;
}
} else {
bAutoRate[ii] = FALSE;
}
}
for (ii = 0; ii <= psNodeDBTable->wTxDataRate; ii++) {
if ( (psNodeDBTable->uTxOk[ii] != 0) ||
(psNodeDBTable->uTxFail[ii] != 0) ) {
dwThroughputTbl[ii] *= psNodeDBTable->uTxOk[ii];
if (ii < RATE_11M) {
psNodeDBTable->uTxFail[ii] *= 4;
}
dwThroughputTbl[ii] /= (psNodeDBTable->uTxOk[ii] + psNodeDBTable->uTxFail[ii]);
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate %d,Ok: %d, Fail:%d, Throughput:%d\n",
ii, (int)psNodeDBTable->uTxOk[ii], (int)psNodeDBTable->uTxFail[ii], (int)dwThroughputTbl[ii]);
}
dwThroughput = dwThroughputTbl[psNodeDBTable->wTxDataRate];
wIdxDownRate = psNodeDBTable->wTxDataRate;
for (ii = psNodeDBTable->wTxDataRate; ii > 0;) {
ii--;
if ( (dwThroughputTbl[ii] > dwThroughput) &&
(bAutoRate[ii]==TRUE) ) {
dwThroughput = dwThroughputTbl[ii];
wIdxDownRate = (WORD) ii;
}
}
psNodeDBTable->wTxDataRate = wIdxDownRate;
if (psNodeDBTable->uTxOk[MAX_RATE]) {
if (psNodeDBTable->uTxOk[MAX_RATE] >
(psNodeDBTable->uTxFail[MAX_RATE] * 4) ) {
psNodeDBTable->wTxDataRate = wIdxUpRate;
}
} else { // adhoc, if uTxOk(total) == 0 && uTxFail(total) == 0
if (psNodeDBTable->uTxFail[MAX_RATE] == 0)
psNodeDBTable->wTxDataRate = wIdxUpRate;
}
if (pDevice->byBBType == BB_TYPE_11A) {
if (psNodeDBTable->wTxDataRate <= RATE_11M)
psNodeDBTable->wTxDataRate = RATE_6M;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"uTxOk[MAX_RATE] %d, uTxFail[MAX_RATE]:%d\n",(int)psNodeDBTable->uTxOk[MAX_RATE], (int)psNodeDBTable->uTxFail[MAX_RATE]);
s_vResetCounter(psNodeDBTable);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Rate: %d, U:%d, D:%d\n", (int)psNodeDBTable->wTxDataRate, (int)wIdxUpRate, (int)wIdxDownRate);
return;
}
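/*
 * The fallback heuristic in brief: each rate's nominal throughput (the
 * dwThroughputTbl[] constants, in units of 100 kbit/s) is scaled by its
 * observed success ratio uTxOk / (uTxOk + uTxFail), with failures below
 * 11M weighted four times, and the best-scoring rate at or below the
 * current one is chosen; if total successes exceed four times the total
 * failures, the rate is instead bumped up to the highest mutually
 * supported auto-rate.
 */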
/*+
*
* Description:
* This routine is used to assemble available Rate IE.
*
* Parameters:
* In:
* pDevice
* Out:
*
* Return Value: None
*
-*/
BYTE
RATEuSetIE (
PWLAN_IE_SUPP_RATES pSrcRates,
PWLAN_IE_SUPP_RATES pDstRates,
unsigned int uRateLen
)
{
unsigned int ii, uu, uRateCnt = 0;
if ((pSrcRates == NULL) || (pDstRates == NULL))
return 0;
if (pSrcRates->len == 0)
return 0;
for (ii = 0; ii < uRateLen; ii++) {
for (uu = 0; uu < pSrcRates->len; uu++) {
if ((pSrcRates->abyRates[uu] & 0x7F) == acbyIERate[ii]) {
pDstRates->abyRates[uRateCnt ++] = pSrcRates->abyRates[uu];
break;
}
}
}
return (BYTE)uRateCnt;
}
| gpl-2.0 |
shakalaca/ASUS_ZenFone_A500KL | kernel/drivers/leds/ledtrig-default-on.c | 9797 | 1071 | /*
* LED Kernel Default ON Trigger
*
* Copyright 2008 Nick Forbes <nick.forbes@incepta.com>
*
* Based on Richard Purdie's ledtrig-timer.c.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/leds.h>
#include "leds.h"
static void defon_trig_activate(struct led_classdev *led_cdev)
{
led_set_brightness(led_cdev, led_cdev->max_brightness);
}
static struct led_trigger defon_led_trigger = {
.name = "default-on",
.activate = defon_trig_activate,
};
static int __init defon_trig_init(void)
{
return led_trigger_register(&defon_led_trigger);
}
static void __exit defon_trig_exit(void)
{
led_trigger_unregister(&defon_led_trigger);
}
module_init(defon_trig_init);
module_exit(defon_trig_exit);
MODULE_AUTHOR("Nick Forbes <nick.forbes@incepta.com>");
MODULE_DESCRIPTION("Default-ON LED trigger");
MODULE_LICENSE("GPL");
| gpl-2.0 |
MozOpenHard/linux-rockchip | drivers/ide/ide-cd_ioctl.c | 11845 | 11821 | /*
* cdrom.c IOCTLs handling for ide-cd driver.
*
* Copyright (C) 1994-1996 Scott Snyder <snyder@fnald0.fnal.gov>
* Copyright (C) 1996-1998 Erik Andersen <andersee@debian.org>
* Copyright (C) 1998-2000 Jens Axboe <axboe@suse.de>
*/
#include <linux/kernel.h>
#include <linux/cdrom.h>
#include <linux/gfp.h>
#include <linux/ide.h>
#include <scsi/scsi.h>
#include "ide-cd.h"
/****************************************************************************
* Other driver requests (open, close, check media change).
*/
int ide_cdrom_open_real(struct cdrom_device_info *cdi, int purpose)
{
return 0;
}
/*
* Close down the device. Invalidate all cached blocks.
*/
void ide_cdrom_release_real(struct cdrom_device_info *cdi)
{
ide_drive_t *drive = cdi->handle;
if (!cdi->use_count)
drive->atapi_flags &= ~IDE_AFLAG_TOC_VALID;
}
/*
* add logic to try GET_EVENT command first to check for media and tray
* status. this should be supported by newer cd-r/w and all DVD etc
* drives
*/
int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr)
{
ide_drive_t *drive = cdi->handle;
struct media_event_desc med;
struct request_sense sense;
int stat;
if (slot_nr != CDSL_CURRENT)
return -EINVAL;
stat = cdrom_check_status(drive, &sense);
if (!stat || sense.sense_key == UNIT_ATTENTION)
return CDS_DISC_OK;
if (!cdrom_get_media_event(cdi, &med)) {
if (med.media_present)
return CDS_DISC_OK;
else if (med.door_open)
return CDS_TRAY_OPEN;
else
return CDS_NO_DISC;
}
if (sense.sense_key == NOT_READY && sense.asc == 0x04
&& sense.ascq == 0x04)
return CDS_DISC_OK;
/*
* If not using Mt Fuji extended media tray reports,
* just return TRAY_OPEN since ATAPI doesn't provide
* any other way to detect this...
*/
if (sense.sense_key == NOT_READY) {
if (sense.asc == 0x3a && sense.ascq == 1)
return CDS_NO_DISC;
else
return CDS_TRAY_OPEN;
}
return CDS_DRIVE_NOT_READY;
}
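/*
 * Status is derived in two steps: a TEST UNIT READY via
 * cdrom_check_status() first, then -- on drives that support it -- a GET
 * EVENT STATUS NOTIFICATION query for media/tray state; the NOT READY
 * sense fallback (asc 0x3a meaning medium not present) is only consulted
 * when Mt Fuji event reporting is unavailable.
 */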
/*
* ide-cd always generates media changed event if media is missing, which
* makes it impossible to use for proper event reporting, so disk->events
* is cleared to 0 and the following function is used only to trigger
* revalidation and never propagated to userland.
*/
unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi,
unsigned int clearing, int slot_nr)
{
ide_drive_t *drive = cdi->handle;
int retval;
if (slot_nr == CDSL_CURRENT) {
(void) cdrom_check_status(drive, NULL);
retval = (drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED) ? 1 : 0;
drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
return retval ? DISK_EVENT_MEDIA_CHANGE : 0;
} else {
return 0;
}
}
/* Eject the disk if EJECTFLAG is 0.
If EJECTFLAG is 1, try to reload the disk. */
static
int cdrom_eject(ide_drive_t *drive, int ejectflag,
struct request_sense *sense)
{
struct cdrom_info *cd = drive->driver_data;
struct cdrom_device_info *cdi = &cd->devinfo;
char loej = 0x02;
unsigned char cmd[BLK_MAX_CDB];
if ((drive->atapi_flags & IDE_AFLAG_NO_EJECT) && !ejectflag)
return -EDRIVE_CANT_DO_THIS;
/* reload fails on some drives, if the tray is locked */
if ((drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED) && ejectflag)
return 0;
/* only tell drive to close tray if open, if it can do that */
if (ejectflag && (cdi->mask & CDC_CLOSE_TRAY))
loej = 0;
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_START_STOP_UNIT;
cmd[4] = loej | (ejectflag != 0);
return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, 0);
}
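/*
 * In byte 4 of START STOP UNIT, bit 0 is Start and bit 1 is LoEj: with
 * LoEj set, Start=1 loads the tray and Start=0 ejects it. loej is
 * cleared when the drive cannot close its tray, so a reload request
 * degrades to a plain start.
 */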
/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
static
int ide_cd_lockdoor(ide_drive_t *drive, int lockflag,
struct request_sense *sense)
{
struct request_sense my_sense;
int stat;
if (sense == NULL)
sense = &my_sense;
/* If the drive cannot lock the door, just pretend. */
if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0) {
stat = 0;
} else {
unsigned char cmd[BLK_MAX_CDB];
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
cmd[4] = lockflag ? 1 : 0;
stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL,
sense, 0, 0);
}
/* If we got an illegal field error, the drive
probably cannot lock the door. */
if (stat != 0 &&
sense->sense_key == ILLEGAL_REQUEST &&
(sense->asc == 0x24 || sense->asc == 0x20)) {
printk(KERN_ERR "%s: door locking not supported\n",
drive->name);
drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
stat = 0;
}
/* no medium, that's alright. */
if (stat != 0 && sense->sense_key == NOT_READY && sense->asc == 0x3a)
stat = 0;
if (stat == 0) {
if (lockflag)
drive->atapi_flags |= IDE_AFLAG_DOOR_LOCKED;
else
drive->atapi_flags &= ~IDE_AFLAG_DOOR_LOCKED;
}
return stat;
}
int ide_cdrom_tray_move(struct cdrom_device_info *cdi, int position)
{
ide_drive_t *drive = cdi->handle;
struct request_sense sense;
if (position) {
int stat = ide_cd_lockdoor(drive, 0, &sense);
if (stat)
return stat;
}
return cdrom_eject(drive, !position, &sense);
}
int ide_cdrom_lock_door(struct cdrom_device_info *cdi, int lock)
{
ide_drive_t *drive = cdi->handle;
return ide_cd_lockdoor(drive, lock, NULL);
}
/*
* ATAPI devices are free to select the speed you request or any slower
* rate. :-( Requesting too fast a speed will _not_ produce an error.
*/
int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed)
{
ide_drive_t *drive = cdi->handle;
struct cdrom_info *cd = drive->driver_data;
struct request_sense sense;
u8 buf[ATAPI_CAPABILITIES_PAGE_SIZE];
int stat;
unsigned char cmd[BLK_MAX_CDB];
if (speed == 0)
speed = 0xffff; /* set to max */
else
speed *= 177; /* Nx to kbytes/s */
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_SET_SPEED;
/* Read Drive speed in kbytes/second MSB/LSB */
cmd[2] = (speed >> 8) & 0xff;
cmd[3] = speed & 0xff;
if ((cdi->mask & (CDC_CD_R | CDC_CD_RW | CDC_DVD_R)) !=
(CDC_CD_R | CDC_CD_RW | CDC_DVD_R)) {
/* Write Drive speed in kbytes/second MSB/LSB */
cmd[4] = (speed >> 8) & 0xff;
cmd[5] = speed & 0xff;
}
stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
if (stat)
return stat;
if (!ide_cdrom_get_capabilities(drive, buf)) {
ide_cdrom_update_speed(drive, buf);
cdi->speed = cd->current_speed;
}
return 0;
}
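/*
 * Worked example of the conversion above: 1x CD speed is 75 frames/s
 * * 2352 bytes = 176400 bytes/s, i.e. roughly 177 kbytes/s, so a 4x
 * request becomes 4 * 177 = 708, encoded big-endian as cmd[2] = 0x02
 * and cmd[3] = 0xc4.
 */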
int ide_cdrom_get_last_session(struct cdrom_device_info *cdi,
struct cdrom_multisession *ms_info)
{
struct atapi_toc *toc;
ide_drive_t *drive = cdi->handle;
struct cdrom_info *info = drive->driver_data;
struct request_sense sense;
int ret;
if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0 || !info->toc) {
ret = ide_cd_read_toc(drive, &sense);
if (ret)
return ret;
}
toc = info->toc;
ms_info->addr.lba = toc->last_session_lba;
ms_info->xa_flag = toc->xa_flag;
return 0;
}
int ide_cdrom_get_mcn(struct cdrom_device_info *cdi,
struct cdrom_mcn *mcn_info)
{
ide_drive_t *drive = cdi->handle;
int stat, mcnlen;
char buf[24];
unsigned char cmd[BLK_MAX_CDB];
unsigned len = sizeof(buf);
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_READ_SUBCHANNEL;
cmd[1] = 2; /* MSF addressing */
cmd[2] = 0x40; /* request subQ data */
cmd[3] = 2; /* format */
cmd[8] = len;
stat = ide_cd_queue_pc(drive, cmd, 0, buf, &len, NULL, 0, 0);
if (stat)
return stat;
mcnlen = sizeof(mcn_info->medium_catalog_number) - 1;
memcpy(mcn_info->medium_catalog_number, buf + 9, mcnlen);
mcn_info->medium_catalog_number[mcnlen] = '\0';
return 0;
}
int ide_cdrom_reset(struct cdrom_device_info *cdi)
{
ide_drive_t *drive = cdi->handle;
struct cdrom_info *cd = drive->driver_data;
struct request_sense sense;
struct request *rq;
int ret;
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->cmd_flags = REQ_QUIET;
ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
blk_put_request(rq);
/*
* A reset will unlock the door. If it was previously locked,
* lock it again.
*/
if (drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED)
(void)ide_cd_lockdoor(drive, 1, &sense);
return ret;
}
static int ide_cd_get_toc_entry(ide_drive_t *drive, int track,
struct atapi_toc_entry **ent)
{
struct cdrom_info *info = drive->driver_data;
struct atapi_toc *toc = info->toc;
int ntracks;
/*
* don't serve cached data, if the toc isn't valid
*/
if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0)
return -EINVAL;
/* Check validity of requested track number. */
ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
if (toc->hdr.first_track == CDROM_LEADOUT)
ntracks = 0;
if (track == CDROM_LEADOUT)
*ent = &toc->ent[ntracks];
else if (track < toc->hdr.first_track || track > toc->hdr.last_track)
return -EINVAL;
else
*ent = &toc->ent[track - toc->hdr.first_track];
return 0;
}
static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg)
{
struct cdrom_ti *ti = arg;
struct atapi_toc_entry *first_toc, *last_toc;
unsigned long lba_start, lba_end;
int stat;
struct request_sense sense;
unsigned char cmd[BLK_MAX_CDB];
stat = ide_cd_get_toc_entry(drive, ti->cdti_trk0, &first_toc);
if (stat)
return stat;
stat = ide_cd_get_toc_entry(drive, ti->cdti_trk1, &last_toc);
if (stat)
return stat;
if (ti->cdti_trk1 != CDROM_LEADOUT)
++last_toc;
lba_start = first_toc->addr.lba;
lba_end = last_toc->addr.lba;
if (lba_end <= lba_start)
return -EINVAL;
memset(cmd, 0, BLK_MAX_CDB);
cmd[0] = GPCMD_PLAY_AUDIO_MSF;
lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]);
lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]);
return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0);
}
static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg)
{
struct cdrom_info *cd = drive->driver_data;
struct cdrom_tochdr *tochdr = arg;
struct atapi_toc *toc;
int stat;
/* Make sure our saved TOC is valid. */
stat = ide_cd_read_toc(drive, NULL);
if (stat)
return stat;
toc = cd->toc;
tochdr->cdth_trk0 = toc->hdr.first_track;
tochdr->cdth_trk1 = toc->hdr.last_track;
return 0;
}
static int ide_cd_read_tocentry(ide_drive_t *drive, void *arg)
{
struct cdrom_tocentry *tocentry = arg;
struct atapi_toc_entry *toce;
int stat;
stat = ide_cd_get_toc_entry(drive, tocentry->cdte_track, &toce);
if (stat)
return stat;
tocentry->cdte_ctrl = toce->control;
tocentry->cdte_adr = toce->adr;
if (tocentry->cdte_format == CDROM_MSF) {
lba_to_msf(toce->addr.lba,
&tocentry->cdte_addr.msf.minute,
&tocentry->cdte_addr.msf.second,
&tocentry->cdte_addr.msf.frame);
} else
tocentry->cdte_addr.lba = toce->addr.lba;
return 0;
}
int ide_cdrom_audio_ioctl(struct cdrom_device_info *cdi,
unsigned int cmd, void *arg)
{
ide_drive_t *drive = cdi->handle;
switch (cmd) {
/*
* emulate PLAY_AUDIO_TI command with PLAY_AUDIO_10, since
* atapi doesn't support it
*/
case CDROMPLAYTRKIND:
return ide_cd_fake_play_trkind(drive, arg);
case CDROMREADTOCHDR:
return ide_cd_read_tochdr(drive, arg);
case CDROMREADTOCENTRY:
return ide_cd_read_tocentry(drive, arg);
default:
return -EINVAL;
}
}
/* the generic packet interface to cdrom.c */
int ide_cdrom_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
ide_drive_t *drive = cdi->handle;
unsigned int flags = 0;
unsigned len = cgc->buflen;
if (cgc->timeout <= 0)
cgc->timeout = ATAPI_WAIT_PC;
/* Here we queue the commands from the uniform CD-ROM
layer. The packet must be complete, as we do not
touch it at all. */
if (cgc->data_direction == CGC_DATA_WRITE)
flags |= REQ_WRITE;
if (cgc->sense)
memset(cgc->sense, 0, sizeof(struct request_sense));
if (cgc->quiet)
flags |= REQ_QUIET;
cgc->stat = ide_cd_queue_pc(drive, cgc->cmd,
cgc->data_direction == CGC_DATA_WRITE,
cgc->buffer, &len,
cgc->sense, cgc->timeout, flags);
if (!cgc->stat)
cgc->buflen -= len;
return cgc->stat;
}
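/*
 * A minimal sketch of a caller (hypothetical, not part of this file)
 * showing how a struct packet_command is expected to be filled in
 * before it reaches ide_cdrom_packet():
 *
 *	struct packet_command cgc;
 *
 *	memset(&cgc, 0, sizeof(cgc));
 *	cgc.cmd[0] = GPCMD_TEST_UNIT_READY;
 *	cgc.buffer = NULL;
 *	cgc.buflen = 0;
 *	cgc.data_direction = CGC_DATA_NONE;
 *	cgc.quiet = 1;
 *	err = ide_cdrom_packet(cdi, &cgc);
 */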
| gpl-2.0 |
mdxy2010/forlinux-ok6410 | kernel/drivers/video/geode/lxfb_ops.c | 13125 | 20993 | /* Geode LX framebuffer driver
*
* Copyright (C) 2006-2007, Advanced Micro Devices,Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/cs5535.h>
#include "lxfb.h"
/* TODO
* Support panel scaling
* Add acceleration
* Add support for interlacing (TV out)
* Support compression
*/
/* This is the complete list of PLL frequencies that we can set -
* we will choose the closest match to the incoming clock.
* freq is the frequency of the dotclock * 1000 (for example,
* 24923 = 24.923 MHz).
* pllval is the corresponding PLL value
*/
static const struct {
unsigned int pllval;
unsigned int freq;
} pll_table[] = {
{ 0x000131AC, 6231 },
{ 0x0001215D, 6294 },
{ 0x00011087, 6750 },
{ 0x0001216C, 7081 },
{ 0x0001218D, 7140 },
{ 0x000110C9, 7800 },
{ 0x00013147, 7875 },
{ 0x000110A7, 8258 },
{ 0x00012159, 8778 },
{ 0x00014249, 8875 },
{ 0x00010057, 9000 },
{ 0x0001219A, 9472 },
{ 0x00012158, 9792 },
{ 0x00010045, 10000 },
{ 0x00010089, 10791 },
{ 0x000110E7, 11225 },
{ 0x00012136, 11430 },
{ 0x00013207, 12375 },
{ 0x00012187, 12500 },
{ 0x00014286, 14063 },
{ 0x000110E5, 15016 },
{ 0x00014214, 16250 },
{ 0x00011105, 17045 },
{ 0x000131E4, 18563 },
{ 0x00013183, 18750 },
{ 0x00014284, 19688 },
{ 0x00011104, 20400 },
{ 0x00016363, 23625 },
{ 0x000031AC, 24923 },
{ 0x0000215D, 25175 },
{ 0x00001087, 27000 },
{ 0x0000216C, 28322 },
{ 0x0000218D, 28560 },
{ 0x000010C9, 31200 },
{ 0x00003147, 31500 },
{ 0x000010A7, 33032 },
{ 0x00002159, 35112 },
{ 0x00004249, 35500 },
{ 0x00000057, 36000 },
{ 0x0000219A, 37889 },
{ 0x00002158, 39168 },
{ 0x00000045, 40000 },
{ 0x00000089, 43163 },
{ 0x000010E7, 44900 },
{ 0x00002136, 45720 },
{ 0x00003207, 49500 },
{ 0x00002187, 50000 },
{ 0x00004286, 56250 },
{ 0x000010E5, 60065 },
{ 0x00004214, 65000 },
{ 0x00001105, 68179 },
{ 0x000031E4, 74250 },
{ 0x00003183, 75000 },
{ 0x00004284, 78750 },
{ 0x00001104, 81600 },
{ 0x00006363, 94500 },
{ 0x00005303, 97520 },
{ 0x00002183, 100187 },
{ 0x00002122, 101420 },
{ 0x00001081, 108000 },
{ 0x00006201, 113310 },
{ 0x00000041, 119650 },
{ 0x000041A1, 129600 },
{ 0x00002182, 133500 },
{ 0x000041B1, 135000 },
{ 0x00000051, 144000 },
{ 0x000041E1, 148500 },
{ 0x000062D1, 157500 },
{ 0x000031A1, 162000 },
{ 0x00000061, 169203 },
{ 0x00004231, 172800 },
{ 0x00002151, 175500 },
{ 0x000052E1, 189000 },
{ 0x00000071, 192000 },
{ 0x00003201, 198000 },
{ 0x00004291, 202500 },
{ 0x00001101, 204750 },
{ 0x00007481, 218250 },
{ 0x00004170, 229500 },
{ 0x00006210, 234000 },
{ 0x00003140, 251182 },
{ 0x00006250, 261000 },
{ 0x000041C0, 278400 },
{ 0x00005220, 280640 },
{ 0x00000050, 288000 },
{ 0x000041E0, 297000 },
{ 0x00002130, 320207 }
};
static void lx_set_dotpll(u32 pllval)
{
u32 dotpll_lo, dotpll_hi;
int i;
rdmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
if ((dotpll_lo & MSR_GLCP_DOTPLL_LOCK) && (dotpll_hi == pllval))
return;
dotpll_hi = pllval;
dotpll_lo &= ~(MSR_GLCP_DOTPLL_BYPASS | MSR_GLCP_DOTPLL_HALFPIX);
dotpll_lo |= MSR_GLCP_DOTPLL_DOTRESET;
wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
/* Wait 100us for the PLL to lock */
udelay(100);
/* Now, loop for the lock bit */
for (i = 0; i < 1000; i++) {
rdmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
if (dotpll_lo & MSR_GLCP_DOTPLL_LOCK)
break;
}
/* Clear the reset bit */
dotpll_lo &= ~MSR_GLCP_DOTPLL_DOTRESET;
wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
}
/* Set the clock based on the frequency specified by the current mode */
static void lx_set_clock(struct fb_info *info)
{
unsigned int diff, min, best = 0;
unsigned int freq, i;
freq = (unsigned int) (1000000000 / info->var.pixclock);
min = abs(pll_table[0].freq - freq);
for (i = 0; i < ARRAY_SIZE(pll_table); i++) {
diff = abs(pll_table[i].freq - freq);
if (diff < min) {
min = diff;
best = i;
}
}
lx_set_dotpll(pll_table[best].pllval & 0x00017FFF);
}
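/*
 * Worked example for lx_set_clock() above: the standard 640x480@60
 * mode has a commonly listed pixclock of 39722 ps, so
 * freq = 1000000000 / 39722, roughly 25175 (25.175 MHz), whose
 * closest pll_table entry is { 0x0000215D, 25175 }.
 */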
static void lx_graphics_disable(struct fb_info *info)
{
struct lxfb_par *par = info->par;
unsigned int val, gcfg;
/* Note: This assumes that the video is in a quiet state */
write_vp(par, VP_A1T, 0);
write_vp(par, VP_A2T, 0);
write_vp(par, VP_A3T, 0);
/* Turn off the VGA and video enable */
val = read_dc(par, DC_GENERAL_CFG) & ~(DC_GENERAL_CFG_VGAE |
DC_GENERAL_CFG_VIDE);
write_dc(par, DC_GENERAL_CFG, val);
val = read_vp(par, VP_VCFG) & ~VP_VCFG_VID_EN;
write_vp(par, VP_VCFG, val);
write_dc(par, DC_IRQ, DC_IRQ_MASK | DC_IRQ_VIP_VSYNC_LOSS_IRQ_MASK |
DC_IRQ_STATUS | DC_IRQ_VIP_VSYNC_IRQ_STATUS);
val = read_dc(par, DC_GENLK_CTL) & ~DC_GENLK_CTL_GENLK_EN;
write_dc(par, DC_GENLK_CTL, val);
val = read_dc(par, DC_CLR_KEY);
write_dc(par, DC_CLR_KEY, val & ~DC_CLR_KEY_CLR_KEY_EN);
/* turn off the panel */
write_fp(par, FP_PM, read_fp(par, FP_PM) & ~FP_PM_P);
val = read_vp(par, VP_MISC) | VP_MISC_DACPWRDN;
write_vp(par, VP_MISC, val);
/* Turn off the display */
val = read_vp(par, VP_DCFG);
write_vp(par, VP_DCFG, val & ~(VP_DCFG_CRT_EN | VP_DCFG_HSYNC_EN |
VP_DCFG_VSYNC_EN | VP_DCFG_DAC_BL_EN));
gcfg = read_dc(par, DC_GENERAL_CFG);
gcfg &= ~(DC_GENERAL_CFG_CMPE | DC_GENERAL_CFG_DECE);
write_dc(par, DC_GENERAL_CFG, gcfg);
/* Turn off the TGEN */
val = read_dc(par, DC_DISPLAY_CFG);
val &= ~DC_DISPLAY_CFG_TGEN;
write_dc(par, DC_DISPLAY_CFG, val);
/* Wait 1000 usecs to ensure that the TGEN is clear */
udelay(1000);
/* Turn off the FIFO loader */
gcfg &= ~DC_GENERAL_CFG_DFLE;
write_dc(par, DC_GENERAL_CFG, gcfg);
/* Lastly, wait for the GP to go idle */
do {
val = read_gp(par, GP_BLT_STATUS);
} while ((val & GP_BLT_STATUS_PB) || !(val & GP_BLT_STATUS_CE));
}
static void lx_graphics_enable(struct fb_info *info)
{
struct lxfb_par *par = info->par;
u32 temp, config;
/* Set the video request register */
write_vp(par, VP_VRR, 0);
/* Set up the polarities */
config = read_vp(par, VP_DCFG);
config &= ~(VP_DCFG_CRT_SYNC_SKW | VP_DCFG_PWR_SEQ_DELAY |
VP_DCFG_CRT_HSYNC_POL | VP_DCFG_CRT_VSYNC_POL);
config |= (VP_DCFG_CRT_SYNC_SKW_DEFAULT | VP_DCFG_PWR_SEQ_DELAY_DEFAULT
| VP_DCFG_GV_GAM);
if (info->var.sync & FB_SYNC_HOR_HIGH_ACT)
config |= VP_DCFG_CRT_HSYNC_POL;
if (info->var.sync & FB_SYNC_VERT_HIGH_ACT)
config |= VP_DCFG_CRT_VSYNC_POL;
if (par->output & OUTPUT_PANEL) {
u32 msrlo, msrhi;
write_fp(par, FP_PT1, 0);
temp = FP_PT2_SCRC;
if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
temp |= FP_PT2_HSP;
if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT))
temp |= FP_PT2_VSP;
write_fp(par, FP_PT2, temp);
write_fp(par, FP_DFC, FP_DFC_BC);
msrlo = MSR_LX_MSR_PADSEL_TFT_SEL_LOW;
msrhi = MSR_LX_MSR_PADSEL_TFT_SEL_HIGH;
wrmsr(MSR_LX_MSR_PADSEL, msrlo, msrhi);
}
if (par->output & OUTPUT_CRT) {
config |= VP_DCFG_CRT_EN | VP_DCFG_HSYNC_EN |
VP_DCFG_VSYNC_EN | VP_DCFG_DAC_BL_EN;
}
write_vp(par, VP_DCFG, config);
/* Turn the CRT dacs back on */
if (par->output & OUTPUT_CRT) {
temp = read_vp(par, VP_MISC);
temp &= ~(VP_MISC_DACPWRDN | VP_MISC_APWRDN);
write_vp(par, VP_MISC, temp);
}
/* Turn the panel on (if it isn't already) */
if (par->output & OUTPUT_PANEL)
write_fp(par, FP_PM, read_fp(par, FP_PM) | FP_PM_P);
}
unsigned int lx_framebuffer_size(void)
{
unsigned int val;
if (!cs5535_has_vsa2()) {
uint32_t hi, lo;
/* The number of pages is (PMAX - PMIN)+1 */
rdmsr(MSR_GLIU_P2D_RO0, lo, hi);
/* PMAX */
val = ((hi & 0xff) << 12) | ((lo & 0xfff00000) >> 20);
/* PMIN */
val -= (lo & 0x000fffff);
val += 1;
/* The page size is 4k */
return (val << 12);
}
/* The frame buffer size is reported by a VSM in VSA II */
/* Virtual Register Class = 0x02 */
/* VG_MEM_SIZE (1MB units) = 0x00 */
outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
outw(VSA_VR_MEM_SIZE, VSA_VRC_INDEX);
val = (unsigned int)(inw(VSA_VRC_DATA)) & 0xFE;
return (val << 20);
}
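/*
 * Worked example for the non-VSA2 path above, with hypothetical MSR
 * contents: PMIN = 0x3e000 and PMAX = 0x3ffff describe
 * 0x3ffff - 0x3e000 + 1 = 0x2000 pages of 4k each, so the function
 * returns 0x2000 << 12 = 32 MiB of framebuffer.
 */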
void lx_set_mode(struct fb_info *info)
{
struct lxfb_par *par = info->par;
u64 msrval;
unsigned int max, dv, val, size;
unsigned int gcfg, dcfg;
int hactive, hblankstart, hsyncstart, hsyncend, hblankend, htotal;
int vactive, vblankstart, vsyncstart, vsyncend, vblankend, vtotal;
/* Unlock the DC registers */
write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
lx_graphics_disable(info);
lx_set_clock(info);
/* Set output mode */
rdmsrl(MSR_LX_GLD_MSR_CONFIG, msrval);
msrval &= ~MSR_LX_GLD_MSR_CONFIG_FMT;
if (par->output & OUTPUT_PANEL) {
msrval |= MSR_LX_GLD_MSR_CONFIG_FMT_FP;
if (par->output & OUTPUT_CRT)
msrval |= MSR_LX_GLD_MSR_CONFIG_FPC;
else
msrval &= ~MSR_LX_GLD_MSR_CONFIG_FPC;
} else
msrval |= MSR_LX_GLD_MSR_CONFIG_FMT_CRT;
wrmsrl(MSR_LX_GLD_MSR_CONFIG, msrval);
/* Clear the various buffers */
/* FIXME: Adjust for panning here */
write_dc(par, DC_FB_ST_OFFSET, 0);
write_dc(par, DC_CB_ST_OFFSET, 0);
write_dc(par, DC_CURS_ST_OFFSET, 0);
/* FIXME: Add support for interlacing */
/* FIXME: Add support for scaling */
val = read_dc(par, DC_GENLK_CTL);
val &= ~(DC_GENLK_CTL_ALPHA_FLICK_EN | DC_GENLK_CTL_FLICK_EN |
DC_GENLK_CTL_FLICK_SEL_MASK);
/* Default scaling params */
write_dc(par, DC_GFX_SCALE, (0x4000 << 16) | 0x4000);
write_dc(par, DC_IRQ_FILT_CTL, 0);
write_dc(par, DC_GENLK_CTL, val);
/* FIXME: Support compression */
if (info->fix.line_length > 4096)
dv = DC_DV_CTL_DV_LINE_SIZE_8K;
else if (info->fix.line_length > 2048)
dv = DC_DV_CTL_DV_LINE_SIZE_4K;
else if (info->fix.line_length > 1024)
dv = DC_DV_CTL_DV_LINE_SIZE_2K;
else
dv = DC_DV_CTL_DV_LINE_SIZE_1K;
max = info->fix.line_length * info->var.yres;
max = (max + 0x3FF) & 0xFFFFFC00;
write_dc(par, DC_DV_TOP, max | DC_DV_TOP_DV_TOP_EN);
val = read_dc(par, DC_DV_CTL) & ~DC_DV_CTL_DV_LINE_SIZE;
write_dc(par, DC_DV_CTL, val | dv);
size = info->var.xres * (info->var.bits_per_pixel >> 3);
write_dc(par, DC_GFX_PITCH, info->fix.line_length >> 3);
write_dc(par, DC_LINE_SIZE, (size + 7) >> 3);
/* Set default watermark values */
rdmsrl(MSR_LX_SPARE_MSR, msrval);
msrval &= ~(MSR_LX_SPARE_MSR_DIS_CFIFO_HGO
| MSR_LX_SPARE_MSR_VFIFO_ARB_SEL
| MSR_LX_SPARE_MSR_LOAD_WM_LPEN_M
| MSR_LX_SPARE_MSR_WM_LPEN_OVRD);
msrval |= MSR_LX_SPARE_MSR_DIS_VIFO_WM |
MSR_LX_SPARE_MSR_DIS_INIT_V_PRI;
wrmsrl(MSR_LX_SPARE_MSR, msrval);
gcfg = DC_GENERAL_CFG_DFLE; /* Display fifo enable */
gcfg |= (0x6 << DC_GENERAL_CFG_DFHPSL_SHIFT) | /* default priority */
(0xb << DC_GENERAL_CFG_DFHPEL_SHIFT);
gcfg |= DC_GENERAL_CFG_FDTY; /* Set the frame dirty mode */
dcfg = DC_DISPLAY_CFG_VDEN; /* Enable video data */
dcfg |= DC_DISPLAY_CFG_GDEN; /* Enable graphics */
dcfg |= DC_DISPLAY_CFG_TGEN; /* Turn on the timing generator */
dcfg |= DC_DISPLAY_CFG_TRUP; /* Update timings immediately */
dcfg |= DC_DISPLAY_CFG_PALB; /* Palette bypass in > 8 bpp modes */
dcfg |= DC_DISPLAY_CFG_VISL;
dcfg |= DC_DISPLAY_CFG_DCEN; /* Always center the display */
/* Set the current BPP mode */
switch (info->var.bits_per_pixel) {
case 8:
dcfg |= DC_DISPLAY_CFG_DISP_MODE_8BPP;
break;
case 16:
dcfg |= DC_DISPLAY_CFG_DISP_MODE_16BPP;
break;
case 32:
case 24:
dcfg |= DC_DISPLAY_CFG_DISP_MODE_24BPP;
break;
}
/* Now - set up the timings */
hactive = info->var.xres;
hblankstart = hactive;
hsyncstart = hblankstart + info->var.right_margin;
hsyncend = hsyncstart + info->var.hsync_len;
hblankend = hsyncend + info->var.left_margin;
htotal = hblankend;
vactive = info->var.yres;
vblankstart = vactive;
vsyncstart = vblankstart + info->var.lower_margin;
vsyncend = vsyncstart + info->var.vsync_len;
vblankend = vsyncend + info->var.upper_margin;
vtotal = vblankend;
write_dc(par, DC_H_ACTIVE_TIMING, (hactive - 1) | ((htotal - 1) << 16));
write_dc(par, DC_H_BLANK_TIMING,
(hblankstart - 1) | ((hblankend - 1) << 16));
write_dc(par, DC_H_SYNC_TIMING,
(hsyncstart - 1) | ((hsyncend - 1) << 16));
write_dc(par, DC_V_ACTIVE_TIMING, (vactive - 1) | ((vtotal - 1) << 16));
write_dc(par, DC_V_BLANK_TIMING,
(vblankstart - 1) | ((vblankend - 1) << 16));
write_dc(par, DC_V_SYNC_TIMING,
(vsyncstart - 1) | ((vsyncend - 1) << 16));
write_dc(par, DC_FB_ACTIVE,
(info->var.xres - 1) << 16 | (info->var.yres - 1));
/* And re-enable the graphics output */
lx_graphics_enable(info);
/* Write the two main configuration registers */
write_dc(par, DC_DISPLAY_CFG, dcfg);
write_dc(par, DC_ARB_CFG, 0);
write_dc(par, DC_GENERAL_CFG, gcfg);
/* Lock the DC registers */
write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
}
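/*
 * Worked example of the timing register packing above: a 640x480
 * mode with a right margin (front porch) of 16, an hsync_len of 96
 * and a left margin (back porch) of 48 gives
 * htotal = 640 + 16 + 96 + 48 = 800, so DC_H_ACTIVE_TIMING is
 * written as (640 - 1) | ((800 - 1) << 16).
 */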
void lx_set_palette_reg(struct fb_info *info, unsigned regno,
unsigned red, unsigned green, unsigned blue)
{
struct lxfb_par *par = info->par;
int val;
/* Hardware palette is in RGB 8-8-8 format. */
val = (red << 8) & 0xff0000;
val |= (green) & 0x00ff00;
val |= (blue >> 8) & 0x0000ff;
write_dc(par, DC_PAL_ADDRESS, regno);
write_dc(par, DC_PAL_DATA, val);
}
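/*
 * Example of the packing above: fbdev supplies 16-bit colour
 * components, so full-intensity white (0xffff, 0xffff, 0xffff)
 * becomes 0xff0000 | 0x00ff00 | 0x0000ff = 0xffffff in the 8-8-8
 * hardware palette.
 */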
int lx_blank_display(struct fb_info *info, int blank_mode)
{
struct lxfb_par *par = info->par;
u32 dcfg, misc, fp_pm;
int blank, hsync, vsync;
/* CRT power saving modes. */
switch (blank_mode) {
case FB_BLANK_UNBLANK:
blank = 0; hsync = 1; vsync = 1;
break;
case FB_BLANK_NORMAL:
blank = 1; hsync = 1; vsync = 1;
break;
case FB_BLANK_VSYNC_SUSPEND:
blank = 1; hsync = 1; vsync = 0;
break;
case FB_BLANK_HSYNC_SUSPEND:
blank = 1; hsync = 0; vsync = 1;
break;
case FB_BLANK_POWERDOWN:
blank = 1; hsync = 0; vsync = 0;
break;
default:
return -EINVAL;
}
dcfg = read_vp(par, VP_DCFG);
dcfg &= ~(VP_DCFG_DAC_BL_EN | VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN |
VP_DCFG_CRT_EN);
if (!blank)
dcfg |= VP_DCFG_DAC_BL_EN | VP_DCFG_CRT_EN;
if (hsync)
dcfg |= VP_DCFG_HSYNC_EN;
if (vsync)
dcfg |= VP_DCFG_VSYNC_EN;
write_vp(par, VP_DCFG, dcfg);
misc = read_vp(par, VP_MISC);
if (vsync && hsync)
misc &= ~VP_MISC_DACPWRDN;
else
misc |= VP_MISC_DACPWRDN;
write_vp(par, VP_MISC, misc);
/* Power on/off flat panel */
if (par->output & OUTPUT_PANEL) {
fp_pm = read_fp(par, FP_PM);
if (blank_mode == FB_BLANK_POWERDOWN)
fp_pm &= ~FP_PM_P;
else
fp_pm |= FP_PM_P;
write_fp(par, FP_PM, fp_pm);
}
return 0;
}
#ifdef CONFIG_PM
static void lx_save_regs(struct lxfb_par *par)
{
uint32_t filt;
int i;
/* wait for the BLT engine to stop being busy */
do {
i = read_gp(par, GP_BLT_STATUS);
} while ((i & GP_BLT_STATUS_PB) || !(i & GP_BLT_STATUS_CE));
/* save MSRs */
rdmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel);
rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll);
rdmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg);
rdmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare);
write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
/* save registers */
memcpy(par->gp, par->gp_regs, sizeof(par->gp));
memcpy(par->dc, par->dc_regs, sizeof(par->dc));
memcpy(par->vp, par->vp_regs, sizeof(par->vp));
memcpy(par->fp, par->vp_regs + VP_FP_START, sizeof(par->fp));
/* save the display controller palette */
write_dc(par, DC_PAL_ADDRESS, 0);
for (i = 0; i < ARRAY_SIZE(par->dc_pal); i++)
par->dc_pal[i] = read_dc(par, DC_PAL_DATA);
/* save the video processor palette */
write_vp(par, VP_PAR, 0);
for (i = 0; i < ARRAY_SIZE(par->vp_pal); i++)
par->vp_pal[i] = read_vp(par, VP_PDR);
/* save the horizontal filter coefficients */
filt = par->dc[DC_IRQ_FILT_CTL] | DC_IRQ_FILT_CTL_H_FILT_SEL;
for (i = 0; i < ARRAY_SIZE(par->hcoeff); i += 2) {
write_dc(par, DC_IRQ_FILT_CTL, (filt & 0xffffff00) | i);
par->hcoeff[i] = read_dc(par, DC_FILT_COEFF1);
par->hcoeff[i + 1] = read_dc(par, DC_FILT_COEFF2);
}
/* save the vertical filter coefficients */
filt &= ~DC_IRQ_FILT_CTL_H_FILT_SEL;
for (i = 0; i < ARRAY_SIZE(par->vcoeff); i++) {
write_dc(par, DC_IRQ_FILT_CTL, (filt & 0xffffff00) | i);
par->vcoeff[i] = read_dc(par, DC_FILT_COEFF1);
}
/* save video coeff ram */
memcpy(par->vp_coeff, par->vp_regs + VP_VCR, sizeof(par->vp_coeff));
}
static void lx_restore_gfx_proc(struct lxfb_par *par)
{
int i;
/* a bunch of registers require GP_RASTER_MODE to be set first */
write_gp(par, GP_RASTER_MODE, par->gp[GP_RASTER_MODE]);
for (i = 0; i < ARRAY_SIZE(par->gp); i++) {
switch (i) {
case GP_RASTER_MODE:
case GP_VECTOR_MODE:
case GP_BLT_MODE:
case GP_BLT_STATUS:
case GP_HST_SRC:
/* FIXME: restore LUT data */
case GP_LUT_INDEX:
case GP_LUT_DATA:
/* don't restore these registers */
break;
default:
write_gp(par, i, par->gp[i]);
}
}
}
static void lx_restore_display_ctlr(struct lxfb_par *par)
{
uint32_t filt;
int i;
wrmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare);
for (i = 0; i < ARRAY_SIZE(par->dc); i++) {
switch (i) {
case DC_UNLOCK:
/* unlock the DC; runs first */
write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
break;
case DC_GENERAL_CFG:
case DC_DISPLAY_CFG:
/* disable all while restoring */
write_dc(par, i, 0);
break;
case DC_DV_CTL:
/* set all ram to dirty */
write_dc(par, i, par->dc[i] | DC_DV_CTL_CLEAR_DV_RAM);
break;
case DC_RSVD_1:
case DC_RSVD_2:
case DC_RSVD_3:
case DC_LINE_CNT:
case DC_PAL_ADDRESS:
case DC_PAL_DATA:
case DC_DFIFO_DIAG:
case DC_CFIFO_DIAG:
case DC_FILT_COEFF1:
case DC_FILT_COEFF2:
case DC_RSVD_4:
case DC_RSVD_5:
/* don't restore these registers */
break;
default:
write_dc(par, i, par->dc[i]);
}
}
/* restore the palette */
write_dc(par, DC_PAL_ADDRESS, 0);
for (i = 0; i < ARRAY_SIZE(par->dc_pal); i++)
write_dc(par, DC_PAL_DATA, par->dc_pal[i]);
/* restore the horizontal filter coefficients */
filt = par->dc[DC_IRQ_FILT_CTL] | DC_IRQ_FILT_CTL_H_FILT_SEL;
for (i = 0; i < ARRAY_SIZE(par->hcoeff); i += 2) {
write_dc(par, DC_IRQ_FILT_CTL, (filt & 0xffffff00) | i);
write_dc(par, DC_FILT_COEFF1, par->hcoeff[i]);
write_dc(par, DC_FILT_COEFF2, par->hcoeff[i + 1]);
}
/* restore the vertical filter coefficients */
filt &= ~DC_IRQ_FILT_CTL_H_FILT_SEL;
for (i = 0; i < ARRAY_SIZE(par->vcoeff); i++) {
write_dc(par, DC_IRQ_FILT_CTL, (filt & 0xffffff00) | i);
write_dc(par, DC_FILT_COEFF1, par->vcoeff[i]);
}
}
static void lx_restore_video_proc(struct lxfb_par *par)
{
int i;
wrmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg);
wrmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel);
for (i = 0; i < ARRAY_SIZE(par->vp); i++) {
switch (i) {
case VP_VCFG:
case VP_DCFG:
case VP_PAR:
case VP_PDR:
case VP_CCS:
case VP_RSVD_0:
/* case VP_VDC: */ /* why should this not be restored? */
case VP_RSVD_1:
case VP_CRC32:
/* don't restore these registers */
break;
default:
write_vp(par, i, par->vp[i]);
}
}
/* restore video processor palette */
write_vp(par, VP_PAR, 0);
for (i = 0; i < ARRAY_SIZE(par->vp_pal); i++)
write_vp(par, VP_PDR, par->vp_pal[i]);
/* restore video coeff ram */
memcpy(par->vp_regs + VP_VCR, par->vp_coeff, sizeof(par->vp_coeff));
}
static void lx_restore_regs(struct lxfb_par *par)
{
int i;
lx_set_dotpll((u32) (par->msr.dotpll >> 32));
lx_restore_gfx_proc(par);
lx_restore_display_ctlr(par);
lx_restore_video_proc(par);
/* Flat Panel */
for (i = 0; i < ARRAY_SIZE(par->fp); i++) {
switch (i) {
case FP_PM:
case FP_RSVD_0:
case FP_RSVD_1:
case FP_RSVD_2:
case FP_RSVD_3:
case FP_RSVD_4:
/* don't restore these registers */
break;
default:
write_fp(par, i, par->fp[i]);
}
}
/* control the panel */
if (par->fp[FP_PM] & FP_PM_P) {
/* power on the panel if not already power{ed,ing} on */
if (!(read_fp(par, FP_PM) &
(FP_PM_PANEL_ON|FP_PM_PANEL_PWR_UP)))
write_fp(par, FP_PM, par->fp[FP_PM]);
} else {
/* power down the panel if not already power{ed,ing} down */
if (!(read_fp(par, FP_PM) &
(FP_PM_PANEL_OFF|FP_PM_PANEL_PWR_DOWN)))
write_fp(par, FP_PM, par->fp[FP_PM]);
}
/* turn everything on */
write_vp(par, VP_VCFG, par->vp[VP_VCFG]);
write_vp(par, VP_DCFG, par->vp[VP_DCFG]);
write_dc(par, DC_DISPLAY_CFG, par->dc[DC_DISPLAY_CFG]);
/* do this last; it will enable the FIFO load */
write_dc(par, DC_GENERAL_CFG, par->dc[DC_GENERAL_CFG]);
/* lock the door behind us */
write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
}
int lx_powerdown(struct fb_info *info)
{
struct lxfb_par *par = info->par;
if (par->powered_down)
return 0;
lx_save_regs(par);
lx_graphics_disable(info);
par->powered_down = 1;
return 0;
}
int lx_powerup(struct fb_info *info)
{
struct lxfb_par *par = info->par;
if (!par->powered_down)
return 0;
lx_restore_regs(par);
par->powered_down = 0;
return 0;
}
#endif
| gpl-2.0 |
blackdeviant/hybrid-nicki | arch/parisc/math-emu/dfrem.c | 14149 | 8997 | /*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/dfrem.c $Revision: 1.1 $
*
* Purpose:
* Double Precision Floating-point Remainder
*
* External Interfaces:
* dbl_frem(srcptr1,srcptr2,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
* <<please update with an overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "dbl_float.h"
/*
* Double Precision Floating-point Remainder
*/
int
dbl_frem (dbl_floating_point * srcptr1, dbl_floating_point * srcptr2,
dbl_floating_point * dstptr, unsigned int *status)
{
register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2;
register unsigned int resultp1, resultp2;
register int opnd1_exponent, opnd2_exponent, dest_exponent, stepcount;
register boolean roundup = FALSE;
Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2);
Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2);
/*
* check first operand for NaN's or infinity
*/
if ((opnd1_exponent = Dbl_exponent(opnd1p1)) == DBL_INFINITY_EXPONENT) {
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
if (Dbl_isnotnan(opnd2p1,opnd2p2)) {
/* invalid since first operand is infinity */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd1p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd1p1);
}
/*
* is second operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if ((opnd2_exponent = Dbl_exponent(opnd2p1)) == DBL_INFINITY_EXPONENT) {
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
/*
* return first operand
*/
Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* check second operand for zero
*/
if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
/* invalid since second operand is zero */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* get sign of result
*/
resultp1 = opnd1p1;
/*
* check for denormalized operands
*/
if (opnd1_exponent == 0) {
/* check for zero */
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
return(NOEXCEPTION);
}
/* normalize, then continue */
opnd1_exponent = 1;
Dbl_normalize(opnd1p1,opnd1p2,opnd1_exponent);
}
else {
Dbl_clear_signexponent_set_hidden(opnd1p1);
}
if (opnd2_exponent == 0) {
/* normalize, then continue */
opnd2_exponent = 1;
Dbl_normalize(opnd2p1,opnd2p2,opnd2_exponent);
}
else {
Dbl_clear_signexponent_set_hidden(opnd2p1);
}
/* find result exponent and divide step loop count */
dest_exponent = opnd2_exponent - 1;
stepcount = opnd1_exponent - opnd2_exponent;
/*
* check for opnd1/opnd2 < 1
*/
if (stepcount < 0) {
/*
* check for opnd1/opnd2 > 1/2
*
* In this case n will round to 1, so
* r = opnd1 - opnd2
*/
if (stepcount == -1 &&
Dbl_isgreaterthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
/* set sign */
Dbl_allp1(resultp1) = ~Dbl_allp1(resultp1);
/* align opnd2 with opnd1 */
Dbl_leftshiftby1(opnd2p1,opnd2p2);
Dbl_subtract(opnd2p1,opnd2p2,opnd1p1,opnd1p2,
opnd2p1,opnd2p2);
/* now normalize */
while (Dbl_iszero_hidden(opnd2p1)) {
Dbl_leftshiftby1(opnd2p1,opnd2p2);
dest_exponent--;
}
Dbl_set_exponentmantissa(resultp1,resultp2,opnd2p1,opnd2p2);
goto testforunderflow;
}
/*
* opnd1/opnd2 <= 1/2
*
* In this case n will round to zero, so
* r = opnd1
*/
Dbl_set_exponentmantissa(resultp1,resultp2,opnd1p1,opnd1p2);
dest_exponent = opnd1_exponent;
goto testforunderflow;
}
/*
* Generate result
*
* Do iterative subtract until remainder is less than operand 2.
*/
while (stepcount-- > 0 && (Dbl_allp1(opnd1p1) || Dbl_allp2(opnd1p2))) {
if (Dbl_isnotlessthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
Dbl_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2,opnd1p1,opnd1p2);
}
Dbl_leftshiftby1(opnd1p1,opnd1p2);
}
/*
* Do last subtract, then determine which way to round if remainder
* is exactly 1/2 of opnd2
*/
if (Dbl_isnotlessthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
Dbl_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2,opnd1p1,opnd1p2);
roundup = TRUE;
}
if (stepcount > 0 || Dbl_iszero(opnd1p1,opnd1p2)) {
/* division is exact, remainder is zero */
Dbl_setzero_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Check for cases where opnd1/opnd2 < n
*
* In this case the result's sign will be opposite that of
* opnd1. The mantissa also needs some correction.
*/
Dbl_leftshiftby1(opnd1p1,opnd1p2);
if (Dbl_isgreaterthan(opnd1p1,opnd1p2,opnd2p1,opnd2p2)) {
Dbl_invert_sign(resultp1);
Dbl_leftshiftby1(opnd2p1,opnd2p2);
Dbl_subtract(opnd2p1,opnd2p2,opnd1p1,opnd1p2,opnd1p1,opnd1p2);
}
/* check for remainder being exactly 1/2 of opnd2 */
else if (Dbl_isequal(opnd1p1,opnd1p2,opnd2p1,opnd2p2) && roundup) {
Dbl_invert_sign(resultp1);
}
/* normalize result's mantissa */
while (Dbl_iszero_hidden(opnd1p1)) {
dest_exponent--;
Dbl_leftshiftby1(opnd1p1,opnd1p2);
}
Dbl_set_exponentmantissa(resultp1,resultp2,opnd1p1,opnd1p2);
/*
* Test for underflow
*/
testforunderflow:
if (dest_exponent <= 0) {
/* trap if UNDERFLOWTRAP enabled */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl);
/* frem is always exact */
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(UNDERFLOWEXCEPTION);
}
/*
* denormalize result or set to signed zero
*/
if (dest_exponent >= (1 - DBL_P)) {
Dbl_rightshift_exponentmantissa(resultp1,resultp2,
1-dest_exponent);
}
else {
Dbl_setzero_exponentmantissa(resultp1,resultp2);
}
}
else Dbl_set_exponent(resultp1,dest_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
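/*
 * Worked example of the IEEE remainder computed above:
 * frem(5.0, 3.0) rounds the quotient 5/3 to the nearest integer
 * n = 2, giving r = 5 - 2*3 = -1.0; the sign-inversion path above
 * is what produces such negative remainders.
 */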
| gpl-2.0 |
assassin0408/AK-OnePone | arch/parisc/math-emu/dfdiv.c | 14149 | 12636 | /*
* Linux/PA-RISC Project (http://www.parisc-linux.org/)
*
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* BEGIN_DESC
*
* File:
* @(#) pa/spmath/dfdiv.c $Revision: 1.1 $
*
* Purpose:
* Double Precision Floating-point Divide
*
* External Interfaces:
* dbl_fdiv(srcptr1,srcptr2,dstptr,status)
*
* Internal Interfaces:
*
* Theory:
* <<please update with an overview of the operation of this file>>
*
* END_DESC
*/
#include "float.h"
#include "dbl_float.h"
/*
* Double Precision Floating-point Divide
*/
int
dbl_fdiv (dbl_floating_point * srcptr1, dbl_floating_point * srcptr2,
dbl_floating_point * dstptr, unsigned int *status)
{
register unsigned int opnd1p1, opnd1p2, opnd2p1, opnd2p2;
register unsigned int opnd3p1, opnd3p2, resultp1, resultp2;
register int dest_exponent, count;
register boolean inexact = FALSE, guardbit = FALSE, stickybit = FALSE;
boolean is_tiny;
Dbl_copyfromptr(srcptr1,opnd1p1,opnd1p2);
Dbl_copyfromptr(srcptr2,opnd2p1,opnd2p2);
/*
* set sign bit of result
*/
if (Dbl_sign(opnd1p1) ^ Dbl_sign(opnd2p1))
Dbl_setnegativezerop1(resultp1);
else Dbl_setzerop1(resultp1);
/*
* check first operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd1p1)) {
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
if (Dbl_isnotnan(opnd2p1,opnd2p2)) {
if (Dbl_isinfinity(opnd2p1,opnd2p2)) {
/*
* invalid since both operands
* are infinity
*/
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* return infinity
*/
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
}
else {
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd1p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd1p1);
}
/*
* is second operand a signaling NaN?
*/
else if (Dbl_is_signalingnan(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled())
return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd1p1,opnd1p2,dstptr);
return(NOEXCEPTION);
}
}
/*
* check second operand for NaN's or infinity
*/
if (Dbl_isinfinity_exponent(opnd2p1)) {
if (Dbl_iszero_mantissa(opnd2p1,opnd2p2)) {
/*
* return zero
*/
Dbl_setzero_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* is NaN; signaling or quiet?
*/
if (Dbl_isone_signaling(opnd2p1)) {
/* trap if INVALIDTRAP enabled */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
/* make NaN quiet */
Set_invalidflag();
Dbl_set_quiet(opnd2p1);
}
/*
* return quiet NaN
*/
Dbl_copytoptr(opnd2p1,opnd2p2,dstptr);
return(NOEXCEPTION);
}
/*
* check for division by zero
*/
if (Dbl_iszero_exponentmantissa(opnd2p1,opnd2p2)) {
if (Dbl_iszero_exponentmantissa(opnd1p1,opnd1p2)) {
/* invalid since both operands are zero */
if (Is_invalidtrap_enabled()) return(INVALIDEXCEPTION);
Set_invalidflag();
Dbl_makequietnan(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
if (Is_divisionbyzerotrap_enabled())
return(DIVISIONBYZEROEXCEPTION);
Set_divisionbyzeroflag();
Dbl_setinfinity_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/*
* Generate exponent
*/
dest_exponent = Dbl_exponent(opnd1p1) - Dbl_exponent(opnd2p1) + DBL_BIAS;
/*
* Generate mantissa
*/
if (Dbl_isnotzero_exponent(opnd1p1)) {
/* set hidden bit */
Dbl_clear_signexponent_set_hidden(opnd1p1);
}
else {
/* check for zero */
if (Dbl_iszero_mantissa(opnd1p1,opnd1p2)) {
Dbl_setzero_exponentmantissa(resultp1,resultp2);
Dbl_copytoptr(resultp1,resultp2,dstptr);
return(NOEXCEPTION);
}
/* is denormalized, want to normalize */
Dbl_clear_signexponent(opnd1p1);
Dbl_leftshiftby1(opnd1p1,opnd1p2);
Dbl_normalize(opnd1p1,opnd1p2,dest_exponent);
}
/* opnd2 needs to have hidden bit set with msb in hidden bit */
if (Dbl_isnotzero_exponent(opnd2p1)) {
Dbl_clear_signexponent_set_hidden(opnd2p1);
}
else {
/* is denormalized; want to normalize */
Dbl_clear_signexponent(opnd2p1);
Dbl_leftshiftby1(opnd2p1,opnd2p2);
while (Dbl_iszero_hiddenhigh7mantissa(opnd2p1)) {
dest_exponent+=8;
Dbl_leftshiftby8(opnd2p1,opnd2p2);
}
if (Dbl_iszero_hiddenhigh3mantissa(opnd2p1)) {
dest_exponent+=4;
Dbl_leftshiftby4(opnd2p1,opnd2p2);
}
while (Dbl_iszero_hidden(opnd2p1)) {
dest_exponent++;
Dbl_leftshiftby1(opnd2p1,opnd2p2);
}
}
/* Divide the source mantissas */
/*
* A non-restoring divide algorithm is used.
*/
Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
Dbl_setzero(opnd3p1,opnd3p2);
for (count=1; count <= DBL_P && (opnd1p1 || opnd1p2); count++) {
Dbl_leftshiftby1(opnd1p1,opnd1p2);
Dbl_leftshiftby1(opnd3p1,opnd3p2);
if (Dbl_iszero_sign(opnd1p1)) {
Dbl_setone_lowmantissap2(opnd3p2);
Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
}
else {
Twoword_add(opnd1p1, opnd1p2, opnd2p1, opnd2p2);
}
}
if (count <= DBL_P) {
Dbl_leftshiftby1(opnd3p1,opnd3p2);
Dbl_setone_lowmantissap2(opnd3p2);
Dbl_leftshift(opnd3p1,opnd3p2,(DBL_P-count));
if (Dbl_iszero_hidden(opnd3p1)) {
Dbl_leftshiftby1(opnd3p1,opnd3p2);
dest_exponent--;
}
}
else {
if (Dbl_iszero_hidden(opnd3p1)) {
/* need to get one more bit of result */
Dbl_leftshiftby1(opnd1p1,opnd1p2);
Dbl_leftshiftby1(opnd3p1,opnd3p2);
if (Dbl_iszero_sign(opnd1p1)) {
Dbl_setone_lowmantissap2(opnd3p2);
Twoword_subtract(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
}
else {
Twoword_add(opnd1p1,opnd1p2,opnd2p1,opnd2p2);
}
dest_exponent--;
}
if (Dbl_iszero_sign(opnd1p1)) guardbit = TRUE;
stickybit = Dbl_allp1(opnd1p1) || Dbl_allp2(opnd1p2);
}
inexact = guardbit | stickybit;
/*
* round result
*/
if (inexact && (dest_exponent > 0 || Is_underflowtrap_enabled())) {
Dbl_clear_signexponent(opnd3p1);
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1))
Dbl_increment(opnd3p1,opnd3p2);
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1))
Dbl_increment(opnd3p1,opnd3p2);
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Dbl_isone_lowmantissap2(opnd3p2))) {
Dbl_increment(opnd3p1,opnd3p2);
}
}
if (Dbl_isone_hidden(opnd3p1)) dest_exponent++;
}
Dbl_set_mantissa(resultp1,resultp2,opnd3p1,opnd3p2);
/*
* Test for overflow
*/
if (dest_exponent >= DBL_INFINITY_EXPONENT) {
/* trap if OVERFLOWTRAP enabled */
if (Is_overflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,dest_exponent,ovfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return(OVERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return(OVERFLOWEXCEPTION);
}
Set_overflowflag();
/* set result to infinity or largest number */
Dbl_setoverflow(resultp1,resultp2);
inexact = TRUE;
}
/*
* Test for underflow
*/
else if (dest_exponent <= 0) {
/* trap if UNDERFLOWTRAP enabled */
if (Is_underflowtrap_enabled()) {
/*
* Adjust bias of result
*/
Dbl_setwrapped_exponent(resultp1,dest_exponent,unfl);
Dbl_copytoptr(resultp1,resultp2,dstptr);
if (inexact)
if (Is_inexacttrap_enabled())
return(UNDERFLOWEXCEPTION | INEXACTEXCEPTION);
else Set_inexactflag();
return(UNDERFLOWEXCEPTION);
}
/* Determine if should set underflow flag */
is_tiny = TRUE;
if (dest_exponent == 0 && inexact) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
if (Dbl_isone_hiddenoverflow(opnd3p1))
is_tiny = FALSE;
Dbl_decrement(opnd3p1,opnd3p2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
if (Dbl_isone_hiddenoverflow(opnd3p1))
is_tiny = FALSE;
Dbl_decrement(opnd3p1,opnd3p2);
}
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Dbl_isone_lowmantissap2(opnd3p2))) {
Dbl_increment(opnd3p1,opnd3p2);
if (Dbl_isone_hiddenoverflow(opnd3p1))
is_tiny = FALSE;
Dbl_decrement(opnd3p1,opnd3p2);
}
break;
}
}
/*
* denormalize result or set to signed zero
*/
stickybit = inexact;
Dbl_denormalize(opnd3p1,opnd3p2,dest_exponent,guardbit,
stickybit,inexact);
/* return rounded number */
if (inexact) {
switch (Rounding_mode()) {
case ROUNDPLUS:
if (Dbl_iszero_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
}
break;
case ROUNDMINUS:
if (Dbl_isone_sign(resultp1)) {
Dbl_increment(opnd3p1,opnd3p2);
}
break;
case ROUNDNEAREST:
if (guardbit && (stickybit ||
Dbl_isone_lowmantissap2(opnd3p2))) {
Dbl_increment(opnd3p1,opnd3p2);
}
break;
}
if (is_tiny) Set_underflowflag();
}
Dbl_set_exponentmantissa(resultp1,resultp2,opnd3p1,opnd3p2);
}
else Dbl_set_exponent(resultp1,dest_exponent);
Dbl_copytoptr(resultp1,resultp2,dstptr);
/* check for inexact */
if (inexact) {
if (Is_inexacttrap_enabled()) return(INEXACTEXCEPTION);
else Set_inexactflag();
}
return(NOEXCEPTION);
}
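/*
 * Worked example of the guard/sticky rounding above: 1.0 / 3.0 has
 * no exact binary representation, so bits remain in the partial
 * remainder, the sticky bit is set and the result is flagged
 * inexact; under ROUNDNEAREST the mantissa is incremented only when
 * the guard bit is set and either the sticky bit or the low mantissa
 * bit is also set (round-to-nearest-even).
 */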
| gpl-2.0 |
hurdleman/fieldtrip | external/dmlt/external/gpstuff/dist/linuxCsource/invgamrand1.c | 70 | 1357 | /*INVGAMRAND1 Random matrices from inverse gamma distribution
*
* R = INVGAMRAND1(A,B) returns a matrix of random numbers chosen
* from the inverse gamma distribution with parameters A and B.
* The size of R is the common size of A and B if both are matrices.
* If either parameter is a scalar, the size of R is the size of the other
* parameter. Alternatively, R = INVGAMRAND1(A,B,M,N) returns an M by N matrix.
*
* Note: Parameterization as in (Neal, 1996):
* A is the mean of the distribution,
* B is the degrees of freedom.
*
* Last modified: 2000-05-31 12:06:08 EEST
*
*/
/* Copyright (C) 1998-2000 Aki Vehtari
*
*This software is distributed under the GNU General Public
*License (version 3 or later); please refer to the file
*License.txt, included with the software, for details.
*
*/
#include <math.h>
#include "mex.h"
#include "rand.h"
void mexFunction(const int nlhs_, mxArray *plhs_[],
const int nrhs_, const mxArray *prhs_[])
{
if (nrhs_ != 2 )
mexErrMsgTxt( "Wrong number of input arguments." );
if (nlhs_ > 1 )
mexErrMsgTxt( "Too many output arguments." );
{
double a, b;
a=mxGetScalar(prhs_[0]);
b=mxGetScalar(prhs_[1]);
plhs_[0]=mxCreateDoubleMatrix(1,1,mxREAL);
*mxGetPr(plhs_[0]) =
a*b/2/rand_gamma(b/2);
}
return;
}
| gpl-2.0 |
amphorion/kernel_pyramidv2 | net/core/neighbour.c | 70 | 70928 | /*
* Generic address resolution entity
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Fixes:
* Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
* Harald Welte Add neighbour cache statistics like rtstat
*/
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#define NEIGH_DEBUG 1
#define NEIGH_PRINTK(x...) printk(x)
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
#define NEIGH_PRINTK2 NEIGH_NOPRINTK
#if NEIGH_DEBUG >= 1
#undef NEIGH_PRINTK1
#define NEIGH_PRINTK1 NEIGH_PRINTK
#endif
#if NEIGH_DEBUG >= 2
#undef NEIGH_PRINTK2
#define NEIGH_PRINTK2 NEIGH_PRINTK
#endif
#define PNEIGH_HASHMASK 0xF
static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
static struct neigh_table *neigh_tables;
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif
/*
Neighbour hash table buckets are protected with rwlock tbl->lock.
- All scans/updates of the hash buckets MUST be made under this lock.
- NOTHING clever should be done under this lock: no callbacks
to protocol backends, no attempts to send anything to the network.
That would result in deadlocks if the backend/driver wanted to use
the neighbour cache.
- If the entry requires some non-trivial actions, increase
its reference count and release the table lock.
Neighbour entries are protected:
- with reference count.
- with rwlock neigh->lock
The reference count prevents destruction.
neigh->lock mainly serializes ll address data and its validity state.
However, the same lock is also used to protect other entry fields:
- timer
- resolution queue
Again, nothing clever should be done under neigh->lock;
the most complicated procedure we allow there is dev->hard_header.
It is assumed that dev->hard_header is simple and does
not make callbacks to neighbour tables.
The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
the list of neighbour tables. This list is used only in process
context.
*/
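/*
 * A minimal sketch of the rule above (hypothetical caller, not part
 * of this file): pin the entry with a reference before dropping the
 * table lock, then do any non-trivial work outside the lock.
 *
 *	write_lock_bh(&tbl->lock);
 *	n = some_lookup(tbl, key);	// scan under tbl->lock
 *	if (n)
 *		neigh_hold(n);		// take a reference first
 *	write_unlock_bh(&tbl->lock);
 *	if (n) {
 *		do_slow_work(n);	// safe: the entry cannot vanish
 *		neigh_release(n);
 *	}
 */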
static DEFINE_RWLOCK(neigh_tbl_lock);
static int neigh_blackhole(struct sk_buff *skb)
{
kfree_skb(skb);
return -ENETDOWN;
}
static void neigh_cleanup_and_release(struct neighbour *neigh)
{
if (neigh->parms->neigh_cleanup)
neigh->parms->neigh_cleanup(neigh);
__neigh_notify(neigh, RTM_DELNEIGH, 0);
neigh_release(neigh);
}
/*
* It is a uniform random distribution in the interval (1/2)*base...(3/2)*base.
* It corresponds to the default IPv6 settings and is not overridable,
* because it is a really reasonable choice.
*/
unsigned long neigh_rand_reach_time(unsigned long base)
{
return base ? (net_random() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
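/*
 * Worked example: with base = 30 * HZ the expression above reduces
 * to net_random() % (30 * HZ) + 15 * HZ, i.e. a value uniformly
 * distributed over [15 s, 45 s), matching the documented
 * (1/2)*base...(3/2)*base interval.
 */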
static int neigh_forced_gc(struct neigh_table *tbl)
{
int shrunk = 0;
int i;
struct neigh_hash_table *nht;
NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
write_lock_bh(&tbl->lock);
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
for (i = 0; i <= nht->hash_mask; i++) {
struct neighbour *n;
struct neighbour __rcu **np;
np = &nht->hash_buckets[i];
while ((n = rcu_dereference_protected(*np,
lockdep_is_held(&tbl->lock))) != NULL) {
/* Neighbour record may be discarded if:
* - nobody refers to it.
* - it is not permanent
*/
write_lock(&n->lock);
if (atomic_read(&n->refcnt) == 1 &&
!(n->nud_state & NUD_PERMANENT)) {
rcu_assign_pointer(*np,
rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock)));
n->dead = 1;
shrunk = 1;
write_unlock(&n->lock);
neigh_cleanup_and_release(n);
continue;
}
write_unlock(&n->lock);
np = &n->next;
}
}
tbl->last_flush = jiffies;
write_unlock_bh(&tbl->lock);
return shrunk;
}
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
neigh_hold(n);
if (unlikely(mod_timer(&n->timer, when))) {
printk("NEIGH: BUG, double timer add, state is %x\n",
n->nud_state);
dump_stack();
}
}
static int neigh_del_timer(struct neighbour *n)
{
if ((n->nud_state & NUD_IN_TIMER) &&
del_timer(&n->timer)) {
neigh_release(n);
return 1;
}
return 0;
}
static void pneigh_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(list)) != NULL) {
dev_put(skb->dev);
kfree_skb(skb);
}
}
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
int i;
struct neigh_hash_table *nht;
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
for (i = 0; i <= nht->hash_mask; i++) {
struct neighbour *n;
struct neighbour __rcu **np = &nht->hash_buckets[i];
while ((n = rcu_dereference_protected(*np,
lockdep_is_held(&tbl->lock))) != NULL) {
if (dev && n->dev != dev) {
np = &n->next;
continue;
}
rcu_assign_pointer(*np,
rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock)));
write_lock(&n->lock);
neigh_del_timer(n);
n->dead = 1;
if (atomic_read(&n->refcnt) != 1) {
/* The most unpleasant situation.
We must destroy neighbour entry,
but someone still uses it.
The destroy will be delayed until
the last user releases us, but
we must kill timers etc. and move
it to safe state.
*/
__skb_queue_purge(&n->arp_queue);
n->output = neigh_blackhole;
if (n->nud_state & NUD_VALID)
n->nud_state = NUD_NOARP;
else
n->nud_state = NUD_NONE;
NEIGH_PRINTK2("neigh %p is stray.\n", n);
}
write_unlock(&n->lock);
neigh_cleanup_and_release(n);
}
}
}
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev);
write_unlock_bh(&tbl->lock);
}
EXPORT_SYMBOL(neigh_changeaddr);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev);
pneigh_ifdown(tbl, dev);
write_unlock_bh(&tbl->lock);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
return 0;
}
EXPORT_SYMBOL(neigh_ifdown);
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
struct neighbour *n = NULL;
unsigned long now = jiffies;
int entries;
entries = atomic_inc_return(&tbl->entries) - 1;
if (entries >= tbl->gc_thresh3 ||
(entries >= tbl->gc_thresh2 &&
time_after(now, tbl->last_flush + 5 * HZ))) {
if (!neigh_forced_gc(tbl) &&
entries >= tbl->gc_thresh3)
goto out_entries;
}
n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
if (!n)
goto out_entries;
__skb_queue_head_init(&n->arp_queue);
rwlock_init(&n->lock);
seqlock_init(&n->ha_lock);
n->updated = n->used = now;
n->nud_state = NUD_NONE;
n->output = neigh_blackhole;
n->parms = neigh_parms_clone(&tbl->parms);
setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
NEIGH_CACHE_STAT_INC(tbl, allocs);
n->tbl = tbl;
atomic_set(&n->refcnt, 1);
n->dead = 1;
out:
return n;
out_entries:
atomic_dec(&tbl->entries);
goto out;
}
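/*
 * Note on the thresholds used in neigh_alloc() above: once the table
 * holds gc_thresh2 entries and the last flush is more than 5 seconds
 * old, or gc_thresh3 entries at any time, a synchronous
 * neigh_forced_gc() run is attempted; the allocation fails only if
 * the table is still at or above gc_thresh3 afterwards.
 */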
static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
{
size_t size = entries * sizeof(struct neighbour *);
struct neigh_hash_table *ret;
struct neighbour __rcu **buckets;
ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
if (!ret)
return NULL;
if (size <= PAGE_SIZE)
buckets = kzalloc(size, GFP_ATOMIC);
else
buckets = (struct neighbour __rcu **)
__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
get_order(size));
if (!buckets) {
kfree(ret);
return NULL;
}
ret->hash_buckets = buckets;
ret->hash_mask = entries - 1;
get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
return ret;
}
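/*
 * Note on the mask above: callers only pass power-of-two entry
 * counts (see the BUG_ON in neigh_hash_grow()), so
 * hash_mask = entries - 1 is an all-ones bitmask; e.g. 32 buckets
 * yield a mask of 0x1f.
 */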
static void neigh_hash_free_rcu(struct rcu_head *head)
{
struct neigh_hash_table *nht = container_of(head,
struct neigh_hash_table,
rcu);
size_t size = (nht->hash_mask + 1) * sizeof(struct neighbour *);
struct neighbour __rcu **buckets = nht->hash_buckets;
if (size <= PAGE_SIZE)
kfree(buckets);
else
free_pages((unsigned long)buckets, get_order(size));
kfree(nht);
}
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
unsigned long new_entries)
{
unsigned int i, hash;
struct neigh_hash_table *new_nht, *old_nht;
NEIGH_CACHE_STAT_INC(tbl, hash_grows);
BUG_ON(!is_power_of_2(new_entries));
old_nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
new_nht = neigh_hash_alloc(new_entries);
if (!new_nht)
return old_nht;
for (i = 0; i <= old_nht->hash_mask; i++) {
struct neighbour *n, *next;
for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
lockdep_is_held(&tbl->lock));
n != NULL;
n = next) {
hash = tbl->hash(n->primary_key, n->dev,
new_nht->hash_rnd);
hash &= new_nht->hash_mask;
next = rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock));
rcu_assign_pointer(n->next,
rcu_dereference_protected(
new_nht->hash_buckets[hash],
lockdep_is_held(&tbl->lock)));
rcu_assign_pointer(new_nht->hash_buckets[hash], n);
}
}
rcu_assign_pointer(tbl->nht, new_nht);
call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
return new_nht;
}
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
struct net_device *dev)
{
struct neighbour *n;
int key_len = tbl->key_len;
u32 hash_val;
struct neigh_hash_table *nht;
NEIGH_CACHE_STAT_INC(tbl, lookups);
rcu_read_lock_bh();
nht = rcu_dereference_bh(tbl->nht);
hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask;
for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
n != NULL;
n = rcu_dereference_bh(n->next)) {
if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
if (!atomic_inc_not_zero(&n->refcnt))
n = NULL;
NEIGH_CACHE_STAT_INC(tbl, hits);
break;
}
}
rcu_read_unlock_bh();
return n;
}
EXPORT_SYMBOL(neigh_lookup);
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
const void *pkey)
{
struct neighbour *n;
int key_len = tbl->key_len;
u32 hash_val;
struct neigh_hash_table *nht;
NEIGH_CACHE_STAT_INC(tbl, lookups);
rcu_read_lock_bh();
nht = rcu_dereference_bh(tbl->nht);
hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) & nht->hash_mask;
for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
n != NULL;
n = rcu_dereference_bh(n->next)) {
if (!memcmp(n->primary_key, pkey, key_len) &&
net_eq(dev_net(n->dev), net)) {
if (!atomic_inc_not_zero(&n->refcnt))
n = NULL;
NEIGH_CACHE_STAT_INC(tbl, hits);
break;
}
}
rcu_read_unlock_bh();
return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
struct net_device *dev)
{
u32 hash_val;
int key_len = tbl->key_len;
int error;
struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
struct neigh_hash_table *nht;
if (!n) {
rc = ERR_PTR(-ENOBUFS);
goto out;
}
memcpy(n->primary_key, pkey, key_len);
n->dev = dev;
dev_hold(dev);
/* Protocol specific setup. */
if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
rc = ERR_PTR(error);
goto out_neigh_release;
}
/* Device specific setup. */
if (n->parms->neigh_setup &&
(error = n->parms->neigh_setup(n)) < 0) {
rc = ERR_PTR(error);
goto out_neigh_release;
}
n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
write_lock_bh(&tbl->lock);
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
if (atomic_read(&tbl->entries) > (nht->hash_mask + 1))
nht = neigh_hash_grow(tbl, (nht->hash_mask + 1) << 1);
hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask;
if (n->parms->dead) {
rc = ERR_PTR(-EINVAL);
goto out_tbl_unlock;
}
for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
lockdep_is_held(&tbl->lock));
n1 != NULL;
n1 = rcu_dereference_protected(n1->next,
lockdep_is_held(&tbl->lock))) {
if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
neigh_hold(n1);
rc = n1;
goto out_tbl_unlock;
}
}
n->dead = 0;
neigh_hold(n);
rcu_assign_pointer(n->next,
rcu_dereference_protected(nht->hash_buckets[hash_val],
lockdep_is_held(&tbl->lock)));
rcu_assign_pointer(nht->hash_buckets[hash_val], n);
write_unlock_bh(&tbl->lock);
NEIGH_PRINTK2("neigh %p is created.\n", n);
rc = n;
out:
return rc;
out_tbl_unlock:
write_unlock_bh(&tbl->lock);
out_neigh_release:
neigh_release(n);
goto out;
}
EXPORT_SYMBOL(neigh_create);
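/* Proxy entries use a simpler, unseeded hash that folds the last four
* bytes of the key down to PNEIGH_HASHMASK.
*/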
static u32 pneigh_hash(const void *pkey, int key_len)
{
u32 hash_val = *(u32 *)(pkey + key_len - 4);
hash_val ^= (hash_val >> 16);
hash_val ^= hash_val >> 8;
hash_val ^= hash_val >> 4;
hash_val &= PNEIGH_HASHMASK;
return hash_val;
}
static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
struct net *net,
const void *pkey,
int key_len,
struct net_device *dev)
{
while (n) {
if (!memcmp(n->key, pkey, key_len) &&
net_eq(pneigh_net(n), net) &&
(n->dev == dev || !n->dev))
return n;
n = n->next;
}
return NULL;
}
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
struct net *net, const void *pkey, struct net_device *dev)
{
int key_len = tbl->key_len;
u32 hash_val = pneigh_hash(pkey, key_len);
return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
net, pkey, key_len, dev);
}
EXPORT_SYMBOL_GPL(__pneigh_lookup);
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
struct net *net, const void *pkey,
struct net_device *dev, int creat)
{
struct pneigh_entry *n;
int key_len = tbl->key_len;
u32 hash_val = pneigh_hash(pkey, key_len);
read_lock_bh(&tbl->lock);
n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
net, pkey, key_len, dev);
read_unlock_bh(&tbl->lock);
if (n || !creat)
goto out;
ASSERT_RTNL();
n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
if (!n)
goto out;
write_pnet(&n->net, hold_net(net));
memcpy(n->key, pkey, key_len);
n->dev = dev;
if (dev)
dev_hold(dev);
if (tbl->pconstructor && tbl->pconstructor(n)) {
if (dev)
dev_put(dev);
release_net(net);
kfree(n);
n = NULL;
goto out;
}
write_lock_bh(&tbl->lock);
n->next = tbl->phash_buckets[hash_val];
tbl->phash_buckets[hash_val] = n;
write_unlock_bh(&tbl->lock);
out:
return n;
}
EXPORT_SYMBOL(pneigh_lookup);
int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
struct net_device *dev)
{
struct pneigh_entry *n, **np;
int key_len = tbl->key_len;
u32 hash_val = pneigh_hash(pkey, key_len);
write_lock_bh(&tbl->lock);
for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
np = &n->next) {
if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
net_eq(pneigh_net(n), net)) {
*np = n->next;
write_unlock_bh(&tbl->lock);
if (tbl->pdestructor)
tbl->pdestructor(n);
if (n->dev)
dev_put(n->dev);
release_net(pneigh_net(n));
kfree(n);
return 0;
}
}
write_unlock_bh(&tbl->lock);
return -ENOENT;
}
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
struct pneigh_entry *n, **np;
u32 h;
for (h = 0; h <= PNEIGH_HASHMASK; h++) {
np = &tbl->phash_buckets[h];
while ((n = *np) != NULL) {
if (!dev || n->dev == dev) {
*np = n->next;
if (tbl->pdestructor)
tbl->pdestructor(n);
if (n->dev)
dev_put(n->dev);
release_net(pneigh_net(n));
kfree(n);
continue;
}
np = &n->next;
}
}
return -ENOENT;
}
static void neigh_parms_destroy(struct neigh_parms *parms);
static inline void neigh_parms_put(struct neigh_parms *parms)
{
if (atomic_dec_and_test(&parms->refcnt))
neigh_parms_destroy(parms);
}
static void neigh_destroy_rcu(struct rcu_head *head)
{
struct neighbour *neigh = container_of(head, struct neighbour, rcu);
kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
/*
* neighbour must already be out of the table
*/
void neigh_destroy(struct neighbour *neigh)
{
struct hh_cache *hh;
NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
if (!neigh->dead) {
printk(KERN_WARNING
"Destroying alive neighbour %p\n", neigh);
dump_stack();
return;
}
if (neigh_del_timer(neigh))
printk(KERN_WARNING "Impossible event.\n");
while ((hh = neigh->hh) != NULL) {
neigh->hh = hh->hh_next;
hh->hh_next = NULL;
write_seqlock_bh(&hh->hh_lock);
hh->hh_output = neigh_blackhole;
write_sequnlock_bh(&hh->hh_lock);
hh_cache_put(hh);
}
write_lock_bh(&neigh->lock);
__skb_queue_purge(&neigh->arp_queue);
write_unlock_bh(&neigh->lock);
dev_put(neigh->dev);
neigh_parms_put(neigh->parms);
NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
atomic_dec(&neigh->tbl->entries);
call_rcu(&neigh->rcu, neigh_destroy_rcu);
}
EXPORT_SYMBOL(neigh_destroy);
/* Neighbour state is suspicious;
disable fast path.
Called with write_locked neigh.
*/
static void neigh_suspect(struct neighbour *neigh)
{
struct hh_cache *hh;
NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
neigh->output = neigh->ops->output;
for (hh = neigh->hh; hh; hh = hh->hh_next)
hh->hh_output = neigh->ops->output;
}
/* Neighbour state is OK;
enable fast path.
Called with write_locked neigh.
*/
static void neigh_connect(struct neighbour *neigh)
{
struct hh_cache *hh;
NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
neigh->output = neigh->ops->connected_output;
for (hh = neigh->hh; hh; hh = hh->hh_next)
hh->hh_output = neigh->ops->hh_output;
}
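/* Periodic garbage collection: walk every bucket under tbl->lock and
* drop entries that are unreferenced and either failed or unused for
* longer than gc_staletime. The lock is dropped between buckets so the
* walk does not hog the CPU.
*/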
static void neigh_periodic_work(struct work_struct *work)
{
struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
struct neighbour *n;
struct neighbour __rcu **np;
unsigned int i;
struct neigh_hash_table *nht;
NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
write_lock_bh(&tbl->lock);
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
/*
* periodically recompute ReachableTime from random function
*/
if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
struct neigh_parms *p;
tbl->last_rand = jiffies;
for (p = &tbl->parms; p; p = p->next)
p->reachable_time =
neigh_rand_reach_time(p->base_reachable_time);
}
for (i = 0 ; i <= nht->hash_mask; i++) {
np = &nht->hash_buckets[i];
while ((n = rcu_dereference_protected(*np,
lockdep_is_held(&tbl->lock))) != NULL) {
unsigned int state;
write_lock(&n->lock);
state = n->nud_state;
if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
write_unlock(&n->lock);
goto next_elt;
}
if (time_before(n->used, n->confirmed))
n->used = n->confirmed;
if (atomic_read(&n->refcnt) == 1 &&
(state == NUD_FAILED ||
time_after(jiffies, n->used + n->parms->gc_staletime))) {
*np = n->next;
n->dead = 1;
write_unlock(&n->lock);
neigh_cleanup_and_release(n);
continue;
}
write_unlock(&n->lock);
next_elt:
np = &n->next;
}
/*
* It's fine to release lock here, even if hash table
* grows while we are preempted.
*/
write_unlock_bh(&tbl->lock);
cond_resched();
write_lock_bh(&tbl->lock);
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
}
/* Cycle through all hash buckets every base_reachable_time/2 ticks.
* ARP entry timeouts range from 1/2 base_reachable_time to 3/2
* base_reachable_time.
*/
schedule_delayed_work(&tbl->gc_work,
tbl->parms.base_reachable_time >> 1);
write_unlock_bh(&tbl->lock);
}
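/* Probe budget for a resolution attempt: once an entry has reached
* NUD_PROBE only unicast probes count; before that, unicast,
* application and multicast probes are all allowed.
*/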
static __inline__ int neigh_max_probes(struct neighbour *n)
{
struct neigh_parms *p = n->parms;
return (n->nud_state & NUD_PROBE) ?
p->ucast_probes :
p->ucast_probes + p->app_probes + p->mcast_probes;
}
static void neigh_invalidate(struct neighbour *neigh)
__releases(neigh->lock)
__acquires(neigh->lock)
{
struct sk_buff *skb;
NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
neigh->updated = jiffies;
/* This is a delicate spot: the error_report routine is complicated
and can even hit this same neighbour entry, so be careful
to avoid an endless loop here. --ANK
*/
while (neigh->nud_state == NUD_FAILED &&
(skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
write_unlock(&neigh->lock);
neigh->ops->error_report(neigh, skb);
write_lock(&neigh->lock);
}
__skb_queue_purge(&neigh->arp_queue);
}
/* Called when a timer expires for a neighbour entry. */
static void neigh_timer_handler(unsigned long arg)
{
unsigned long now, next;
struct neighbour *neigh = (struct neighbour *)arg;
unsigned state;
int notify = 0;
write_lock(&neigh->lock);
state = neigh->nud_state;
now = jiffies;
next = now + HZ;
if (!(state & NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
#endif
goto out;
}
if (state & NUD_REACHABLE) {
if (time_before_eq(now,
neigh->confirmed + neigh->parms->reachable_time)) {
NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
next = neigh->confirmed + neigh->parms->reachable_time;
} else if (time_before_eq(now,
neigh->used + neigh->parms->delay_probe_time)) {
NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
neigh->nud_state = NUD_DELAY;
neigh->updated = jiffies;
neigh_suspect(neigh);
next = now + neigh->parms->delay_probe_time;
} else {
NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
neigh->nud_state = NUD_STALE;
neigh->updated = jiffies;
neigh_suspect(neigh);
notify = 1;
}
} else if (state & NUD_DELAY) {
if (time_before_eq(now,
neigh->confirmed + neigh->parms->delay_probe_time)) {
NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
neigh->nud_state = NUD_REACHABLE;
neigh->updated = jiffies;
neigh_connect(neigh);
notify = 1;
next = neigh->confirmed + neigh->parms->reachable_time;
} else {
NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
neigh->nud_state = NUD_PROBE;
neigh->updated = jiffies;
atomic_set(&neigh->probes, 0);
next = now + neigh->parms->retrans_time;
}
} else {
/* NUD_PROBE|NUD_INCOMPLETE */
next = now + neigh->parms->retrans_time;
}
if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
neigh->nud_state = NUD_FAILED;
notify = 1;
neigh_invalidate(neigh);
}
if (neigh->nud_state & NUD_IN_TIMER) {
if (time_before(next, jiffies + HZ/2))
next = jiffies + HZ/2;
if (!mod_timer(&neigh->timer, next))
neigh_hold(neigh);
}
if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
struct sk_buff *skb = skb_peek(&neigh->arp_queue);
/* keep skb alive even if arp_queue overflows */
if (skb)
skb = skb_copy(skb, GFP_ATOMIC);
write_unlock(&neigh->lock);
neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
#ifdef CONFIG_HTC_NETWORK_MODIFY
if (!IS_ERR(skb) && (skb))
kfree_skb(skb);
#else
kfree_skb(skb);
#endif
} else {
out:
write_unlock(&neigh->lock);
}
if (notify)
neigh_update_notify(neigh);
neigh_release(neigh);
}
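/* Kick off (or continue) resolution for an entry that is not yet in a
* usable state. Returns 0 if the entry is usable and the caller may
* transmit immediately, 1 if the skb was queued (or dropped) pending
* resolution.
*/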
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
int rc;
unsigned long now;
write_lock_bh(&neigh->lock);
rc = 0;
if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
goto out_unlock_bh;
now = jiffies;
if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
atomic_set(&neigh->probes, neigh->parms->ucast_probes);
neigh->nud_state = NUD_INCOMPLETE;
neigh->updated = jiffies;
neigh_add_timer(neigh, now + 1);
} else {
neigh->nud_state = NUD_FAILED;
neigh->updated = jiffies;
write_unlock_bh(&neigh->lock);
kfree_skb(skb);
return 1;
}
} else if (neigh->nud_state & NUD_STALE) {
NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
neigh->nud_state = NUD_DELAY;
neigh->updated = jiffies;
neigh_add_timer(neigh,
jiffies + neigh->parms->delay_probe_time);
}
if (neigh->nud_state == NUD_INCOMPLETE) {
if (skb) {
if (skb_queue_len(&neigh->arp_queue) >=
neigh->parms->queue_len) {
struct sk_buff *buff;
buff = __skb_dequeue(&neigh->arp_queue);
kfree_skb(buff);
NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
}
skb_dst_force(skb);
__skb_queue_tail(&neigh->arp_queue, skb);
}
rc = 1;
}
out_unlock_bh:
write_unlock_bh(&neigh->lock);
return rc;
}
EXPORT_SYMBOL(__neigh_event_send);
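/* Propagate a new hardware address into every cached hardware header,
* rewriting each one under its seqlock so concurrent readers retry
* instead of seeing a torn update.
*/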
static void neigh_update_hhs(const struct neighbour *neigh)
{
struct hh_cache *hh;
void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
= NULL;
if (neigh->dev->header_ops)
update = neigh->dev->header_ops->cache_update;
if (update) {
for (hh = neigh->hh; hh; hh = hh->hh_next) {
write_seqlock_bh(&hh->hh_lock);
update(hh, neigh->dev, neigh->ha);
write_sequnlock_bh(&hh->hh_lock);
}
}
}
/* Generic update routine.
-- lladdr is the new lladdr, or NULL if it is not supplied.
-- new is the new state.
-- flags
NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
if it differs.
NEIGH_UPDATE_F_WEAK_OVERRIDE marks an existing "connected"
lladdr as suspect instead of overriding it
if it differs.
It also allows the current state to be kept
if the lladdr is unchanged.
NEIGH_UPDATE_F_ADMIN means that the change is administrative.
NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
NTF_ROUTER flag.
NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known
to be a router.
Caller MUST hold a reference count on the entry.
*/
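/* A minimal usage sketch (illustrative only; n and new_lladdr are
* hypothetical caller-side variables, not code from this file):
* neigh_hold(n);
* err = neigh_update(n, new_lladdr, NUD_REACHABLE,
* NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN);
* neigh_release(n);
*/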
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
u32 flags)
{
u8 old;
int err;
int notify = 0;
struct net_device *dev;
int update_isrouter = 0;
write_lock_bh(&neigh->lock);
dev = neigh->dev;
old = neigh->nud_state;
err = -EPERM;
if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
(old & (NUD_NOARP | NUD_PERMANENT)))
goto out;
if (!(new & NUD_VALID)) {
neigh_del_timer(neigh);
if (old & NUD_CONNECTED)
neigh_suspect(neigh);
neigh->nud_state = new;
err = 0;
notify = old & NUD_VALID;
if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
(new & NUD_FAILED)) {
neigh_invalidate(neigh);
notify = 1;
}
goto out;
}
/* Compare new lladdr with cached one */
if (!dev->addr_len) {
/* First case: device needs no address. */
lladdr = neigh->ha;
} else if (lladdr) {
/* The second case: if something is already cached
and a new address is proposed:
- compare new & old
- if they are different, check override flag
*/
if ((old & NUD_VALID) &&
!memcmp(lladdr, neigh->ha, dev->addr_len))
lladdr = neigh->ha;
} else {
/* No address is supplied; if we know something,
use it, otherwise discard the request.
*/
err = -EINVAL;
if (!(old & NUD_VALID))
goto out;
lladdr = neigh->ha;
}
if (new & NUD_CONNECTED)
neigh->confirmed = jiffies;
neigh->updated = jiffies;
/* If the entry was valid and the address is unchanged,
keep the current state when the proposed new one is STALE.
*/
err = 0;
update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
if (old & NUD_VALID) {
if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
update_isrouter = 0;
if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
(old & NUD_CONNECTED)) {
lladdr = neigh->ha;
new = NUD_STALE;
} else
goto out;
} else {
if (lladdr == neigh->ha && new == NUD_STALE &&
((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
(old & NUD_CONNECTED))
)
new = old;
}
}
if (new != old) {
neigh_del_timer(neigh);
if (new & NUD_IN_TIMER)
neigh_add_timer(neigh, (jiffies +
((new & NUD_REACHABLE) ?
neigh->parms->reachable_time :
0)));
neigh->nud_state = new;
}
if (lladdr != neigh->ha) {
write_seqlock(&neigh->ha_lock);
memcpy(&neigh->ha, lladdr, dev->addr_len);
write_sequnlock(&neigh->ha_lock);
neigh_update_hhs(neigh);
if (!(new & NUD_CONNECTED))
neigh->confirmed = jiffies -
(neigh->parms->base_reachable_time << 1);
notify = 1;
}
if (new == old)
goto out;
if (new & NUD_CONNECTED)
neigh_connect(neigh);
else
neigh_suspect(neigh);
if (!(old & NUD_VALID)) {
struct sk_buff *skb;
/* Again: avoid an endless loop if something went wrong */
while (neigh->nud_state & NUD_VALID &&
(skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
struct dst_entry *dst = skb_dst(skb);
struct neighbour *n2, *n1 = neigh;
write_unlock_bh(&neigh->lock);
rcu_read_lock();
/* On shaper/eql skb->dst->neighbour != neigh :( */
if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
n1 = n2;
n1->output(skb);
rcu_read_unlock();
write_lock_bh(&neigh->lock);
}
__skb_queue_purge(&neigh->arp_queue);
}
out:
if (update_isrouter) {
neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
(neigh->flags | NTF_ROUTER) :
(neigh->flags & ~NTF_ROUTER);
}
write_unlock_bh(&neigh->lock);
if (notify)
neigh_update_notify(neigh);
return err;
}
EXPORT_SYMBOL(neigh_update);
struct neighbour *neigh_event_ns(struct neigh_table *tbl,
u8 *lladdr, void *saddr,
struct net_device *dev)
{
struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
lladdr || !dev->addr_len);
if (neigh)
neigh_update(neigh, lladdr, NUD_STALE,
NEIGH_UPDATE_F_OVERRIDE);
return neigh;
}
EXPORT_SYMBOL(neigh_event_ns);
static inline bool neigh_hh_lookup(struct neighbour *n, struct dst_entry *dst,
__be16 protocol)
{
struct hh_cache *hh;
smp_rmb(); /* paired with smp_wmb() in neigh_hh_init() */
for (hh = n->hh; hh; hh = hh->hh_next) {
if (hh->hh_type == protocol) {
atomic_inc(&hh->hh_refcnt);
if (unlikely(cmpxchg(&dst->hh, NULL, hh) != NULL))
hh_cache_put(hh);
return true;
}
}
return false;
}
/* takes write_lock_bh(&n->lock) itself; must not be called with n->lock held */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
__be16 protocol)
{
struct hh_cache *hh;
struct net_device *dev = dst->dev;
if (likely(neigh_hh_lookup(n, dst, protocol)))
return;
/* slow path */
hh = kzalloc(sizeof(*hh), GFP_ATOMIC);
if (!hh)
return;
seqlock_init(&hh->hh_lock);
hh->hh_type = protocol;
atomic_set(&hh->hh_refcnt, 2);
if (dev->header_ops->cache(n, hh)) {
kfree(hh);
return;
}
write_lock_bh(&n->lock);
/* must check if another thread already did the insert */
if (neigh_hh_lookup(n, dst, protocol)) {
kfree(hh);
goto end;
}
if (n->nud_state & NUD_CONNECTED)
hh->hh_output = n->ops->hh_output;
else
hh->hh_output = n->ops->output;
hh->hh_next = n->hh;
smp_wmb(); /* paired with smp_rmb() in neigh_hh_lookup() */
n->hh = hh;
if (unlikely(cmpxchg(&dst->hh, NULL, hh) != NULL))
hh_cache_put(hh);
end:
write_unlock_bh(&n->lock);
}
/* This function can be used in contexts where only the old dev_queue_xmit
* worked, e.g. if you want to override the normal output path (eql, shaper)
* but resolution has not been made yet.
*/
int neigh_compat_output(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
__skb_pull(skb, skb_network_offset(skb));
if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
skb->len) < 0 &&
dev->header_ops->rebuild(skb))
return 0;
return dev_queue_xmit(skb);
}
EXPORT_SYMBOL(neigh_compat_output);
/* Slow and careful. */
int neigh_resolve_output(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct neighbour *neigh = dst ? dst_get_neighbour(dst) : NULL;
int rc = 0;
/* check dst before dereferencing it to find the neighbour */
if (!dst || !neigh)
goto discard;
if (!neigh_event_send(neigh, skb)) {
int err;
struct net_device *dev = neigh->dev;
unsigned int seq;
if (dev->header_ops->cache &&
!dst->hh &&
!(dst->flags & DST_NOCACHE))
neigh_hh_init(neigh, dst, dst->ops->protocol);
do {
__skb_pull(skb, skb_network_offset(skb));
seq = read_seqbegin(&neigh->ha_lock);
err = dev_hard_header(skb, dev, ntohs(skb->protocol),
neigh->ha, NULL, skb->len);
} while (read_seqretry(&neigh->ha_lock, seq));
if (err >= 0)
rc = neigh->ops->queue_xmit(skb);
else
goto out_kfree_skb;
}
out:
return rc;
discard:
NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
dst, neigh);
out_kfree_skb:
rc = -EINVAL;
kfree_skb(skb);
goto out;
}
EXPORT_SYMBOL(neigh_resolve_output);
/* As fast as possible without hh cache */
int neigh_connected_output(struct sk_buff *skb)
{
int err;
struct dst_entry *dst = skb_dst(skb);
struct neighbour *neigh = dst_get_neighbour(dst);
struct net_device *dev = neigh->dev;
unsigned int seq;
do {
__skb_pull(skb, skb_network_offset(skb));
seq = read_seqbegin(&neigh->ha_lock);
err = dev_hard_header(skb, dev, ntohs(skb->protocol),
neigh->ha, NULL, skb->len);
} while (read_seqretry(&neigh->ha_lock, seq));
if (err >= 0)
err = neigh->ops->queue_xmit(skb);
else {
err = -EINVAL;
kfree_skb(skb);
}
return err;
}
EXPORT_SYMBOL(neigh_connected_output);
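/* Proxy timer handler: flush every queued proxy skb whose scheduled
* time has passed and re-arm the timer for the earliest remaining
* entry.
*/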
static void neigh_proxy_process(unsigned long arg)
{
struct neigh_table *tbl = (struct neigh_table *)arg;
long sched_next = 0;
unsigned long now = jiffies;
struct sk_buff *skb, *n;
spin_lock(&tbl->proxy_queue.lock);
skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
long tdif = NEIGH_CB(skb)->sched_next - now;
if (tdif <= 0) {
struct net_device *dev = skb->dev;
__skb_unlink(skb, &tbl->proxy_queue);
if (tbl->proxy_redo && netif_running(dev)) {
rcu_read_lock();
tbl->proxy_redo(skb);
rcu_read_unlock();
} else {
kfree_skb(skb);
}
dev_put(dev);
} else if (!sched_next || tdif < sched_next)
sched_next = tdif;
}
del_timer(&tbl->proxy_timer);
if (sched_next)
mod_timer(&tbl->proxy_timer, jiffies + sched_next);
spin_unlock(&tbl->proxy_queue.lock);
}
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
struct sk_buff *skb)
{
unsigned long now = jiffies;
unsigned long sched_next = now + (net_random() % p->proxy_delay);
if (tbl->proxy_queue.qlen > p->proxy_qlen) {
kfree_skb(skb);
return;
}
NEIGH_CB(skb)->sched_next = sched_next;
NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
spin_lock(&tbl->proxy_queue.lock);
if (del_timer(&tbl->proxy_timer)) {
if (time_before(tbl->proxy_timer.expires, sched_next))
sched_next = tbl->proxy_timer.expires;
}
skb_dst_drop(skb);
dev_hold(skb->dev);
__skb_queue_tail(&tbl->proxy_queue, skb);
mod_timer(&tbl->proxy_timer, sched_next);
spin_unlock(&tbl->proxy_queue.lock);
}
EXPORT_SYMBOL(pneigh_enqueue);
static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
struct net *net, int ifindex)
{
struct neigh_parms *p;
for (p = &tbl->parms; p; p = p->next) {
if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
(!p->dev && !ifindex))
return p;
}
return NULL;
}
struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
struct neigh_table *tbl)
{
struct neigh_parms *p, *ref;
struct net *net = dev_net(dev);
const struct net_device_ops *ops = dev->netdev_ops;
ref = lookup_neigh_parms(tbl, net, 0);
if (!ref)
return NULL;
p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
if (p) {
p->tbl = tbl;
atomic_set(&p->refcnt, 1);
p->reachable_time =
neigh_rand_reach_time(p->base_reachable_time);
if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
kfree(p);
return NULL;
}
dev_hold(dev);
p->dev = dev;
write_pnet(&p->net, hold_net(net));
p->sysctl_table = NULL;
write_lock_bh(&tbl->lock);
p->next = tbl->parms.next;
tbl->parms.next = p;
write_unlock_bh(&tbl->lock);
}
return p;
}
EXPORT_SYMBOL(neigh_parms_alloc);
static void neigh_rcu_free_parms(struct rcu_head *head)
{
struct neigh_parms *parms =
container_of(head, struct neigh_parms, rcu_head);
neigh_parms_put(parms);
}
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
{
struct neigh_parms **p;
if (!parms || parms == &tbl->parms)
return;
write_lock_bh(&tbl->lock);
for (p = &tbl->parms.next; *p; p = &(*p)->next) {
if (*p == parms) {
*p = parms->next;
parms->dead = 1;
write_unlock_bh(&tbl->lock);
if (parms->dev)
dev_put(parms->dev);
call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
return;
}
}
write_unlock_bh(&tbl->lock);
NEIGH_PRINTK1("neigh_parms_release: not found\n");
}
EXPORT_SYMBOL(neigh_parms_release);
static void neigh_parms_destroy(struct neigh_parms *parms)
{
release_net(neigh_parms_net(parms));
kfree(parms);
}
static struct lock_class_key neigh_table_proxy_queue_class;
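/* Set up a neighbour table without registering netlink handlers: the
* entry slab cache, per-CPU statistics, the RCU-managed hash table,
* the proxy hash, the deferrable GC work and the proxy timer.
*/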
void neigh_table_init_no_netlink(struct neigh_table *tbl)
{
unsigned long now = jiffies;
unsigned long phsize;
write_pnet(&tbl->parms.net, &init_net);
atomic_set(&tbl->parms.refcnt, 1);
tbl->parms.reachable_time =
neigh_rand_reach_time(tbl->parms.base_reachable_time);
if (!tbl->kmem_cachep)
tbl->kmem_cachep =
kmem_cache_create(tbl->id, tbl->entry_size, 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
tbl->stats = alloc_percpu(struct neigh_statistics);
if (!tbl->stats)
panic("cannot create neighbour cache statistics");
#ifdef CONFIG_PROC_FS
if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
&neigh_stat_seq_fops, tbl))
panic("cannot create neighbour proc dir entry");
#endif
RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(8));
phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
if (!tbl->nht || !tbl->phash_buckets)
panic("cannot allocate neighbour cache hashes");
rwlock_init(&tbl->lock);
INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
skb_queue_head_init_class(&tbl->proxy_queue,
&neigh_table_proxy_queue_class);
tbl->last_flush = now;
tbl->last_rand = now + tbl->parms.reachable_time * 20;
}
EXPORT_SYMBOL(neigh_table_init_no_netlink);
void neigh_table_init(struct neigh_table *tbl)
{
struct neigh_table *tmp;
neigh_table_init_no_netlink(tbl);
write_lock(&neigh_tbl_lock);
for (tmp = neigh_tables; tmp; tmp = tmp->next) {
if (tmp->family == tbl->family)
break;
}
tbl->next = neigh_tables;
neigh_tables = tbl;
write_unlock(&neigh_tbl_lock);
if (unlikely(tmp)) {
printk(KERN_ERR "NEIGH: Registering multiple tables for "
"family %d\n", tbl->family);
dump_stack();
}
}
EXPORT_SYMBOL(neigh_table_init);
int neigh_table_clear(struct neigh_table *tbl)
{
struct neigh_table **tp;
/* This is not clean... fix it so the IPv6 module can be unloaded safely */
cancel_delayed_work_sync(&tbl->gc_work);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
neigh_ifdown(tbl, NULL);
if (atomic_read(&tbl->entries))
printk(KERN_CRIT "neighbour leakage\n");
write_lock(&neigh_tbl_lock);
for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
if (*tp == tbl) {
*tp = tbl->next;
break;
}
}
write_unlock(&neigh_tbl_lock);
call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
neigh_hash_free_rcu);
tbl->nht = NULL;
kfree(tbl->phash_buckets);
tbl->phash_buckets = NULL;
remove_proc_entry(tbl->id, init_net.proc_net_stat);
free_percpu(tbl->stats);
tbl->stats = NULL;
kmem_cache_destroy(tbl->kmem_cachep);
tbl->kmem_cachep = NULL;
return 0;
}
EXPORT_SYMBOL(neigh_table_clear);
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct ndmsg *ndm;
struct nlattr *dst_attr;
struct neigh_table *tbl;
struct net_device *dev = NULL;
int err = -EINVAL;
ASSERT_RTNL();
if (nlmsg_len(nlh) < sizeof(*ndm))
goto out;
dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
if (dst_attr == NULL)
goto out;
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex) {
dev = __dev_get_by_index(net, ndm->ndm_ifindex);
if (dev == NULL) {
err = -ENODEV;
goto out;
}
}
read_lock(&neigh_tbl_lock);
for (tbl = neigh_tables; tbl; tbl = tbl->next) {
struct neighbour *neigh;
if (tbl->family != ndm->ndm_family)
continue;
read_unlock(&neigh_tbl_lock);
if (nla_len(dst_attr) < tbl->key_len)
goto out;
if (ndm->ndm_flags & NTF_PROXY) {
err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
goto out;
}
if (dev == NULL)
goto out;
neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
if (neigh == NULL) {
err = -ENOENT;
goto out;
}
err = neigh_update(neigh, NULL, NUD_FAILED,
NEIGH_UPDATE_F_OVERRIDE |
NEIGH_UPDATE_F_ADMIN);
neigh_release(neigh);
goto out;
}
read_unlock(&neigh_tbl_lock);
err = -EAFNOSUPPORT;
out:
return err;
}
static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct ndmsg *ndm;
struct nlattr *tb[NDA_MAX+1];
struct neigh_table *tbl;
struct net_device *dev = NULL;
int err;
ASSERT_RTNL();
err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
if (err < 0)
goto out;
err = -EINVAL;
if (tb[NDA_DST] == NULL)
goto out;
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex) {
dev = __dev_get_by_index(net, ndm->ndm_ifindex);
if (dev == NULL) {
err = -ENODEV;
goto out;
}
if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
goto out;
}
read_lock(&neigh_tbl_lock);
for (tbl = neigh_tables; tbl; tbl = tbl->next) {
int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
struct neighbour *neigh;
void *dst, *lladdr;
if (tbl->family != ndm->ndm_family)
continue;
read_unlock(&neigh_tbl_lock);
if (nla_len(tb[NDA_DST]) < tbl->key_len)
goto out;
dst = nla_data(tb[NDA_DST]);
lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
if (ndm->ndm_flags & NTF_PROXY) {
struct pneigh_entry *pn;
err = -ENOBUFS;
pn = pneigh_lookup(tbl, net, dst, dev, 1);
if (pn) {
pn->flags = ndm->ndm_flags;
err = 0;
}
goto out;
}
if (dev == NULL)
goto out;
neigh = neigh_lookup(tbl, dst, dev);
if (neigh == NULL) {
if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
err = -ENOENT;
goto out;
}
neigh = __neigh_lookup_errno(tbl, dst, dev);
if (IS_ERR(neigh)) {
err = PTR_ERR(neigh);
goto out;
}
} else {
if (nlh->nlmsg_flags & NLM_F_EXCL) {
err = -EEXIST;
neigh_release(neigh);
goto out;
}
if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
flags &= ~NEIGH_UPDATE_F_OVERRIDE;
}
if (ndm->ndm_flags & NTF_USE) {
neigh_event_send(neigh, NULL);
err = 0;
} else
err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
neigh_release(neigh);
goto out;
}
read_unlock(&neigh_tbl_lock);
err = -EAFNOSUPPORT;
out:
return err;
}
static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
{
struct nlattr *nest;
nest = nla_nest_start(skb, NDTA_PARMS);
if (nest == NULL)
return -ENOBUFS;
if (parms->dev)
NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
parms->base_reachable_time);
NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
return nla_nest_end(skb, nest);
nla_put_failure:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
u32 pid, u32 seq, int type, int flags)
{
struct nlmsghdr *nlh;
struct ndtmsg *ndtmsg;
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
if (nlh == NULL)
return -EMSGSIZE;
ndtmsg = nlmsg_data(nlh);
read_lock_bh(&tbl->lock);
ndtmsg->ndtm_family = tbl->family;
ndtmsg->ndtm_pad1 = 0;
ndtmsg->ndtm_pad2 = 0;
NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
{
unsigned long now = jiffies;
unsigned int flush_delta = now - tbl->last_flush;
unsigned int rand_delta = now - tbl->last_rand;
struct neigh_hash_table *nht;
struct ndt_config ndc = {
.ndtc_key_len = tbl->key_len,
.ndtc_entry_size = tbl->entry_size,
.ndtc_entries = atomic_read(&tbl->entries),
.ndtc_last_flush = jiffies_to_msecs(flush_delta),
.ndtc_last_rand = jiffies_to_msecs(rand_delta),
.ndtc_proxy_qlen = tbl->proxy_queue.qlen,
};
rcu_read_lock_bh();
nht = rcu_dereference_bh(tbl->nht);
ndc.ndtc_hash_rnd = nht->hash_rnd;
ndc.ndtc_hash_mask = nht->hash_mask;
rcu_read_unlock_bh();
NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
}
{
int cpu;
struct ndt_stats ndst;
memset(&ndst, 0, sizeof(ndst));
for_each_possible_cpu(cpu) {
struct neigh_statistics *st;
st = per_cpu_ptr(tbl->stats, cpu);
ndst.ndts_allocs += st->allocs;
ndst.ndts_destroys += st->destroys;
ndst.ndts_hash_grows += st->hash_grows;
ndst.ndts_res_failed += st->res_failed;
ndst.ndts_lookups += st->lookups;
ndst.ndts_hits += st->hits;
ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
ndst.ndts_forced_gc_runs += st->forced_gc_runs;
}
NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
}
BUG_ON(tbl->parms.dev);
if (neightbl_fill_parms(skb, &tbl->parms) < 0)
goto nla_put_failure;
read_unlock_bh(&tbl->lock);
return nlmsg_end(skb, nlh);
nla_put_failure:
read_unlock_bh(&tbl->lock);
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static int neightbl_fill_param_info(struct sk_buff *skb,
struct neigh_table *tbl,
struct neigh_parms *parms,
u32 pid, u32 seq, int type,
unsigned int flags)
{
struct ndtmsg *ndtmsg;
struct nlmsghdr *nlh;
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
if (nlh == NULL)
return -EMSGSIZE;
ndtmsg = nlmsg_data(nlh);
read_lock_bh(&tbl->lock);
ndtmsg->ndtm_family = tbl->family;
ndtmsg->ndtm_pad1 = 0;
ndtmsg->ndtm_pad2 = 0;
if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
neightbl_fill_parms(skb, parms) < 0)
goto errout;
read_unlock_bh(&tbl->lock);
return nlmsg_end(skb, nlh);
errout:
read_unlock_bh(&tbl->lock);
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
[NDTA_NAME] = { .type = NLA_STRING },
[NDTA_THRESH1] = { .type = NLA_U32 },
[NDTA_THRESH2] = { .type = NLA_U32 },
[NDTA_THRESH3] = { .type = NLA_U32 },
[NDTA_GC_INTERVAL] = { .type = NLA_U64 },
[NDTA_PARMS] = { .type = NLA_NESTED },
};
static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
[NDTPA_IFINDEX] = { .type = NLA_U32 },
[NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
[NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
[NDTPA_APP_PROBES] = { .type = NLA_U32 },
[NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
[NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
[NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
[NDTPA_GC_STALETIME] = { .type = NLA_U64 },
[NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
[NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
[NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
[NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
[NDTPA_LOCKTIME] = { .type = NLA_U64 },
};
static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct neigh_table *tbl;
struct ndtmsg *ndtmsg;
struct nlattr *tb[NDTA_MAX+1];
int err;
err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
nl_neightbl_policy);
if (err < 0)
goto errout;
if (tb[NDTA_NAME] == NULL) {
err = -EINVAL;
goto errout;
}
ndtmsg = nlmsg_data(nlh);
read_lock(&neigh_tbl_lock);
for (tbl = neigh_tables; tbl; tbl = tbl->next) {
if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
continue;
if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
break;
}
if (tbl == NULL) {
err = -ENOENT;
goto errout_locked;
}
/*
* We acquire tbl->lock to be nice to the periodic timers and
* make sure they always see a consistent set of values.
*/
write_lock_bh(&tbl->lock);
if (tb[NDTA_PARMS]) {
struct nlattr *tbp[NDTPA_MAX+1];
struct neigh_parms *p;
int i, ifindex = 0;
err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
nl_ntbl_parm_policy);
if (err < 0)
goto errout_tbl_lock;
if (tbp[NDTPA_IFINDEX])
ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
p = lookup_neigh_parms(tbl, net, ifindex);
if (p == NULL) {
err = -ENOENT;
goto errout_tbl_lock;
}
for (i = 1; i <= NDTPA_MAX; i++) {
if (tbp[i] == NULL)
continue;
switch (i) {
case NDTPA_QUEUE_LEN:
p->queue_len = nla_get_u32(tbp[i]);
break;
case NDTPA_PROXY_QLEN:
p->proxy_qlen = nla_get_u32(tbp[i]);
break;
case NDTPA_APP_PROBES:
p->app_probes = nla_get_u32(tbp[i]);
break;
case NDTPA_UCAST_PROBES:
p->ucast_probes = nla_get_u32(tbp[i]);
break;
case NDTPA_MCAST_PROBES:
p->mcast_probes = nla_get_u32(tbp[i]);
break;
case NDTPA_BASE_REACHABLE_TIME:
p->base_reachable_time = nla_get_msecs(tbp[i]);
break;
case NDTPA_GC_STALETIME:
p->gc_staletime = nla_get_msecs(tbp[i]);
break;
case NDTPA_DELAY_PROBE_TIME:
p->delay_probe_time = nla_get_msecs(tbp[i]);
break;
case NDTPA_RETRANS_TIME:
p->retrans_time = nla_get_msecs(tbp[i]);
break;
case NDTPA_ANYCAST_DELAY:
p->anycast_delay = nla_get_msecs(tbp[i]);
break;
case NDTPA_PROXY_DELAY:
p->proxy_delay = nla_get_msecs(tbp[i]);
break;
case NDTPA_LOCKTIME:
p->locktime = nla_get_msecs(tbp[i]);
break;
}
}
}
if (tb[NDTA_THRESH1])
tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
if (tb[NDTA_THRESH2])
tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
if (tb[NDTA_THRESH3])
tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
if (tb[NDTA_GC_INTERVAL])
tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
err = 0;
errout_tbl_lock:
write_unlock_bh(&tbl->lock);
errout_locked:
read_unlock(&neigh_tbl_lock);
errout:
return err;
}
static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
int family, tidx, nidx = 0;
int tbl_skip = cb->args[0];
int neigh_skip = cb->args[1];
struct neigh_table *tbl;
family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
read_lock(&neigh_tbl_lock);
for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
struct neigh_parms *p;
if (tidx < tbl_skip || (family && tbl->family != family))
continue;
if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
NLM_F_MULTI) <= 0)
break;
for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
if (!net_eq(neigh_parms_net(p), net))
continue;
if (nidx < neigh_skip)
goto next;
if (neightbl_fill_param_info(skb, tbl, p,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGHTBL,
NLM_F_MULTI) <= 0)
goto out;
next:
nidx++;
}
neigh_skip = 0;
}
out:
read_unlock(&neigh_tbl_lock);
cb->args[0] = tidx;
cb->args[1] = nidx;
return skb->len;
}
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
u32 pid, u32 seq, int type, unsigned int flags)
{
unsigned long now = jiffies;
struct nda_cacheinfo ci;
struct nlmsghdr *nlh;
struct ndmsg *ndm;
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
if (nlh == NULL)
return -EMSGSIZE;
ndm = nlmsg_data(nlh);
ndm->ndm_family = neigh->ops->family;
ndm->ndm_pad1 = 0;
ndm->ndm_pad2 = 0;
ndm->ndm_flags = neigh->flags;
ndm->ndm_type = neigh->type;
ndm->ndm_ifindex = neigh->dev->ifindex;
NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
read_lock_bh(&neigh->lock);
ndm->ndm_state = neigh->nud_state;
if (neigh->nud_state & NUD_VALID) {
char haddr[MAX_ADDR_LEN];
neigh_ha_snapshot(haddr, neigh, neigh->dev);
if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
read_unlock_bh(&neigh->lock);
goto nla_put_failure;
}
}
ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
read_unlock_bh(&neigh->lock);
NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
return nlmsg_end(skb, nlh);
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static void neigh_update_notify(struct neighbour *neigh)
{
call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
__neigh_notify(neigh, RTM_NEWNEIGH, 0);
}
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct neighbour *n;
int rc, h, s_h = cb->args[1];
int idx, s_idx = idx = cb->args[2];
struct neigh_hash_table *nht;
rcu_read_lock_bh();
nht = rcu_dereference_bh(tbl->nht);
for (h = 0; h <= nht->hash_mask; h++) {
if (h < s_h)
continue;
if (h > s_h)
s_idx = 0;
for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
n != NULL;
n = rcu_dereference_bh(n->next)) {
if (!net_eq(dev_net(n->dev), net))
continue;
if (idx < s_idx)
goto next;
if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI) <= 0) {
rc = -1;
goto out;
}
next:
idx++;
}
}
rc = skb->len;
out:
rcu_read_unlock_bh();
cb->args[1] = h;
cb->args[2] = idx;
return rc;
}
static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
struct neigh_table *tbl;
int t, family, s_t;
read_lock(&neigh_tbl_lock);
family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
s_t = cb->args[0];
for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
if (t < s_t || (family && tbl->family != family))
continue;
if (t > s_t)
memset(&cb->args[1], 0, sizeof(cb->args) -
sizeof(cb->args[0]));
if (neigh_dump_table(tbl, skb, cb) < 0)
break;
}
read_unlock(&neigh_tbl_lock);
cb->args[0] = t;
return skb->len;
}
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
int chain;
struct neigh_hash_table *nht;
rcu_read_lock_bh();
nht = rcu_dereference_bh(tbl->nht);
read_lock(&tbl->lock); /* avoid resizes */
for (chain = 0; chain <= nht->hash_mask; chain++) {
struct neighbour *n;
for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
n != NULL;
n = rcu_dereference_bh(n->next))
cb(n, cookie);
}
read_unlock(&tbl->lock);
rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);
/* The tbl->lock must be held as a writer and BH disabled. */
void __neigh_for_each_release(struct neigh_table *tbl,
int (*cb)(struct neighbour *))
{
int chain;
struct neigh_hash_table *nht;
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
for (chain = 0; chain <= nht->hash_mask; chain++) {
struct neighbour *n;
struct neighbour __rcu **np;
np = &nht->hash_buckets[chain];
while ((n = rcu_dereference_protected(*np,
lockdep_is_held(&tbl->lock))) != NULL) {
int release;
write_lock(&n->lock);
release = cb(n);
if (release) {
rcu_assign_pointer(*np,
rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock)));
n->dead = 1;
} else
np = &n->next;
write_unlock(&n->lock);
if (release)
neigh_cleanup_and_release(n);
}
}
}
EXPORT_SYMBOL(__neigh_for_each_release);
#ifdef CONFIG_PROC_FS
static struct neighbour *neigh_get_first(struct seq_file *seq)
{
struct neigh_seq_state *state = seq->private;
struct net *net = seq_file_net(seq);
struct neigh_hash_table *nht = state->nht;
struct neighbour *n = NULL;
int bucket = state->bucket;
state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
for (bucket = 0; bucket <= nht->hash_mask; bucket++) {
n = rcu_dereference_bh(nht->hash_buckets[bucket]);
while (n) {
if (!net_eq(dev_net(n->dev), net))
goto next;
if (state->neigh_sub_iter) {
loff_t fakep = 0;
void *v;
v = state->neigh_sub_iter(state, n, &fakep);
if (!v)
goto next;
}
if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
break;
if (n->nud_state & ~NUD_NOARP)
break;
next:
n = rcu_dereference_bh(n->next);
}
if (n)
break;
}
state->bucket = bucket;
return n;
}
static struct neighbour *neigh_get_next(struct seq_file *seq,
struct neighbour *n,
loff_t *pos)
{
struct neigh_seq_state *state = seq->private;
struct net *net = seq_file_net(seq);
struct neigh_hash_table *nht = state->nht;
if (state->neigh_sub_iter) {
void *v = state->neigh_sub_iter(state, n, pos);
if (v)
return n;
}
n = rcu_dereference_bh(n->next);
while (1) {
while (n) {
if (!net_eq(dev_net(n->dev), net))
goto next;
if (state->neigh_sub_iter) {
void *v = state->neigh_sub_iter(state, n, pos);
if (v)
return n;
goto next;
}
if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
break;
if (n->nud_state & ~NUD_NOARP)
break;
next:
n = rcu_dereference_bh(n->next);
}
if (n)
break;
if (++state->bucket > nht->hash_mask)
break;
n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
}
if (n && pos)
--(*pos);
return n;
}
static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
{
struct neighbour *n = neigh_get_first(seq);
if (n) {
--(*pos);
while (*pos) {
n = neigh_get_next(seq, n, pos);
if (!n)
break;
}
}
return *pos ? NULL : n;
}
static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
{
struct neigh_seq_state *state = seq->private;
struct net *net = seq_file_net(seq);
struct neigh_table *tbl = state->tbl;
struct pneigh_entry *pn = NULL;
int bucket = state->bucket;
state->flags |= NEIGH_SEQ_IS_PNEIGH;
for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
pn = tbl->phash_buckets[bucket];
while (pn && !net_eq(pneigh_net(pn), net))
pn = pn->next;
if (pn)
break;
}
state->bucket = bucket;
return pn;
}
static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
struct pneigh_entry *pn,
loff_t *pos)
{
struct neigh_seq_state *state = seq->private;
struct net *net = seq_file_net(seq);
struct neigh_table *tbl = state->tbl;
pn = pn->next;
while (!pn) {
if (++state->bucket > PNEIGH_HASHMASK)
break;
pn = tbl->phash_buckets[state->bucket];
while (pn && !net_eq(pneigh_net(pn), net))
pn = pn->next;
if (pn)
break;
}
if (pn && pos)
--(*pos);
return pn;
}
static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
{
struct pneigh_entry *pn = pneigh_get_first(seq);
if (pn) {
--(*pos);
while (*pos) {
pn = pneigh_get_next(seq, pn, pos);
if (!pn)
break;
}
}
return *pos ? NULL : pn;
}
static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
{
struct neigh_seq_state *state = seq->private;
void *rc;
loff_t idxpos = *pos;
rc = neigh_get_idx(seq, &idxpos);
if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
rc = pneigh_get_idx(seq, &idxpos);
return rc;
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
__acquires(rcu_bh)
{
struct neigh_seq_state *state = seq->private;
state->tbl = tbl;
state->bucket = 0;
state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
rcu_read_lock_bh();
state->nht = rcu_dereference_bh(tbl->nht);
return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
EXPORT_SYMBOL(neigh_seq_start);
void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct neigh_seq_state *state;
void *rc;
if (v == SEQ_START_TOKEN) {
rc = neigh_get_first(seq);
goto out;
}
state = seq->private;
if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
rc = neigh_get_next(seq, v, NULL);
if (rc)
goto out;
if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
rc = pneigh_get_first(seq);
} else {
BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
rc = pneigh_get_next(seq, v, NULL);
}
out:
++(*pos);
return rc;
}
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
__releases(rcu_bh)
{
rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);
/* statistics via seq_file */
static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
{
struct neigh_table *tbl = seq->private;
int cpu;
if (*pos == 0)
return SEQ_START_TOKEN;
for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
return per_cpu_ptr(tbl->stats, cpu);
}
return NULL;
}
static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct neigh_table *tbl = seq->private;
int cpu;
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
return per_cpu_ptr(tbl->stats, cpu);
}
return NULL;
}
static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
{
}
static int neigh_stat_seq_show(struct seq_file *seq, void *v)
{
struct neigh_table *tbl = seq->private;
struct neigh_statistics *st = v;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
return 0;
}
seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
"%08lx %08lx %08lx %08lx %08lx\n",
atomic_read(&tbl->entries),
st->allocs,
st->destroys,
st->hash_grows,
st->lookups,
st->hits,
st->res_failed,
st->rcv_probes_mcast,
st->rcv_probes_ucast,
st->periodic_gc_runs,
st->forced_gc_runs,
st->unres_discards
);
return 0;
}
static const struct seq_operations neigh_stat_seq_ops = {
.start = neigh_stat_seq_start,
.next = neigh_stat_seq_next,
.stop = neigh_stat_seq_stop,
.show = neigh_stat_seq_show,
};
static int neigh_stat_seq_open(struct inode *inode, struct file *file)
{
int ret = seq_open(file, &neigh_stat_seq_ops);
if (!ret) {
struct seq_file *sf = file->private_data;
sf->private = PDE(inode)->data;
}
return ret;
}
static const struct file_operations neigh_stat_seq_fops = {
.owner = THIS_MODULE,
.open = neigh_stat_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
static inline size_t neigh_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ndmsg))
+ nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
+ nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
+ nla_total_size(sizeof(struct nda_cacheinfo))
+ nla_total_size(4); /* NDA_PROBES */
}
static void __neigh_notify(struct neighbour *n, int type, int flags)
{
struct net *net = dev_net(n->dev);
struct sk_buff *skb;
int err = -ENOBUFS;
skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
goto errout;
err = neigh_fill_info(skb, n, 0, 0, type, flags);
if (err < 0) {
/* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
EXPORT_SYMBOL(neigh_app_ns);
#endif /* CONFIG_ARPD */
#ifdef CONFIG_SYSCTL
#define NEIGH_VARS_MAX 19
static struct neigh_sysctl_table {
struct ctl_table_header *sysctl_header;
struct ctl_table neigh_vars[NEIGH_VARS_MAX];
char *dev_name;
} neigh_sysctl_template __read_mostly = {
.neigh_vars = {
{
.procname = "mcast_solicit",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "ucast_solicit",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "app_solicit",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "retrans_time",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_userhz_jiffies,
},
{
.procname = "base_reachable_time",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "delay_first_probe_time",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "gc_stale_time",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "unres_qlen",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "proxy_qlen",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "anycast_delay",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_userhz_jiffies,
},
{
.procname = "proxy_delay",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_userhz_jiffies,
},
{
.procname = "locktime",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_userhz_jiffies,
},
{
.procname = "retrans_time_ms",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
{
.procname = "base_reachable_time_ms",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_ms_jiffies,
},
{
.procname = "gc_interval",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "gc_thresh1",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "gc_thresh2",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "gc_thresh3",
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{},
},
};
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
char *p_name, proc_handler *handler)
{
struct neigh_sysctl_table *t;
const char *dev_name_source = NULL;
#define NEIGH_CTL_PATH_ROOT 0
#define NEIGH_CTL_PATH_PROTO 1
#define NEIGH_CTL_PATH_NEIGH 2
#define NEIGH_CTL_PATH_DEV 3
struct ctl_path neigh_path[] = {
{ .procname = "net", },
{ .procname = "proto", },
{ .procname = "neigh", },
{ .procname = "default", },
{ },
};
t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
if (!t)
goto err;
t->neigh_vars[0].data = &p->mcast_probes;
t->neigh_vars[1].data = &p->ucast_probes;
t->neigh_vars[2].data = &p->app_probes;
t->neigh_vars[3].data = &p->retrans_time;
t->neigh_vars[4].data = &p->base_reachable_time;
t->neigh_vars[5].data = &p->delay_probe_time;
t->neigh_vars[6].data = &p->gc_staletime;
t->neigh_vars[7].data = &p->queue_len;
t->neigh_vars[8].data = &p->proxy_qlen;
t->neigh_vars[9].data = &p->anycast_delay;
t->neigh_vars[10].data = &p->proxy_delay;
t->neigh_vars[11].data = &p->locktime;
t->neigh_vars[12].data = &p->retrans_time;
t->neigh_vars[13].data = &p->base_reachable_time;
if (dev) {
dev_name_source = dev->name;
/* Terminate the table early */
memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
} else {
dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
t->neigh_vars[14].data = (int *)(p + 1);
t->neigh_vars[15].data = (int *)(p + 1) + 1;
t->neigh_vars[16].data = (int *)(p + 1) + 2;
t->neigh_vars[17].data = (int *)(p + 1) + 3;
}
if (handler) {
/* RetransTime */
t->neigh_vars[3].proc_handler = handler;
t->neigh_vars[3].extra1 = dev;
/* ReachableTime */
t->neigh_vars[4].proc_handler = handler;
t->neigh_vars[4].extra1 = dev;
/* RetransTime (in milliseconds)*/
t->neigh_vars[12].proc_handler = handler;
t->neigh_vars[12].extra1 = dev;
/* ReachableTime (in milliseconds) */
t->neigh_vars[13].proc_handler = handler;
t->neigh_vars[13].extra1 = dev;
}
t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
if (!t->dev_name)
goto free;
neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
t->sysctl_header =
register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
if (!t->sysctl_header)
goto free_procname;
p->sysctl_table = t;
return 0;
free_procname:
kfree(t->dev_name);
free:
kfree(t);
err:
return -ENOBUFS;
}
EXPORT_SYMBOL(neigh_sysctl_register);
void neigh_sysctl_unregister(struct neigh_parms *p)
{
if (p->sysctl_table) {
struct neigh_sysctl_table *t = p->sysctl_table;
p->sysctl_table = NULL;
unregister_sysctl_table(t->sysctl_header);
kfree(t->dev_name);
kfree(t);
}
}
EXPORT_SYMBOL(neigh_sysctl_unregister);
#endif /* CONFIG_SYSCTL */
static int __init neigh_init(void)
{
rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
NULL);
rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
return 0;
}
subsys_initcall(neigh_init);
| gpl-2.0 |
droidcore/KrakenTC | drivers/platform/msm/msm_bus/msm_bus_rules.c | 326 | 15437 | /* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/list_sort.h>
#include <linux/msm-bus-board.h>
#include <linux/msm_bus_rules.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <trace/events/trace_msm_bus.h>
struct node_vote_info {
int id;
u64 ib;
u64 ab;
u64 clk;
};
struct rules_def {
int rule_id;
int num_src;
int state;
struct node_vote_info *src_info;
struct bus_rule_type rule_ops;
bool state_change;
struct list_head link;
};
struct rule_node_info {
int id;
void *data;
struct raw_notifier_head rule_notify_list;
struct rules_def *cur_rule;
int num_rules;
struct list_head node_rules;
struct list_head link;
struct rule_apply_rcm_info apply;
};
DEFINE_MUTEX(msm_bus_rules_lock);
static LIST_HEAD(node_list);
static struct rule_node_info *get_node(u32 id, void *data);
static int node_rules_compare(void *priv, struct list_head *a,
struct list_head *b);
#define LE(op1, op2) (op1 <= op2)
#define LT(op1, op2) (op1 < op2)
#define GE(op1, op2) (op1 >= op2)
#define GT(op1, op2) (op1 > op2)
#define NB_ID (0x201)
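/* Find an existing rule node by id. NB_ID nodes carry notifier blocks
* and may be registered by many clients, so they must also match the
* client's data pointer.
*/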
static struct rule_node_info *get_node(u32 id, void *data)
{
struct rule_node_info *node_it = NULL;
struct rule_node_info *node_match = NULL;
list_for_each_entry(node_it, &node_list, link) {
if (node_it->id == id) {
if (id == NB_ID) {
if (node_it->data == data) {
node_match = node_it;
break;
}
} else {
node_match = node_it;
break;
}
}
}
return node_match;
}
static struct rule_node_info *gen_node(u32 id, void *data)
{
struct rule_node_info *node_it = NULL;
struct rule_node_info *node_match = NULL;
list_for_each_entry(node_it, &node_list, link) {
if (node_it->id == id) {
node_match = node_it;
break;
}
}
if (!node_match) {
node_match = kzalloc(sizeof(struct rule_node_info), GFP_KERNEL);
if (!node_match) {
pr_err("%s: Cannot allocate memory", __func__);
goto exit_node_match;
}
node_match->id = id;
node_match->cur_rule = NULL;
node_match->num_rules = 0;
node_match->data = data;
list_add_tail(&node_match->link, &node_list);
INIT_LIST_HEAD(&node_match->node_rules);
RAW_INIT_NOTIFIER_HEAD(&node_match->rule_notify_list);
pr_debug("Added new node %d to list\n", id);
}
exit_node_match:
return node_match;
}
static bool do_compare_op(u64 op1, u64 op2, int op)
{
bool ret = false;
switch (op) {
case OP_LE:
ret = LE(op1, op2);
break;
case OP_LT:
ret = LT(op1, op2);
break;
case OP_GT:
ret = GT(op1, op2);
break;
case OP_GE:
ret = GE(op1, op2);
break;
case OP_NOOP:
ret = true;
break;
default:
pr_info("Invalid OP %d", op);
break;
}
return ret;
}
static void update_src_id_vote(struct rule_update_path_info *inp_node,
struct rule_node_info *rule_node)
{
struct rules_def *rule;
int i;
list_for_each_entry(rule, &rule_node->node_rules, link) {
for (i = 0; i < rule->num_src; i++) {
if (rule->src_info[i].id == inp_node->id) {
rule->src_info[i].ib = inp_node->ib;
rule->src_info[i].ab = inp_node->ab;
rule->src_info[i].clk = inp_node->clk;
}
}
}
}
static u64 get_field(struct rules_def *rule, int src_id)
{
u64 field = 0;
int i;
for (i = 0; i < rule->num_src; i++) {
switch (rule->rule_ops.src_field) {
case FLD_IB:
field += rule->src_info[i].ib;
break;
case FLD_AB:
field += rule->src_info[i].ab;
break;
case FLD_CLK:
field += rule->src_info[i].clk;
break;
}
}
return field;
}
static bool check_rule(struct rules_def *rule,
struct rule_update_path_info *inp)
{
bool ret = false;
if (!rule)
return ret;
switch (rule->rule_ops.op) {
case OP_LE:
case OP_LT:
case OP_GT:
case OP_GE:
{
u64 src_field = get_field(rule, inp->id);
ret = do_compare_op(src_field, rule->rule_ops.thresh,
rule->rule_ops.op);
break;
}
default:
pr_err("Unsupported op %d", rule->rule_ops.op);
break;
}
return ret;
}
static void match_rule(struct rule_update_path_info *inp_node,
struct rule_node_info *node)
{
struct rules_def *rule;
int i;
list_for_each_entry(rule, &node->node_rules, link) {
for (i = 0; i < rule->num_src; i++) {
if (rule->src_info[i].id == inp_node->id) {
if (check_rule(rule, inp_node)) {
trace_bus_rules_matches(
(node->cur_rule ?
node->cur_rule->rule_id : -1),
inp_node->id, inp_node->ab,
inp_node->ib, inp_node->clk);
if (rule->state ==
RULE_STATE_NOT_APPLIED)
rule->state_change = true;
rule->state = RULE_STATE_APPLIED;
} else {
if (rule->state ==
RULE_STATE_APPLIED)
rule->state_change = true;
rule->state = RULE_STATE_NOT_APPLIED;
}
}
}
}
}
static void apply_rule(struct rule_node_info *node,
struct list_head *output_list)
{
struct rules_def *rule;
struct rules_def *last_rule;
last_rule = node->cur_rule;
node->cur_rule = NULL;
list_for_each_entry(rule, &node->node_rules, link) {
if ((rule->state == RULE_STATE_APPLIED) &&
!node->cur_rule)
node->cur_rule = rule;
if (node->id == NB_ID) {
if (rule->state_change) {
rule->state_change = false;
raw_notifier_call_chain(&node->rule_notify_list,
rule->state, (void *)&rule->rule_ops);
}
} else {
if ((rule->state == RULE_STATE_APPLIED) &&
(node->cur_rule &&
(node->cur_rule->rule_id == rule->rule_id))) {
node->apply.id = rule->rule_ops.dst_node[0];
node->apply.throttle = rule->rule_ops.mode;
node->apply.lim_bw = rule->rule_ops.dst_bw;
node->apply.after_clk_commit = false;
if (last_rule != node->cur_rule)
list_add_tail(&node->apply.link,
output_list);
if (last_rule) {
if (node_rules_compare(NULL,
&last_rule->link,
&node->cur_rule->link) == -1)
node->apply.after_clk_commit =
true;
}
}
rule->state_change = false;
}
}
}
int msm_rules_update_path(struct list_head *input_list,
struct list_head *output_list)
{
int ret = 0;
struct rule_update_path_info *inp_node;
struct rule_node_info *node_it = NULL;
mutex_lock(&msm_bus_rules_lock);
list_for_each_entry(inp_node, input_list, link) {
list_for_each_entry(node_it, &node_list, link) {
update_src_id_vote(inp_node, node_it);
match_rule(inp_node, node_it);
}
}
list_for_each_entry(node_it, &node_list, link)
apply_rule(node_it, output_list);
mutex_unlock(&msm_bus_rules_lock);
return ret;
}
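/*
* ops_equal() treats OP_GT/OP_GE and OP_LT/OP_LE as equivalent when
* comparing rules. The abs() trick below assumes the bus-rule op enum
* declares each of those pairs as adjacent values, so the two ops of a
* pair differ by exactly 1.
*/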
static bool ops_equal(int op1, int op2)
{
bool ret = false;
switch (op1) {
case OP_GT:
case OP_GE:
case OP_LT:
case OP_LE:
if (abs(op1 - op2) <= 1)
ret = true;
break;
default:
ret = (op1 == op2);
}
return ret;
}
static bool is_throttle_rule(int mode)
{
return (mode != THROTTLE_OFF);
}
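/*
* list_sort() comparator: orders a node's rules so that the first
* RULE_STATE_APPLIED entry apply_rule() walks over is the preferred
* one. A negative return keeps @a ahead of @b; throttling rules sort
* ahead of THROTTLE_OFF rules, and for matching ops the rule that
* sorts first has the smaller threshold for OP_LT/OP_LE and the larger
* threshold for OP_GT/OP_GE.
*/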
static int node_rules_compare(void *priv, struct list_head *a,
struct list_head *b)
{
struct rules_def *ra = container_of(a, struct rules_def, link);
struct rules_def *rb = container_of(b, struct rules_def, link);
int ret = -1;
int64_t th_diff = 0;
if (ra->rule_ops.mode == rb->rule_ops.mode) {
if (ops_equal(ra->rule_ops.op, rb->rule_ops.op)) {
if ((ra->rule_ops.op == OP_LT) ||
(ra->rule_ops.op == OP_LE)) {
th_diff = ra->rule_ops.thresh -
rb->rule_ops.thresh;
if (th_diff > 0)
ret = 1;
else
ret = -1;
} else if ((ra->rule_ops.op == OP_GT) ||
(ra->rule_ops.op == OP_GE)) {
th_diff = rb->rule_ops.thresh -
ra->rule_ops.thresh;
if (th_diff > 0)
ret = 1;
else
ret = -1;
}
} else
ret = ra->rule_ops.op - rb->rule_ops.op;
} else if (is_throttle_rule(ra->rule_ops.mode) &&
is_throttle_rule(rb->rule_ops.mode)) {
if (ra->rule_ops.mode == THROTTLE_ON)
ret = -1;
else
ret = 1;
} else if ((ra->rule_ops.mode == THROTTLE_OFF) &&
is_throttle_rule(rb->rule_ops.mode)) {
ret = 1;
} else if (is_throttle_rule(ra->rule_ops.mode) &&
(rb->rule_ops.mode == THROTTLE_OFF)) {
ret = -1;
}
return ret;
}
static void print_rules(struct rule_node_info *node_it)
{
struct rules_def *node_rule = NULL;
int i;
if (!node_it) {
pr_err("%s: no node for found", __func__);
return;
}
pr_info("\n Now printing rules for Node %d cur rule %d\n",
node_it->id,
(node_it->cur_rule ? node_it->cur_rule->rule_id : -1));
list_for_each_entry(node_rule, &node_it->node_rules, link) {
pr_info("\n num Rules %d rule Id %d\n",
node_it->num_rules, node_rule->rule_id);
pr_info("Rule: src_field %d\n", node_rule->rule_ops.src_field);
for (i = 0; i < node_rule->rule_ops.num_src; i++)
pr_info("Rule: src %d\n",
node_rule->rule_ops.src_id[i]);
for (i = 0; i < node_rule->rule_ops.num_dst; i++)
pr_info("Rule: dst %d dst_bw %llu\n",
node_rule->rule_ops.dst_node[i],
node_rule->rule_ops.dst_bw);
pr_info("Rule: thresh %llu op %d mode %d State %d\n",
node_rule->rule_ops.thresh,
node_rule->rule_ops.op,
node_rule->rule_ops.mode,
node_rule->state);
}
}
void print_all_rules(void)
{
struct rule_node_info *node_it = NULL;
list_for_each_entry(node_it, &node_list, link)
print_rules(node_it);
}
void print_rules_buf(char *buf, int max_buf)
{
struct rule_node_info *node_it = NULL;
struct rules_def *node_rule = NULL;
int i;
int cnt = 0;
list_for_each_entry(node_it, &node_list, link) {
cnt += scnprintf(buf + cnt, max_buf - cnt,
"\n Now printing rules for Node %d cur_rule %d\n",
node_it->id,
(node_it->cur_rule ? node_it->cur_rule->rule_id : -1));
list_for_each_entry(node_rule, &node_it->node_rules, link) {
cnt += scnprintf(buf + cnt, max_buf - cnt,
"\nNum Rules:%d ruleId %d STATE:%d change:%d\n",
node_it->num_rules, node_rule->rule_id,
node_rule->state, node_rule->state_change);
cnt += scnprintf(buf + cnt, max_buf - cnt,
"Src_field %d\n",
node_rule->rule_ops.src_field);
for (i = 0; i < node_rule->rule_ops.num_src; i++)
cnt += scnprintf(buf + cnt, max_buf - cnt,
"Src %d Cur Ib %llu Ab %llu\n",
node_rule->rule_ops.src_id[i],
node_rule->src_info[i].ib,
node_rule->src_info[i].ab);
for (i = 0; i < node_rule->rule_ops.num_dst; i++)
cnt += scnprintf(buf + cnt, max_buf - cnt,
"Dst %d dst_bw %llu\n",
node_rule->rule_ops.dst_node[0],
node_rule->rule_ops.dst_bw);
cnt += scnprintf(buf + cnt, max_buf - cnt,
"Thresh %llu op %d mode %d\n",
node_rule->rule_ops.thresh,
node_rule->rule_ops.op,
node_rule->rule_ops.mode);
}
}
}
static int copy_rule(struct bus_rule_type *src, struct rules_def *node_rule,
struct notifier_block *nb)
{
int i;
int ret = 0;
memcpy(&node_rule->rule_ops, src,
sizeof(struct bus_rule_type));
node_rule->rule_ops.src_id = kzalloc(
(sizeof(int) * node_rule->rule_ops.num_src),
GFP_KERNEL);
if (!node_rule->rule_ops.src_id) {
pr_err("%s:Failed to allocate for src_id",
__func__);
return -ENOMEM;
}
memcpy(node_rule->rule_ops.src_id, src->src_id,
sizeof(int) * src->num_src);
if (!nb) {
node_rule->rule_ops.dst_node = kzalloc(
(sizeof(int) * node_rule->rule_ops.num_dst),
GFP_KERNEL);
if (!node_rule->rule_ops.dst_node) {
pr_err("%s:Failed to allocate for src_id",
__func__);
return -ENOMEM;
}
memcpy(node_rule->rule_ops.dst_node, src->dst_node,
sizeof(int) * src->num_dst);
}
node_rule->num_src = src->num_src;
node_rule->src_info = kzalloc(
(sizeof(struct node_vote_info) * node_rule->rule_ops.num_src),
GFP_KERNEL);
if (!node_rule->src_info) {
pr_err("%s:Failed to allocate for src_id",
__func__);
return -ENOMEM;
}
for (i = 0; i < src->num_src; i++)
node_rule->src_info[i].id = src->src_id[i];
return ret;
}
void msm_rule_register(int num_rules, struct bus_rule_type *rule,
struct notifier_block *nb)
{
struct rule_node_info *node = NULL;
int i, j;
struct rules_def *node_rule = NULL;
int num_dst = 0;
if (!rule)
return;
mutex_lock(&msm_bus_rules_lock);
for (i = 0; i < num_rules; i++) {
if (nb)
num_dst = 1;
else
num_dst = rule[i].num_dst;
for (j = 0; j < num_dst; j++) {
int id = 0;
if (nb)
id = NB_ID;
else
id = rule[i].dst_node[j];
node = gen_node(id, nb);
if (!node) {
pr_err("Error getting node\n");
goto exit_rule_register;
}
node_rule = kzalloc(sizeof(struct rules_def),
GFP_KERNEL);
if (!node_rule) {
pr_err("%s: Failed to allocate for rule\n",
__func__);
goto exit_rule_register;
}
if (copy_rule(&rule[i], node_rule, nb)) {
pr_err("Error copying rule\n");
kfree(node_rule);
goto exit_rule_register;
}
node_rule->rule_id = node->num_rules++;
if (nb)
node->data = nb;
list_add_tail(&node_rule->link, &node->node_rules);
}
}
list_sort(NULL, &node->node_rules, node_rules_compare);
if (nb)
raw_notifier_chain_register(&node->rule_notify_list, nb);
exit_rule_register:
mutex_unlock(&msm_bus_rules_lock);
return;
}
static int comp_rules(struct bus_rule_type *rulea, struct bus_rule_type *ruleb)
{
int ret = 1;
if (rulea->num_src == ruleb->num_src)
ret = memcmp(rulea->src_id, ruleb->src_id,
(sizeof(int) * rulea->num_src));
if (!ret && (rulea->num_dst == ruleb->num_dst))
ret = memcmp(rulea->dst_node, ruleb->dst_node,
(sizeof(int) * rulea->num_dst));
if (!ret && (rulea->dst_bw == ruleb->dst_bw) &&
(rulea->op == ruleb->op) && (rulea->thresh == ruleb->thresh))
ret = 0;
return ret;
}
void msm_rule_unregister(int num_rules, struct bus_rule_type *rule,
struct notifier_block *nb)
{
int i;
struct rule_node_info *node = NULL;
struct rule_node_info *node_tmp = NULL;
struct rules_def *node_rule;
struct rules_def *node_rule_tmp;
bool match_found = false;
if (!rule)
return;
mutex_lock(&msm_bus_rules_lock);
if (nb) {
node = get_node(NB_ID, nb);
if (!node) {
pr_err("%s: Can't find node", __func__);
goto exit_unregister_rule;
}
list_for_each_entry_safe(node_rule, node_rule_tmp,
&node->node_rules, link) {
list_del(&node_rule->link);
kfree(node_rule);
node->num_rules--;
}
raw_notifier_chain_unregister(&node->rule_notify_list, nb);
} else {
for (i = 0; i < num_rules; i++) {
match_found = false;
list_for_each_entry(node, &node_list, link) {
list_for_each_entry_safe(node_rule,
node_rule_tmp, &node->node_rules, link) {
if (comp_rules(&node_rule->rule_ops,
&rule[i]) == 0) {
list_del(&node_rule->link);
kfree(node_rule);
match_found = true;
node->num_rules--;
list_sort(NULL,
&node->node_rules,
node_rules_compare);
break;
}
}
}
}
}
list_for_each_entry_safe(node, node_tmp,
&node_list, link) {
if (!node->num_rules) {
pr_debug("Deleting Rule node %d", node->id);
list_del(&node->link);
kfree(node);
}
}
exit_unregister_rule:
mutex_unlock(&msm_bus_rules_lock);
}
bool msm_rule_are_rules_registered(void)
{
return !list_empty(&node_list);
}
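#if 0
/*
* Illustrative sketch only (not part of this driver): how a client
* might register a single throttling rule against this API. The node
* ids, threshold and bandwidth values are made-up examples; the field
* and constant names follow the bus_rule_type usage above.
*/
static int __init example_register_throttle_rule(void)
{
static int example_src[] = { 100 };
static int example_dst[] = { 200 };
struct bus_rule_type r = {
.num_src = ARRAY_SIZE(example_src),
.src_id = example_src,
.src_field = FLD_IB,
.op = OP_GE,
.thresh = 1000000ULL,
.num_dst = ARRAY_SIZE(example_dst),
.dst_node = example_dst,
.dst_bw = 500000ULL,
.mode = THROTTLE_ON,
};

/* NULL notifier: the rule is applied directly to dst_node */
msm_rule_register(1, &r, NULL);
return 0;
}
#endif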
| gpl-2.0 |
leexning/openwrt | target/linux/adm5120/files-3.18/drivers/usb/host/adm5120-q.c | 582 | 26020 | /*
* ADM5120 HCD (Host Controller Driver) for USB
*
* Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
*
* This file was derived from: drivers/usb/host/ohci-q.c
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
*/
#include <linux/irq.h>
#include <linux/slab.h>
/*-------------------------------------------------------------------------*/
/*
* URB goes back to driver, and isn't reissued.
* It's completely gone from HC data structures.
* PRECONDITION: ahcd lock held, irqs blocked.
*/
static void
finish_urb(struct admhcd *ahcd, struct urb *urb, int status)
__releases(ahcd->lock)
__acquires(ahcd->lock)
{
urb_priv_free(ahcd, urb->hcpriv);
if (likely(status == -EINPROGRESS))
status = 0;
switch (usb_pipetype(urb->pipe)) {
case PIPE_ISOCHRONOUS:
admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs--;
break;
case PIPE_INTERRUPT:
admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs--;
break;
}
#ifdef ADMHC_VERBOSE_DEBUG
urb_print(ahcd, urb, "RET", usb_pipeout(urb->pipe), status);
#endif
/* urb->complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(admhcd_to_hcd(ahcd), urb);
spin_unlock(&ahcd->lock);
usb_hcd_giveback_urb(admhcd_to_hcd(ahcd), urb, status);
spin_lock(&ahcd->lock);
}
/*-------------------------------------------------------------------------*
* ED handling functions
*-------------------------------------------------------------------------*/
#if 0 /* FIXME */
/* search for the right schedule branch to use for a periodic ed.
* does some load balancing; returns the branch, or negative errno.
*/
static int balance(struct admhcd *ahcd, int interval, int load)
{
int i, branch = -ENOSPC;
/* iso periods can be huge; iso tds specify frame numbers */
if (interval > NUM_INTS)
interval = NUM_INTS;
/* search for the least loaded schedule branch of that period
* that has enough bandwidth left unreserved.
*/
for (i = 0; i < interval ; i++) {
if (branch < 0 || ahcd->load[branch] > ahcd->load[i]) {
int j;
/* usb 1.1 says 90% of one frame */
for (j = i; j < NUM_INTS; j += interval) {
if ((ahcd->load[j] + load) > 900)
break;
}
if (j < NUM_INTS)
continue;
branch = i;
}
}
return branch;
}
#endif
/*-------------------------------------------------------------------------*/
#if 0 /* FIXME */
/* both iso and interrupt requests have periods; this routine puts them
* into the schedule tree in the appropriate place. most iso devices use
* 1msec periods, but that's not required.
*/
static void periodic_link(struct admhcd *ahcd, struct ed *ed)
{
unsigned i;
admhc_vdbg(ahcd, "link %sed %p branch %d [%dus.], interval %d\n",
(ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
ed, ed->branch, ed->load, ed->interval);
for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
struct ed **prev = &ahcd->periodic[i];
__hc32 *prev_p = &ahcd->hcca->int_table[i];
struct ed *here = *prev;
/* sorting each branch by period (slow before fast)
* lets us share the faster parts of the tree.
* (plus maybe: put interrupt eds before iso)
*/
while (here && ed != here) {
if (ed->interval > here->interval)
break;
prev = &here->ed_next;
prev_p = &here->hwNextED;
here = *prev;
}
if (ed != here) {
ed->ed_next = here;
if (here)
ed->hwNextED = *prev_p;
wmb();
*prev = ed;
*prev_p = cpu_to_hc32(ahcd, ed->dma);
wmb();
}
ahcd->load[i] += ed->load;
}
admhcd_to_hcd(ahcd)->self.bandwidth_allocated += ed->load / ed->interval;
}
#endif
/* link an ed into the HC chain */
static int ed_schedule(struct admhcd *ahcd, struct ed *ed)
{
struct ed *old_tail;
if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING)
return -EAGAIN;
ed->state = ED_OPER;
old_tail = ahcd->ed_tails[ed->type];
ed->ed_next = old_tail->ed_next;
if (ed->ed_next) {
ed->ed_next->ed_prev = ed;
ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma);
}
ed->ed_prev = old_tail;
old_tail->ed_next = ed;
old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma);
ahcd->ed_tails[ed->type] = ed;
admhc_dma_enable(ahcd);
return 0;
}
/*-------------------------------------------------------------------------*/
#if 0 /* FIXME */
/* scan the periodic table to find and unlink this ED */
static void periodic_unlink(struct admhcd *ahcd, struct ed *ed)
{
int i;
for (i = ed->branch; i < NUM_INTS; i += ed->interval) {
struct ed *temp;
struct ed **prev = &ahcd->periodic[i];
__hc32 *prev_p = &ahcd->hcca->int_table[i];
while (*prev && (temp = *prev) != ed) {
prev_p = &temp->hwNextED;
prev = &temp->ed_next;
}
if (*prev) {
*prev_p = ed->hwNextED;
*prev = ed->ed_next;
}
ahcd->load[i] -= ed->load;
}
admhcd_to_hcd(ahcd)->self.bandwidth_allocated -= ed->load / ed->interval;
admhc_vdbg(ahcd, "unlink %sed %p branch %d [%dus.], interval %d\n",
(ed->hwINFO & cpu_to_hc32(ahcd, ED_ISO)) ? "iso " : "",
ed, ed->branch, ed->load, ed->interval);
}
#endif
/* unlink an ed from the HC chain.
* just the link to the ed is unlinked.
* the link from the ed still points to another operational ed or 0
* so the HC can eventually finish the processing of the unlinked ed
* (assuming it already started that, which needn't be true).
*
* ED_UNLINK is a transient state: the HC may still see this ED, but soon
* it won't. ED_SKIP means the HC will finish its current transaction,
* but won't start anything new. The TD queue may still grow; device
* drivers don't know about this HCD-internal state.
*
* When the HC can't see the ED, something changes ED_UNLINK to one of:
*
* - ED_OPER: when there's any request queued, the ED gets rescheduled
* immediately. HC should be working on them.
*
* - ED_IDLE: when there's no TD queue. there's no reason for the HC
* to care about this ED; safe to disable the endpoint.
*
* When finish_unlinks() runs later, after SOF interrupt, it will often
* complete one or more URB unlinks before making that state change.
*/
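/* A condensed view of the state machine described above (informal
* sketch; states as used in this file):
*
* ED_IDLE --ed_schedule()--> ED_OPER --ed_deschedule()--> ED_UNLINK
* ED_UNLINK --finish_unlinks(), TDs still queued--> ED_OPER
* ED_UNLINK --finish_unlinks(), queue empty------> ED_IDLE
*/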
static void ed_deschedule(struct admhcd *ahcd, struct ed *ed)
{
#ifdef ADMHC_VERBOSE_DEBUG
admhc_dump_ed(ahcd, "ED-DESCHED", ed, 1);
#endif
ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
wmb();
ed->state = ED_UNLINK;
/* remove this ED from the HC list */
ed->ed_prev->hwNextED = ed->hwNextED;
/* and remove it from our list also */
ed->ed_prev->ed_next = ed->ed_next;
if (ed->ed_next)
ed->ed_next->ed_prev = ed->ed_prev;
if (ahcd->ed_tails[ed->type] == ed)
ahcd->ed_tails[ed->type] = ed->ed_prev;
}
/*-------------------------------------------------------------------------*/
static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info)
{
struct ed *ed;
struct td *td;
ed = ed_alloc(ahcd, GFP_ATOMIC);
if (!ed)
goto err;
/* dummy td; end of td list for this ed */
td = td_alloc(ahcd, GFP_ATOMIC);
if (!td)
goto err_free_ed;
switch (type) {
case PIPE_INTERRUPT:
info |= ED_INT;
break;
case PIPE_ISOCHRONOUS:
info |= ED_ISO;
break;
}
ed->dummy = td;
ed->state = ED_IDLE;
ed->type = type;
ed->hwINFO = cpu_to_hc32(ahcd, info);
ed->hwTailP = cpu_to_hc32(ahcd, td->td_dma);
ed->hwHeadP = ed->hwTailP; /* ED_C, ED_H zeroed */
return ed;
err_free_ed:
ed_free(ahcd, ed);
err:
return NULL;
}
/* get and maybe (re)init an endpoint. init _should_ be done only as part
* of enumeration, usb_set_configuration() or usb_set_interface().
*/
static struct ed *ed_get(struct admhcd *ahcd, struct usb_host_endpoint *ep,
struct usb_device *udev, unsigned int pipe, int interval)
{
struct ed *ed;
unsigned long flags;
spin_lock_irqsave(&ahcd->lock, flags);
ed = ep->hcpriv;
if (!ed) {
u32 info;
/* FIXME: usbcore changes dev->devnum before SET_ADDRESS
* succeeds ... otherwise we wouldn't need "pipe".
*/
info = usb_pipedevice(pipe);
info |= (ep->desc.bEndpointAddress & ~USB_DIR_IN) << ED_EN_SHIFT;
info |= le16_to_cpu(ep->desc.wMaxPacketSize) << ED_MPS_SHIFT;
if (udev->speed == USB_SPEED_FULL)
info |= ED_SPEED_FULL;
ed = ed_create(ahcd, usb_pipetype(pipe), info);
if (ed)
ep->hcpriv = ed;
}
spin_unlock_irqrestore(&ahcd->lock, flags);
return ed;
}
/*-------------------------------------------------------------------------*/
/* request unlinking of an endpoint from an operational HC.
* put the ep on the rm_list
* real work is done at the next start frame (SOFI) hardware interrupt
* caller guarantees HCD is running, so hardware access is safe,
* and that ed->state is ED_OPER
*/
static void start_ed_unlink(struct admhcd *ahcd, struct ed *ed)
{
#ifdef ADMHC_VERBOSE_DEBUG
admhc_dump_ed(ahcd, "ED-UNLINK", ed, 1);
#endif
ed->hwINFO |= cpu_to_hc32(ahcd, ED_DEQUEUE);
ed_deschedule(ahcd, ed);
/* add this ED into the remove list */
ed->ed_rm_next = ahcd->ed_rm_list;
ahcd->ed_rm_list = ed;
/* enable SOF interrupt */
admhc_intr_ack(ahcd, ADMHC_INTR_SOFI);
admhc_intr_enable(ahcd, ADMHC_INTR_SOFI);
/* flush those writes */
admhc_writel_flush(ahcd);
/* SOF interrupt might get delayed; record the frame counter value that
* indicates when the HC isn't looking at it, so concurrent unlinks
* behave. frame_no wraps every 2^16 msec, and changes right before
* SOF is triggered.
*/
ed->tick = admhc_frame_no(ahcd) + 1;
}
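/* For reference: tick_before(), defined elsewhere in this driver, is
* assumed to be the usual wrap-safe 16-bit comparison, e.g.
*
* #define tick_before(t1, t2) ((s16)((t1) - (t2)) < 0)
*
* which is why "+ 1" on the current frame number above is enough for
* finish_unlinks() to wait out the frame the HC may still be using.
*/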
/*-------------------------------------------------------------------------*
* TD handling functions
*-------------------------------------------------------------------------*/
/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */
static void
td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len,
struct urb *urb, int index)
{
struct td *td, *td_pt;
struct urb_priv *urb_priv = urb->hcpriv;
int hash;
u32 cbl = 0;
#if 1
if (index == (urb_priv->td_cnt - 1) &&
((urb->transfer_flags & URB_NO_INTERRUPT) == 0))
cbl |= TD_IE;
#else
if (index == (urb_priv->td_cnt - 1))
cbl |= TD_IE;
#endif
/* use this td as the next dummy */
td_pt = urb_priv->td[index];
/* fill the old dummy TD */
td = urb_priv->td[index] = urb_priv->ed->dummy;
urb_priv->ed->dummy = td_pt;
td->ed = urb_priv->ed;
td->next_dl_td = NULL;
td->index = index;
td->urb = urb;
td->data_dma = data;
if (!len)
data = 0;
if (data)
cbl |= (len & TD_BL_MASK);
info |= TD_OWN;
/* setup hardware specific fields */
td->hwINFO = cpu_to_hc32(ahcd, info);
td->hwDBP = cpu_to_hc32(ahcd, data);
td->hwCBL = cpu_to_hc32(ahcd, cbl);
td->hwNextTD = cpu_to_hc32(ahcd, td_pt->td_dma);
/* append to queue */
list_add_tail(&td->td_list, &td->ed->td_list);
/* hash it for later reverse mapping */
hash = TD_HASH_FUNC(td->td_dma);
td->td_hash = ahcd->td_hash[hash];
ahcd->td_hash[hash] = td;
/* HC might read the TD (or cachelines) right away ... */
wmb();
td->ed->hwTailP = td->hwNextTD;
}
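/* Note on the dummy-TD scheme above (standard OHCI-style queueing):
* the ED's old dummy TD is filled in as the real transfer and the
* caller's spare TD becomes the new dummy, so advancing hwTailP is a
* single write and the controller never sees a half-built TD.
*/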
/*-------------------------------------------------------------------------*/
/* Prepare all TDs of a transfer, and queue them onto the ED.
* Caller guarantees HC is active.
* Usually the ED is already on the schedule, so TDs might be
* processed as soon as they're queued.
*/
static void td_submit_urb(struct admhcd *ahcd, struct urb *urb)
{
struct urb_priv *urb_priv = urb->hcpriv;
dma_addr_t data;
int data_len = urb->transfer_buffer_length;
int cnt = 0;
u32 info = 0;
int is_out = usb_pipeout(urb->pipe);
u32 toggle = 0;
/* OHCI handles the bulk/interrupt data toggles itself. We just
* use the device toggle bits for resetting, and rely on the fact
* that resetting toggle is meaningless if the endpoint is active.
*/
if (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), is_out)) {
toggle = TD_T_CARRY;
} else {
toggle = TD_T_DATA0;
usb_settoggle(urb->dev, usb_pipeendpoint (urb->pipe),
is_out, 1);
}
urb_priv->td_idx = 0;
list_add(&urb_priv->pending, &ahcd->pending);
if (data_len)
data = urb->transfer_dma;
else
data = 0;
/* NOTE: TD_CC is set so we can tell which TDs the HC processed by
* using TD_CC_GET, as well as by seeing them on the done list.
* (CC = NotAccessed ... 0x0F, or 0x0E in PSWs for ISO.)
*/
switch (urb_priv->ed->type) {
case PIPE_INTERRUPT:
info = is_out
? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
: TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;
/* setup service interval and starting frame number */
info |= (urb->start_frame & TD_FN_MASK);
info |= (urb->interval & TD_ISI_MASK) << TD_ISI_SHIFT;
td_fill(ahcd, info, data, data_len, urb, cnt);
cnt++;
admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs++;
break;
case PIPE_BULK:
info = is_out
? TD_SCC_NOTACCESSED | TD_DP_OUT
: TD_SCC_NOTACCESSED | TD_DP_IN;
/* TDs _could_ transfer up to 8K each */
while (data_len > TD_DATALEN_MAX) {
td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
data, TD_DATALEN_MAX, urb, cnt);
data += TD_DATALEN_MAX;
data_len -= TD_DATALEN_MAX;
cnt++;
}
td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle), data,
data_len, urb, cnt);
cnt++;
if ((urb->transfer_flags & URB_ZERO_PACKET)
&& (cnt < urb_priv->td_cnt)) {
td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle),
0, 0, urb, cnt);
cnt++;
}
break;
/* control manages DATA0/DATA1 toggle per-request; SETUP resets it,
* any DATA phase works normally, and the STATUS ack is special.
*/
case PIPE_CONTROL:
/* fill a TD for the setup */
info = TD_SCC_NOTACCESSED | TD_DP_SETUP | TD_T_DATA0;
td_fill(ahcd, info, urb->setup_dma, 8, urb, cnt++);
if (data_len > 0) {
/* fill a TD for the data */
info = TD_SCC_NOTACCESSED | TD_T_DATA1;
info |= is_out ? TD_DP_OUT : TD_DP_IN;
/* NOTE: mishandles transfers >8K, some >4K */
td_fill(ahcd, info, data, data_len, urb, cnt++);
}
/* fill a TD for the ACK */
info = (is_out || data_len == 0)
? TD_SCC_NOTACCESSED | TD_DP_IN | TD_T_DATA1
: TD_SCC_NOTACCESSED | TD_DP_OUT | TD_T_DATA1;
td_fill(ahcd, info, data, 0, urb, cnt++);
break;
/* ISO has no retransmit, so no toggle;
* Each TD could handle multiple consecutive frames (interval 1);
* we could often reduce the number of TDs here.
*/
case PIPE_ISOCHRONOUS:
info = is_out
? TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_OUT
: TD_T_CARRY | TD_SCC_NOTACCESSED | TD_DP_IN;
for (cnt = 0; cnt < urb->number_of_packets; cnt++) {
int frame = urb->start_frame;
frame += cnt * urb->interval;
frame &= TD_FN_MASK;
td_fill(ahcd, info | frame,
data + urb->iso_frame_desc[cnt].offset,
urb->iso_frame_desc[cnt].length, urb, cnt);
}
admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs++;
break;
}
if (urb_priv->td_cnt != cnt)
admhc_err(ahcd, "bad number of tds created for urb %p\n", urb);
}
/*-------------------------------------------------------------------------*
* Done List handling functions
*-------------------------------------------------------------------------*/
/* calculate transfer length/status and update the urb */
static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td)
{
struct urb_priv *urb_priv = urb->hcpriv;
u32 info;
u32 bl;
u32 tdDBP;
int type = usb_pipetype(urb->pipe);
int cc;
int status = -EINPROGRESS;
info = hc32_to_cpup(ahcd, &td->hwINFO);
tdDBP = hc32_to_cpup(ahcd, &td->hwDBP);
bl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL));
cc = TD_CC_GET(info);
/* ISO ... drivers see per-TD length/status */
if (type == PIPE_ISOCHRONOUS) {
/* TODO */
int dlen = 0;
/* NOTE: assumes FC in tdINFO == 0, and that
* only the first of 0..MAXPSW psws is used.
*/
if (info & TD_CC) /* hc didn't touch? */
return status;
if (usb_pipeout(urb->pipe))
dlen = urb->iso_frame_desc[td->index].length;
else {
/* short reads are always OK for ISO */
if (cc == TD_CC_DATAUNDERRUN)
cc = TD_CC_NOERROR;
dlen = tdDBP - td->data_dma + bl;
}
urb->actual_length += dlen;
urb->iso_frame_desc[td->index].actual_length = dlen;
urb->iso_frame_desc[td->index].status = cc_to_error[cc];
if (cc != TD_CC_NOERROR)
admhc_vdbg(ahcd,
"urb %p iso td %p (%d) len %d cc %d\n",
urb, td, 1 + td->index, dlen, cc);
/* BULK, INT, CONTROL ... drivers see aggregate length/status,
* except that "setup" bytes aren't counted and "short" transfers
* might not be reported as errors.
*/
} else {
/* update packet status if needed (short is normally ok) */
if (cc == TD_CC_DATAUNDERRUN
&& !(urb->transfer_flags & URB_SHORT_NOT_OK))
cc = TD_CC_NOERROR;
if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0)
status = cc_to_error[cc];
/* count all non-empty packets except control SETUP packet */
if ((type != PIPE_CONTROL || td->index != 0) && tdDBP != 0)
urb->actual_length += tdDBP - td->data_dma + bl;
if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0)
admhc_vdbg(ahcd,
"urb %p td %p (%d) cc %d, len=%d/%d\n",
urb, td, td->index, cc,
urb->actual_length,
urb->transfer_buffer_length);
}
list_del(&td->td_list);
urb_priv->td_idx++;
return status;
}
/*-------------------------------------------------------------------------*/
static void ed_halted(struct admhcd *ahcd, struct td *td, int cc)
{
struct urb *urb = td->urb;
struct urb_priv *urb_priv = urb->hcpriv;
struct ed *ed = td->ed;
struct list_head *tmp = td->td_list.next;
__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);
admhc_dump_ed(ahcd, "ed halted", td->ed, 1);
/* clear ed halt; this is the td that caused it, but keep it inactive
* until its urb->complete() has a chance to clean up.
*/
ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
wmb();
ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
/* Get rid of all later tds from this urb. We don't have
* to be careful: no errors and nothing was transferred.
* Also patch the ed so it looks as if those tds completed normally.
*/
while (tmp != &ed->td_list) {
struct td *next;
next = list_entry(tmp, struct td, td_list);
tmp = next->td_list.next;
if (next->urb != urb)
break;
/* NOTE: if multi-td control DATA segments get supported,
* this urb had one of them, this td wasn't the last td
* in that segment (TD_R clear), this ed halted because
* of a short read, _and_ URB_SHORT_NOT_OK is clear ...
* then we need to leave the control STATUS packet queued
* and clear ED_SKIP.
*/
list_del(&next->td_list);
urb_priv->td_cnt++;
ed->hwHeadP = next->hwNextTD | toggle;
}
/* help for troubleshooting: report anything that
* looks odd ... that doesn't include protocol stalls
* (or maybe some other things)
*/
switch (cc) {
case TD_CC_DATAUNDERRUN:
if ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0)
break;
/* fallthrough */
case TD_CC_STALL:
if (usb_pipecontrol(urb->pipe))
break;
/* fallthrough */
default:
admhc_dbg(ahcd,
"urb %p path %s ep%d%s %08x cc %d --> status %d\n",
urb, urb->dev->devpath,
usb_pipeendpoint (urb->pipe),
usb_pipein(urb->pipe) ? "in" : "out",
hc32_to_cpu(ahcd, td->hwINFO),
cc, cc_to_error[cc]);
}
}
/*-------------------------------------------------------------------------*/
/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */
static void
finish_unlinks(struct admhcd *ahcd, u16 tick)
{
struct ed *ed, **last;
rescan_all:
for (last = &ahcd->ed_rm_list, ed = *last; ed != NULL; ed = *last) {
struct list_head *entry, *tmp;
int completed, modified;
__hc32 *prev;
/* only take off EDs that the HC isn't using, accounting for
* frame counter wraps and EDs with partially retired TDs
*/
if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))) {
if (tick_before(tick, ed->tick)) {
skip_ed:
last = &ed->ed_rm_next;
continue;
}
#if 0
if (!list_empty(&ed->td_list)) {
struct td *td;
u32 head;
td = list_entry(ed->td_list.next, struct td,
td_list);
head = hc32_to_cpu(ahcd, ed->hwHeadP) &
TD_MASK;
/* INTR_WDH may need to clean up first */
if (td->td_dma != head)
goto skip_ed;
}
#endif
}
/* reentrancy: if we drop the schedule lock, someone might
* have modified this list. normally it's just prepending
* entries (which we'd ignore), but paranoia won't hurt.
*/
*last = ed->ed_rm_next;
ed->ed_rm_next = NULL;
modified = 0;
/* unlink urbs as requested, but rescan the list after
* we call a completion since it might have unlinked
* another (earlier) urb
*
* When we get here, the HC doesn't see this ed. But it
* must not be rescheduled until all completed URBs have
* been given back to the driver.
*/
rescan_this:
completed = 0;
prev = &ed->hwHeadP;
list_for_each_safe(entry, tmp, &ed->td_list) {
struct td *td;
struct urb *urb;
struct urb_priv *urb_priv;
__hc32 savebits;
u32 tdINFO;
int status;
td = list_entry(entry, struct td, td_list);
urb = td->urb;
urb_priv = td->urb->hcpriv;
if (!urb->unlinked) {
prev = &td->hwNextTD;
continue;
}
if (!urb_priv)
continue;
/* patch pointer hc uses */
savebits = *prev & ~cpu_to_hc32(ahcd, TD_MASK);
*prev = td->hwNextTD | savebits;
/* If this was unlinked, the TD may not have been
* retired ... so manually save the data toggle.
* The controller ignores the value we save for
* control and ISO endpoints.
*/
tdINFO = hc32_to_cpup(ahcd, &td->hwINFO);
if ((tdINFO & TD_T) == TD_T_DATA0)
ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_C);
else if ((tdINFO & TD_T) == TD_T_DATA1)
ed->hwHeadP |= cpu_to_hc32(ahcd, ED_C);
/* HC may have partly processed this TD */
#ifdef ADMHC_VERBOSE_DEBUG
urb_print(ahcd, urb, "PARTIAL", 0);
#endif
status = td_done(ahcd, urb, td);
/* if URB is done, clean up */
if (urb_priv->td_idx == urb_priv->td_cnt) {
modified = completed = 1;
finish_urb(ahcd, urb, status);
}
}
if (completed && !list_empty(&ed->td_list))
goto rescan_this;
/* ED's now officially unlinked, hc doesn't see */
ed->state = ED_IDLE;
ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
ed->hwNextED = 0;
wmb();
ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP | ED_DEQUEUE);
/* but if there's work queued, reschedule */
if (!list_empty(&ed->td_list)) {
if (HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))
ed_schedule(ahcd, ed);
}
if (modified)
goto rescan_all;
}
}
/*-------------------------------------------------------------------------*/
/*
* Process normal completions (error or success) and clean the schedules.
*
* This is the main path for handing urbs back to drivers. The only other
* normal path is finish_unlinks(), which unlinks URBs using ed_rm_list,
* instead of scanning the (re-reversed) donelist as this does.
*/
static void ed_unhalt(struct admhcd *ahcd, struct ed *ed, struct urb *urb)
{
struct list_head *entry, *tmp;
__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);
#ifdef ADMHC_VERBOSE_DEBUG
admhc_dump_ed(ahcd, "UNHALT", ed, 0);
#endif
/* clear ed halt; this is the td that caused it, but keep it inactive
* until its urb->complete() has a chance to clean up.
*/
ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP);
wmb();
ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H);
list_for_each_safe(entry, tmp, &ed->td_list) {
struct td *td = list_entry(entry, struct td, td_list);
__hc32 info;
if (td->urb != urb)
break;
info = td->hwINFO;
info &= ~cpu_to_hc32(ahcd, TD_CC | TD_OWN);
td->hwINFO = info;
ed->hwHeadP = td->hwNextTD | toggle;
wmb();
}
}
static void ed_intr_refill(struct admhcd *ahcd, struct ed *ed)
{
__hc32 toggle = ed->hwHeadP & cpu_to_hc32(ahcd, ED_C);
ed->hwHeadP = ed->hwTailP | toggle;
}
static inline int is_ed_halted(struct admhcd *ahcd, struct ed *ed)
{
return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) == ED_H);
}
static inline int is_td_halted(struct admhcd *ahcd, struct ed *ed,
struct td *td)
{
return ((hc32_to_cpup(ahcd, &ed->hwHeadP) & TD_MASK) ==
(hc32_to_cpup(ahcd, &td->hwNextTD) & TD_MASK));
}
static void ed_update(struct admhcd *ahcd, struct ed *ed)
{
struct list_head *entry, *tmp;
#ifdef ADMHC_VERBOSE_DEBUG
admhc_dump_ed(ahcd, "UPDATE", ed, 1);
#endif
list_for_each_safe(entry, tmp, &ed->td_list) {
struct td *td = list_entry(entry, struct td, td_list);
struct urb *urb = td->urb;
struct urb_priv *urb_priv = urb->hcpriv;
int status;
if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN)
break;
/* update URB's length and status from TD */
status = td_done(ahcd, urb, td);
if (is_ed_halted(ahcd, ed) && is_td_halted(ahcd, ed, td))
ed_unhalt(ahcd, ed, urb);
if (ed->type == PIPE_INTERRUPT)
ed_intr_refill(ahcd, ed);
/* If all this urb's TDs are done, call complete() */
if (urb_priv->td_idx == urb_priv->td_cnt)
finish_urb(ahcd, urb, status);
/* clean schedule: unlink EDs that are no longer busy */
if (list_empty(&ed->td_list)) {
if (ed->state == ED_OPER)
start_ed_unlink(ahcd, ed);
/* ... reenabling halted EDs only after fault cleanup */
} else if ((ed->hwINFO & cpu_to_hc32(ahcd,
ED_SKIP | ED_DEQUEUE))
== cpu_to_hc32(ahcd, ED_SKIP)) {
td = list_entry(ed->td_list.next, struct td, td_list);
#if 0
if (!(td->hwINFO & cpu_to_hc32(ahcd, TD_DONE))) {
ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
/* ... hc may need waking-up */
switch (ed->type) {
case PIPE_CONTROL:
admhc_writel(ahcd, OHCI_CLF,
&ahcd->regs->cmdstatus);
break;
case PIPE_BULK:
admhc_writel(ahcd, OHCI_BLF,
&ahcd->regs->cmdstatus);
break;
}
}
#else
if ((td->hwINFO & cpu_to_hc32(ahcd, TD_OWN)))
ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP);
#endif
}
}
}
/* there are some tds completed; called in_irq(), with HCD locked */
static void admhc_td_complete(struct admhcd *ahcd)
{
struct ed *ed;
for (ed = ahcd->ed_head; ed; ed = ed->ed_next) {
if (ed->state != ED_OPER)
continue;
ed_update(ahcd, ed);
}
}
| gpl-2.0 |
pikatechnologies/openuwarp-attitude | target/linux/adm5120/files/arch/mips/adm5120/osbridge/5gxi.c | 582 | 1895 | /*
* OSBRiDGE 5GXi/5XLi board support
*
* Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/etherdevice.h>
#include <asm/mips_machine.h>
#include <asm/mach-adm5120/adm5120_defs.h>
#include <asm/mach-adm5120/adm5120_platform.h>
#include <asm/mach-adm5120/adm5120_info.h>
static struct mtd_partition osbridge_5gxi_partitions[] = {
{
.name = "bootloader",
.offset = 0,
.size = 64*1024,
.mask_flags = MTD_WRITEABLE,
}, {
.name = "boardcfg",
.offset = 64*1024,
.size = 64*1024,
}, {
.name = "firmware",
.offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
}
};
static struct gpio_led osbridge_5gxi_gpio_leds[] __initdata = {
GPIO_LED_INV(ADM5120_GPIO_PIN6, "5gxi:green:user", NULL),
GPIO_LED_INV(ADM5120_GPIO_P0L0, "5gxi:yellow:lan", NULL),
};
static struct adm5120_pci_irq osbridge_5gxi_pci_irqs[] __initdata = {
PCIIRQ(2, 0, 1, ADM5120_IRQ_PCI0),
};
static u8 osbridge_5gxi_vlans[6] __initdata = {
0x41, 0x00, 0x00, 0x00, 0x00, 0x00
};
static void __init osbridge_5gxi_setup(void)
{
adm5120_flash0_data.nr_parts = ARRAY_SIZE(osbridge_5gxi_partitions);
adm5120_flash0_data.parts = osbridge_5gxi_partitions;
adm5120_add_device_uart(0);
adm5120_add_device_uart(1);
adm5120_add_device_flash(0);
adm5120_add_device_switch(1, osbridge_5gxi_vlans);
adm5120_add_device_gpio_leds(ARRAY_SIZE(osbridge_5gxi_gpio_leds),
osbridge_5gxi_gpio_leds);
adm5120_pci_set_irq_map(ARRAY_SIZE(osbridge_5gxi_pci_irqs),
osbridge_5gxi_pci_irqs);
}
MIPS_MACHINE(MACH_ADM5120_5GXI, "5GXi", "OSBRiDGE 5GXi/5XLi board",
osbridge_5gxi_setup);
| gpl-2.0 |
MSM8226-Samsung/kernel_samsung_msm8226 | drivers/staging/iio/imu/mpu_primary/inv_slave_compass.c | 582 | 21728 | /*
* Copyright (C) 2012 Invensense, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
#include "inv_mpu_iio.h"
/* AKM definitions */
#define REG_AKM_ID 0x00
#define REG_AKM_INFO 0x01
#define REG_AKM_STATUS 0x02
#define REG_AKM_MEASURE_DATA 0x03
#define REG_AKM_MODE 0x0A
#define REG_AKM_ST_CTRL 0x0C
#define REG_AKM_SENSITIVITY 0x10
#define REG_AKM8963_CNTL1 0x0A
/* AK09911 register definition */
#define REG_AK09911_DMP_READ 0x3
#define REG_AK09911_STATUS1 0x10
#define REG_AK09911_CNTL2 0x31
#define REG_AK09911_SENSITIVITY 0x60
#define DATA_AKM_ID 0x48
#define DATA_AKM_MODE_PD 0x00
#define DATA_AKM_MODE_SM 0x01
#define DATA_AKM_MODE_ST 0x08
#define DATA_AKM_MODE_FR 0x0F
#define DATA_AK09911_MODE_FR 0x1F
#define DATA_AKM_SELF_TEST 0x40
#define DATA_AKM_DRDY 0x01
#define DATA_AKM8963_BIT 0x10
#define DATA_AKM_STAT_MASK 0x0C
#define DATA_AKM8975_SCALE (9830 * (1L << 15))
#define DATA_AKM8972_SCALE (19661 * (1L << 15))
#define DATA_AKM8963_SCALE0 (19661 * (1L << 15))
#define DATA_AKM8963_SCALE1 (4915 * (1L << 15))
#define DATA_AK09911_SCALE (19661 * (1L << 15))
#define DATA_MLX_SCALE (4915 * (1L << 15))
#define DATA_MLX_SCALE_EMPIRICAL (26214 * (1L << 15))
#define DATA_AKM8963_SCALE_SHIFT 4
#define DATA_AKM_99_BYTES_DMP 10
#define DATA_AKM_89_BYTES_DMP 9
#define DATA_AKM_MIN_READ_TIME (9 * NSEC_PER_MSEC)
#define DEF_ST_COMPASS_WAIT_MIN (10 * 1000)
#define DEF_ST_COMPASS_WAIT_MAX (15 * 1000)
#define DEF_ST_COMPASS_TRY_TIMES 10
#define DEF_ST_COMPASS_8963_SHIFT 2
#define X 0
#define Y 1
#define Z 2
/* milliseconds between each access */
#define AKM_RATE_SCALE 10
#define MLX_RATE_SCALE 50
/* MLX90399 compass definition */
#define DATA_MLX_CMD_READ_MEASURE 0x4F
#define DATA_MLX_CMD_SINGLE_MEASURE 0x3F
#define DATA_MLX_READ_DATA_BYTES 9
#define DATA_MLX_STATUS_DATA 3
#define DATA_MLX_MIN_READ_TIME (95 * NSEC_PER_MSEC)
static const short AKM8975_ST_Lower[3] = {-100, -100, -1000};
static const short AKM8975_ST_Upper[3] = {100, 100, -300};
static const short AKM8972_ST_Lower[3] = {-50, -50, -500};
static const short AKM8972_ST_Upper[3] = {50, 50, -100};
static const short AKM8963_ST_Lower[3] = {-200, -200, -3200};
static const short AKM8963_ST_Upper[3] = {200, 200, -800};
/*
* inv_setup_compass_akm() - Configure akm series compass.
*/
static int inv_setup_compass_akm(struct inv_mpu_state *st)
{
int result;
u8 data[4];
u8 sens, mode, cmd;
/* set to bypass mode */
result = inv_i2c_single_write(st, REG_INT_PIN_CFG,
st->plat_data.int_config | BIT_BYPASS_EN);
if (result)
return result;
/* read secondary i2c ID register */
result = inv_secondary_read(REG_AKM_ID, 1, data);
if (result)
return result;
if (data[0] != DATA_AKM_ID)
return -ENXIO;
/* set AKM to Fuse ROM access mode */
if (COMPASS_ID_AK09911 == st->plat_data.sec_slave_id) {
mode = REG_AK09911_CNTL2;
sens = REG_AK09911_SENSITIVITY;
cmd = DATA_AK09911_MODE_FR;
} else {
mode = REG_AKM_MODE;
sens = REG_AKM_SENSITIVITY;
cmd = DATA_AKM_MODE_FR;
}
result = inv_secondary_write(mode, cmd);
if (result)
return result;
result = inv_secondary_read(sens, THREE_AXIS,
st->chip_info.compass_sens);
if (result)
return result;
/* revert to power down mode */
result = inv_secondary_write(mode, DATA_AKM_MODE_PD);
if (result)
return result;
pr_debug("%s senx=%d, seny=%d, senz=%d\n",
st->hw->name,
st->chip_info.compass_sens[0],
st->chip_info.compass_sens[1],
st->chip_info.compass_sens[2]);
/* restore to non-bypass mode */
result = inv_i2c_single_write(st, REG_INT_PIN_CFG,
st->plat_data.int_config);
if (result)
return result;
/* setup master mode and master clock and ES bit */
result = inv_i2c_single_write(st, REG_I2C_MST_CTRL, BIT_WAIT_FOR_ES);
if (result)
return result;
/* slave 1 is used for AKM mode change only */
result = inv_i2c_single_write(st, REG_I2C_SLV1_ADDR,
st->plat_data.secondary_i2c_addr);
if (result)
return result;
/* AKM mode register address */
result = inv_i2c_single_write(st, REG_I2C_SLV1_REG, mode);
if (result)
return result;
/* output data for slave 1 is fixed, single measure mode */
st->slave_compass->scale = 1;
if (COMPASS_ID_AK8975 == st->plat_data.sec_slave_id) {
st->slave_compass->st_upper = AKM8975_ST_Upper;
st->slave_compass->st_lower = AKM8975_ST_Lower;
data[0] = DATA_AKM_MODE_SM;
} else if (COMPASS_ID_AK8972 == st->plat_data.sec_slave_id) {
st->slave_compass->st_upper = AKM8972_ST_Upper;
st->slave_compass->st_lower = AKM8972_ST_Lower;
data[0] = DATA_AKM_MODE_SM;
} else if (COMPASS_ID_AK8963 == st->plat_data.sec_slave_id) {
st->slave_compass->st_upper = AKM8963_ST_Upper;
st->slave_compass->st_lower = AKM8963_ST_Lower;
data[0] = DATA_AKM_MODE_SM |
(st->slave_compass->scale << DATA_AKM8963_SCALE_SHIFT);
} else if (COMPASS_ID_AK09911 == st->plat_data.sec_slave_id) {
st->slave_compass->st_upper = AKM8963_ST_Upper;
st->slave_compass->st_lower = AKM8963_ST_Lower;
data[0] = DATA_AKM_MODE_SM;
} else {
return -EINVAL;
}
result = inv_i2c_single_write(st, INV_MPU_REG_I2C_SLV1_DO, data[0]);
return result;
}
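/*
* Sensitivity adjustment applied in inv_akm_read_data() below: per the
* AKM datasheets the fuse-ROM value ASA scales each axis as
* Hadj = H * (ASA + 128) / 256 on AK89xx parts (shift of 8) and
* Hadj = H * (ASA + 128) / 128 on the AK09911 (shift of 7).
*/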
static int inv_akm_read_data(struct inv_mpu_state *st, short *o)
{
int result, shift;
int i;
u8 d[DATA_AKM_99_BYTES_DMP - 1];
u8 *sens;
sens = st->chip_info.compass_sens;
result = 0;
if (st->chip_config.dmp_on) {
for (i = 0; i < 6; i++)
d[1 + i] = st->fifo_data[i];
} else {
result = inv_i2c_read(st, REG_EXT_SENS_DATA_00,
DATA_AKM_99_BYTES_DMP - 1, d);
if (((DATA_AKM_DRDY != d[0]) && (!d[7])) || result)
result = -EINVAL;
}
if (COMPASS_ID_AK09911 == st->plat_data.sec_slave_id)
shift = 7;
else
shift = 8;
for (i = 0; i < 3; i++) {
o[i] = (short)((d[i * 2 + 1] << 8) | d[i * 2 + 2]);
o[i] = (short)(((int)o[i] * (sens[i] + 128)) >> shift);
}
return result;
}
static int inv_mlx_read_data(struct inv_mpu_state *st, short *o)
{
int result;
int i, z;
u8 d[DATA_MLX_READ_DATA_BYTES];
result = inv_i2c_read(st, REG_EXT_SENS_DATA_00,
DATA_MLX_READ_DATA_BYTES, d);
if ((!(d[0] & ~DATA_MLX_STATUS_DATA)) && (!result)) {
for (i = 0; i < 3; i++)
o[i] = (short)((d[i * 2 + 3] << 8) + d[i * 2 + 4]);
} else {
for (i = 0; i < 3; i++)
o[i] = 0;
}
z = o[2];
/* axis sensitivity conversion. Z axis has a different sensitivity
* (scaled by 26/15 below) from X and Y */
z *= 26;
z /= 15;
o[2] = z;
return 0;
}
static int inv_check_akm_self_test(struct inv_mpu_state *st)
{
int result;
u8 data[6], mode;
u8 counter, cntl;
short x, y, z;
u8 *sens;
sens = st->chip_info.compass_sens;
/* set to bypass mode */
result = inv_i2c_single_write(st, REG_INT_PIN_CFG,
st->plat_data.int_config | BIT_BYPASS_EN);
if (result) {
result = inv_i2c_single_write(st, REG_INT_PIN_CFG,
st->plat_data.int_config);
return result;
}
if (COMPASS_ID_AK09911 == st->plat_data.sec_slave_id)
mode = REG_AK09911_CNTL2;
else
mode = REG_AKM_MODE;
/* set to power down mode */
result = inv_secondary_write(mode, DATA_AKM_MODE_PD);
if (result)
goto AKM_fail;
/* write 1 to ASTC register */
result = inv_secondary_write(REG_AKM_ST_CTRL, DATA_AKM_SELF_TEST);
if (result)
goto AKM_fail;
/* set self test mode */
result = inv_secondary_write(mode, DATA_AKM_MODE_ST);
if (result)
goto AKM_fail;
counter = DEF_ST_COMPASS_TRY_TIMES;
while (counter > 0) {
usleep_range(DEF_ST_COMPASS_WAIT_MIN, DEF_ST_COMPASS_WAIT_MAX);
result = inv_secondary_read(REG_AKM_STATUS, 1, data);
if (result)
goto AKM_fail;
if ((data[0] & DATA_AKM_DRDY) == 0)
counter--;
else
counter = 0;
}
if ((data[0] & DATA_AKM_DRDY) == 0) {
result = -EINVAL;
goto AKM_fail;
}
result = inv_secondary_read(REG_AKM_MEASURE_DATA,
BYTES_PER_SENSOR, data);
if (result)
goto AKM_fail;
x = le16_to_cpup((__le16 *)(&data[0]));
y = le16_to_cpup((__le16 *)(&data[2]));
z = le16_to_cpup((__le16 *)(&data[4]));
x = ((x * (sens[0] + 128)) >> 8);
y = ((y * (sens[1] + 128)) >> 8);
z = ((z * (sens[2] + 128)) >> 8);
if (COMPASS_ID_AK8963 == st->plat_data.sec_slave_id) {
result = inv_secondary_read(REG_AKM8963_CNTL1, 1, &cntl);
if (result)
goto AKM_fail;
if (0 == (cntl & DATA_AKM8963_BIT)) {
x <<= DEF_ST_COMPASS_8963_SHIFT;
y <<= DEF_ST_COMPASS_8963_SHIFT;
z <<= DEF_ST_COMPASS_8963_SHIFT;
}
}
result = -EINVAL;
if (x > st->slave_compass->st_upper[X] ||
x < st->slave_compass->st_lower[X])
goto AKM_fail;
if (y > st->slave_compass->st_upper[Y] ||
y < st->slave_compass->st_lower[Y])
goto AKM_fail;
if (z > st->slave_compass->st_upper[Z] ||
z < st->slave_compass->st_lower[Z])
goto AKM_fail;
result = 0;
AKM_fail:
/*write 0 to ASTC register */
result |= inv_secondary_write(REG_AKM_ST_CTRL, 0);
/*set to power down mode */
result |= inv_secondary_write(mode, DATA_AKM_MODE_PD);
/*restore to non-bypass mode */
result |= inv_i2c_single_write(st, REG_INT_PIN_CFG,
st->plat_data.int_config);
return result;
}
/*
* inv_write_akm_scale() - Configure the akm scale range.
*/
static int inv_write_akm_scale(struct inv_mpu_state *st, int data)
{
char d, en;
int result;
if (COMPASS_ID_AK8963 != st->plat_data.sec_slave_id)
return 0;
en = !!data;
if (st->slave_compass->scale == en)
return 0;
d = (DATA_AKM_MODE_SM | (en << DATA_AKM8963_SCALE_SHIFT));
result = inv_i2c_single_write(st, INV_MPU_REG_I2C_SLV1_DO, d);
if (result)
return result;
st->slave_compass->scale = en;
return 0;
}
/*
* inv_read_akm_scale() - show AKM scale.
*/
static int inv_read_akm_scale(struct inv_mpu_state *st, int *scale)
{
if (COMPASS_ID_AK8975 == st->plat_data.sec_slave_id)
*scale = DATA_AKM8975_SCALE;
else if (COMPASS_ID_AK8972 == st->plat_data.sec_slave_id)
*scale = DATA_AKM8972_SCALE;
else if (COMPASS_ID_AK8963 == st->plat_data.sec_slave_id)
if (st->slave_compass->scale)
*scale = DATA_AKM8963_SCALE1;
else
*scale = DATA_AKM8963_SCALE0;
else if (COMPASS_ID_AK09911 == st->plat_data.sec_slave_id)
*scale = DATA_AK09911_SCALE;
else
return -EINVAL;
return IIO_VAL_INT;
}
static int inv_suspend_akm(struct inv_mpu_state *st)
{
int result;
/* slave 0 is disabled */
result = inv_i2c_single_write(st, REG_I2C_SLV0_CTRL, 0);
if (result)
return result;
/* slave 1 is disabled */
result = inv_i2c_single_write(st, REG_I2C_SLV1_CTRL, 0);
return result;
}
static int inv_resume_akm(struct inv_mpu_state *st)
{
int result;
u8 reg_addr, bytes;
/* slave 0 is used to read data from compass */
/*read mode */
result = inv_i2c_single_write(st, REG_I2C_SLV0_ADDR,
INV_MPU_BIT_I2C_READ |
st->plat_data.secondary_i2c_addr);
if (result)
return result;
/* AKM status register address is 1 */
if (COMPASS_ID_AK09911 == st->plat_data.sec_slave_id) {
if (st->chip_config.dmp_on) {
reg_addr = REG_AK09911_DMP_READ;
bytes = DATA_AKM_99_BYTES_DMP;
} else {
reg_addr = REG_AK09911_STATUS1;
bytes = DATA_AKM_99_BYTES_DMP - 1;
}
} else {
if (st->chip_config.dmp_on) {
reg_addr = REG_AKM_INFO;
bytes = DATA_AKM_89_BYTES_DMP;
} else {
reg_addr = REG_AKM_STATUS;
bytes = DATA_AKM_89_BYTES_DMP - 1;
}
}
result = inv_i2c_single_write(st, REG_I2C_SLV0_REG, reg_addr);
if (result)
return result;
/* slave 0 is enabled, read 10 or 8 bytes from here, swap bytes */
result = inv_i2c_single_write(st, REG_I2C_SLV0_CTRL,
INV_MPU_BIT_GRP |
INV_MPU_BIT_BYTE_SW |
INV_MPU_BIT_SLV_EN |
bytes);
if (result)
return result;
/* slave 1 is enabled, write byte length is 1 */
result = inv_i2c_single_write(st, REG_I2C_SLV1_CTRL,
INV_MPU_BIT_SLV_EN | 1);
return result;
}
/*
* inv_write_mlx_scale() - Configure the mlx90399 scale range.
*/
static int inv_write_mlx_scale(struct inv_mpu_state *st, int data)
{
st->slave_compass->scale = data;
return 0;
}
/*
* inv_read_mlx_scale() - show mlx90399 scale.
*/
static int inv_read_mlx_scale(struct inv_mpu_state *st, int *scale)
{
*scale = st->slave_compass->scale;
return IIO_VAL_INT;
}
static int inv_i2c_read_mlx(struct inv_mpu_state *st, u16 i2c_addr,
u16 length, u8 *data)
{
struct i2c_msg msgs[1];
int res;
if (!data)
return -EINVAL;
msgs[0].addr = i2c_addr;
msgs[0].flags = I2C_M_RD;
msgs[0].buf = data;
msgs[0].len = length;
res = i2c_transfer(st->sl_handle, msgs, 1);
if (res < 1) {
if (res >= 0)
res = -EIO;
} else
res = 0;
return res;
}
static int inv_i2c_write_mlx(struct inv_mpu_state *st,
u16 i2c_addr, u8 data)
{
u8 tmp[1];
struct i2c_msg msg;
int res;
tmp[0] = data;
msg.addr = i2c_addr;
msg.flags = 0; /* write */
msg.buf = tmp;
msg.len = 1;
res = i2c_transfer(st->sl_handle, &msg, 1);
if (res < 1) {
if (res == 0)
res = -EIO;
return res;
} else
return 0;
}
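/*
* MLX90399 access is command-framed rather than plain register-addressed
* I2C: 0x50 below is assumed to be the Read Register command and 0x60
* the Write Register command, the register address travels shifted left
* by two bits, and every exchange returns a status byte first.
*/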
static int inv_i2c_read_reg_mlx(struct inv_mpu_state *st,
u16 i2c_addr, u8 reg, u16 *val)
{
u8 tmp[10];
struct i2c_msg msg;
int res;
tmp[0] = 0x50;
tmp[1] = (reg << 2);
msg.addr = i2c_addr;
msg.flags = 0; /* write */
msg.buf = tmp;
msg.len = 2;
res = i2c_transfer(st->sl_handle, &msg, 1);
if (res < 1) {
if (res == 0)
res = -EIO;
return res;
}
res = inv_i2c_read_mlx(st, i2c_addr, 10, tmp);
if (res)
return res;
*val = ((tmp[1] << 8) | tmp[2]);
return res;
}
static int inv_i2c_write_mlx_reg(struct inv_mpu_state *st,
u16 i2c_addr, int reg, u16 d)
{
u8 tmp[10];
struct i2c_msg msg;
int res;
/* write register command, writing volatile memory */
tmp[0] = 0x60;
tmp[1] = ((d >> 8) & 0xff);
tmp[2] = (d & 0xff);
tmp[3] = (reg << 2);
msg.addr = i2c_addr;
msg.flags = 0; /* write */
msg.buf = tmp;
msg.len = 4;
res = i2c_transfer(st->sl_handle, &msg, 1);
if (res < 1) {
if (res == 0)
res = -EIO;
return res;
}
/* read status */
res = inv_i2c_read_mlx(st, i2c_addr, 10, tmp);
return res;
}
static int inv_write_mlx_cmd(struct inv_mpu_state *st, u8 cmd)
{
int result;
u8 d[10];
int addr;
addr = st->plat_data.secondary_i2c_addr;
result = inv_i2c_write_mlx(st, addr, cmd);
if (result)
return result;
/* read back status byte */
result = inv_i2c_read_mlx(st, addr, 10, d);
return result;
}
static int inv_read_mlx_z_axis(struct inv_mpu_state *st, s16 *z)
{
int result;
u8 d[10];
int addr;
addr = st->plat_data.secondary_i2c_addr;
/* measure z axis */
result = inv_write_mlx_cmd(st, 0x39);
if (result)
return result;
msleep(100);
/* read z axis */
result = inv_i2c_write_mlx(st, addr, 0x49);
if (result)
return result;
/* read back status byte */
result = inv_i2c_read_mlx(st, addr, 10, d);
if (result)
return result;
if ((d[0] & 0x3) == 1)
*z = (short)((d[3] << 8) + d[4]);
else
return -EINVAL;
return 0;
}
static int inv_write_mlx_reg(struct inv_mpu_state *st)
{
int result;
int addr;
u16 r_val;
addr = st->plat_data.secondary_i2c_addr;
/* write register 0.
set GAIN_SEL to 7;
set HALL_CONF to 0xC. */
result = inv_i2c_write_mlx_reg(st, addr, 0, 0x7c);
if (result)
return result;
/* write register 2.
set resolution to zero for all axes;
set DIGI filter to 6;
set OSR to 0;
set OSR2 to 0. */
result = inv_i2c_write_mlx_reg(st, addr, 2, 0x18);
if (result)
return result;
/* read register 1 */
result = inv_i2c_read_reg_mlx(st, addr, 1, &r_val);
if (result)
return result;
/* enable temp comp */
r_val |= 0x400;
result = inv_i2c_write_mlx_reg(st, addr, 1, r_val);
/* the value should be kept in the volatile memory */
return result;
}
static int inv_check_mlx_self_test(struct inv_mpu_state *st)
{
int result;
int addr;
s16 meas_ref, meas_coil;
u16 diff, r_val;
/* set to bypass mode */
result = inv_i2c_single_write(st, REG_INT_PIN_CFG,
st->plat_data.int_config | BIT_BYPASS_EN);
if (result) {
result = inv_i2c_single_write(st, REG_INT_PIN_CFG,
st->plat_data.int_config);
return result;
}
addr = st->plat_data.secondary_i2c_addr;
/* fake read to flush the previous data */
result = inv_read_mlx_z_axis(st, &meas_ref);
result = inv_read_mlx_z_axis(st, &meas_ref);
if (result)
return result;
/* read register 1 */
result = inv_i2c_read_reg_mlx(st, addr, 0, &r_val);
if (result)
return result;
/* enable self test */
r_val |= 0x100;
result = inv_i2c_write_mlx_reg(st, addr, 0, r_val);
if (result)
return result;
msleep(200);
result = inv_read_mlx_z_axis(st, &meas_coil);
if (result)
return result;
result = inv_write_mlx_cmd(st, 0xD0);
if (result)
return result;
result = inv_write_mlx_reg(st);
if (result)
return result;
diff = abs(meas_ref - meas_coil);
if (diff < 25 || diff > 300)
result = 1;
/*restore to non-bypass mode */
result |= inv_i2c_single_write(st, REG_INT_PIN_CFG,
st->plat_data.int_config);
return result;
}
/*
* inv_setup_compass_mlx() - Configure the MLX90399 compass.
*/
static int inv_setup_compass_mlx(struct inv_mpu_state *st)
{
int result;
int addr;
addr = st->plat_data.secondary_i2c_addr;
/* set to bypass mode */
result = inv_i2c_single_write(st, REG_INT_PIN_CFG,
st->plat_data.int_config | BIT_BYPASS_EN);
if (result)
return result;
result = inv_write_mlx_reg(st);
if (result)
return result;
/*restore to non-bypass mode */
result = inv_i2c_single_write(st, REG_INT_PIN_CFG,
st->plat_data.int_config);
if (result)
return result;
/*setup master mode and master clock and ES bit*/
result = inv_i2c_single_write(st, REG_I2C_MST_CTRL, BIT_WAIT_FOR_ES);
if (result)
return result;
/* slave 0 used to write read measurement command, write mode */
result = inv_i2c_single_write(st, REG_I2C_SLV0_ADDR, addr);
if (result)
return result;
/* ignore the register address, send out data only */
result = inv_i2c_single_write(st, INV_MPU_REG_I2C_SLV0_DO,
DATA_MLX_CMD_READ_MEASURE);
if (result)
return result;
/* slave 1 used to read status bytes and data of read measurement */
result = inv_i2c_single_write(st, REG_I2C_SLV1_ADDR,
INV_MPU_BIT_I2C_READ | addr);
if (result)
return result;
/* slave 2 used to write single measurement command, write mode */
result = inv_i2c_single_write(st, REG_I2C_SLV2_ADDR, addr);
if (result)
return result;
/* ignore the register address, send out data only */
result = inv_i2c_single_write(st, INV_MPU_REG_I2C_SLV2_DO,
DATA_MLX_CMD_SINGLE_MEASURE);
if (result)
return result;
/* slave 3 used to read status bytes and data of read measurement */
result = inv_i2c_single_write(st, REG_I2C_SLV3_ADDR,
INV_MPU_BIT_I2C_READ | addr);
st->slave_compass->scale = DATA_MLX_SCALE;
return result;
}
static int inv_suspend_mlx(struct inv_mpu_state *st)
{
int result;
result = inv_i2c_single_write(st, REG_I2C_SLV0_CTRL, 0);
if (result)
return result;
result = inv_i2c_single_write(st, REG_I2C_SLV1_CTRL, 0);
if (result)
return result;
result = inv_i2c_single_write(st, REG_I2C_SLV2_CTRL, 0);
if (result)
return result;
result = inv_i2c_single_write(st, REG_I2C_SLV3_CTRL, 0);
return result;
}
static int inv_resume_mlx(struct inv_mpu_state *st)
{
int result;
/* enable, ignore register, write 1 bytes */
result = inv_i2c_single_write(st, REG_I2C_SLV0_CTRL,
INV_MPU_BIT_SLV_EN |
INV_MPU_BIT_REG_DIS |
1);
if (result)
return result;
/* enable, ignore register, read 9 bytes */
result = inv_i2c_single_write(st, REG_I2C_SLV1_CTRL,
INV_MPU_BIT_SLV_EN |
INV_MPU_BIT_REG_DIS |
DATA_MLX_READ_DATA_BYTES);
if (result)
return result;
/* enable, ignore register, write 1 byte */
result = inv_i2c_single_write(st, REG_I2C_SLV2_CTRL,
INV_MPU_BIT_SLV_EN |
INV_MPU_BIT_REG_DIS |
1);
if (result)
return result;
/* enable, ignore register, read 1 byte */
result = inv_i2c_single_write(st, REG_I2C_SLV3_CTRL,
INV_MPU_BIT_SLV_EN |
INV_MPU_BIT_REG_DIS |
1);
return result;
}
static struct inv_mpu_slave slave_akm = {
.suspend = inv_suspend_akm,
.resume = inv_resume_akm,
.get_scale = inv_read_akm_scale,
.set_scale = inv_write_akm_scale,
.self_test = inv_check_akm_self_test,
.setup = inv_setup_compass_akm,
.read_data = inv_akm_read_data,
.rate_scale = AKM_RATE_SCALE,
.min_read_time = DATA_AKM_MIN_READ_TIME,
};
static struct inv_mpu_slave slave_mlx90399 = {
.suspend = inv_suspend_mlx,
.resume = inv_resume_mlx,
.get_scale = inv_read_mlx_scale,
.set_scale = inv_write_mlx_scale,
.self_test = inv_check_mlx_self_test,
.setup = inv_setup_compass_mlx,
.read_data = inv_mlx_read_data,
.rate_scale = MLX_RATE_SCALE,
.min_read_time = DATA_MLX_MIN_READ_TIME,
};
int inv_mpu_setup_compass_slave(struct inv_mpu_state *st)
{
switch (st->plat_data.sec_slave_id) {
case COMPASS_ID_AK8975:
case COMPASS_ID_AK8972:
case COMPASS_ID_AK8963:
case COMPASS_ID_AK09911:
st->slave_compass = &slave_akm;
break;
case COMPASS_ID_MLX90399:
st->slave_compass = &slave_mlx90399;
break;
default:
return -EINVAL;
}
return st->slave_compass->setup(st);
}
| gpl-2.0 |
DC07/android_kernel_lge_dory | arch/powerpc/kvm/emulate.c | 1862 | 12935 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2007
* Copyright 2011 Freescale Semiconductor, Inc.
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>
#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"
#define OP_TRAP 3
#define OP_TRAP_64 2
#define OP_31_XOP_TRAP 4
#define OP_31_XOP_LWZX 23
#define OP_31_XOP_DCBST 54
#define OP_31_XOP_TRAP_64 68
#define OP_31_XOP_DCBF 86
#define OP_31_XOP_LBZX 87
#define OP_31_XOP_STWX 151
#define OP_31_XOP_STBX 215
#define OP_31_XOP_LBZUX 119
#define OP_31_XOP_STBUX 247
#define OP_31_XOP_LHZX 279
#define OP_31_XOP_LHZUX 311
#define OP_31_XOP_MFSPR 339
#define OP_31_XOP_LHAX 343
#define OP_31_XOP_STHX 407
#define OP_31_XOP_STHUX 439
#define OP_31_XOP_MTSPR 467
#define OP_31_XOP_DCBI 470
#define OP_31_XOP_LWBRX 534
#define OP_31_XOP_TLBSYNC 566
#define OP_31_XOP_STWBRX 662
#define OP_31_XOP_LHBRX 790
#define OP_31_XOP_STHBRX 918
#define OP_LWZ 32
#define OP_LD 58
#define OP_LWZU 33
#define OP_LBZ 34
#define OP_LBZU 35
#define OP_STW 36
#define OP_STWU 37
#define OP_STD 62
#define OP_STB 38
#define OP_STBU 39
#define OP_LHZ 40
#define OP_LHZU 41
#define OP_LHA 42
#define OP_LHAU 43
#define OP_STH 44
#define OP_STHU 45
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
unsigned long dec_nsec;
unsigned long long dec_time;
pr_debug("mtDEC: %x\n", vcpu->arch.dec);
hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
#ifdef CONFIG_PPC_BOOK3S
/* mtdec lowers the interrupt line when positive. */
kvmppc_core_dequeue_dec(vcpu);
/* POWER4+ triggers a dec interrupt if the value is < 0 */
if (vcpu->arch.dec & 0x80000000) {
kvmppc_core_queue_dec(vcpu);
return;
}
#endif
#ifdef CONFIG_BOOKE
/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
if (vcpu->arch.dec == 0)
return;
#endif
/*
* The decrementer ticks at the same rate as the timebase, so
* that's how we convert the guest DEC value to the number of
* host ticks.
*/
dec_time = vcpu->arch.dec;
/*
* Guest timebase ticks at the same frequency as host decrementer.
* So use the host decrementer calculations for decrementer emulation.
*/
dec_time = dec_time << decrementer_clockevent.shift;
do_div(dec_time, decrementer_clockevent.mult);
dec_nsec = do_div(dec_time, NSEC_PER_SEC);
hrtimer_start(&vcpu->arch.dec_timer,
ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
vcpu->arch.dec_jiffies = get_tb();
}
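/*
 * Illustrative sketch (not part of this file): a clockevent mult/shift
 * pair converts nanoseconds to ticks as ticks ~= (ns * mult) >> shift,
 * so the code above inverts it with ns ~= (ticks << shift) / mult and
 * then splits the result into the seconds/nanoseconds pair ktime_set()
 * expects. The helper below shows the same arithmetic on plain 64-bit
 * types, ignoring the in-place do_div() mechanics the kernel uses.
 */
static inline u64 guest_dec_to_nsec_sketch(u64 ticks, u32 mult, u32 shift)
{
	return (ticks << shift) / mult;
}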
u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
u64 jd = tb - vcpu->arch.dec_jiffies;
#ifdef CONFIG_BOOKE
if (vcpu->arch.dec < jd)
return 0;
#endif
return vcpu->arch.dec - jd;
}
static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
enum emulation_result emulated = EMULATE_DONE;
ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_SRR0:
vcpu->arch.shared->srr0 = spr_val;
break;
case SPRN_SRR1:
vcpu->arch.shared->srr1 = spr_val;
break;
/* XXX We need to context-switch the timebase for
* watchdog and FIT. */
case SPRN_TBWL: break;
case SPRN_TBWU: break;
case SPRN_DEC:
vcpu->arch.dec = spr_val;
kvmppc_emulate_dec(vcpu);
break;
case SPRN_SPRG0:
vcpu->arch.shared->sprg0 = spr_val;
break;
case SPRN_SPRG1:
vcpu->arch.shared->sprg1 = spr_val;
break;
case SPRN_SPRG2:
vcpu->arch.shared->sprg2 = spr_val;
break;
case SPRN_SPRG3:
vcpu->arch.shared->sprg3 = spr_val;
break;
default:
emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
spr_val);
if (emulated == EMULATE_FAIL)
printk(KERN_INFO "mtspr: unknown spr "
"0x%x\n", sprn);
break;
}
kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
return emulated;
}
static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
enum emulation_result emulated = EMULATE_DONE;
ulong spr_val = 0;
switch (sprn) {
case SPRN_SRR0:
spr_val = vcpu->arch.shared->srr0;
break;
case SPRN_SRR1:
spr_val = vcpu->arch.shared->srr1;
break;
case SPRN_PVR:
spr_val = vcpu->arch.pvr;
break;
case SPRN_PIR:
spr_val = vcpu->vcpu_id;
break;
/* Note: mftb and TBRL/TBWL are user-accessible, so
* the guest can always access the real TB anyway.
* In fact, we probably will never see these traps. */
case SPRN_TBWL:
spr_val = get_tb() >> 32;
break;
case SPRN_TBWU:
spr_val = get_tb();
break;
case SPRN_SPRG0:
spr_val = vcpu->arch.shared->sprg0;
break;
case SPRN_SPRG1:
spr_val = vcpu->arch.shared->sprg1;
break;
case SPRN_SPRG2:
spr_val = vcpu->arch.shared->sprg2;
break;
case SPRN_SPRG3:
spr_val = vcpu->arch.shared->sprg3;
break;
/* Note: SPRG4-7 are user-readable, so we don't get
* a trap. */
case SPRN_DEC:
spr_val = kvmppc_get_dec(vcpu, get_tb());
break;
default:
emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
&spr_val);
if (unlikely(emulated == EMULATE_FAIL)) {
printk(KERN_INFO "mfspr: unknown spr "
"0x%x\n", sprn);
}
break;
}
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, rt, spr_val);
kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
return emulated;
}
/* XXX to do:
* lhax
* lhaux
* lswx
* lswi
* stswx
* stswi
* lha
* lhau
* lmw
* stmw
*
* XXX is_bigendian should depend on MMU mapping or MSR[LE]
*/
/* XXX Should probably auto-generate instruction decoding for a particular core
* from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
u32 inst = kvmppc_get_last_inst(vcpu);
int ra = get_ra(inst);
int rs = get_rs(inst);
int rt = get_rt(inst);
int sprn = get_sprn(inst);
enum emulation_result emulated = EMULATE_DONE;
int advance = 1;
/* this default type might be overwritten by subcategories */
kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
switch (get_op(inst)) {
case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
case OP_TRAP_64:
kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
kvmppc_core_queue_program(vcpu,
vcpu->arch.shared->esr | ESR_PTR);
#endif
advance = 0;
break;
case 31:
switch (get_xop(inst)) {
case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
kvmppc_core_queue_program(vcpu,
vcpu->arch.shared->esr | ESR_PTR);
#endif
advance = 0;
break;
case OP_31_XOP_LWZX:
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break;
case OP_31_XOP_LBZX:
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break;
case OP_31_XOP_LBZUX:
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_STWX:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 1);
break;
case OP_31_XOP_STBX:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
break;
case OP_31_XOP_STBUX:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_LHAX:
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
break;
case OP_31_XOP_LHZX:
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
break;
case OP_31_XOP_LHZUX:
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_MFSPR:
emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
break;
case OP_31_XOP_STHX:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
break;
case OP_31_XOP_STHUX:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_31_XOP_MTSPR:
emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
break;
case OP_31_XOP_DCBST:
case OP_31_XOP_DCBF:
case OP_31_XOP_DCBI:
/* Do nothing. The guest is performing dcbi because
* hardware DMA is not snooped by the dcache, but
* emulated DMA either goes through the dcache as
* normal writes, or the host kernel has handled dcache
* coherence. */
break;
case OP_31_XOP_LWBRX:
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
break;
case OP_31_XOP_TLBSYNC:
break;
case OP_31_XOP_STWBRX:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 0);
break;
case OP_31_XOP_LHBRX:
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
break;
case OP_31_XOP_STHBRX:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 0);
break;
default:
/* Attempt core-specific emulation below. */
emulated = EMULATE_FAIL;
}
break;
case OP_LWZ:
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break;
/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
case OP_LD:
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
break;
case OP_LWZU:
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_LBZ:
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
break;
case OP_LBZU:
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_STW:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 1);
break;
/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
case OP_STD:
rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
8, 1);
break;
case OP_STWU:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
4, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_STB:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
break;
case OP_STBU:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
1, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_LHZ:
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
break;
case OP_LHZU:
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_LHA:
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
break;
case OP_LHAU:
emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
case OP_STH:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
break;
case OP_STHU:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
2, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
default:
emulated = EMULATE_FAIL;
}
if (emulated == EMULATE_FAIL) {
emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
if (emulated == EMULATE_AGAIN) {
advance = 0;
} else if (emulated == EMULATE_FAIL) {
advance = 0;
printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
"(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
kvmppc_core_queue_program(vcpu, 0);
}
}
trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
/* Advance past emulated instruction. */
if (advance)
kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
return emulated;
}
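/*
 * Illustrative sketch (not part of this file): PowerPC stores the
 * primary opcode in the top 6 bits of the instruction word, and the
 * extended opcode of the "31" family in bits 1..10; this is what the
 * get_op()/get_xop() helpers from asm/disassemble.h are assumed to
 * extract for the switch statement above.
 */
static inline u32 sketch_get_op(u32 inst)
{
	return inst >> 26;		/* primary opcode */
}

static inline u32 sketch_get_xop(u32 inst)
{
	return (inst >> 1) & 0x3ff;	/* extended opcode for opcode 31 */
}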
| gpl-2.0 |
SchulerControl/linux | net/dccp/ipv4.c | 2118 | 29107 | /*
* net/dccp/ipv4.c
*
* An implementation of the DCCP protocol
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/dccp.h>
#include <linux/icmp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/protocol.h>
#include <net/sock.h>
#include <net/timewait_sock.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
#include "feat.h"
/*
* The per-net dccp.v4_ctl_sk socket is used for responding to
* out-of-the-blue (OOTB) packets. A control socket is created
* for this purpose at initialization time.
*/
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
struct inet_sock *inet = inet_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
__be16 orig_sport, orig_dport;
__be32 daddr, nexthop;
struct flowi4 *fl4;
struct rtable *rt;
int err;
struct ip_options_rcu *inet_opt;
dp->dccps_role = DCCP_ROLE_CLIENT;
if (addr_len < sizeof(struct sockaddr_in))
return -EINVAL;
if (usin->sin_family != AF_INET)
return -EAFNOSUPPORT;
nexthop = daddr = usin->sin_addr.s_addr;
inet_opt = rcu_dereference_protected(inet->inet_opt,
sock_owned_by_user(sk));
if (inet_opt != NULL && inet_opt->opt.srr) {
if (daddr == 0)
return -EINVAL;
nexthop = inet_opt->opt.faddr;
}
orig_sport = inet->inet_sport;
orig_dport = usin->sin_port;
fl4 = &inet->cork.fl.u.ip4;
rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_DCCP,
orig_sport, orig_dport, sk, true);
if (IS_ERR(rt))
return PTR_ERR(rt);
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
ip_rt_put(rt);
return -ENETUNREACH;
}
if (inet_opt == NULL || !inet_opt->opt.srr)
daddr = fl4->daddr;
if (inet->inet_saddr == 0)
inet->inet_saddr = fl4->saddr;
inet->inet_rcv_saddr = inet->inet_saddr;
inet->inet_dport = usin->sin_port;
inet->inet_daddr = daddr;
inet_csk(sk)->icsk_ext_hdr_len = 0;
if (inet_opt)
inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
/*
* Socket identity is still unknown (sport may be zero).
* However we set the state to DCCP_REQUESTING and, without releasing the
* socket lock, select a source port, enter ourselves into the hash tables and
* complete initialization after this.
*/
dccp_set_state(sk, DCCP_REQUESTING);
err = inet_hash_connect(&dccp_death_row, sk);
if (err != 0)
goto failure;
rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
inet->inet_sport, inet->inet_dport, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
goto failure;
}
/* OK, now commit destination to socket. */
sk_setup_caps(sk, &rt->dst);
dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr,
inet->inet_daddr,
inet->inet_sport,
inet->inet_dport);
inet->inet_id = dp->dccps_iss ^ jiffies;
err = dccp_connect(sk);
rt = NULL;
if (err != 0)
goto failure;
out:
return err;
failure:
/*
* This unhashes the socket and releases the local port, if necessary.
*/
dccp_set_state(sk, DCCP_CLOSED);
ip_rt_put(rt);
sk->sk_route_caps = 0;
inet->inet_dport = 0;
goto out;
}
EXPORT_SYMBOL_GPL(dccp_v4_connect);
/*
* This routine does path mtu discovery as defined in RFC1191.
*/
static inline void dccp_do_pmtu_discovery(struct sock *sk,
const struct iphdr *iph,
u32 mtu)
{
struct dst_entry *dst;
const struct inet_sock *inet = inet_sk(sk);
const struct dccp_sock *dp = dccp_sk(sk);
/* We are not interested in DCCP_LISTEN and request_socks (RESPONSEs
* sent out by Linux are always < 576 bytes so they should go through
* unfragmented).
*/
if (sk->sk_state == DCCP_LISTEN)
return;
dst = inet_csk_update_pmtu(sk, mtu);
if (!dst)
return;
/* Something is about to go wrong... Remember the soft error
 * in case this connection is not able to recover.
*/
if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
sk->sk_err_soft = EMSGSIZE;
mtu = dst_mtu(dst);
if (inet->pmtudisc != IP_PMTUDISC_DONT &&
inet_csk(sk)->icsk_pmtu_cookie > mtu) {
dccp_sync_mss(sk, mtu);
/*
* From RFC 4340, sec. 14.1:
*
* DCCP-Sync packets are the best choice for upward
* probing, since DCCP-Sync probes do not risk application
* data loss.
*/
dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
} /* else let the usual retransmit timer handle it */
}
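/*
 * Illustrative sketch (not part of this file): the MSS update above
 * only takes effect when the socket performs PMTU discovery and its
 * cached path MTU exceeds the value the route now reports; the probe
 * is then sent as a DCCP-Sync per RFC 4340, sec. 14.1.
 */
static inline bool dccp_pmtu_shrink_needed_sketch(bool pmtudisc_enabled,
						  u32 cached_pmtu, u32 new_mtu)
{
	return pmtudisc_enabled && cached_pmtu > new_mtu;
}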
static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk)
{
struct dst_entry *dst = __sk_dst_check(sk, 0);
if (dst)
dst->ops->redirect(dst, sk, skb);
}
/*
* This routine is called by the ICMP module when it gets some sort of error
* condition. If err < 0 then the socket should be closed and the error
* returned to the user. If err > 0 it's just the icmp type << 8 | icmp code.
* After adjustment the header points to the first 8 bytes of the DCCP header. We
* need to find the appropriate port.
*
* The locking strategy used here is very "optimistic". When someone else
* accesses the socket the ICMP is just dropped and for some paths there is no
* check at all. A more general error queue to queue errors for later handling
* is probably better.
*/
static void dccp_v4_err(struct sk_buff *skb, u32 info)
{
const struct iphdr *iph = (struct iphdr *)skb->data;
const u8 offset = iph->ihl << 2;
const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
struct dccp_sock *dp;
struct inet_sock *inet;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct sock *sk;
__u64 seq;
int err;
struct net *net = dev_net(skb->dev);
if (skb->len < offset + sizeof(*dh) ||
skb->len < offset + __dccp_basic_hdr_len(dh)) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return;
}
sk = inet_lookup(net, &dccp_hashinfo,
iph->daddr, dh->dccph_dport,
iph->saddr, dh->dccph_sport, inet_iif(skb));
if (sk == NULL) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return;
}
if (sk->sk_state == DCCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
return;
}
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
* servers this needs to be solved differently.
*/
if (sock_owned_by_user(sk))
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == DCCP_CLOSED)
goto out;
dp = dccp_sk(sk);
seq = dccp_hdr_seq(dh);
if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
!between48(seq, dp->dccps_awl, dp->dccps_awh)) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
switch (type) {
case ICMP_REDIRECT:
dccp_do_redirect(skb, sk);
goto out;
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
goto out;
case ICMP_PARAMETERPROB:
err = EPROTO;
break;
case ICMP_DEST_UNREACH:
if (code > NR_ICMP_UNREACH)
goto out;
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
if (!sock_owned_by_user(sk))
dccp_do_pmtu_discovery(sk, iph, info);
goto out;
}
err = icmp_err_convert[code].errno;
break;
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
break;
default:
goto out;
}
switch (sk->sk_state) {
struct request_sock *req, **prev;
case DCCP_LISTEN:
if (sock_owned_by_user(sk))
goto out;
req = inet_csk_search_req(sk, &prev, dh->dccph_dport,
iph->daddr, iph->saddr);
if (!req)
goto out;
/*
* ICMPs are not backlogged, hence we cannot get an established
* socket here.
*/
WARN_ON(req->sk);
if (!between48(seq, dccp_rsk(req)->dreq_iss,
dccp_rsk(req)->dreq_gss)) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
/*
* Still in RESPOND, just remove it silently.
* There is no good way to pass the error to the newly
* created socket, and POSIX does not want network
* errors returned from accept().
*/
inet_csk_reqsk_queue_drop(sk, req, prev);
goto out;
case DCCP_REQUESTING:
case DCCP_RESPOND:
if (!sock_owned_by_user(sk)) {
DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
sk->sk_err = err;
sk->sk_error_report(sk);
dccp_done(sk);
} else
sk->sk_err_soft = err;
goto out;
}
/* If we've already connected we will keep trying
* until we time out, or the user gives up.
*
* RFC 1122, 4.2.3.9 allows us to consider as hard errors
* only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
* but it is obsoleted by PMTU discovery).
*
* Note that in the modern internet, where routing is unreliable
* and broken firewalls sit in every dark corner sending random
* errors ordered by their masters, even these two messages finally lose
* their original sense (even Linux sends invalid PORT_UNREACHs)
*
* Now we are in compliance with RFCs.
* --ANK (980905)
*/
inet = inet_sk(sk);
if (!sock_owned_by_user(sk) && inet->recverr) {
sk->sk_err = err;
sk->sk_error_report(sk);
} else /* Only an error on timeout */
sk->sk_err_soft = err;
out:
bh_unlock_sock(sk);
sock_put(sk);
}
static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb,
__be32 src, __be32 dst)
{
return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum);
}
void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
const struct inet_sock *inet = inet_sk(sk);
struct dccp_hdr *dh = dccp_hdr(skb);
dccp_csum_outgoing(skb);
dh->dccph_checksum = dccp_v4_csum_finish(skb,
inet->inet_saddr,
inet->inet_daddr);
}
EXPORT_SYMBOL_GPL(dccp_v4_send_check);
static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb)
{
return secure_dccp_sequence_number(ip_hdr(skb)->daddr,
ip_hdr(skb)->saddr,
dccp_hdr(skb)->dccph_dport,
dccp_hdr(skb)->dccph_sport);
}
/*
* The three way handshake has completed - we got a valid ACK or DATAACK -
* now create the new socket.
*
* This is the equivalent of TCP's tcp_v4_syn_recv_sock
*/
struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct inet_request_sock *ireq;
struct inet_sock *newinet;
struct sock *newsk;
if (sk_acceptq_is_full(sk))
goto exit_overflow;
newsk = dccp_create_openreq_child(sk, req, skb);
if (newsk == NULL)
goto exit_nonewsk;
newinet = inet_sk(newsk);
ireq = inet_rsk(req);
newinet->inet_daddr = ireq->rmt_addr;
newinet->inet_rcv_saddr = ireq->loc_addr;
newinet->inet_saddr = ireq->loc_addr;
newinet->inet_opt = ireq->opt;
ireq->opt = NULL;
newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = ip_hdr(skb)->ttl;
newinet->inet_id = jiffies;
if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
goto put_and_exit;
sk_setup_caps(newsk, dst);
dccp_sync_mss(newsk, dst_mtu(dst));
if (__inet_inherit_port(sk, newsk) < 0)
goto put_and_exit;
__inet_hash_nolisten(newsk, NULL);
return newsk;
exit_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
dst_release(dst);
exit:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
put_and_exit:
inet_csk_prepare_forced_close(newsk);
dccp_done(newsk);
goto exit;
}
EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock);
static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
{
const struct dccp_hdr *dh = dccp_hdr(skb);
const struct iphdr *iph = ip_hdr(skb);
struct sock *nsk;
struct request_sock **prev;
/* Find possible connection requests. */
struct request_sock *req = inet_csk_search_req(sk, &prev,
dh->dccph_sport,
iph->saddr, iph->daddr);
if (req != NULL)
return dccp_check_req(sk, skb, req, prev);
nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo,
iph->saddr, dh->dccph_sport,
iph->daddr, dh->dccph_dport,
inet_iif(skb));
if (nsk != NULL) {
if (nsk->sk_state != DCCP_TIME_WAIT) {
bh_lock_sock(nsk);
return nsk;
}
inet_twsk_put(inet_twsk(nsk));
return NULL;
}
return sk;
}
static struct dst_entry *dccp_v4_route_skb(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
struct rtable *rt;
const struct iphdr *iph = ip_hdr(skb);
struct flowi4 fl4 = {
.flowi4_oif = inet_iif(skb),
.daddr = iph->saddr,
.saddr = iph->daddr,
.flowi4_tos = RT_CONN_FLAGS(sk),
.flowi4_proto = sk->sk_protocol,
.fl4_sport = dccp_hdr(skb)->dccph_dport,
.fl4_dport = dccp_hdr(skb)->dccph_sport,
};
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
return &rt->dst;
}
static int dccp_v4_send_response(struct sock *sk, struct request_sock *req)
{
int err = -1;
struct sk_buff *skb;
struct dst_entry *dst;
struct flowi4 fl4;
dst = inet_csk_route_req(sk, &fl4, req);
if (dst == NULL)
goto out;
skb = dccp_make_response(sk, dst, req);
if (skb != NULL) {
const struct inet_request_sock *ireq = inet_rsk(req);
struct dccp_hdr *dh = dccp_hdr(skb);
dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr,
ireq->rmt_addr);
err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
ireq->rmt_addr,
ireq->opt);
err = net_xmit_eval(err);
}
out:
dst_release(dst);
return err;
}
static void dccp_v4_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
int err;
const struct iphdr *rxiph;
struct sk_buff *skb;
struct dst_entry *dst;
struct net *net = dev_net(skb_dst(rxskb)->dev);
struct sock *ctl_sk = net->dccp.v4_ctl_sk;
/* Never send a reset in response to a reset. */
if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
return;
if (skb_rtable(rxskb)->rt_type != RTN_LOCAL)
return;
dst = dccp_v4_route_skb(net, ctl_sk, rxskb);
if (dst == NULL)
return;
skb = dccp_ctl_make_reset(ctl_sk, rxskb);
if (skb == NULL)
goto out;
rxiph = ip_hdr(rxskb);
dccp_hdr(skb)->dccph_checksum = dccp_v4_csum_finish(skb, rxiph->saddr,
rxiph->daddr);
skb_dst_set(skb, dst_clone(dst));
bh_lock_sock(ctl_sk);
err = ip_build_and_send_pkt(skb, ctl_sk,
rxiph->daddr, rxiph->saddr, NULL);
bh_unlock_sock(ctl_sk);
if (net_xmit_eval(err) == 0) {
DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
}
out:
dst_release(dst);
}
static void dccp_v4_reqsk_destructor(struct request_sock *req)
{
dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
kfree(inet_rsk(req)->opt);
}
void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
{
}
EXPORT_SYMBOL(dccp_syn_ack_timeout);
static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
.family = PF_INET,
.obj_size = sizeof(struct dccp_request_sock),
.rtx_syn_ack = dccp_v4_send_response,
.send_ack = dccp_reqsk_send_ack,
.destructor = dccp_v4_reqsk_destructor,
.send_reset = dccp_v4_ctl_send_reset,
.syn_ack_timeout = dccp_syn_ack_timeout,
};
int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct inet_request_sock *ireq;
struct request_sock *req;
struct dccp_request_sock *dreq;
const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
/* Never answer to DCCP_PKT_REQUESTs sent to broadcast or multicast */
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
return 0; /* discard, don't send a reset here */
if (dccp_bad_service_code(sk, service)) {
dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
goto drop;
}
/*
* TW buckets are converted to open requests without
* limitations; they conserve resources and the peer is
* evidently a real one.
*/
dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
if (inet_csk_reqsk_queue_is_full(sk))
goto drop;
/*
* Accept backlog is full. If we have already queued enough
* warm entries in the syn queue, drop the request. It is better than
* clogging the syn queue with openreqs with exponentially increasing
* timeout.
*/
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop;
req = inet_reqsk_alloc(&dccp_request_sock_ops);
if (req == NULL)
goto drop;
if (dccp_reqsk_init(req, dccp_sk(sk), skb))
goto drop_and_free;
dreq = dccp_rsk(req);
if (dccp_parse_options(sk, dreq, skb))
goto drop_and_free;
if (security_inet_conn_request(sk, skb, req))
goto drop_and_free;
ireq = inet_rsk(req);
ireq->loc_addr = ip_hdr(skb)->daddr;
ireq->rmt_addr = ip_hdr(skb)->saddr;
/*
* Step 3: Process LISTEN state
*
* Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
*
* Setting S.SWL/S.SWH is deferred to dccp_create_openreq_child().
*/
dreq->dreq_isr = dcb->dccpd_seq;
dreq->dreq_gsr = dreq->dreq_isr;
dreq->dreq_iss = dccp_v4_init_sequence(skb);
dreq->dreq_gss = dreq->dreq_iss;
dreq->dreq_service = service;
if (dccp_v4_send_response(sk, req))
goto drop_and_free;
inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
return 0;
drop_and_free:
reqsk_free(req);
drop:
DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
return -1;
}
EXPORT_SYMBOL_GPL(dccp_v4_conn_request);
int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
struct dccp_hdr *dh = dccp_hdr(skb);
if (sk->sk_state == DCCP_OPEN) { /* Fast path */
if (dccp_rcv_established(sk, skb, dh, skb->len))
goto reset;
return 0;
}
/*
* Step 3: Process LISTEN state
* If P.type == Request or P contains a valid Init Cookie option,
* (* Must scan the packet's options to check for Init
* Cookies. Only Init Cookies are processed here,
* however; other options are processed in Step 8. This
* scan need only be performed if the endpoint uses Init
* Cookies *)
* (* Generate a new socket and switch to that socket *)
* Set S := new socket for this port pair
* S.state = RESPOND
* Choose S.ISS (initial seqno) or set from Init Cookies
* Initialize S.GAR := S.ISS
* Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
* Continue with S.state == RESPOND
* (* A Response packet will be generated in Step 11 *)
* Otherwise,
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*
* NOTE: the check for the packet types is done in
* dccp_rcv_state_process
*/
if (sk->sk_state == DCCP_LISTEN) {
struct sock *nsk = dccp_v4_hnd_req(sk, skb);
if (nsk == NULL)
goto discard;
if (nsk != sk) {
if (dccp_child_process(sk, nsk, skb))
goto reset;
return 0;
}
}
if (dccp_rcv_state_process(sk, skb, dh, skb->len))
goto reset;
return 0;
reset:
dccp_v4_ctl_send_reset(sk, skb);
discard:
kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL_GPL(dccp_v4_do_rcv);
/**
* dccp_invalid_packet - check for malformed packets
* @skb: packet to validate
*
* Implements RFC 4340, 8.5: Step 1: Check header basics
* Packets that fail these checks are ignored and do not receive Resets.
*/
int dccp_invalid_packet(struct sk_buff *skb)
{
const struct dccp_hdr *dh;
unsigned int cscov;
if (skb->pkt_type != PACKET_HOST)
return 1;
/* If the packet is shorter than 12 bytes, drop packet and return */
if (!pskb_may_pull(skb, sizeof(struct dccp_hdr))) {
DCCP_WARN("pskb_may_pull failed\n");
return 1;
}
dh = dccp_hdr(skb);
/* If P.type is not understood, drop packet and return */
if (dh->dccph_type >= DCCP_PKT_INVALID) {
DCCP_WARN("invalid packet type\n");
return 1;
}
/*
* If P.Data Offset is too small for packet type, drop packet and return
*/
if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
return 1;
}
/*
* If P.Data Offset is too large for packet, drop packet and return
*/
if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
return 1;
}
/*
* If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
* has short sequence numbers), drop packet and return
*/
if ((dh->dccph_type < DCCP_PKT_DATA ||
dh->dccph_type > DCCP_PKT_DATAACK) && dh->dccph_x == 0) {
DCCP_WARN("P.type (%s) not Data || [Data]Ack, while P.X == 0\n",
dccp_packet_name(dh->dccph_type));
return 1;
}
/*
* If P.CsCov is too large for the packet size, drop packet and return.
* This must come _before_ checksumming (not as RFC 4340 suggests).
*/
cscov = dccp_csum_coverage(skb);
if (cscov > skb->len) {
DCCP_WARN("P.CsCov %u exceeds packet length %d\n",
dh->dccph_cscov, skb->len);
return 1;
}
/* If header checksum is incorrect, drop packet and return.
* (This step is completed in the AF-dependent functions.) */
skb->csum = skb_checksum(skb, 0, cscov, 0);
return 0;
}
EXPORT_SYMBOL_GPL(dccp_invalid_packet);
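/*
 * Illustrative sketch (not part of this file): per RFC 4340, sec. 9.2
 * a CsCov of 0 covers the whole packet, while CsCov == n > 0 covers
 * the header plus the initial (n - 1) 32-bit words of application
 * data. The helper below restates that rule on plain integers; the
 * exact formula used by dccp_csum_coverage() is an assumption here.
 */
static inline unsigned int dccp_csum_coverage_sketch(unsigned int cscov,
						     unsigned int doff,
						     unsigned int pkt_len)
{
	if (cscov == 0)
		return pkt_len;			/* full coverage */
	return (doff + cscov - 1) * 4;		/* header + (CsCov - 1) words */
}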
/* this is called when real data arrives */
static int dccp_v4_rcv(struct sk_buff *skb)
{
const struct dccp_hdr *dh;
const struct iphdr *iph;
struct sock *sk;
int min_cov;
/* Step 1: Check header basics */
if (dccp_invalid_packet(skb))
goto discard_it;
iph = ip_hdr(skb);
/* Step 1: If header checksum is incorrect, drop packet and return */
if (dccp_v4_csum_finish(skb, iph->saddr, iph->daddr)) {
DCCP_WARN("dropped packet with invalid checksum\n");
goto discard_it;
}
dh = dccp_hdr(skb);
DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh);
DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
dccp_pr_debug("%8.8s src=%pI4@%-5d dst=%pI4@%-5d seq=%llu",
dccp_packet_name(dh->dccph_type),
&iph->saddr, ntohs(dh->dccph_sport),
&iph->daddr, ntohs(dh->dccph_dport),
(unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq);
if (dccp_packet_without_ack(skb)) {
DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
dccp_pr_debug_cat("\n");
} else {
DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
dccp_pr_debug_cat(", ack=%llu\n", (unsigned long long)
DCCP_SKB_CB(skb)->dccpd_ack_seq);
}
/* Step 2:
* Look up flow ID in table and get corresponding socket */
sk = __inet_lookup_skb(&dccp_hashinfo, skb,
dh->dccph_sport, dh->dccph_dport);
/*
* Step 2:
* If no socket ...
*/
if (sk == NULL) {
dccp_pr_debug("failed to look up flow ID in table and "
"get corresponding socket\n");
goto no_dccp_socket;
}
/*
* Step 2:
* ... or S.state == TIMEWAIT,
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*/
if (sk->sk_state == DCCP_TIME_WAIT) {
dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
inet_twsk_put(inet_twsk(sk));
goto no_dccp_socket;
}
/*
* RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
* o if MinCsCov = 0, only packets with CsCov = 0 are accepted
* o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
*/
min_cov = dccp_sk(sk)->dccps_pcrlen;
if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
dh->dccph_cscov, min_cov);
/* FIXME: "Such packets SHOULD be reported using Data Dropped
* options (Section 11.7) with Drop Code 0, Protocol
* Constraints." */
goto discard_and_relse;
}
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
nf_reset(skb);
return sk_receive_skb(sk, skb, 1);
no_dccp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
/*
* Step 2:
* If no socket ...
* Generate Reset(No Connection) unless P.type == Reset
* Drop packet and return
*/
if (dh->dccph_type != DCCP_PKT_RESET) {
DCCP_SKB_CB(skb)->dccpd_reset_code =
DCCP_RESET_CODE_NO_CONNECTION;
dccp_v4_ctl_send_reset(sk, skb);
}
discard_it:
kfree_skb(skb);
return 0;
discard_and_relse:
sock_put(sk);
goto discard_it;
}
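/*
 * Illustrative sketch (not part of this file): the MinCsCov test in
 * dccp_v4_rcv() above, rewritten as a positive acceptance predicate
 * for RFC 4340, sec. 9.2.1.
 */
static inline bool dccp_cscov_ok_sketch(unsigned int cscov, unsigned int min_cov)
{
	if (cscov == 0)		/* fully covered packets are always accepted */
		return true;
	/* partial coverage needs MinCsCov > 0 and CsCov >= MinCsCov */
	return min_cov > 0 && cscov >= min_cov;
}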
static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
.queue_xmit = ip_queue_xmit,
.send_check = dccp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
.conn_request = dccp_v4_conn_request,
.syn_recv_sock = dccp_v4_request_recv_sock,
.net_header_len = sizeof(struct iphdr),
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
.addr2sockaddr = inet_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in),
.bind_conflict = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ip_setsockopt,
.compat_getsockopt = compat_ip_getsockopt,
#endif
};
static int dccp_v4_init_sock(struct sock *sk)
{
static __u8 dccp_v4_ctl_sock_initialized;
int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized);
if (err == 0) {
if (unlikely(!dccp_v4_ctl_sock_initialized))
dccp_v4_ctl_sock_initialized = 1;
inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops;
}
return err;
}
static struct timewait_sock_ops dccp_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct inet_timewait_sock),
};
static struct proto dccp_v4_prot = {
.name = "DCCP",
.owner = THIS_MODULE,
.close = dccp_close,
.connect = dccp_v4_connect,
.disconnect = dccp_disconnect,
.ioctl = dccp_ioctl,
.init = dccp_v4_init_sock,
.setsockopt = dccp_setsockopt,
.getsockopt = dccp_getsockopt,
.sendmsg = dccp_sendmsg,
.recvmsg = dccp_recvmsg,
.backlog_rcv = dccp_v4_do_rcv,
.hash = inet_hash,
.unhash = inet_unhash,
.accept = inet_csk_accept,
.get_port = inet_csk_get_port,
.shutdown = dccp_shutdown,
.destroy = dccp_destroy_sock,
.orphan_count = &dccp_orphan_count,
.max_header = MAX_DCCP_HEADER,
.obj_size = sizeof(struct dccp_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.rsk_prot = &dccp_request_sock_ops,
.twsk_prot = &dccp_timewait_sock_ops,
.h.hashinfo = &dccp_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_dccp_setsockopt,
.compat_getsockopt = compat_dccp_getsockopt,
#endif
};
static const struct net_protocol dccp_v4_protocol = {
.handler = dccp_v4_rcv,
.err_handler = dccp_v4_err,
.no_policy = 1,
.netns_ok = 1,
};
static const struct proto_ops inet_dccp_ops = {
.family = PF_INET,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
.connect = inet_stream_connect,
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet_getname,
/* FIXME: work on tcp_poll to rename it to inet_csk_poll */
.poll = dccp_poll,
.ioctl = inet_ioctl,
/* FIXME: work on inet_listen to rename it to sock_common_listen */
.listen = inet_dccp_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw dccp_v4_protosw = {
.type = SOCK_DCCP,
.protocol = IPPROTO_DCCP,
.prot = &dccp_v4_prot,
.ops = &inet_dccp_ops,
.no_check = 0,
.flags = INET_PROTOSW_ICSK,
};
static int __net_init dccp_v4_init_net(struct net *net)
{
if (dccp_hashinfo.bhash == NULL)
return -ESOCKTNOSUPPORT;
return inet_ctl_sock_create(&net->dccp.v4_ctl_sk, PF_INET,
SOCK_DCCP, IPPROTO_DCCP, net);
}
static void __net_exit dccp_v4_exit_net(struct net *net)
{
inet_ctl_sock_destroy(net->dccp.v4_ctl_sk);
}
static struct pernet_operations dccp_v4_ops = {
.init = dccp_v4_init_net,
.exit = dccp_v4_exit_net,
};
static int __init dccp_v4_init(void)
{
int err = proto_register(&dccp_v4_prot, 1);
if (err != 0)
goto out;
err = inet_add_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
if (err != 0)
goto out_proto_unregister;
inet_register_protosw(&dccp_v4_protosw);
err = register_pernet_subsys(&dccp_v4_ops);
if (err)
goto out_destroy_ctl_sock;
out:
return err;
out_destroy_ctl_sock:
inet_unregister_protosw(&dccp_v4_protosw);
inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
out_proto_unregister:
proto_unregister(&dccp_v4_prot);
goto out;
}
static void __exit dccp_v4_exit(void)
{
unregister_pernet_subsys(&dccp_v4_ops);
inet_unregister_protosw(&dccp_v4_protosw);
inet_del_protocol(&dccp_v4_protocol, IPPROTO_DCCP);
proto_unregister(&dccp_v4_prot);
}
module_init(dccp_v4_init);
module_exit(dccp_v4_exit);
/*
* __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
* values directly. Also cover the case where the protocol is not specified,
* i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
*/
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 33, 6);
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 0, 6);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");
| gpl-2.0 |
itgb/linux-4.1.10 | drivers/net/wireless/rtlwifi/rtl8723com/main.c | 2118 | 1242 | /******************************************************************************
*
* Copyright(c) 2009-2014 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
*****************************************************************************/
#include "../wifi.h"
#include <linux/module.h>
MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek RTL8723AE/RTL8723BE 802.11n PCI wireless common routines");
| gpl-2.0 |
djvoleur/S6_UniPR_BOI1 | drivers/net/wireless/ath/carl9170/tx.c | 2118 | 42845 | /*
* Atheros CARL9170 driver
*
* 802.11 xmit & status routines
*
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, see
* http://www.gnu.org/licenses/.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* Copyright (c) 2007-2008 Atheros Communications, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "carl9170.h"
#include "hw.h"
#include "cmd.h"
static inline unsigned int __carl9170_get_queue(struct ar9170 *ar,
unsigned int queue)
{
if (unlikely(modparam_noht)) {
return queue;
} else {
/*
* This is just another workaround, until
* someone figures out how to get QoS and
* AMPDU to play nicely together.
*/
return 2; /* AC_BE */
}
}
static inline unsigned int carl9170_get_queue(struct ar9170 *ar,
struct sk_buff *skb)
{
return __carl9170_get_queue(ar, skb_get_queue_mapping(skb));
}
static bool is_mem_full(struct ar9170 *ar)
{
return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) >
atomic_read(&ar->mem_free_blocks));
}
static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb)
{
int queue, i;
bool mem_full;
atomic_inc(&ar->tx_total_queued);
queue = skb_get_queue_mapping(skb);
spin_lock_bh(&ar->tx_stats_lock);
/*
* The driver has to accept the frame, regardless if the queue is
* full to the brim, or not. We have to do the queuing internally,
* since mac80211 assumes that a driver which can operate with
* aggregated frames does not reject frames for this reason.
*/
ar->tx_stats[queue].len++;
ar->tx_stats[queue].count++;
mem_full = is_mem_full(ar);
for (i = 0; i < ar->hw->queues; i++) {
if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
ieee80211_stop_queue(ar->hw, i);
ar->queue_stop_timeout[i] = jiffies;
}
}
spin_unlock_bh(&ar->tx_stats_lock);
}
/* needs rcu_read_lock */
static struct ieee80211_sta *__carl9170_get_tx_sta(struct ar9170 *ar,
struct sk_buff *skb)
{
struct _carl9170_tx_superframe *super = (void *) skb->data;
struct ieee80211_hdr *hdr = (void *) super->frame_data;
struct ieee80211_vif *vif;
unsigned int vif_id;
vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
CARL9170_TX_SUPER_MISC_VIF_ID_S;
if (WARN_ON_ONCE(vif_id >= AR9170_MAX_VIRTUAL_MAC))
return NULL;
vif = rcu_dereference(ar->vif_priv[vif_id].vif);
if (unlikely(!vif))
return NULL;
/*
* Normally we should use wrappers like ieee80211_get_DA to get
* the correct peer ieee80211_sta.
*
* But there is a problem with indirect traffic (broadcasts, or
* data which is designated for other stations) in station mode.
* The frame will be directed to the AP for distribution and not
* to the actual destination.
*/
return ieee80211_find_sta(vif, hdr->addr1);
}
static void carl9170_tx_ps_unblock(struct ar9170 *ar, struct sk_buff *skb)
{
struct ieee80211_sta *sta;
struct carl9170_sta_info *sta_info;
rcu_read_lock();
sta = __carl9170_get_tx_sta(ar, skb);
if (unlikely(!sta))
goto out_rcu;
sta_info = (struct carl9170_sta_info *) sta->drv_priv;
if (atomic_dec_return(&sta_info->pending_frames) == 0)
ieee80211_sta_block_awake(ar->hw, sta, false);
out_rcu:
rcu_read_unlock();
}
static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
{
int queue;
queue = skb_get_queue_mapping(skb);
spin_lock_bh(&ar->tx_stats_lock);
ar->tx_stats[queue].len--;
if (!is_mem_full(ar)) {
unsigned int i;
for (i = 0; i < ar->hw->queues; i++) {
if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT)
continue;
if (ieee80211_queue_stopped(ar->hw, i)) {
unsigned long tmp;
tmp = jiffies - ar->queue_stop_timeout[i];
if (tmp > ar->max_queue_stop_timeout[i])
ar->max_queue_stop_timeout[i] = tmp;
}
ieee80211_wake_queue(ar->hw, i);
}
}
spin_unlock_bh(&ar->tx_stats_lock);
if (atomic_dec_and_test(&ar->tx_total_queued))
complete(&ar->tx_flush);
}
static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
struct _carl9170_tx_superframe *super = (void *) skb->data;
unsigned int chunks;
int cookie = -1;
atomic_inc(&ar->mem_allocs);
chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
atomic_add(chunks, &ar->mem_free_blocks);
return -ENOSPC;
}
spin_lock_bh(&ar->mem_lock);
cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0);
spin_unlock_bh(&ar->mem_lock);
if (unlikely(cookie < 0)) {
atomic_add(chunks, &ar->mem_free_blocks);
return -ENOSPC;
}
super = (void *) skb->data;
/*
* Cookie #0 serves two special purposes:
* 1. The firmware might use it to generate BlockACK frames
* in response to incoming BlockAckReqs.
*
* 2. Prevent double-free bugs.
*/
super->s.cookie = (u8) cookie + 1;
return 0;
}
static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
struct _carl9170_tx_superframe *super = (void *) skb->data;
int cookie;
/* make a local copy of the cookie */
cookie = super->s.cookie;
/* invalidate cookie */
super->s.cookie = 0;
/*
* Do an out-of-bounds check on the cookie:
*
* * cookie "0" is reserved and won't be assigned to any
* out-going frame. Internally however, it is used to
* mark no longer/un-accounted frames and serves as a
* cheap way of preventing frames from being freed
* twice by _accident_. NB: There is a tiny race...
*
* * obviously, cookie number is limited by the amount
* of available memory blocks, so the number can
* never exceed the mem_blocks count.
*/
if (unlikely(WARN_ON_ONCE(cookie == 0) ||
WARN_ON_ONCE(cookie > ar->fw.mem_blocks)))
return;
atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
&ar->mem_free_blocks);
spin_lock_bh(&ar->mem_lock);
bitmap_release_region(ar->mem_bitmap, cookie - 1, 0);
spin_unlock_bh(&ar->mem_lock);
}
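/*
 * Illustrative sketch (not part of this driver): cookies are bitmap
 * region numbers shifted up by one so that 0 stays reserved, as the
 * comments above explain. A minimal model of the allocate/release
 * pairing, with a single unsigned long standing in for mem_bitmap and
 * assuming no more than BITS_PER_LONG blocks:
 */
static inline int sketch_cookie_alloc(unsigned long *bitmap, unsigned int blocks)
{
	unsigned int i;

	for (i = 0; i < blocks; i++) {
		if (!(*bitmap & (1UL << i))) {
			*bitmap |= 1UL << i;
			return i + 1;	/* cookie = region + 1 */
		}
	}
	return 0;			/* exhausted: 0 doubles as "invalid" */
}

static inline void sketch_cookie_release(unsigned long *bitmap, unsigned int cookie)
{
	if (cookie)			/* cookie 0 marks unaccounted frames */
		*bitmap &= ~(1UL << (cookie - 1));
}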
/* Called from any context */
static void carl9170_tx_release(struct kref *ref)
{
struct ar9170 *ar;
struct carl9170_tx_info *arinfo;
struct ieee80211_tx_info *txinfo;
struct sk_buff *skb;
arinfo = container_of(ref, struct carl9170_tx_info, ref);
txinfo = container_of((void *) arinfo, struct ieee80211_tx_info,
rate_driver_data);
skb = container_of((void *) txinfo, struct sk_buff, cb);
ar = arinfo->ar;
if (WARN_ON_ONCE(!ar))
return;
BUILD_BUG_ON(
offsetof(struct ieee80211_tx_info, status.ack_signal) != 20);
memset(&txinfo->status.ack_signal, 0,
sizeof(struct ieee80211_tx_info) -
offsetof(struct ieee80211_tx_info, status.ack_signal));
if (atomic_read(&ar->tx_total_queued))
ar->tx_schedule = true;
if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) {
if (!atomic_read(&ar->tx_ampdu_upload))
ar->tx_ampdu_schedule = true;
if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
struct _carl9170_tx_superframe *super;
super = (void *)skb->data;
txinfo->status.ampdu_len = super->s.rix;
txinfo->status.ampdu_ack_len = super->s.cnt;
} else if ((txinfo->flags & IEEE80211_TX_STAT_ACK) &&
!(txinfo->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
/*
* drop redundant tx_status reports:
*
* 1. ampdu_ack_len of the final tx_status does
* include the feedback of this particular frame.
*
* 2. tx_status_irqsafe only queues up to 128
* tx feedback reports and discards the rest.
*
* 3. minstrel_ht is picky, it only accepts
* reports of frames with the TX_STATUS_AMPDU flag.
*
* 4. mac80211 is not particularly interested in
* feedback either [CTL_REQ_TX_STATUS not set]
*/
ieee80211_free_txskb(ar->hw, skb);
return;
} else {
/*
* Either the frame transmission has failed or
* mac80211 requested tx status.
*/
}
}
skb_pull(skb, sizeof(struct _carl9170_tx_superframe));
ieee80211_tx_status_irqsafe(ar->hw, skb);
}
void carl9170_tx_get_skb(struct sk_buff *skb)
{
struct carl9170_tx_info *arinfo = (void *)
(IEEE80211_SKB_CB(skb))->rate_driver_data;
kref_get(&arinfo->ref);
}
int carl9170_tx_put_skb(struct sk_buff *skb)
{
struct carl9170_tx_info *arinfo = (void *)
(IEEE80211_SKB_CB(skb))->rate_driver_data;
return kref_put(&arinfo->ref, carl9170_tx_release);
}
/* Caller must hold the tid_info->lock & rcu_read_lock */
static void carl9170_tx_shift_bm(struct ar9170 *ar,
struct carl9170_sta_tid *tid_info, u16 seq)
{
u16 off;
off = SEQ_DIFF(seq, tid_info->bsn);
if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
return;
/*
* Sanity check. For each MPDU we set the bit in the bitmap and
* clear it once we receive the tx_status.
* But if the bit is already cleared then we've been bitten
* by a bug.
*/
WARN_ON_ONCE(!test_and_clear_bit(off, tid_info->bitmap));
off = SEQ_DIFF(tid_info->snx, tid_info->bsn);
if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
return;
if (!bitmap_empty(tid_info->bitmap, off))
off = find_first_bit(tid_info->bitmap, off);
tid_info->bsn += off;
tid_info->bsn &= 0x0fff;
bitmap_shift_right(tid_info->bitmap, tid_info->bitmap,
off, CARL9170_BAW_BITS);
}
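/*
 * Illustrative sketch (not part of this driver): 802.11 sequence
 * numbers are 12 bit, so SEQ_DIFF() is assumed to be the wrapping
 * distance (a - b) & 0xfff. The function above clears the bit of the
 * acked MPDU and then advances the window start (bsn) past every
 * leading MPDU that has already been acked.
 */
static inline u16 sketch_seq_diff(u16 a, u16 b)
{
	return (a - b) & 0x0fff;	/* modulo-4096 distance */
}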
static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
struct sk_buff *skb, struct ieee80211_tx_info *txinfo)
{
struct _carl9170_tx_superframe *super = (void *) skb->data;
struct ieee80211_hdr *hdr = (void *) super->frame_data;
struct ieee80211_sta *sta;
struct carl9170_sta_info *sta_info;
struct carl9170_sta_tid *tid_info;
u8 tid;
if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
txinfo->flags & IEEE80211_TX_CTL_INJECTED)
return;
rcu_read_lock();
sta = __carl9170_get_tx_sta(ar, skb);
if (unlikely(!sta))
goto out_rcu;
tid = get_tid_h(hdr);
sta_info = (void *) sta->drv_priv;
tid_info = rcu_dereference(sta_info->agg[tid]);
if (!tid_info)
goto out_rcu;
spin_lock_bh(&tid_info->lock);
if (likely(tid_info->state >= CARL9170_TID_STATE_IDLE))
carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr));
if (sta_info->stats[tid].clear) {
sta_info->stats[tid].clear = false;
sta_info->stats[tid].req = false;
sta_info->stats[tid].ampdu_len = 0;
sta_info->stats[tid].ampdu_ack_len = 0;
}
sta_info->stats[tid].ampdu_len++;
if (txinfo->status.rates[0].count == 1)
sta_info->stats[tid].ampdu_ack_len++;
if (!(txinfo->flags & IEEE80211_TX_STAT_ACK))
sta_info->stats[tid].req = true;
if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
super->s.rix = sta_info->stats[tid].ampdu_len;
super->s.cnt = sta_info->stats[tid].ampdu_ack_len;
txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
if (sta_info->stats[tid].req)
txinfo->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
sta_info->stats[tid].clear = true;
}
spin_unlock_bh(&tid_info->lock);
out_rcu:
rcu_read_unlock();
}
static void carl9170_tx_bar_status(struct ar9170 *ar, struct sk_buff *skb,
struct ieee80211_tx_info *tx_info)
{
struct _carl9170_tx_superframe *super = (void *) skb->data;
struct ieee80211_bar *bar = (void *) super->frame_data;
/*
* Unlike all other frames, the status report for BARs does
* not directly come from the hardware as it is incapable of
* matching a BA to a previously sent BAR.
* Instead the RX-path will scan for incoming BAs and set the
* IEEE80211_TX_STAT_ACK if it sees one that was likely
* caused by a BAR from us.
*/
if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
struct carl9170_bar_list_entry *entry;
int queue = skb_get_queue_mapping(skb);
rcu_read_lock();
list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
if (entry->skb == skb) {
spin_lock_bh(&ar->bar_list_lock[queue]);
list_del_rcu(&entry->list);
spin_unlock_bh(&ar->bar_list_lock[queue]);
kfree_rcu(entry, head);
goto out;
}
}
WARN(1, "bar not found in %d - ra:%pM ta:%pM c:%x ssn:%x\n",
queue, bar->ra, bar->ta, bar->control,
bar->start_seq_num);
out:
rcu_read_unlock();
}
}
void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
const bool success)
{
struct ieee80211_tx_info *txinfo;
carl9170_tx_accounting_free(ar, skb);
txinfo = IEEE80211_SKB_CB(skb);
carl9170_tx_bar_status(ar, skb, txinfo);
if (success)
txinfo->flags |= IEEE80211_TX_STAT_ACK;
else
ar->tx_ack_failures++;
if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
carl9170_tx_status_process_ampdu(ar, skb, txinfo);
carl9170_tx_ps_unblock(ar, skb);
carl9170_tx_put_skb(skb);
}
/* This function may be called from any context */
void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
{
struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
atomic_dec(&ar->tx_total_pending);
if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
atomic_dec(&ar->tx_ampdu_upload);
if (carl9170_tx_put_skb(skb))
tasklet_hi_schedule(&ar->usb_tasklet);
}
static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie,
struct sk_buff_head *queue)
{
struct sk_buff *skb;
spin_lock_bh(&queue->lock);
skb_queue_walk(queue, skb) {
struct _carl9170_tx_superframe *txc = (void *) skb->data;
if (txc->s.cookie != cookie)
continue;
__skb_unlink(skb, queue);
spin_unlock_bh(&queue->lock);
carl9170_release_dev_space(ar, skb);
return skb;
}
spin_unlock_bh(&queue->lock);
return NULL;
}
static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix,
unsigned int tries, struct ieee80211_tx_info *txinfo)
{
unsigned int i;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if (txinfo->status.rates[i].idx < 0)
break;
if (i == rix) {
txinfo->status.rates[i].count = tries;
i++;
break;
}
}
for (; i < IEEE80211_TX_MAX_RATES; i++) {
txinfo->status.rates[i].idx = -1;
txinfo->status.rates[i].count = 0;
}
}
static void carl9170_check_queue_stop_timeout(struct ar9170 *ar)
{
int i;
struct sk_buff *skb;
struct ieee80211_tx_info *txinfo;
struct carl9170_tx_info *arinfo;
bool restart = false;
for (i = 0; i < ar->hw->queues; i++) {
spin_lock_bh(&ar->tx_status[i].lock);
skb = skb_peek(&ar->tx_status[i]);
if (!skb)
goto next;
txinfo = IEEE80211_SKB_CB(skb);
arinfo = (void *) txinfo->rate_driver_data;
if (time_is_before_jiffies(arinfo->timeout +
msecs_to_jiffies(CARL9170_QUEUE_STUCK_TIMEOUT)))
restart = true;
next:
spin_unlock_bh(&ar->tx_status[i].lock);
}
if (restart) {
/*
* At least one queue has been stuck for long enough.
* Give the device a kick and hope it gets back to
* work.
*
* possible reasons may include:
* - frames got lost/corrupted (bad connection to the device)
* - stalled rx processing/usb controller hiccups
* - firmware errors/bugs
* - every bug you can think of.
* - all bugs you can't...
* - ...
*/
carl9170_restart(ar, CARL9170_RR_STUCK_TX);
}
}
static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
{
struct carl9170_sta_tid *iter;
struct sk_buff *skb;
struct ieee80211_tx_info *txinfo;
struct carl9170_tx_info *arinfo;
struct ieee80211_sta *sta;
rcu_read_lock();
list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
if (iter->state < CARL9170_TID_STATE_IDLE)
continue;
spin_lock_bh(&iter->lock);
skb = skb_peek(&iter->queue);
if (!skb)
goto unlock;
txinfo = IEEE80211_SKB_CB(skb);
arinfo = (void *)txinfo->rate_driver_data;
if (time_is_after_jiffies(arinfo->timeout +
msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
goto unlock;
sta = __carl9170_get_tx_sta(ar, skb);
if (WARN_ON(!sta))
goto unlock;
ieee80211_stop_tx_ba_session(sta, iter->tid);
unlock:
spin_unlock_bh(&iter->lock);
}
rcu_read_unlock();
}
void carl9170_tx_janitor(struct work_struct *work)
{
struct ar9170 *ar = container_of(work, struct ar9170,
tx_janitor.work);
if (!IS_STARTED(ar))
return;
ar->tx_janitor_last_run = jiffies;
carl9170_check_queue_stop_timeout(ar);
carl9170_tx_ampdu_timeout(ar);
if (!atomic_read(&ar->tx_total_queued))
return;
ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
msecs_to_jiffies(CARL9170_TX_TIMEOUT));
}
static void __carl9170_tx_process_status(struct ar9170 *ar,
const uint8_t cookie, const uint8_t info)
{
struct sk_buff *skb;
struct ieee80211_tx_info *txinfo;
unsigned int r, t, q;
bool success = true;
q = ar9170_qmap[info & CARL9170_TX_STATUS_QUEUE];
skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
if (!skb) {
/*
* We have lost the race to another thread.
*/
return;
}
txinfo = IEEE80211_SKB_CB(skb);
if (!(info & CARL9170_TX_STATUS_SUCCESS))
success = false;
r = (info & CARL9170_TX_STATUS_RIX) >> CARL9170_TX_STATUS_RIX_S;
t = (info & CARL9170_TX_STATUS_TRIES) >> CARL9170_TX_STATUS_TRIES_S;
carl9170_tx_fill_rateinfo(ar, r, t, txinfo);
carl9170_tx_status(ar, skb, success);
}
void carl9170_tx_process_status(struct ar9170 *ar,
const struct carl9170_rsp *cmd)
{
unsigned int i;
for (i = 0; i < cmd->hdr.ext; i++) {
if (WARN_ON(i > ((cmd->hdr.len / 2) + 1))) {
print_hex_dump_bytes("UU:", DUMP_PREFIX_NONE,
(void *) cmd, cmd->hdr.len + 4);
break;
}
__carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie,
cmd->_tx_status[i].info);
}
}
static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate,
unsigned int *phyrate, unsigned int *tpc, unsigned int *chains)
{
struct ieee80211_rate *rate = NULL;
u8 *txpower;
unsigned int idx;
idx = txrate->idx;
*tpc = 0;
*phyrate = 0;
if (txrate->flags & IEEE80211_TX_RC_MCS) {
if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
/* +1 dBm for HT40 */
*tpc += 2;
if (info->band == IEEE80211_BAND_2GHZ)
txpower = ar->power_2G_ht40;
else
txpower = ar->power_5G_ht40;
} else {
if (info->band == IEEE80211_BAND_2GHZ)
txpower = ar->power_2G_ht20;
else
txpower = ar->power_5G_ht20;
}
*phyrate = txrate->idx;
*tpc += txpower[idx & 7];
} else {
if (info->band == IEEE80211_BAND_2GHZ) {
if (idx < 4)
txpower = ar->power_2G_cck;
else
txpower = ar->power_2G_ofdm;
} else {
txpower = ar->power_5G_leg;
idx += 4;
}
rate = &__carl9170_ratetable[idx];
*tpc += txpower[(rate->hw_value & 0x30) >> 4];
*phyrate = rate->hw_value & 0xf;
}
if (ar->eeprom.tx_mask == 1) {
*chains = AR9170_TX_PHY_TXCHAIN_1;
} else {
if (!(txrate->flags & IEEE80211_TX_RC_MCS) &&
rate && rate->bitrate >= 360)
*chains = AR9170_TX_PHY_TXCHAIN_1;
else
*chains = AR9170_TX_PHY_TXCHAIN_2;
}
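/*
* Note: *tpc is in units of 0.5 dB (see the HT40 "+1 dBm" /
* "+= 2" pair above), so the dBm power_level limit is doubled
* for the comparison.
*/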
*tpc = min_t(unsigned int, *tpc, ar->hw->conf.power_level * 2);
}
static __le32 carl9170_tx_physet(struct ar9170 *ar,
struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate)
{
unsigned int power = 0, chains = 0, phyrate = 0;
__le32 tmp;
tmp = cpu_to_le32(0);
if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ <<
AR9170_TX_PHY_BW_S);
/* this works because 40 MHz is 2 and dup is 3 */
if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP <<
AR9170_TX_PHY_BW_S);
if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
if (txrate->flags & IEEE80211_TX_RC_MCS) {
SET_VAL(AR9170_TX_PHY_MCS, phyrate, txrate->idx);
/* heavy clip control */
tmp |= cpu_to_le32((txrate->idx & 0x7) <<
AR9170_TX_PHY_TX_HEAVY_CLIP_S);
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
/*
* green field preamble does not work.
*
* if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
* tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
*/
} else {
if (info->band == IEEE80211_BAND_2GHZ) {
if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M)
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK);
else
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
} else {
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
}
/*
* short preamble seems to be broken too.
*
* if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
* tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
*/
}
carl9170_tx_rate_tpc_chains(ar, info, txrate,
&phyrate, &power, &chains);
tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_MCS, phyrate));
tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TX_PWR, power));
tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TXCHAIN, chains));
return tmp;
}
static bool carl9170_tx_rts_check(struct ar9170 *ar,
struct ieee80211_tx_rate *rate,
bool ampdu, bool multi)
{
switch (ar->erp_mode) {
case CARL9170_ERP_AUTO:
if (ampdu)
break;
case CARL9170_ERP_MAC80211:
if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))
break;
case CARL9170_ERP_RTS:
if (likely(!multi))
return true;
default:
break;
}
return false;
}
static bool carl9170_tx_cts_check(struct ar9170 *ar,
struct ieee80211_tx_rate *rate)
{
switch (ar->erp_mode) {
case CARL9170_ERP_AUTO:
case CARL9170_ERP_MAC80211:
if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
break;
case CARL9170_ERP_CTS:
return true;
default:
break;
}
return false;
}
static int carl9170_tx_prepare(struct ar9170 *ar,
struct ieee80211_sta *sta,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
struct _carl9170_tx_superframe *txc;
struct carl9170_vif_info *cvif;
struct ieee80211_tx_info *info;
struct ieee80211_tx_rate *txrate;
struct carl9170_tx_info *arinfo;
unsigned int hw_queue;
int i;
__le16 mac_tmp;
u16 len;
bool ampdu, no_ack;
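/*
* Compile-time sanity checks: the driver's per-frame state must
* fit into mac80211's rate_driver_data scratch space, and the
* superframe descriptor layouts must match the lengths the
* firmware expects.
*/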
BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
CARL9170_TX_SUPERDESC_LEN);
BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
AR9170_TX_HWDESC_LEN);
BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);
BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
((CARL9170_TX_SUPER_MISC_VIF_ID >>
CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));
hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)];
hdr = (void *)skb->data;
info = IEEE80211_SKB_CB(skb);
len = skb->len;
/*
* Note: If the frame was sent through a monitor interface,
* the ieee80211_vif pointer can be NULL.
*/
if (likely(info->control.vif))
cvif = (void *) info->control.vif->drv_priv;
else
cvif = NULL;
txc = (void *)skb_push(skb, sizeof(*txc));
memset(txc, 0, sizeof(*txc));
SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, txc->s.misc, hw_queue);
if (likely(cvif))
SET_VAL(CARL9170_TX_SUPER_MISC_VIF_ID, txc->s.misc, cvif->id);
if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;
if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ;
if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;
mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
AR9170_TX_MAC_BACKOFF);
mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
AR9170_TX_MAC_QOS);
no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
if (unlikely(no_ack))
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
if (info->control.hw_key) {
len += info->control.hw_key->icv_len;
switch (info->control.hw_key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_RC4);
break;
case WLAN_CIPHER_SUITE_CCMP:
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_AES);
break;
default:
WARN_ON(1);
goto err_out;
}
}
ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
if (ampdu) {
unsigned int density, factor;
if (unlikely(!sta || !cvif))
goto err_out;
factor = min_t(unsigned int, 1u, sta->ht_cap.ampdu_factor);
density = sta->ht_cap.ampdu_density;
if (density) {
/*
* Watch out!
*
* Otus uses slightly different density values than
* those from the 802.11n spec.
*/
density = max_t(unsigned int, density + 1, 7u);
}
SET_VAL(CARL9170_TX_SUPER_AMPDU_DENSITY,
txc->s.ampdu_settings, density);
SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
txc->s.ampdu_settings, factor);
}
/*
* NOTE: For the first rate, the ERP & AMPDU flags are directly
* taken from mac_control. For all fallback rates, the firmware
* updates the mac_control flags from the rate info field.
*/
for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
__le32 phy_set;
txrate = &info->control.rates[i];
if (txrate->idx < 0)
break;
phy_set = carl9170_tx_physet(ar, info, txrate);
if (i == 0) {
/* first rate - part of the hw's frame header */
txc->f.phy_control = phy_set;
if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
else if (carl9170_tx_cts_check(ar, txrate))
mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
} else {
/* fallback rates are stored in the firmware's
* retry rate set array.
*/
txc->s.rr[i - 1] = phy_set;
}
SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
txrate->count);
if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
CARL9170_TX_SUPER_RI_ERP_PROT_S);
else if (carl9170_tx_cts_check(ar, txrate))
txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
CARL9170_TX_SUPER_RI_ERP_PROT_S);
if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
}
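/*
* Illustration (hypothetical rate set): if mac80211 hands us
* {MCS7 x 2 tries, MCS4 x 3 tries}, MCS7 becomes f.phy_control
* with ri[0] holding 2 tries, while MCS4 lands in the retry set
* as s.rr[0] with ri[1] holding 3 tries.
*/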
txc->s.len = cpu_to_le16(skb->len);
txc->f.length = cpu_to_le16(len + FCS_LEN);
txc->f.mac_control = mac_tmp;
arinfo = (void *)info->rate_driver_data;
arinfo->timeout = jiffies;
arinfo->ar = ar;
kref_init(&arinfo->ref);
return 0;
err_out:
skb_pull(skb, sizeof(*txc));
return -EINVAL;
}
static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb)
{
struct _carl9170_tx_superframe *super;
super = (void *) skb->data;
super->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_BA);
}
static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
{
struct _carl9170_tx_superframe *super;
int tmp;
super = (void *) skb->data;
tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_DENSITY) <<
CARL9170_TX_SUPER_AMPDU_DENSITY_S;
/*
* carl9170_tx_prepare has already filled in all the
* ampdu spacing & factor parameters.
* Now it's time to check whether the settings have to be
* updated by the firmware, or if everything is still the same.
*
* There's no sane way to handle different density values with
* this hardware, so we may as well just do the compare in the
* driver.
*/
if (tmp != ar->current_density) {
ar->current_density = tmp;
super->s.ampdu_settings |=
CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY;
}
tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_FACTOR) <<
CARL9170_TX_SUPER_AMPDU_FACTOR_S;
if (tmp != ar->current_factor) {
ar->current_factor = tmp;
super->s.ampdu_settings |=
CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR;
}
}
static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest,
struct sk_buff *_src)
{
struct _carl9170_tx_superframe *dest, *src;
dest = (void *) _dest->data;
src = (void *) _src->data;
/*
* The mac80211 rate control algorithm expects that all MPDUs in
* an AMPDU share the same tx vectors.
* This is not really obvious right now, because the hardware
* does the AMPDU setup according to its own rulebook.
* Our nicely assembled, strictly monotonically increasing MPDU
* chains will be broken up, mashed back together...
*/
return (dest->f.phy_control == src->f.phy_control);
}
static void carl9170_tx_ampdu(struct ar9170 *ar)
{
struct sk_buff_head agg;
struct carl9170_sta_tid *tid_info;
struct sk_buff *skb, *first;
unsigned int i = 0, done_ampdus = 0;
u16 seq, queue, tmpssn;
atomic_inc(&ar->tx_ampdu_scheduler);
ar->tx_ampdu_schedule = false;
if (atomic_read(&ar->tx_ampdu_upload))
return;
if (!ar->tx_ampdu_list_len)
return;
__skb_queue_head_init(&agg);
rcu_read_lock();
tid_info = rcu_dereference(ar->tx_ampdu_iter);
if (WARN_ON_ONCE(!tid_info)) {
rcu_read_unlock();
return;
}
retry:
list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) {
i++;
if (tid_info->state < CARL9170_TID_STATE_PROGRESS)
continue;
queue = TID_TO_WME_AC(tid_info->tid);
spin_lock_bh(&tid_info->lock);
if (tid_info->state != CARL9170_TID_STATE_XMIT)
goto processed;
tid_info->counter++;
first = skb_peek(&tid_info->queue);
tmpssn = carl9170_get_seq(first);
seq = tid_info->snx;
if (unlikely(tmpssn != seq)) {
tid_info->state = CARL9170_TID_STATE_IDLE;
goto processed;
}
while ((skb = skb_peek(&tid_info->queue))) {
/* strict 0, 1, ..., n - 1, n frame sequence order */
if (unlikely(carl9170_get_seq(skb) != seq))
break;
/* don't upload more than AMPDU FACTOR allows. */
if (unlikely(SEQ_DIFF(tid_info->snx, tid_info->bsn) >=
(tid_info->max - 1)))
break;
if (!carl9170_tx_rate_check(ar, skb, first))
break;
atomic_inc(&ar->tx_ampdu_upload);
tid_info->snx = seq = SEQ_NEXT(seq);
__skb_unlink(skb, &tid_info->queue);
__skb_queue_tail(&agg, skb);
if (skb_queue_len(&agg) >= CARL9170_NUM_TX_AGG_MAX)
break;
}
if (skb_queue_empty(&tid_info->queue) ||
carl9170_get_seq(skb_peek(&tid_info->queue)) !=
tid_info->snx) {
/*
* stop the TID if A-MPDU frames are still missing,
* or whenever the queue becomes empty.
*/
tid_info->state = CARL9170_TID_STATE_IDLE;
}
done_ampdus++;
processed:
spin_unlock_bh(&tid_info->lock);
if (skb_queue_empty(&agg))
continue;
/* apply ampdu spacing & factor settings */
carl9170_set_ampdu_params(ar, skb_peek(&agg));
/* set aggregation push bit */
carl9170_set_immba(ar, skb_peek_tail(&agg));
spin_lock_bh(&ar->tx_pending[queue].lock);
skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
spin_unlock_bh(&ar->tx_pending[queue].lock);
ar->tx_schedule = true;
}
if ((done_ampdus++ == 0) && (i++ == 0))
goto retry;
rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
rcu_read_unlock();
}
static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar,
struct sk_buff_head *queue)
{
struct sk_buff *skb;
struct ieee80211_tx_info *info;
struct carl9170_tx_info *arinfo;
BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
spin_lock_bh(&queue->lock);
skb = skb_peek(queue);
if (unlikely(!skb))
goto err_unlock;
if (carl9170_alloc_dev_space(ar, skb))
goto err_unlock;
__skb_unlink(skb, queue);
spin_unlock_bh(&queue->lock);
info = IEEE80211_SKB_CB(skb);
arinfo = (void *) info->rate_driver_data;
arinfo->timeout = jiffies;
return skb;
err_unlock:
spin_unlock_bh(&queue->lock);
return NULL;
}
void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
{
struct _carl9170_tx_superframe *super;
uint8_t q = 0;
ar->tx_dropped++;
super = (void *)skb->data;
SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q,
ar9170_qmap[carl9170_get_queue(ar, skb)]);
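/*
* Reuse the regular status path with a synthetic info byte that
* only carries the queue; since CARL9170_TX_STATUS_SUCCESS is not
* set, the frame is accounted and freed like a failed transmission.
*/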
__carl9170_tx_process_status(ar, super->s.cookie, q);
}
static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
{
struct ieee80211_sta *sta;
struct carl9170_sta_info *sta_info;
struct ieee80211_tx_info *tx_info;
rcu_read_lock();
sta = __carl9170_get_tx_sta(ar, skb);
if (!sta)
goto out_rcu;
sta_info = (void *) sta->drv_priv;
tx_info = IEEE80211_SKB_CB(skb);
if (unlikely(sta_info->sleeping) &&
!(tx_info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
IEEE80211_TX_CTL_CLEAR_PS_FILT))) {
rcu_read_unlock();
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
atomic_dec(&ar->tx_ampdu_upload);
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
carl9170_release_dev_space(ar, skb);
carl9170_tx_status(ar, skb, false);
return true;
}
out_rcu:
rcu_read_unlock();
return false;
}
static void carl9170_bar_check(struct ar9170 *ar, struct sk_buff *skb)
{
struct _carl9170_tx_superframe *super = (void *) skb->data;
struct ieee80211_bar *bar = (void *) super->frame_data;
if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
skb->len >= sizeof(struct ieee80211_bar)) {
struct carl9170_bar_list_entry *entry;
unsigned int queue = skb_get_queue_mapping(skb);
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!WARN_ON_ONCE(!entry)) {
entry->skb = skb;
spin_lock_bh(&ar->bar_list_lock[queue]);
list_add_tail_rcu(&entry->list, &ar->bar_list[queue]);
spin_unlock_bh(&ar->bar_list_lock[queue]);
}
}
}
static void carl9170_tx(struct ar9170 *ar)
{
struct sk_buff *skb;
unsigned int i, q;
bool schedule_garbagecollector = false;
ar->tx_schedule = false;
if (unlikely(!IS_STARTED(ar)))
return;
carl9170_usb_handle_tx_err(ar);
for (i = 0; i < ar->hw->queues; i++) {
while (!skb_queue_empty(&ar->tx_pending[i])) {
skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]);
if (unlikely(!skb))
break;
if (unlikely(carl9170_tx_ps_drop(ar, skb)))
continue;
carl9170_bar_check(ar, skb);
atomic_inc(&ar->tx_total_pending);
q = __carl9170_get_queue(ar, i);
/*
* NB: tx_status[i] vs. tx_status[q],
* TODO: Move into pick_skb or alloc_dev_space.
*/
skb_queue_tail(&ar->tx_status[q], skb);
/*
* increase ref count to "2".
* Ref counting is the easiest way to solve the
* race between the urb's completion routine:
* carl9170_tx_callback
* and wlan tx status functions:
* carl9170_tx_status/janitor.
*/
carl9170_tx_get_skb(skb);
carl9170_usb_tx(ar, skb);
schedule_garbagecollector = true;
}
}
if (!schedule_garbagecollector)
return;
ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
msecs_to_jiffies(CARL9170_TX_TIMEOUT));
}
static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
struct ieee80211_sta *sta, struct sk_buff *skb,
struct ieee80211_tx_info *txinfo)
{
struct carl9170_sta_info *sta_info;
struct carl9170_sta_tid *agg;
struct sk_buff *iter;
u16 tid, seq, qseq, off;
bool run = false;
tid = carl9170_get_tid(skb);
seq = carl9170_get_seq(skb);
sta_info = (void *) sta->drv_priv;
rcu_read_lock();
agg = rcu_dereference(sta_info->agg[tid]);
if (!agg)
goto err_unlock_rcu;
spin_lock_bh(&agg->lock);
if (unlikely(agg->state < CARL9170_TID_STATE_IDLE))
goto err_unlock;
/* check if sequence is within the BA window */
if (unlikely(!BAW_WITHIN(agg->bsn, CARL9170_BAW_BITS, seq)))
goto err_unlock;
if (WARN_ON_ONCE(!BAW_WITHIN(agg->snx, CARL9170_BAW_BITS, seq)))
goto err_unlock;
off = SEQ_DIFF(seq, agg->bsn);
if (WARN_ON_ONCE(test_and_set_bit(off, agg->bitmap)))
goto err_unlock;
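/*
* Example (assuming the usual 12-bit 802.11 sequence arithmetic
* behind SEQ_DIFF): with bsn = 4090 and seq = 5 the difference
* wraps around to off = 11, i.e. bit 11 of the BA window bitmap.
*/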
if (likely(BAW_WITHIN(agg->hsn, CARL9170_BAW_BITS, seq))) {
__skb_queue_tail(&agg->queue, skb);
agg->hsn = seq;
goto queued;
}
skb_queue_reverse_walk(&agg->queue, iter) {
qseq = carl9170_get_seq(iter);
if (BAW_WITHIN(qseq, CARL9170_BAW_BITS, seq)) {
__skb_queue_after(&agg->queue, iter, skb);
goto queued;
}
}
__skb_queue_head(&agg->queue, skb);
queued:
if (unlikely(agg->state != CARL9170_TID_STATE_XMIT)) {
if (agg->snx == carl9170_get_seq(skb_peek(&agg->queue))) {
agg->state = CARL9170_TID_STATE_XMIT;
run = true;
}
}
spin_unlock_bh(&agg->lock);
rcu_read_unlock();
return run;
err_unlock:
spin_unlock_bh(&agg->lock);
err_unlock_rcu:
rcu_read_unlock();
txinfo->flags &= ~IEEE80211_TX_CTL_AMPDU;
carl9170_tx_status(ar, skb, false);
ar->tx_dropped++;
return false;
}
void carl9170_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct ar9170 *ar = hw->priv;
struct ieee80211_tx_info *info;
struct ieee80211_sta *sta = control->sta;
bool run;
if (unlikely(!IS_STARTED(ar)))
goto err_free;
info = IEEE80211_SKB_CB(skb);
if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
goto err_free;
carl9170_tx_accounting(ar, skb);
/*
* from now on, one has to use carl9170_tx_status to free
* all resources which are associated with the frame.
*/
if (sta) {
struct carl9170_sta_info *stai = (void *) sta->drv_priv;
atomic_inc(&stai->pending_frames);
}
if (info->flags & IEEE80211_TX_CTL_AMPDU) {
/* to static code analyzers and reviewers:
* mac80211 guarantees that a valid "sta"
* reference is present, if a frame is to
* be part of an ampdu. Hence any extra
* sta == NULL checks are redundant in this
* special case.
*/
run = carl9170_tx_ampdu_queue(ar, sta, skb, info);
if (run)
carl9170_tx_ampdu(ar);
} else {
unsigned int queue = skb_get_queue_mapping(skb);
skb_queue_tail(&ar->tx_pending[queue], skb);
}
carl9170_tx(ar);
return;
err_free:
ar->tx_dropped++;
ieee80211_free_txskb(ar->hw, skb);
}
void carl9170_tx_scheduler(struct ar9170 *ar)
{
if (ar->tx_ampdu_schedule)
carl9170_tx_ampdu(ar);
if (ar->tx_schedule)
carl9170_tx(ar);
}
/* caller has to take rcu_read_lock */
static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar)
{
struct carl9170_vif_info *cvif;
int i = 1;
/* The AR9170 hardware has no fancy beacon queue or some
* other scheduling mechanism. So, the driver has to make
* do by setting the two beacon timers (pretbtt and tbtt)
* once and then swapping the beacon address in the HW's
* register file each time the pretbtt fires.
*/
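/* ar->beacon_iter remembers where the previous search stopped;
* the do/while below lets the scan wrap around the vif list once,
* so every beaconing vif gets its turn in round-robin fashion.
*/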
cvif = rcu_dereference(ar->beacon_iter);
if (ar->vifs > 0 && cvif) {
do {
list_for_each_entry_continue_rcu(cvif, &ar->vif_list,
list) {
if (cvif->active && cvif->enable_beacon)
goto out;
}
} while (ar->beacon_enabled && i--);
}
out:
rcu_assign_pointer(ar->beacon_iter, cvif);
return cvif;
}
static bool carl9170_tx_beacon_physet(struct ar9170 *ar, struct sk_buff *skb,
u32 *ht1, u32 *plcp)
{
struct ieee80211_tx_info *txinfo;
struct ieee80211_tx_rate *rate;
unsigned int power, chains;
bool ht_rate;
txinfo = IEEE80211_SKB_CB(skb);
rate = &txinfo->control.rates[0];
ht_rate = !!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS);
carl9170_tx_rate_tpc_chains(ar, txinfo, rate, plcp, &power, &chains);
*ht1 = AR9170_MAC_BCN_HT1_TX_ANT0;
if (chains == AR9170_TX_PHY_TXCHAIN_2)
*ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1;
SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, *ht1, 7);
SET_VAL(AR9170_MAC_BCN_HT1_TPC, *ht1, power);
SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, *ht1, chains);
if (ht_rate) {
*ht1 |= AR9170_MAC_BCN_HT1_HT_EN;
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
*plcp |= AR9170_MAC_BCN_HT2_SGI;
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
*ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED;
*plcp |= AR9170_MAC_BCN_HT2_BW40;
} else if (rate->flags & IEEE80211_TX_RC_DUP_DATA) {
*ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP;
*plcp |= AR9170_MAC_BCN_HT2_BW40;
}
SET_VAL(AR9170_MAC_BCN_HT2_LEN, *plcp, skb->len + FCS_LEN);
} else {
if (*plcp <= AR9170_TX_PHY_RATE_CCK_11M)
*plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
else
*plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010;
}
return ht_rate;
}
int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
{
struct sk_buff *skb = NULL;
struct carl9170_vif_info *cvif;
__le32 *data, *old = NULL;
u32 word, ht1, plcp, off, addr, len;
int i = 0, err = 0;
bool ht_rate;
rcu_read_lock();
cvif = carl9170_pick_beaconing_vif(ar);
if (!cvif)
goto out_unlock;
skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
NULL, NULL);
if (!skb) {
err = -ENOMEM;
goto err_free;
}
spin_lock_bh(&ar->beacon_lock);
data = (__le32 *)skb->data;
if (cvif->beacon)
old = (__le32 *)cvif->beacon->data;
off = cvif->id * AR9170_MAC_BCN_LENGTH_MAX;
addr = ar->fw.beacon_addr + off;
len = roundup(skb->len + FCS_LEN, 4);
if ((off + len) > ar->fw.beacon_max_len) {
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy, "beacon does not "
"fit into device memory!\n");
}
err = -EINVAL;
goto err_unlock;
}
if (len > AR9170_MAC_BCN_LENGTH_MAX) {
if (net_ratelimit()) {
wiphy_err(ar->hw->wiphy, "no support for beacons "
"bigger than %d (yours:%d).\n",
AR9170_MAC_BCN_LENGTH_MAX, len);
}
err = -EMSGSIZE;
goto err_unlock;
}
ht_rate = carl9170_tx_beacon_physet(ar, skb, &ht1, &plcp);
carl9170_async_regwrite_begin(ar);
carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT1, ht1);
if (ht_rate)
carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT2, plcp);
else
carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp);
for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
/*
* XXX: Because the word count is rounded up, this may
* read up to 3 bytes beyond the end of the skb data!
*/
if (old && (data[i] == old[i]))
continue;
word = le32_to_cpu(data[i]);
carl9170_async_regwrite(addr + 4 * i, word);
}
carl9170_async_regwrite_finish();
dev_kfree_skb_any(cvif->beacon);
cvif->beacon = NULL;
err = carl9170_async_regwrite_result();
if (!err)
cvif->beacon = skb;
spin_unlock_bh(&ar->beacon_lock);
if (err)
goto err_free;
if (submit) {
err = carl9170_bcn_ctrl(ar, cvif->id,
CARL9170_BCN_CTRL_CAB_TRIGGER,
addr, skb->len + FCS_LEN);
if (err)
goto err_free;
}
out_unlock:
rcu_read_unlock();
return 0;
err_unlock:
spin_unlock_bh(&ar->beacon_lock);
err_free:
rcu_read_unlock();
dev_kfree_skb_any(skb);
return err;
}
| gpl-2.0 |
Neph81/cm-kernel_lge_iproj | net/batman-adv/routing.c | 2374 | 41876 | /*
* Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*
*/
#include "main.h"
#include "routing.h"
#include "send.h"
#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "unicast.h"
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
struct hashtable_t *hash = bat_priv->orig_hash;
struct hlist_node *node;
struct hlist_head *head;
struct orig_node *orig_node;
unsigned long *word;
int i;
size_t word_index;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
spin_lock_bh(&orig_node->ogm_cnt_lock);
word_index = hard_iface->if_num * NUM_WORDS;
word = &(orig_node->bcast_own[word_index]);
bit_get_packet(bat_priv, word, 1, 0);
orig_node->bcast_own_sum[hard_iface->if_num] =
bit_packet_count(word);
spin_unlock_bh(&orig_node->ogm_cnt_lock);
}
rcu_read_unlock();
}
}
static void update_TT(struct bat_priv *bat_priv, struct orig_node *orig_node,
unsigned char *tt_buff, int tt_buff_len)
{
if ((tt_buff_len != orig_node->tt_buff_len) ||
((tt_buff_len > 0) &&
(orig_node->tt_buff_len > 0) &&
(memcmp(orig_node->tt_buff, tt_buff, tt_buff_len) != 0))) {
if (orig_node->tt_buff_len > 0)
tt_global_del_orig(bat_priv, orig_node,
"originator changed tt");
if ((tt_buff_len > 0) && (tt_buff))
tt_global_add_orig(bat_priv, orig_node,
tt_buff, tt_buff_len);
}
}
static void update_route(struct bat_priv *bat_priv,
struct orig_node *orig_node,
struct neigh_node *neigh_node,
unsigned char *tt_buff, int tt_buff_len)
{
struct neigh_node *curr_router;
curr_router = orig_node_get_router(orig_node);
/* route deleted */
if ((curr_router) && (!neigh_node)) {
bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
orig_node->orig);
tt_global_del_orig(bat_priv, orig_node,
"originator timed out");
/* route added */
} else if ((!curr_router) && (neigh_node)) {
bat_dbg(DBG_ROUTES, bat_priv,
"Adding route towards: %pM (via %pM)\n",
orig_node->orig, neigh_node->addr);
tt_global_add_orig(bat_priv, orig_node,
tt_buff, tt_buff_len);
/* route changed */
} else {
bat_dbg(DBG_ROUTES, bat_priv,
"Changing route towards: %pM "
"(now via %pM - was via %pM)\n",
orig_node->orig, neigh_node->addr,
curr_router->addr);
}
if (curr_router)
neigh_node_free_ref(curr_router);
/* increase refcount of new best neighbor */
if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
neigh_node = NULL;
spin_lock_bh(&orig_node->neigh_list_lock);
rcu_assign_pointer(orig_node->router, neigh_node);
spin_unlock_bh(&orig_node->neigh_list_lock);
/* decrease refcount of previous best neighbor */
if (curr_router)
neigh_node_free_ref(curr_router);
}
void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
struct neigh_node *neigh_node, unsigned char *tt_buff,
int tt_buff_len)
{
struct neigh_node *router = NULL;
if (!orig_node)
goto out;
router = orig_node_get_router(orig_node);
if (router != neigh_node)
update_route(bat_priv, orig_node, neigh_node,
tt_buff, tt_buff_len);
/* may be just TT changed */
else
update_TT(bat_priv, orig_node, tt_buff, tt_buff_len);
out:
if (router)
neigh_node_free_ref(router);
}
static int is_bidirectional_neigh(struct orig_node *orig_node,
struct orig_node *orig_neigh_node,
struct batman_packet *batman_packet,
struct hard_iface *if_incoming)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
struct hlist_node *node;
unsigned char total_count;
uint8_t orig_eq_count, neigh_rq_count, tq_own;
int tq_asym_penalty, ret = 0;
/* find corresponding one hop neighbor */
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node, node,
&orig_neigh_node->neigh_list, list) {
if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
continue;
if (tmp_neigh_node->if_incoming != if_incoming)
continue;
if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
continue;
neigh_node = tmp_neigh_node;
break;
}
rcu_read_unlock();
if (!neigh_node)
neigh_node = create_neighbor(orig_neigh_node,
orig_neigh_node,
orig_neigh_node->orig,
if_incoming);
if (!neigh_node)
goto out;
/* if orig_node is direct neighbour update neigh_node last_valid */
if (orig_node == orig_neigh_node)
neigh_node->last_valid = jiffies;
orig_node->last_valid = jiffies;
/* find packet count of corresponding one hop neighbor */
spin_lock_bh(&orig_node->ogm_cnt_lock);
orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
neigh_rq_count = neigh_node->real_packet_count;
spin_unlock_bh(&orig_node->ogm_cnt_lock);
/* make sure we do not get a value bigger than 100 % */
total_count = (orig_eq_count > neigh_rq_count ?
neigh_rq_count : orig_eq_count);
/* if we have too few packets (too little data) we set tq_own to zero */
/* if we receive too few packets it is not considered bidirectional */
if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
(neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
tq_own = 0;
else
/* neigh_node->real_packet_count is never zero as we
* only purge old information when getting new
* information */
tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
/*
* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE. This
* affects nearly-symmetric links only a little, but
* punishes asymmetric links more. This will give a value
* between 0 and TQ_MAX_VALUE.
*/
tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
(TQ_LOCAL_WINDOW_SIZE *
TQ_LOCAL_WINDOW_SIZE *
TQ_LOCAL_WINDOW_SIZE);
batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
(TQ_MAX_VALUE * TQ_MAX_VALUE));
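/*
* Worked example (assuming TQ_MAX_VALUE = 255 and
* TQ_LOCAL_WINDOW_SIZE = 64): with total_count = 48 and
* neigh_rq_count = 48, tq_own = 255. The penalty term then is
* 255 - 255 * (16^3 / 64^3) ~= 251, so a mildly asymmetric link
* loses only a few points, while neigh_rq_count = 16 would give
* 255 - 255 * (48^3 / 64^3) ~= 147, cutting the resulting tq
* almost in half.
*/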
bat_dbg(DBG_BATMAN, bat_priv,
"bidirectional: "
"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
"total tq: %3i\n",
orig_node->orig, orig_neigh_node->orig, total_count,
neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);
/* if link has the minimum required transmission quality
* consider it bidirectional */
if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
ret = 1;
out:
if (neigh_node)
neigh_node_free_ref(neigh_node);
return ret;
}
/* caller must hold the neigh_list_lock */
void bonding_candidate_del(struct orig_node *orig_node,
struct neigh_node *neigh_node)
{
/* this neighbor is not part of our candidate list */
if (list_empty(&neigh_node->bonding_list))
goto out;
list_del_rcu(&neigh_node->bonding_list);
INIT_LIST_HEAD(&neigh_node->bonding_list);
neigh_node_free_ref(neigh_node);
atomic_dec(&orig_node->bond_candidates);
out:
return;
}
static void bonding_candidate_add(struct orig_node *orig_node,
struct neigh_node *neigh_node)
{
struct hlist_node *node;
struct neigh_node *tmp_neigh_node, *router = NULL;
uint8_t interference_candidate = 0;
spin_lock_bh(&orig_node->neigh_list_lock);
/* only consider if it has the same primary address ... */
if (!compare_eth(orig_node->orig,
neigh_node->orig_node->primary_addr))
goto candidate_del;
router = orig_node_get_router(orig_node);
if (!router)
goto candidate_del;
/* ... and is good enough to be considered */
if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
goto candidate_del;
/**
* check if we have another candidate with the same mac address or
* interface. If we do, we won't select this candidate because of
* possible interference.
*/
hlist_for_each_entry_rcu(tmp_neigh_node, node,
&orig_node->neigh_list, list) {
if (tmp_neigh_node == neigh_node)
continue;
/* we only care if the other candidate is even
* considered as candidate. */
if (list_empty(&tmp_neigh_node->bonding_list))
continue;
if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
(compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
interference_candidate = 1;
break;
}
}
/* don't care further if it is an interference candidate */
if (interference_candidate)
goto candidate_del;
/* this neighbor already is part of our candidate list */
if (!list_empty(&neigh_node->bonding_list))
goto out;
if (!atomic_inc_not_zero(&neigh_node->refcount))
goto out;
list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
atomic_inc(&orig_node->bond_candidates);
goto out;
candidate_del:
bonding_candidate_del(orig_node, neigh_node);
out:
spin_unlock_bh(&orig_node->neigh_list_lock);
if (router)
neigh_node_free_ref(router);
}
/* copy primary address for bonding */
static void bonding_save_primary(struct orig_node *orig_node,
struct orig_node *orig_neigh_node,
struct batman_packet *batman_packet)
{
if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
return;
memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}
static void update_orig(struct bat_priv *bat_priv,
struct orig_node *orig_node,
struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
struct hard_iface *if_incoming,
unsigned char *tt_buff, int tt_buff_len,
char is_duplicate)
{
struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
struct neigh_node *router = NULL;
struct orig_node *orig_node_tmp;
struct hlist_node *node;
int tmp_tt_buff_len;
uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
"Searching and updating originator entry of received packet\n");
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node, node,
&orig_node->neigh_list, list) {
if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
(tmp_neigh_node->if_incoming == if_incoming) &&
atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
if (neigh_node)
neigh_node_free_ref(neigh_node);
neigh_node = tmp_neigh_node;
continue;
}
if (is_duplicate)
continue;
spin_lock_bh(&tmp_neigh_node->tq_lock);
ring_buffer_set(tmp_neigh_node->tq_recv,
&tmp_neigh_node->tq_index, 0);
tmp_neigh_node->tq_avg =
ring_buffer_avg(tmp_neigh_node->tq_recv);
spin_unlock_bh(&tmp_neigh_node->tq_lock);
}
if (!neigh_node) {
struct orig_node *orig_tmp;
orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
if (!orig_tmp)
goto unlock;
neigh_node = create_neighbor(orig_node, orig_tmp,
ethhdr->h_source, if_incoming);
orig_node_free_ref(orig_tmp);
if (!neigh_node)
goto unlock;
} else
bat_dbg(DBG_BATMAN, bat_priv,
"Updating existing last-hop neighbor of originator\n");
rcu_read_unlock();
orig_node->flags = batman_packet->flags;
neigh_node->last_valid = jiffies;
spin_lock_bh(&neigh_node->tq_lock);
ring_buffer_set(neigh_node->tq_recv,
&neigh_node->tq_index,
batman_packet->tq);
neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
spin_unlock_bh(&neigh_node->tq_lock);
if (!is_duplicate) {
orig_node->last_ttl = batman_packet->ttl;
neigh_node->last_ttl = batman_packet->ttl;
}
bonding_candidate_add(orig_node, neigh_node);
tmp_tt_buff_len = (tt_buff_len > batman_packet->num_tt * ETH_ALEN ?
batman_packet->num_tt * ETH_ALEN : tt_buff_len);
/* if this neighbor already is our next hop there is nothing
* to change */
router = orig_node_get_router(orig_node);
if (router == neigh_node)
goto update_tt;
/* if this neighbor does not offer a better TQ we won't consider it */
if (router && (router->tq_avg > neigh_node->tq_avg))
goto update_tt;
/* if the TQ is the same and the link is not more symmetric we
* won't consider it either */
if (router && (neigh_node->tq_avg == router->tq_avg)) {
orig_node_tmp = router->orig_node;
spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
bcast_own_sum_orig =
orig_node_tmp->bcast_own_sum[if_incoming->if_num];
spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
orig_node_tmp = neigh_node->orig_node;
spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
bcast_own_sum_neigh =
orig_node_tmp->bcast_own_sum[if_incoming->if_num];
spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
if (bcast_own_sum_orig >= bcast_own_sum_neigh)
goto update_tt;
}
update_routes(bat_priv, orig_node, neigh_node,
tt_buff, tmp_tt_buff_len);
goto update_gw;
update_tt:
update_routes(bat_priv, orig_node, router,
tt_buff, tmp_tt_buff_len);
update_gw:
if (orig_node->gw_flags != batman_packet->gw_flags)
gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
orig_node->gw_flags = batman_packet->gw_flags;
/* restart gateway selection if fast or late switching was enabled */
if ((orig_node->gw_flags) &&
(atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
(atomic_read(&bat_priv->gw_sel_class) > 2))
gw_check_election(bat_priv, orig_node);
goto out;
unlock:
rcu_read_unlock();
out:
if (neigh_node)
neigh_node_free_ref(neigh_node);
if (router)
neigh_node_free_ref(router);
}
/* checks whether the host restarted and is in the protection time.
* returns:
* 0 if the packet is to be accepted
* 1 if the packet is to be ignored.
*/
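/* Example: when a neighbour reboots, its sequence number restarts
* near zero, so the first OGM received afterwards produces a large
* negative seq_num_diff. That first packet (re)starts the
* protection window (and is accepted, resetting the seqno state),
* while further out-of-range packets are dropped until the window
* expires.
*/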
static int window_protected(struct bat_priv *bat_priv,
int32_t seq_num_diff,
unsigned long *last_reset)
{
if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
if (time_after(jiffies, *last_reset +
msecs_to_jiffies(RESET_PROTECTION_MS))) {
*last_reset = jiffies;
bat_dbg(DBG_BATMAN, bat_priv,
"old packet received, start protection\n");
return 0;
} else
return 1;
}
return 0;
}
/* processes a batman packet for all interfaces, adjusts the sequence number and
* finds out whether it is a duplicate.
* returns:
* 1 the packet is a duplicate
* 0 the packet has not yet been received
* -1 the packet is old and has been received while the seqno window
* was protected. Caller should drop it.
*/
static char count_real_packets(struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
struct hard_iface *if_incoming)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct orig_node *orig_node;
struct neigh_node *tmp_neigh_node;
struct hlist_node *node;
char is_duplicate = 0;
int32_t seq_diff;
int need_update = 0;
int set_mark, ret = -1;
orig_node = get_orig_node(bat_priv, batman_packet->orig);
if (!orig_node)
return 0;
spin_lock_bh(&orig_node->ogm_cnt_lock);
seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
/* signal to the caller that the packet is to be dropped. */
if (window_protected(bat_priv, seq_diff,
&orig_node->batman_seqno_reset))
goto out;
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node, node,
&orig_node->neigh_list, list) {
is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
orig_node->last_real_seqno,
batman_packet->seqno);
if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
(tmp_neigh_node->if_incoming == if_incoming))
set_mark = 1;
else
set_mark = 0;
/* if the window moved, set the update flag. */
need_update |= bit_get_packet(bat_priv,
tmp_neigh_node->real_bits,
seq_diff, set_mark);
tmp_neigh_node->real_packet_count =
bit_packet_count(tmp_neigh_node->real_bits);
}
rcu_read_unlock();
if (need_update) {
bat_dbg(DBG_BATMAN, bat_priv,
"updating last_seqno: old %d, new %d\n",
orig_node->last_real_seqno, batman_packet->seqno);
orig_node->last_real_seqno = batman_packet->seqno;
}
ret = is_duplicate;
out:
spin_unlock_bh(&orig_node->ogm_cnt_lock);
orig_node_free_ref(orig_node);
return ret;
}
void receive_bat_packet(struct ethhdr *ethhdr,
struct batman_packet *batman_packet,
unsigned char *tt_buff, int tt_buff_len,
struct hard_iface *if_incoming)
{
struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
struct hard_iface *hard_iface;
struct orig_node *orig_neigh_node, *orig_node;
struct neigh_node *router = NULL, *router_router = NULL;
struct neigh_node *orig_neigh_router = NULL;
char has_directlink_flag;
char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
char is_duplicate;
uint32_t if_incoming_seqno;
/* Silently drop when the batman packet is actually not a
* correct packet.
*
* This might happen if a packet is padded (e.g. Ethernet has a
* minimum frame length of 64 bytes) and the aggregation interprets
* it as an additional length.
*
* TODO: A more sane solution would be to have a bit in the
* batman_packet to detect whether the packet is the last
* packet in an aggregation. Here we expect that the padding
* is always zero (or not 0x01)
*/
if (batman_packet->packet_type != BAT_PACKET)
return;
/* could be changed by schedule_own_packet() */
if_incoming_seqno = atomic_read(&if_incoming->seqno);
has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
is_single_hop_neigh = (compare_eth(ethhdr->h_source,
batman_packet->orig) ? 1 : 0);
bat_dbg(DBG_BATMAN, bat_priv,
"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
"TTL %d, V %d, IDF %d)\n",
ethhdr->h_source, if_incoming->net_dev->name,
if_incoming->net_dev->dev_addr, batman_packet->orig,
batman_packet->prev_sender, batman_packet->seqno,
batman_packet->tq, batman_packet->ttl, batman_packet->version,
has_directlink_flag);
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
if (hard_iface->if_status != IF_ACTIVE)
continue;
if (hard_iface->soft_iface != if_incoming->soft_iface)
continue;
if (compare_eth(ethhdr->h_source,
hard_iface->net_dev->dev_addr))
is_my_addr = 1;
if (compare_eth(batman_packet->orig,
hard_iface->net_dev->dev_addr))
is_my_orig = 1;
if (compare_eth(batman_packet->prev_sender,
hard_iface->net_dev->dev_addr))
is_my_oldorig = 1;
if (compare_eth(ethhdr->h_source, broadcast_addr))
is_broadcast = 1;
}
rcu_read_unlock();
if (batman_packet->version != COMPAT_VERSION) {
bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: incompatible batman version (%i)\n",
batman_packet->version);
return;
}
if (is_my_addr) {
bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: received my own broadcast (sender: %pM"
")\n",
ethhdr->h_source);
return;
}
if (is_broadcast) {
bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
"ignoring all packets with broadcast source addr (sender: %pM"
")\n", ethhdr->h_source);
return;
}
if (is_my_orig) {
unsigned long *word;
int offset;
orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
if (!orig_neigh_node)
return;
/* neighbor has to indicate direct link and it has to
* come via the corresponding interface */
/* if received seqno equals last sent seqno save new
* seqno for bidirectional check */
if (has_directlink_flag &&
compare_eth(if_incoming->net_dev->dev_addr,
batman_packet->orig) &&
(batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
offset = if_incoming->if_num * NUM_WORDS;
spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
word = &(orig_neigh_node->bcast_own[offset]);
bit_mark(word, 0);
orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
bit_packet_count(word);
spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
}
bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
"originator packet from myself (via neighbor)\n");
orig_node_free_ref(orig_neigh_node);
return;
}
if (is_my_oldorig) {
bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: ignoring all rebroadcast echos (sender: "
"%pM)\n", ethhdr->h_source);
return;
}
orig_node = get_orig_node(bat_priv, batman_packet->orig);
if (!orig_node)
return;
is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
if (is_duplicate == -1) {
bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: packet within seqno protection time "
"(sender: %pM)\n", ethhdr->h_source);
goto out;
}
if (batman_packet->tq == 0) {
bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: originator packet with tq equal 0\n");
goto out;
}
router = orig_node_get_router(orig_node);
if (router)
router_router = orig_node_get_router(router->orig_node);
/* avoid temporary routing loops */
if (router && router_router &&
(compare_eth(router->addr, batman_packet->prev_sender)) &&
!(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
(compare_eth(router->addr, router_router->addr))) {
bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: ignoring all rebroadcast packets that "
"may make me loop (sender: %pM)\n", ethhdr->h_source);
goto out;
}
/* if sender is a direct neighbor the sender mac equals
* originator mac */
orig_neigh_node = (is_single_hop_neigh ?
orig_node :
get_orig_node(bat_priv, ethhdr->h_source));
if (!orig_neigh_node)
goto out;
orig_neigh_router = orig_node_get_router(orig_neigh_node);
/* drop packet if sender is not a direct neighbor and if we
* don't route towards it */
if (!is_single_hop_neigh && (!orig_neigh_router)) {
bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: OGM via unknown neighbor!\n");
goto out_neigh;
}
is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
batman_packet, if_incoming);
bonding_save_primary(orig_node, orig_neigh_node, batman_packet);
/* update ranking if it is not a duplicate or has the same
* seqno and similar ttl as the non-duplicate */
if (is_bidirectional &&
(!is_duplicate ||
((orig_node->last_real_seqno == batman_packet->seqno) &&
(orig_node->last_ttl - 3 <= batman_packet->ttl))))
update_orig(bat_priv, orig_node, ethhdr, batman_packet,
if_incoming, tt_buff, tt_buff_len, is_duplicate);
/* is single hop (direct) neighbor */
if (is_single_hop_neigh) {
/* mark direct link on incoming interface */
schedule_forward_packet(orig_node, ethhdr, batman_packet,
1, tt_buff_len, if_incoming);
bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
"rebroadcast neighbor packet with direct link flag\n");
goto out_neigh;
}
/* multihop originator */
if (!is_bidirectional) {
bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: not received via bidirectional link\n");
goto out_neigh;
}
if (is_duplicate) {
bat_dbg(DBG_BATMAN, bat_priv,
"Drop packet: duplicate packet received\n");
goto out_neigh;
}
bat_dbg(DBG_BATMAN, bat_priv,
"Forwarding packet: rebroadcast originator packet\n");
schedule_forward_packet(orig_node, ethhdr, batman_packet,
0, tt_buff_len, if_incoming);
out_neigh:
if ((orig_neigh_node) && (!is_single_hop_neigh))
orig_node_free_ref(orig_neigh_node);
out:
if (router)
neigh_node_free_ref(router);
if (router_router)
neigh_node_free_ref(router_router);
if (orig_neigh_router)
neigh_node_free_ref(orig_neigh_router);
orig_node_free_ref(orig_node);
}
int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
{
struct ethhdr *ethhdr;
/* drop the packet if it does not have the necessary minimum size */
if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
return NET_RX_DROP;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* packet with broadcast indication but unicast recipient */
if (!is_broadcast_ether_addr(ethhdr->h_dest))
return NET_RX_DROP;
/* packet with broadcast sender address */
if (is_broadcast_ether_addr(ethhdr->h_source))
return NET_RX_DROP;
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, 0) < 0)
return NET_RX_DROP;
/* keep skb linear */
if (skb_linearize(skb) < 0)
return NET_RX_DROP;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
receive_aggr_bat_packet(ethhdr,
skb->data,
skb_headlen(skb),
hard_iface);
kfree_skb(skb);
return NET_RX_SUCCESS;
}
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
struct sk_buff *skb, size_t icmp_len)
{
struct hard_iface *primary_if = NULL;
struct orig_node *orig_node = NULL;
struct neigh_node *router = NULL;
struct icmp_packet_rr *icmp_packet;
int ret = NET_RX_DROP;
icmp_packet = (struct icmp_packet_rr *)skb->data;
/* add data to device queue */
if (icmp_packet->msg_type != ECHO_REQUEST) {
bat_socket_receive_packet(icmp_packet, icmp_len);
goto out;
}
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* answer echo request (ping) */
/* get routing information */
orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
if (!orig_node)
goto out;
router = orig_node_get_router(orig_node);
if (!router)
goto out;
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
goto out;
icmp_packet = (struct icmp_packet_rr *)skb->data;
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
icmp_packet->msg_type = ECHO_REPLY;
icmp_packet->ttl = TTL;
send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
out:
if (primary_if)
hardif_free_ref(primary_if);
if (router)
neigh_node_free_ref(router);
if (orig_node)
orig_node_free_ref(orig_node);
return ret;
}
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
struct sk_buff *skb)
{
struct hard_iface *primary_if = NULL;
struct orig_node *orig_node = NULL;
struct neigh_node *router = NULL;
struct icmp_packet *icmp_packet;
int ret = NET_RX_DROP;
icmp_packet = (struct icmp_packet *)skb->data;
/* send TTL exceeded if packet is an echo request (traceroute) */
if (icmp_packet->msg_type != ECHO_REQUEST) {
pr_debug("Warning - can't forward icmp packet from %pM to "
"%pM: ttl exceeded\n", icmp_packet->orig,
icmp_packet->dst);
goto out;
}
primary_if = primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* get routing information */
orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
if (!orig_node)
goto out;
router = orig_node_get_router(orig_node);
if (!router)
goto out;
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
goto out;
icmp_packet = (struct icmp_packet *)skb->data;
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
icmp_packet->msg_type = TTL_EXCEEDED;
icmp_packet->ttl = TTL;
send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
out:
if (primary_if)
hardif_free_ref(primary_if);
if (router)
neigh_node_free_ref(router);
if (orig_node)
orig_node_free_ref(orig_node);
return ret;
}
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct icmp_packet_rr *icmp_packet;
struct ethhdr *ethhdr;
struct orig_node *orig_node = NULL;
struct neigh_node *router = NULL;
int hdr_size = sizeof(struct icmp_packet);
int ret = NET_RX_DROP;
/**
* we truncate all incoming icmp packets if they don't match our size
*/
if (skb->len >= sizeof(struct icmp_packet_rr))
hdr_size = sizeof(struct icmp_packet_rr);
/* drop the packet if it does not have the necessary minimum size */
if (unlikely(!pskb_may_pull(skb, hdr_size)))
goto out;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* packet with unicast indication but broadcast recipient */
if (is_broadcast_ether_addr(ethhdr->h_dest))
goto out;
/* packet with broadcast sender address */
if (is_broadcast_ether_addr(ethhdr->h_source))
goto out;
/* not for me */
if (!is_my_mac(ethhdr->h_dest))
goto out;
icmp_packet = (struct icmp_packet_rr *)skb->data;
/* add record route information if not full */
if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
(icmp_packet->rr_cur < BAT_RR_LEN)) {
memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
ethhdr->h_dest, ETH_ALEN);
icmp_packet->rr_cur++;
}
/* packet for me */
if (is_my_mac(icmp_packet->dst))
return recv_my_icmp_packet(bat_priv, skb, hdr_size);
/* TTL exceeded */
if (icmp_packet->ttl < 2)
return recv_icmp_ttl_exceeded(bat_priv, skb);
/* get routing information */
orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
if (!orig_node)
goto out;
router = orig_node_get_router(orig_node);
if (!router)
goto out;
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
goto out;
icmp_packet = (struct icmp_packet_rr *)skb->data;
/* decrement ttl */
icmp_packet->ttl--;
/* route it */
send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
out:
if (router)
neigh_node_free_ref(router);
if (orig_node)
orig_node_free_ref(orig_node);
return ret;
}
/* In the bonding case, send the packets in a round
* robin fashion over the remaining interfaces.
*
* This method rotates the bonding list and increases the
* returned router's refcount. */
static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
struct hard_iface *recv_if)
{
struct neigh_node *tmp_neigh_node;
struct neigh_node *router = NULL, *first_candidate = NULL;
rcu_read_lock();
list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
bonding_list) {
if (!first_candidate)
first_candidate = tmp_neigh_node;
/* recv_if == NULL on the first node. */
if (tmp_neigh_node->if_incoming == recv_if)
continue;
if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
continue;
router = tmp_neigh_node;
break;
}
/* use the first candidate if nothing was found. */
if (!router && first_candidate &&
atomic_inc_not_zero(&first_candidate->refcount))
router = first_candidate;
if (!router)
goto out;
/* selected should point to the next element
* after the current router */
spin_lock_bh(&primary_orig->neigh_list_lock);
/* this is a list_move(), which unfortunately
* does not exist as an rcu version */
list_del_rcu(&primary_orig->bond_list);
list_add_rcu(&primary_orig->bond_list,
&router->bonding_list);
spin_unlock_bh(&primary_orig->neigh_list_lock);
out:
rcu_read_unlock();
return router;
}
/* Interface Alternating: Use the best of the
* remaining candidates which are not using
* this interface.
*
* Increases the returned router's refcount */
static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
struct hard_iface *recv_if)
{
struct neigh_node *tmp_neigh_node;
struct neigh_node *router = NULL, *first_candidate = NULL;
rcu_read_lock();
list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
bonding_list) {
if (!first_candidate)
first_candidate = tmp_neigh_node;
/* recv_if == NULL on the first node. */
if (tmp_neigh_node->if_incoming == recv_if)
continue;
if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
continue;
/* if we don't have a router yet
* or this one is better, choose it. */
if ((!router) ||
(tmp_neigh_node->tq_avg > router->tq_avg)) {
/* decrement refcount of
* previously selected router */
if (router)
neigh_node_free_ref(router);
router = tmp_neigh_node;
atomic_inc_not_zero(&router->refcount);
}
neigh_node_free_ref(tmp_neigh_node);
}
/* use the first candidate if nothing was found. */
if (!router && first_candidate &&
atomic_inc_not_zero(&first_candidate->refcount))
router = first_candidate;
rcu_read_unlock();
return router;
}
/* find a suitable router for this originator, and use
* bonding if possible. Increases the found neighbor's
* refcount. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
struct orig_node *orig_node,
struct hard_iface *recv_if)
{
struct orig_node *primary_orig_node;
struct orig_node *router_orig;
struct neigh_node *router;
static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
int bonding_enabled;
if (!orig_node)
return NULL;
router = orig_node_get_router(orig_node);
if (!router)
goto err;
/* without bonding, the first node should
* always choose the default router. */
bonding_enabled = atomic_read(&bat_priv->bonding);
rcu_read_lock();
/* select default router to output */
router_orig = router->orig_node;
if (!router_orig)
goto err_unlock;
if ((!recv_if) && (!bonding_enabled))
goto return_router;
/* if we have something in the primary_addr, we can search
* for a potential bonding candidate. */
if (compare_eth(router_orig->primary_addr, zero_mac))
goto return_router;
/* find the orig_node which has the primary interface. might
* even be the same as our router_orig in many cases */
if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
primary_orig_node = router_orig;
} else {
primary_orig_node = orig_hash_find(bat_priv,
router_orig->primary_addr);
if (!primary_orig_node)
goto return_router;
orig_node_free_ref(primary_orig_node);
}
/* with fewer than 2 candidates, we can't do any
* bonding and prefer the original router. */
if (atomic_read(&primary_orig_node->bond_candidates) < 2)
goto return_router;
/* all intermediate nodes should choose a candidate which
* is not on the interface where the packet came
* in. */
neigh_node_free_ref(router);
if (bonding_enabled)
router = find_bond_router(primary_orig_node, recv_if);
else
router = find_ifalter_router(primary_orig_node, recv_if);
return_router:
rcu_read_unlock();
return router;
err_unlock:
rcu_read_unlock();
err:
if (router)
neigh_node_free_ref(router);
return NULL;
}
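/* sanity checks for an incoming unicast packet: it must have at
* least the expected header, must not carry broadcast source or
* destination addresses and must be addressed to one of our
* interfaces. Returns 0 if it may be processed, -1 to drop it. */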
static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
struct ethhdr *ethhdr;
/* drop the packet if it does not have the necessary minimum size */
if (unlikely(!pskb_may_pull(skb, hdr_size)))
return -1;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* packet with unicast indication but broadcast recipient */
if (is_broadcast_ether_addr(ethhdr->h_dest))
return -1;
/* packet with broadcast sender address */
if (is_broadcast_ether_addr(ethhdr->h_source))
return -1;
/* not for me */
if (!is_my_mac(ethhdr->h_dest))
return -1;
return 0;
}
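/* forward a unicast packet one hop towards its destination:
* check the TTL, pick the next hop via find_router(), fragment
* or merge fragments as the outgoing MTU requires, then
* decrement the TTL and transmit. */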
int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct orig_node *orig_node = NULL;
struct neigh_node *neigh_node = NULL;
struct unicast_packet *unicast_packet;
struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
int ret = NET_RX_DROP;
struct sk_buff *new_skb;
unicast_packet = (struct unicast_packet *)skb->data;
/* TTL exceeded */
if (unicast_packet->ttl < 2) {
pr_debug("Warning - can't forward unicast packet from %pM to "
"%pM: ttl exceeded\n", ethhdr->h_source,
unicast_packet->dest);
goto out;
}
/* get routing information */
orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
if (!orig_node)
goto out;
/* find_router() increases the neigh_node's refcount if one is found. */
neigh_node = find_router(bat_priv, orig_node, recv_if);
if (!neigh_node)
goto out;
/* create a copy of the skb, if needed, to modify it. */
if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
goto out;
unicast_packet = (struct unicast_packet *)skb->data;
if (unicast_packet->packet_type == BAT_UNICAST &&
atomic_read(&bat_priv->fragmentation) &&
skb->len > neigh_node->if_incoming->net_dev->mtu) {
ret = frag_send_skb(skb, bat_priv,
neigh_node->if_incoming, neigh_node->addr);
goto out;
}
if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {
ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
if (ret == NET_RX_DROP)
goto out;
/* packet was buffered for late merge */
if (!new_skb) {
ret = NET_RX_SUCCESS;
goto out;
}
skb = new_skb;
unicast_packet = (struct unicast_packet *)skb->data;
}
/* decrement ttl */
unicast_packet->ttl--;
/* route it */
send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = NET_RX_SUCCESS;
out:
if (neigh_node)
neigh_node_free_ref(neigh_node);
if (orig_node)
orig_node_free_ref(orig_node);
return ret;
}
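/* receive handler for unicast packets: deliver locally if we
* are the destination, otherwise hand the packet over to
* route_unicast_packet(). */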
int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
struct unicast_packet *unicast_packet;
int hdr_size = sizeof(struct unicast_packet);
if (check_unicast_packet(skb, hdr_size) < 0)
return NET_RX_DROP;
unicast_packet = (struct unicast_packet *)skb->data;
/* packet for me */
if (is_my_mac(unicast_packet->dest)) {
interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
return NET_RX_SUCCESS;
}
return route_unicast_packet(skb, recv_if);
}
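/* receive handler for unicast fragments: if we are the
* destination, try to reassemble (a fragment may be buffered
* until its counterpart arrives); otherwise route the fragment
* like any other unicast packet. */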
int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct unicast_frag_packet *unicast_packet;
int hdr_size = sizeof(struct unicast_frag_packet);
struct sk_buff *new_skb = NULL;
int ret;
if (check_unicast_packet(skb, hdr_size) < 0)
return NET_RX_DROP;
unicast_packet = (struct unicast_frag_packet *)skb->data;
/* packet for me */
if (is_my_mac(unicast_packet->dest)) {
ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
if (ret == NET_RX_DROP)
return NET_RX_DROP;
/* packet was buffered for late merge */
if (!new_skb)
return NET_RX_SUCCESS;
interface_rx(recv_if->soft_iface, new_skb, recv_if,
sizeof(struct unicast_packet));
return NET_RX_SUCCESS;
}
return route_unicast_packet(skb, recv_if);
}
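/* receive handler for broadcast packets: sanity-check the frame,
* suppress duplicates and packets outside the sequence number
* window, then queue the packet for rebroadcast and hand a copy
* up the local soft interface. */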
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
struct orig_node *orig_node = NULL;
struct bcast_packet *bcast_packet;
struct ethhdr *ethhdr;
int hdr_size = sizeof(struct bcast_packet);
int ret = NET_RX_DROP;
int32_t seq_diff;
/* drop the packet if it does not have the necessary minimum size */
if (unlikely(!pskb_may_pull(skb, hdr_size)))
goto out;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* packet with broadcast indication but unicast recipient */
if (!is_broadcast_ether_addr(ethhdr->h_dest))
goto out;
/* packet with broadcast sender address */
if (is_broadcast_ether_addr(ethhdr->h_source))
goto out;
/* ignore broadcasts sent by myself */
if (is_my_mac(ethhdr->h_source))
goto out;
bcast_packet = (struct bcast_packet *)skb->data;
/* ignore broadcasts originated by myself */
if (is_my_mac(bcast_packet->orig))
goto out;
if (bcast_packet->ttl < 2)
goto out;
orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
if (!orig_node)
goto out;
spin_lock_bh(&orig_node->bcast_seqno_lock);
/* check whether the packet is a duplicate */
if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
ntohl(bcast_packet->seqno)))
goto spin_unlock;
seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
/* check whether the packet is old and the host just restarted. */
if (window_protected(bat_priv, seq_diff,
&orig_node->bcast_seqno_reset))
goto spin_unlock;
/* mark broadcast in flood history, update window position
* if required. */
if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
spin_unlock_bh(&orig_node->bcast_seqno_lock);
/* rebroadcast packet */
add_bcast_packet_to_list(bat_priv, skb);
/* broadcast for me */
interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
ret = NET_RX_SUCCESS;
goto out;
spin_unlock:
spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
if (orig_node)
orig_node_free_ref(orig_node);
return ret;
}
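/* receive handler for vis packets addressed to us: server-sync
* and client-update payloads are copied by the vis subsystem,
* so the skb is always released by returning NET_RX_DROP. */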
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
struct vis_packet *vis_packet;
struct ethhdr *ethhdr;
struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
int hdr_size = sizeof(struct vis_packet);
/* keep skb linear */
if (skb_linearize(skb) < 0)
return NET_RX_DROP;
if (unlikely(!pskb_may_pull(skb, hdr_size)))
return NET_RX_DROP;
vis_packet = (struct vis_packet *)skb->data;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* not for me */
if (!is_my_mac(ethhdr->h_dest))
return NET_RX_DROP;
/* ignore own packets */
if (is_my_mac(vis_packet->vis_orig))
return NET_RX_DROP;
if (is_my_mac(vis_packet->sender_orig))
return NET_RX_DROP;
switch (vis_packet->vis_type) {
case VIS_TYPE_SERVER_SYNC:
receive_server_sync_packet(bat_priv, vis_packet,
skb_headlen(skb));
break;
case VIS_TYPE_CLIENT_UPDATE:
receive_client_update_packet(bat_priv, vis_packet,
skb_headlen(skb));
break;
default: /* ignore unknown packet */
break;
}
/* We take a copy of the data in the packet, so we should
always free the skbuff. */
return NET_RX_DROP;
}
| gpl-2.0 |
glewarne/Note2Core_v3_kernel_N710x | drivers/media/video/pwc/pwc-v4l.c | 2374 | 23726 | /* Linux driver for Philips webcam
USB and Video4Linux interface part.
(C) 1999-2004 Nemosoft Unv.
(C) 2004-2006 Luc Saillard (luc@saillard.org)
NOTE: this version of pwc is an unofficial (modified) release of the pwc & pwcx
driver and thus may have bugs that are not present in the original version.
Please send bug reports and support requests to <luc@saillard.org>.
The decompression routines have been implemented by reverse-engineering the
Nemosoft binary pwcx module. Caveat emptor.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include "pwc.h"
static struct v4l2_queryctrl pwc_controls[] = {
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Brightness",
.minimum = 0,
.maximum = 128,
.step = 1,
.default_value = 64,
},
{
.id = V4L2_CID_CONTRAST,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Contrast",
.minimum = 0,
.maximum = 64,
.step = 1,
.default_value = 0,
},
{
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Saturation",
.minimum = -100,
.maximum = 100,
.step = 1,
.default_value = 0,
},
{
.id = V4L2_CID_GAMMA,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Gamma",
.minimum = 0,
.maximum = 32,
.step = 1,
.default_value = 0,
},
{
.id = V4L2_CID_RED_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Red Gain",
.minimum = 0,
.maximum = 256,
.step = 1,
.default_value = 0,
},
{
.id = V4L2_CID_BLUE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Blue Gain",
.minimum = 0,
.maximum = 256,
.step = 1,
.default_value = 0,
},
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Auto White Balance",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 0,
},
{
.id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Shutter Speed (Exposure)",
.minimum = 0,
.maximum = 256,
.step = 1,
.default_value = 200,
},
{
.id = V4L2_CID_AUTOGAIN,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Auto Gain Enabled",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 1,
},
{
.id = V4L2_CID_GAIN,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Gain Level",
.minimum = 0,
.maximum = 256,
.step = 1,
.default_value = 0,
},
{
.id = V4L2_CID_PRIVATE_SAVE_USER,
.type = V4L2_CTRL_TYPE_BUTTON,
.name = "Save User Settings",
.minimum = 0,
.maximum = 0,
.step = 0,
.default_value = 0,
},
{
.id = V4L2_CID_PRIVATE_RESTORE_USER,
.type = V4L2_CTRL_TYPE_BUTTON,
.name = "Restore User Settings",
.minimum = 0,
.maximum = 0,
.step = 0,
.default_value = 0,
},
{
.id = V4L2_CID_PRIVATE_RESTORE_FACTORY,
.type = V4L2_CTRL_TYPE_BUTTON,
.name = "Restore Factory Settings",
.minimum = 0,
.maximum = 0,
.step = 0,
.default_value = 0,
},
{
.id = V4L2_CID_PRIVATE_COLOUR_MODE,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Colour mode",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 0,
},
{
.id = V4L2_CID_PRIVATE_AUTOCONTOUR,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Auto contour",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 0,
},
{
.id = V4L2_CID_PRIVATE_CONTOUR,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Contour",
.minimum = 0,
.maximum = 63,
.step = 1,
.default_value = 0,
},
{
.id = V4L2_CID_PRIVATE_BACKLIGHT,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Backlight compensation",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 0,
},
{
.id = V4L2_CID_PRIVATE_FLICKERLESS,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Flickerless",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 0,
},
{
.id = V4L2_CID_PRIVATE_NOISE_REDUCTION,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Noise reduction",
.minimum = 0,
.maximum = 3,
.step = 1,
.default_value = 0,
},
};
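/* describe the currently configured mode in a v4l2_format: view
* size, pixel format and the derived pitch and image size (raw
* modes use the vendor PWC1/PWC2 formats). */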
static void pwc_vidioc_fill_fmt(const struct pwc_device *pdev, struct v4l2_format *f)
{
memset(&f->fmt.pix, 0, sizeof(struct v4l2_pix_format));
f->fmt.pix.width = pdev->view.x;
f->fmt.pix.height = pdev->view.y;
f->fmt.pix.field = V4L2_FIELD_NONE;
if (pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
f->fmt.pix.bytesperline = (f->fmt.pix.width * 3)/2;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
} else {
/* vbandlength contains 4 lines ... */
f->fmt.pix.bytesperline = pdev->vbandlength/4;
f->fmt.pix.sizeimage = pdev->frame_size + sizeof(struct pwc_raw_frame);
if (DEVICE_USE_CODEC1(pdev->type))
f->fmt.pix.pixelformat = V4L2_PIX_FMT_PWC1;
else
f->fmt.pix.pixelformat = V4L2_PIX_FMT_PWC2;
}
PWC_DEBUG_IOCTL("pwc_vidioc_fill_fmt() "
"width=%d, height=%d, bytesperline=%d, sizeimage=%d, pixelformat=%c%c%c%c\n",
f->fmt.pix.width,
f->fmt.pix.height,
f->fmt.pix.bytesperline,
f->fmt.pix.sizeimage,
(f->fmt.pix.pixelformat)&255,
(f->fmt.pix.pixelformat>>8)&255,
(f->fmt.pix.pixelformat>>16)&255,
(f->fmt.pix.pixelformat>>24)&255);
}
/* ioctl(VIDIOC_TRY_FMT) */
static int pwc_vidioc_try_fmt(struct pwc_device *pdev, struct v4l2_format *f)
{
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
PWC_DEBUG_IOCTL("Bad video type must be V4L2_BUF_TYPE_VIDEO_CAPTURE\n");
return -EINVAL;
}
switch (f->fmt.pix.pixelformat) {
case V4L2_PIX_FMT_YUV420:
break;
case V4L2_PIX_FMT_PWC1:
if (DEVICE_USE_CODEC23(pdev->type)) {
PWC_DEBUG_IOCTL("codec1 is only supported for old pwc webcam\n");
return -EINVAL;
}
break;
case V4L2_PIX_FMT_PWC2:
if (DEVICE_USE_CODEC1(pdev->type)) {
PWC_DEBUG_IOCTL("codec23 is only supported for new pwc webcam\n");
return -EINVAL;
}
break;
default:
PWC_DEBUG_IOCTL("Unsupported pixel format\n");
return -EINVAL;
}
if (f->fmt.pix.width > pdev->view_max.x)
f->fmt.pix.width = pdev->view_max.x;
else if (f->fmt.pix.width < pdev->view_min.x)
f->fmt.pix.width = pdev->view_min.x;
if (f->fmt.pix.height > pdev->view_max.y)
f->fmt.pix.height = pdev->view_max.y;
else if (f->fmt.pix.height < pdev->view_min.y)
f->fmt.pix.height = pdev->view_min.y;
return 0;
}
/* ioctl(VIDIOC_SET_FMT) */
static int pwc_vidioc_set_fmt(struct pwc_device *pdev, struct v4l2_format *f)
{
int ret, fps, snapshot, compression, pixelformat;
ret = pwc_vidioc_try_fmt(pdev, f);
if (ret<0)
return ret;
pixelformat = f->fmt.pix.pixelformat;
compression = pdev->vcompression;
snapshot = 0;
fps = pdev->vframes;
if (f->fmt.pix.priv) {
compression = (f->fmt.pix.priv & PWC_QLT_MASK) >> PWC_QLT_SHIFT;
snapshot = !!(f->fmt.pix.priv & PWC_FPS_SNAPSHOT);
fps = (f->fmt.pix.priv & PWC_FPS_FRMASK) >> PWC_FPS_SHIFT;
if (fps == 0)
fps = pdev->vframes;
}
if (pixelformat != V4L2_PIX_FMT_YUV420 &&
pixelformat != V4L2_PIX_FMT_PWC1 &&
pixelformat != V4L2_PIX_FMT_PWC2)
return -EINVAL;
if (pdev->iso_init)
return -EBUSY;
PWC_DEBUG_IOCTL("Trying to set format to: width=%d height=%d fps=%d "
"compression=%d snapshot=%d format=%c%c%c%c\n",
f->fmt.pix.width, f->fmt.pix.height, fps,
compression, snapshot,
(pixelformat)&255,
(pixelformat>>8)&255,
(pixelformat>>16)&255,
(pixelformat>>24)&255);
ret = pwc_set_video_mode(pdev,
f->fmt.pix.width,
f->fmt.pix.height,
fps,
compression,
snapshot);
PWC_DEBUG_IOCTL("pwc_set_video_mode(), return=%d\n", ret);
if (ret)
return ret;
pdev->pixfmt = pixelformat;
pwc_vidioc_fill_fmt(pdev, f);
return 0;
}
static int pwc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
struct video_device *vdev = video_devdata(file);
struct pwc_device *pdev = video_drvdata(file);
strcpy(cap->driver, PWC_NAME);
strlcpy(cap->card, vdev->name, sizeof(cap->card));
usb_make_path(pdev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->version = PWC_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING |
V4L2_CAP_READWRITE;
return 0;
}
static int pwc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
if (i->index) /* Only one INPUT is supported */
return -EINVAL;
strcpy(i->name, "usb");
return 0;
}
static int pwc_g_input(struct file *file, void *fh, unsigned int *i)
{
*i = 0;
return 0;
}
static int pwc_s_input(struct file *file, void *fh, unsigned int i)
{
return i ? -EINVAL : 0;
}
static int pwc_queryctrl(struct file *file, void *fh, struct v4l2_queryctrl *c)
{
int i, idx;
u32 id;
id = c->id;
if (id & V4L2_CTRL_FLAG_NEXT_CTRL) {
id &= V4L2_CTRL_ID_MASK;
id++;
idx = -1;
for (i = 0; i < ARRAY_SIZE(pwc_controls); i++) {
if (pwc_controls[i].id < id)
continue;
if (idx >= 0
&& pwc_controls[i].id > pwc_controls[idx].id)
continue;
idx = i;
}
if (idx < 0)
return -EINVAL;
memcpy(c, &pwc_controls[idx], sizeof pwc_controls[0]);
return 0;
}
for (i = 0; i < sizeof(pwc_controls) / sizeof(struct v4l2_queryctrl); i++) {
if (pwc_controls[i].id == c->id) {
PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYCTRL) found\n");
memcpy(c, &pwc_controls[i], sizeof(struct v4l2_queryctrl));
return 0;
}
}
return -EINVAL;
}
static int pwc_g_ctrl(struct file *file, void *fh, struct v4l2_control *c)
{
struct pwc_device *pdev = video_drvdata(file);
int ret;
switch (c->id) {
case V4L2_CID_BRIGHTNESS:
c->value = pwc_get_brightness(pdev);
if (c->value < 0)
return -EINVAL;
return 0;
case V4L2_CID_CONTRAST:
c->value = pwc_get_contrast(pdev);
if (c->value < 0)
return -EINVAL;
return 0;
case V4L2_CID_SATURATION:
ret = pwc_get_saturation(pdev, &c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_GAMMA:
c->value = pwc_get_gamma(pdev);
if (c->value < 0)
return -EINVAL;
return 0;
case V4L2_CID_RED_BALANCE:
ret = pwc_get_red_gain(pdev, &c->value);
if (ret < 0)
return -EINVAL;
c->value >>= 8;
return 0;
case V4L2_CID_BLUE_BALANCE:
ret = pwc_get_blue_gain(pdev, &c->value);
if (ret < 0)
return -EINVAL;
c->value >>= 8;
return 0;
case V4L2_CID_AUTO_WHITE_BALANCE:
ret = pwc_get_awb(pdev);
if (ret < 0)
return -EINVAL;
c->value = (ret == PWC_WB_MANUAL) ? 0 : 1;
return 0;
case V4L2_CID_GAIN:
ret = pwc_get_agc(pdev, &c->value);
if (ret < 0)
return -EINVAL;
c->value >>= 8;
return 0;
case V4L2_CID_AUTOGAIN:
ret = pwc_get_agc(pdev, &c->value);
if (ret < 0)
return -EINVAL;
c->value = (c->value < 0) ? 1 : 0;
return 0;
case V4L2_CID_EXPOSURE:
ret = pwc_get_shutter_speed(pdev, &c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_PRIVATE_COLOUR_MODE:
ret = pwc_get_colour_mode(pdev, &c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_PRIVATE_AUTOCONTOUR:
ret = pwc_get_contour(pdev, &c->value);
if (ret < 0)
return -EINVAL;
c->value = (c->value == -1 ? 1 : 0);
return 0;
case V4L2_CID_PRIVATE_CONTOUR:
ret = pwc_get_contour(pdev, &c->value);
if (ret < 0)
return -EINVAL;
c->value >>= 10;
return 0;
case V4L2_CID_PRIVATE_BACKLIGHT:
ret = pwc_get_backlight(pdev, &c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_PRIVATE_FLICKERLESS:
ret = pwc_get_flicker(pdev, &c->value);
if (ret < 0)
return -EINVAL;
c->value = (c->value ? 1 : 0);
return 0;
case V4L2_CID_PRIVATE_NOISE_REDUCTION:
ret = pwc_get_dynamic_noise(pdev, &c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_PRIVATE_SAVE_USER:
case V4L2_CID_PRIVATE_RESTORE_USER:
case V4L2_CID_PRIVATE_RESTORE_FACTORY:
return -EINVAL;
}
return -EINVAL;
}
static int pwc_s_ctrl(struct file *file, void *fh, struct v4l2_control *c)
{
struct pwc_device *pdev = video_drvdata(file);
int ret;
switch (c->id) {
case V4L2_CID_BRIGHTNESS:
c->value <<= 9;
ret = pwc_set_brightness(pdev, c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_CONTRAST:
c->value <<= 10;
ret = pwc_set_contrast(pdev, c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_SATURATION:
ret = pwc_set_saturation(pdev, c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_GAMMA:
c->value <<= 11;
ret = pwc_set_gamma(pdev, c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_RED_BALANCE:
c->value <<= 8;
ret = pwc_set_red_gain(pdev, c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_BLUE_BALANCE:
c->value <<= 8;
ret = pwc_set_blue_gain(pdev, c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_AUTO_WHITE_BALANCE:
c->value = (c->value == 0) ? PWC_WB_MANUAL : PWC_WB_AUTO;
ret = pwc_set_awb(pdev, c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_EXPOSURE:
c->value <<= 8;
ret = pwc_set_shutter_speed(pdev, c->value ? 0 : 1, c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_AUTOGAIN:
/* autogain off means nothing without a gain */
if (c->value == 0)
return 0;
ret = pwc_set_agc(pdev, c->value, 0);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_GAIN:
c->value <<= 8;
ret = pwc_set_agc(pdev, 0, c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_PRIVATE_SAVE_USER:
if (pwc_save_user(pdev))
return -EINVAL;
return 0;
case V4L2_CID_PRIVATE_RESTORE_USER:
if (pwc_restore_user(pdev))
return -EINVAL;
return 0;
case V4L2_CID_PRIVATE_RESTORE_FACTORY:
if (pwc_restore_factory(pdev))
return -EINVAL;
return 0;
case V4L2_CID_PRIVATE_COLOUR_MODE:
ret = pwc_set_colour_mode(pdev, c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_PRIVATE_AUTOCONTOUR:
c->value = (c->value == 1) ? -1 : 0;
ret = pwc_set_contour(pdev, c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_PRIVATE_CONTOUR:
c->value <<= 10;
ret = pwc_set_contour(pdev, c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_PRIVATE_BACKLIGHT:
ret = pwc_set_backlight(pdev, c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_PRIVATE_FLICKERLESS:
ret = pwc_set_flicker(pdev, c->value);
if (ret < 0)
return -EINVAL;
return 0;
case V4L2_CID_PRIVATE_NOISE_REDUCTION:
ret = pwc_set_dynamic_noise(pdev, c->value);
if (ret < 0)
return -EINVAL;
return 0;
}
return -EINVAL;
}
static int pwc_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
struct pwc_device *pdev = video_drvdata(file);
/* We only support two formats: the raw format and YUV */
switch (f->index) {
case 0:
/* RAW format */
f->pixelformat = pdev->type <= 646 ? V4L2_PIX_FMT_PWC1 : V4L2_PIX_FMT_PWC2;
f->flags = V4L2_FMT_FLAG_COMPRESSED;
strlcpy(f->description, "Raw Philips Webcam", sizeof(f->description));
break;
case 1:
f->pixelformat = V4L2_PIX_FMT_YUV420;
strlcpy(f->description, "4:2:0, planar, Y-Cb-Cr", sizeof(f->description));
break;
default:
return -EINVAL;
}
return 0;
}
static int pwc_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
{
struct pwc_device *pdev = video_drvdata(file);
PWC_DEBUG_IOCTL("ioctl(VIDIOC_G_FMT) return size %dx%d\n",
pdev->image.x, pdev->image.y);
pwc_vidioc_fill_fmt(pdev, f);
return 0;
}
static int pwc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
{
struct pwc_device *pdev = video_drvdata(file);
return pwc_vidioc_try_fmt(pdev, f);
}
static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
{
struct pwc_device *pdev = video_drvdata(file);
return pwc_vidioc_set_fmt(pdev, f);
}
static int pwc_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
int nbuffers;
PWC_DEBUG_IOCTL("ioctl(VIDIOC_REQBUFS) count=%d\n", rb->count);
if (rb->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (rb->memory != V4L2_MEMORY_MMAP)
return -EINVAL;
nbuffers = rb->count;
if (nbuffers < 2)
nbuffers = 2;
else if (nbuffers > pwc_mbufs)
nbuffers = pwc_mbufs;
/* Force to use our # of buffers */
rb->count = pwc_mbufs;
return 0;
}
static int pwc_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
struct pwc_device *pdev = video_drvdata(file);
int index;
PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYBUF) index=%d\n", buf->index);
if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYBUF) Bad type\n");
return -EINVAL;
}
index = buf->index;
if (index < 0 || index >= pwc_mbufs) {
PWC_DEBUG_IOCTL("ioctl(VIDIOC_QUERYBUF) Bad index %d\n", buf->index);
return -EINVAL;
}
buf->m.offset = index * pdev->len_per_image;
if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
buf->bytesused = pdev->frame_size + sizeof(struct pwc_raw_frame);
else
buf->bytesused = pdev->view.size;
buf->field = V4L2_FIELD_NONE;
buf->memory = V4L2_MEMORY_MMAP;
/*buf->flags = V4L2_BUF_FLAG_MAPPED;*/
buf->length = pdev->len_per_image;
PWC_DEBUG_READ("VIDIOC_QUERYBUF: index=%d\n", buf->index);
PWC_DEBUG_READ("VIDIOC_QUERYBUF: m.offset=%d\n", buf->m.offset);
PWC_DEBUG_READ("VIDIOC_QUERYBUF: bytesused=%d\n", buf->bytesused);
return 0;
}
static int pwc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
PWC_DEBUG_IOCTL("ioctl(VIDIOC_QBUF) index=%d\n", buf->index);
if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (buf->memory != V4L2_MEMORY_MMAP)
return -EINVAL;
if (buf->index >= pwc_mbufs)
return -EINVAL;
buf->flags |= V4L2_BUF_FLAG_QUEUED;
buf->flags &= ~V4L2_BUF_FLAG_DONE;
return 0;
}
static int pwc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
{
DECLARE_WAITQUEUE(wait, current);
struct pwc_device *pdev = video_drvdata(file);
int ret;
PWC_DEBUG_IOCTL("ioctl(VIDIOC_DQBUF)\n");
if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
add_wait_queue(&pdev->frameq, &wait);
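/* sleep until a filled frame is available; modlock is dropped
* around schedule() so other operations on the device can make
* progress, and we bail out early on a device error or a
* pending signal. */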
while (pdev->full_frames == NULL) {
if (pdev->error_status) {
remove_wait_queue(&pdev->frameq, &wait);
set_current_state(TASK_RUNNING);
return -pdev->error_status;
}
if (signal_pending(current)) {
remove_wait_queue(&pdev->frameq, &wait);
set_current_state(TASK_RUNNING);
return -ERESTARTSYS;
}
mutex_unlock(&pdev->modlock);
schedule();
set_current_state(TASK_INTERRUPTIBLE);
mutex_lock(&pdev->modlock);
}
remove_wait_queue(&pdev->frameq, &wait);
set_current_state(TASK_RUNNING);
PWC_DEBUG_IOCTL("VIDIOC_DQBUF: frame ready.\n");
/* Decompress data in pdev->images[pdev->fill_image] */
ret = pwc_handle_frame(pdev);
if (ret)
return -EFAULT;
PWC_DEBUG_IOCTL("VIDIOC_DQBUF: after pwc_handle_frame\n");
buf->index = pdev->fill_image;
if (pdev->pixfmt != V4L2_PIX_FMT_YUV420)
buf->bytesused = pdev->frame_size + sizeof(struct pwc_raw_frame);
else
buf->bytesused = pdev->view.size;
buf->flags = V4L2_BUF_FLAG_MAPPED;
buf->field = V4L2_FIELD_NONE;
do_gettimeofday(&buf->timestamp);
buf->sequence = 0;
buf->memory = V4L2_MEMORY_MMAP;
buf->m.offset = pdev->fill_image * pdev->len_per_image;
buf->length = pdev->len_per_image;
pwc_next_image(pdev);
PWC_DEBUG_IOCTL("VIDIOC_DQBUF: buf->index=%d\n", buf->index);
PWC_DEBUG_IOCTL("VIDIOC_DQBUF: buf->length=%d\n", buf->length);
PWC_DEBUG_IOCTL("VIDIOC_DQBUF: m.offset=%d\n", buf->m.offset);
PWC_DEBUG_IOCTL("VIDIOC_DQBUF: bytesused=%d\n", buf->bytesused);
PWC_DEBUG_IOCTL("VIDIOC_DQBUF: leaving\n");
return 0;
}
static int pwc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
{
struct pwc_device *pdev = video_drvdata(file);
return pwc_isoc_init(pdev);
}
static int pwc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
{
struct pwc_device *pdev = video_drvdata(file);
pwc_isoc_cleanup(pdev);
return 0;
}
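/* frame size enumeration: YUV420 walks the per-device image_mask;
* the raw PWC1/PWC2 formats only report the absolute maximum. */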
static int pwc_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct pwc_device *pdev = video_drvdata(file);
unsigned int i = 0, index = fsize->index;
if (fsize->pixel_format == V4L2_PIX_FMT_YUV420) {
for (i = 0; i < PSZ_MAX; i++) {
if (pdev->image_mask & (1UL << i)) {
if (!index--) {
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
fsize->discrete.width = pwc_image_sizes[i].x;
fsize->discrete.height = pwc_image_sizes[i].y;
return 0;
}
}
}
} else if (fsize->index == 0 &&
((fsize->pixel_format == V4L2_PIX_FMT_PWC1 && DEVICE_USE_CODEC1(pdev->type)) ||
(fsize->pixel_format == V4L2_PIX_FMT_PWC2 && DEVICE_USE_CODEC23(pdev->type)))) {
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
fsize->discrete.width = pdev->abs_max.x;
fsize->discrete.height = pdev->abs_max.y;
return 0;
}
return -EINVAL;
}
static int pwc_enum_frameintervals(struct file *file, void *fh,
struct v4l2_frmivalenum *fival)
{
struct pwc_device *pdev = video_drvdata(file);
int size = -1;
unsigned int i;
for (i = 0; i < PSZ_MAX; i++) {
if (pwc_image_sizes[i].x == fival->width &&
pwc_image_sizes[i].y == fival->height) {
size = i;
break;
}
}
/* TODO: Support raw format */
if (size < 0 || fival->pixel_format != V4L2_PIX_FMT_YUV420)
return -EINVAL;
i = pwc_get_fps(pdev, fival->index, size);
if (!i)
return -EINVAL;
fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
fival->discrete.numerator = 1;
fival->discrete.denominator = i;
return 0;
}
static long pwc_default(struct file *file, void *fh, bool valid_prio,
int cmd, void *arg)
{
struct pwc_device *pdev = video_drvdata(file);
return pwc_ioctl(pdev, cmd, arg);
}
const struct v4l2_ioctl_ops pwc_ioctl_ops = {
.vidioc_querycap = pwc_querycap,
.vidioc_enum_input = pwc_enum_input,
.vidioc_g_input = pwc_g_input,
.vidioc_s_input = pwc_s_input,
.vidioc_enum_fmt_vid_cap = pwc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = pwc_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = pwc_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = pwc_try_fmt_vid_cap,
.vidioc_queryctrl = pwc_queryctrl,
.vidioc_g_ctrl = pwc_g_ctrl,
.vidioc_s_ctrl = pwc_s_ctrl,
.vidioc_reqbufs = pwc_reqbufs,
.vidioc_querybuf = pwc_querybuf,
.vidioc_qbuf = pwc_qbuf,
.vidioc_dqbuf = pwc_dqbuf,
.vidioc_streamon = pwc_streamon,
.vidioc_streamoff = pwc_streamoff,
.vidioc_enum_framesizes = pwc_enum_framesizes,
.vidioc_enum_frameintervals = pwc_enum_frameintervals,
.vidioc_default = pwc_default,
};
/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
| gpl-2.0 |
vmobi-d2vmu/android_kernel_samsung_d2vmu | drivers/video/aty/atyfb_base.c | 2374 | 111571 | /*
* ATI Frame Buffer Device Driver Core
*
* Copyright (C) 2004 Alex Kern <alex.kern@gmx.de>
* Copyright (C) 1997-2001 Geert Uytterhoeven
* Copyright (C) 1998 Bernd Harries
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
*
* This driver supports the following ATI graphics chips:
* - ATI Mach64
*
* To do: add support for
* - ATI Rage128 (from aty128fb.c)
* - ATI Radeon (from radeonfb.c)
*
* This driver is partly based on the PowerMac console driver:
*
* Copyright (C) 1996 Paul Mackerras
*
* and on the PowerMac ATI/mach64 display driver:
*
* Copyright (C) 1997 Michael AK Tesch
*
* with work by Jon Howell
* Harry AC Eaton
* Anthony Tong <atong@uiuc.edu>
*
* Generic LCD support written by Daniel Mantione, ported from 2.4.20 by Alex Kern
* Many thanks to Ville Syrjälä for patches and for fixing a nasty 16 bit color bug.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*
* Many thanks to Nitya from ATI devrel for support and patience !
*/
/******************************************************************************
TODO:
- cursor support on all cards and all ramdacs.
* - cursor parameters controllable via ioctl()s.
- guess PLL and MCLK based on the original PLL register values initialized
by Open Firmware (if they are initialized). The BIOS case is done.
(Anyone with a Mac to help with this?)
******************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/console.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/backlight.h>
#include <linux/reboot.h>
#include <linux/dmi.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <video/mach64.h>
#include "atyfb.h"
#include "ati_ids.h"
#ifdef __powerpc__
#include <asm/machdep.h>
#include <asm/prom.h>
#include "../macmodes.h"
#endif
#ifdef __sparc__
#include <asm/fbio.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#endif
#ifdef CONFIG_ADB_PMU
#include <linux/adb.h>
#include <linux/pmu.h>
#endif
#ifdef CONFIG_BOOTX_TEXT
#include <asm/btext.h>
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
/*
* Debug flags.
*/
#undef DEBUG
/*#define DEBUG*/
/* Make sure n * PAGE_SIZE is protected at end of Aperture for GUI-regs */
/* - must be large enough to catch all GUI-Regs */
/* - must be aligned to a PAGE boundary */
#define GUI_RESERVE (1 * PAGE_SIZE)
/* FIXME: remove the FAIL definition */
#define FAIL(msg) do { \
if (!(var->activate & FB_ACTIVATE_TEST)) \
printk(KERN_CRIT "atyfb: " msg "\n"); \
return -EINVAL; \
} while (0)
#define FAIL_MAX(msg, x, _max_) do { \
if (x > _max_) { \
if (!(var->activate & FB_ACTIVATE_TEST)) \
printk(KERN_CRIT "atyfb: " msg " %x(%x)\n", x, _max_); \
return -EINVAL; \
} \
} while (0)
#ifdef DEBUG
#define DPRINTK(fmt, args...) printk(KERN_DEBUG "atyfb: " fmt, ## args)
#else
#define DPRINTK(fmt, args...)
#endif
#define PRINTKI(fmt, args...) printk(KERN_INFO "atyfb: " fmt, ## args)
#define PRINTKE(fmt, args...) printk(KERN_ERR "atyfb: " fmt, ## args)
#if defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || \
defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT)
static const u32 lt_lcd_regs[] = {
CNFG_PANEL_LG,
LCD_GEN_CNTL_LG,
DSTN_CONTROL_LG,
HFB_PITCH_ADDR_LG,
HORZ_STRETCHING_LG,
VERT_STRETCHING_LG,
0, /* EXT_VERT_STRETCH */
LT_GIO_LG,
POWER_MANAGEMENT_LG
};
void aty_st_lcd(int index, u32 val, const struct atyfb_par *par)
{
if (M64_HAS(LT_LCD_REGS)) {
aty_st_le32(lt_lcd_regs[index], val, par);
} else {
unsigned long temp;
/* write addr byte */
temp = aty_ld_le32(LCD_INDEX, par);
aty_st_le32(LCD_INDEX, (temp & ~LCD_INDEX_MASK) | index, par);
/* write the register value */
aty_st_le32(LCD_DATA, val, par);
}
}
u32 aty_ld_lcd(int index, const struct atyfb_par *par)
{
if (M64_HAS(LT_LCD_REGS)) {
return aty_ld_le32(lt_lcd_regs[index], par);
} else {
unsigned long temp;
/* write addr byte */
temp = aty_ld_le32(LCD_INDEX, par);
aty_st_le32(LCD_INDEX, (temp & ~LCD_INDEX_MASK) | index, par);
/* read the register value */
return aty_ld_le32(LCD_DATA, par);
}
}
#endif /* defined(CONFIG_PM) || defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_FB_ATY_BACKLIGHT) */
#ifdef CONFIG_FB_ATY_GENERIC_LCD
/*
* ATIReduceRatio --
*
* Reduce a fraction by factoring out the largest common divider of the
* fraction's numerator and denominator.
*/
static void ATIReduceRatio(int *Numerator, int *Denominator)
{
int Multiplier, Divider, Remainder;
Multiplier = *Numerator;
Divider = *Denominator;
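/* Euclid's algorithm: Divider ends up holding the GCD */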
while ((Remainder = Multiplier % Divider)) {
Multiplier = Divider;
Divider = Remainder;
}
*Numerator /= Divider;
*Denominator /= Divider;
}
#endif
/*
* The Hardware parameters for each card
*/
struct pci_mmap_map {
unsigned long voff;
unsigned long poff;
unsigned long size;
unsigned long prot_flag;
unsigned long prot_mask;
};
static struct fb_fix_screeninfo atyfb_fix __devinitdata = {
.id = "ATY Mach64",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
.xpanstep = 8,
.ypanstep = 1,
};
/*
* Frame buffer device API
*/
static int atyfb_open(struct fb_info *info, int user);
static int atyfb_release(struct fb_info *info, int user);
static int atyfb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info);
static int atyfb_set_par(struct fb_info *info);
static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info);
static int atyfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info);
static int atyfb_blank(int blank, struct fb_info *info);
static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg);
#ifdef __sparc__
static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma);
#endif
static int atyfb_sync(struct fb_info *info);
/*
* Internal routines
*/
static int aty_init(struct fb_info *info);
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
static int aty_var_to_crtc(const struct fb_info *info,
const struct fb_var_screeninfo *var,
struct crtc *crtc);
static int aty_crtc_to_var(const struct crtc *crtc,
struct fb_var_screeninfo *var);
static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info);
#ifdef CONFIG_PPC
static int read_aty_sense(const struct atyfb_par *par);
#endif
static DEFINE_MUTEX(reboot_lock);
static struct fb_info *reboot_info;
/*
* Interface used by the world
*/
static struct fb_var_screeninfo default_var = {
/* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */
640, 480, 640, 480, 0, 0, 8, 0,
{0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0},
0, 0, -1, -1, 0, 39722, 48, 16, 33, 10, 96, 2,
0, FB_VMODE_NONINTERLACED
};
static struct fb_videomode defmode = {
/* 640x480 @ 60 Hz, 31.5 kHz hsync */
NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2,
0, FB_VMODE_NONINTERLACED
};
static struct fb_ops atyfb_ops = {
.owner = THIS_MODULE,
.fb_open = atyfb_open,
.fb_release = atyfb_release,
.fb_check_var = atyfb_check_var,
.fb_set_par = atyfb_set_par,
.fb_setcolreg = atyfb_setcolreg,
.fb_pan_display = atyfb_pan_display,
.fb_blank = atyfb_blank,
.fb_ioctl = atyfb_ioctl,
.fb_fillrect = atyfb_fillrect,
.fb_copyarea = atyfb_copyarea,
.fb_imageblit = atyfb_imageblit,
#ifdef __sparc__
.fb_mmap = atyfb_mmap,
#endif
.fb_sync = atyfb_sync,
};
static int noaccel;
#ifdef CONFIG_MTRR
static int nomtrr;
#endif
static int vram;
static int pll;
static int mclk;
static int xclk;
static int comp_sync __devinitdata = -1;
static char *mode;
#ifdef CONFIG_PMAC_BACKLIGHT
static int backlight __devinitdata = 1;
#else
static int backlight __devinitdata = 0;
#endif
#ifdef CONFIG_PPC
static int default_vmode __devinitdata = VMODE_CHOOSE;
static int default_cmode __devinitdata = CMODE_CHOOSE;
module_param_named(vmode, default_vmode, int, 0);
MODULE_PARM_DESC(vmode, "int: video mode for mac");
module_param_named(cmode, default_cmode, int, 0);
MODULE_PARM_DESC(cmode, "int: color mode for mac");
#endif
#ifdef CONFIG_ATARI
static unsigned int mach64_count __devinitdata = 0;
static unsigned long phys_vmembase[FB_MAX] __devinitdata = { 0, };
static unsigned long phys_size[FB_MAX] __devinitdata = { 0, };
static unsigned long phys_guiregbase[FB_MAX] __devinitdata = { 0, };
#endif
/* top -> down is an evolution of mach64 chipset, any corrections? */
#define ATI_CHIP_88800GX (M64F_GX)
#define ATI_CHIP_88800CX (M64F_GX)
#define ATI_CHIP_264CT (M64F_CT | M64F_INTEGRATED | M64F_CT_BUS | M64F_MAGIC_FIFO)
#define ATI_CHIP_264ET (M64F_CT | M64F_INTEGRATED | M64F_CT_BUS | M64F_MAGIC_FIFO)
#define ATI_CHIP_264VT (M64F_VT | M64F_INTEGRATED | M64F_VT_BUS | M64F_MAGIC_FIFO)
#define ATI_CHIP_264GT (M64F_GT | M64F_INTEGRATED | M64F_MAGIC_FIFO | M64F_EXTRA_BRIGHT)
#define ATI_CHIP_264VTB (M64F_VT | M64F_INTEGRATED | M64F_VT_BUS | M64F_GTB_DSP)
#define ATI_CHIP_264VT3 (M64F_VT | M64F_INTEGRATED | M64F_VT_BUS | M64F_GTB_DSP | M64F_SDRAM_MAGIC_PLL)
#define ATI_CHIP_264VT4 (M64F_VT | M64F_INTEGRATED | M64F_GTB_DSP)
/* FIXME what is this chip? */
#define ATI_CHIP_264LT (M64F_GT | M64F_INTEGRATED | M64F_GTB_DSP)
/* make sets shorter */
#define ATI_MODERN_SET (M64F_GT | M64F_INTEGRATED | M64F_GTB_DSP | M64F_EXTRA_BRIGHT)
#define ATI_CHIP_264GTB (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL)
/*#define ATI_CHIP_264GTDVD ?*/
#define ATI_CHIP_264LTG (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL)
#define ATI_CHIP_264GT2C (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE)
#define ATI_CHIP_264GTPRO (ATI_MODERN_SET | M64F_SDRAM_MAGIC_PLL | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
#define ATI_CHIP_264LTPRO (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D)
#define ATI_CHIP_264XL (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM)
#define ATI_CHIP_MOBILITY (ATI_MODERN_SET | M64F_HW_TRIPLE | M64F_FIFO_32 | M64F_RESET_3D | M64F_XL_DLL | M64F_MFB_FORCE_4 | M64F_XL_MEM | M64F_MOBIL_BUS)
static struct {
u16 pci_id;
const char *name;
int pll, mclk, xclk, ecp_max;
u32 features;
} aty_chips[] __devinitdata = {
#ifdef CONFIG_FB_ATY_GX
/* Mach64 GX */
{ PCI_CHIP_MACH64GX, "ATI888GX00 (Mach64 GX)", 135, 50, 50, 0, ATI_CHIP_88800GX },
{ PCI_CHIP_MACH64CX, "ATI888CX00 (Mach64 CX)", 135, 50, 50, 0, ATI_CHIP_88800CX },
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
{ PCI_CHIP_MACH64CT, "ATI264CT (Mach64 CT)", 135, 60, 60, 0, ATI_CHIP_264CT },
{ PCI_CHIP_MACH64ET, "ATI264ET (Mach64 ET)", 135, 60, 60, 0, ATI_CHIP_264ET },
/* FIXME what is this chip? */
{ PCI_CHIP_MACH64LT, "ATI264LT (Mach64 LT)", 135, 63, 63, 0, ATI_CHIP_264LT },
{ PCI_CHIP_MACH64VT, "ATI264VT (Mach64 VT)", 170, 67, 67, 80, ATI_CHIP_264VT },
{ PCI_CHIP_MACH64GT, "3D RAGE (Mach64 GT)", 135, 63, 63, 80, ATI_CHIP_264GT },
{ PCI_CHIP_MACH64VU, "ATI264VT3 (Mach64 VU)", 200, 67, 67, 80, ATI_CHIP_264VT3 },
{ PCI_CHIP_MACH64GU, "3D RAGE II+ (Mach64 GU)", 200, 67, 67, 100, ATI_CHIP_264GTB },
{ PCI_CHIP_MACH64LG, "3D RAGE LT (Mach64 LG)", 230, 63, 63, 100, ATI_CHIP_264LTG | M64F_LT_LCD_REGS | M64F_G3_PB_1024x768 },
{ PCI_CHIP_MACH64VV, "ATI264VT4 (Mach64 VV)", 230, 83, 83, 100, ATI_CHIP_264VT4 },
{ PCI_CHIP_MACH64GV, "3D RAGE IIC (Mach64 GV, PCI)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
{ PCI_CHIP_MACH64GW, "3D RAGE IIC (Mach64 GW, AGP)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
{ PCI_CHIP_MACH64GY, "3D RAGE IIC (Mach64 GY, PCI)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
{ PCI_CHIP_MACH64GZ, "3D RAGE IIC (Mach64 GZ, AGP)", 230, 83, 83, 100, ATI_CHIP_264GT2C },
{ PCI_CHIP_MACH64GB, "3D RAGE PRO (Mach64 GB, BGA, AGP)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
{ PCI_CHIP_MACH64GD, "3D RAGE PRO (Mach64 GD, BGA, AGP 1x)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
{ PCI_CHIP_MACH64GI, "3D RAGE PRO (Mach64 GI, BGA, PCI)", 230, 100, 100, 125, ATI_CHIP_264GTPRO | M64F_MAGIC_VRAM_SIZE },
{ PCI_CHIP_MACH64GP, "3D RAGE PRO (Mach64 GP, PQFP, PCI)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
{ PCI_CHIP_MACH64GQ, "3D RAGE PRO (Mach64 GQ, PQFP, PCI, limited 3D)", 230, 100, 100, 125, ATI_CHIP_264GTPRO },
{ PCI_CHIP_MACH64LB, "3D RAGE LT PRO (Mach64 LB, AGP)", 236, 75, 100, 135, ATI_CHIP_264LTPRO },
{ PCI_CHIP_MACH64LD, "3D RAGE LT PRO (Mach64 LD, AGP)", 230, 100, 100, 135, ATI_CHIP_264LTPRO },
{ PCI_CHIP_MACH64LI, "3D RAGE LT PRO (Mach64 LI, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO | M64F_G3_PB_1_1 | M64F_G3_PB_1024x768 },
{ PCI_CHIP_MACH64LP, "3D RAGE LT PRO (Mach64 LP, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO | M64F_G3_PB_1024x768 },
{ PCI_CHIP_MACH64LQ, "3D RAGE LT PRO (Mach64 LQ, PCI)", 230, 100, 100, 135, ATI_CHIP_264LTPRO },
{ PCI_CHIP_MACH64GM, "3D RAGE XL (Mach64 GM, AGP 2x)", 230, 83, 63, 135, ATI_CHIP_264XL },
{ PCI_CHIP_MACH64GN, "3D RAGE XC (Mach64 GN, AGP 2x)", 230, 83, 63, 135, ATI_CHIP_264XL },
{ PCI_CHIP_MACH64GO, "3D RAGE XL (Mach64 GO, PCI-66)", 230, 83, 63, 135, ATI_CHIP_264XL },
{ PCI_CHIP_MACH64GL, "3D RAGE XC (Mach64 GL, PCI-66)", 230, 83, 63, 135, ATI_CHIP_264XL },
{ PCI_CHIP_MACH64GR, "3D RAGE XL (Mach64 GR, PCI-33)", 230, 83, 63, 135, ATI_CHIP_264XL | M64F_SDRAM_MAGIC_PLL },
{ PCI_CHIP_MACH64GS, "3D RAGE XC (Mach64 GS, PCI-33)", 230, 83, 63, 135, ATI_CHIP_264XL },
{ PCI_CHIP_MACH64LM, "3D RAGE Mobility P/M (Mach64 LM, AGP 2x)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
{ PCI_CHIP_MACH64LN, "3D RAGE Mobility L (Mach64 LN, AGP 2x)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
{ PCI_CHIP_MACH64LR, "3D RAGE Mobility P/M (Mach64 LR, PCI)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
{ PCI_CHIP_MACH64LS, "3D RAGE Mobility L (Mach64 LS, PCI)", 230, 83, 125, 135, ATI_CHIP_MOBILITY },
#endif /* CONFIG_FB_ATY_CT */
};
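/* match the PCI id against the chip table above and refine the
* PLL/MCLK/XCLK limits and feature flags for revisions that share
* a PCI id (the VT and GT variants). */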
static int __devinit correct_chipset(struct atyfb_par *par)
{
u8 rev;
u16 type;
u32 chip_id;
const char *name;
int i;
for (i = ARRAY_SIZE(aty_chips) - 1; i >= 0; i--)
if (par->pci_id == aty_chips[i].pci_id)
break;
if (i < 0)
return -ENODEV;
name = aty_chips[i].name;
par->pll_limits.pll_max = aty_chips[i].pll;
par->pll_limits.mclk = aty_chips[i].mclk;
par->pll_limits.xclk = aty_chips[i].xclk;
par->pll_limits.ecp_max = aty_chips[i].ecp_max;
par->features = aty_chips[i].features;
chip_id = aty_ld_le32(CNFG_CHIP_ID, par);
type = chip_id & CFG_CHIP_TYPE;
rev = (chip_id & CFG_CHIP_REV) >> 24;
switch (par->pci_id) {
#ifdef CONFIG_FB_ATY_GX
case PCI_CHIP_MACH64GX:
if (type != 0x00d7)
return -ENODEV;
break;
case PCI_CHIP_MACH64CX:
if (type != 0x0057)
return -ENODEV;
break;
#endif
#ifdef CONFIG_FB_ATY_CT
case PCI_CHIP_MACH64VT:
switch (rev & 0x07) {
case 0x00:
switch (rev & 0xc0) {
case 0x00:
name = "ATI264VT (A3) (Mach64 VT)";
par->pll_limits.pll_max = 170;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
par->pll_limits.ecp_max = 80;
par->features = ATI_CHIP_264VT;
break;
case 0x40:
name = "ATI264VT2 (A4) (Mach64 VT)";
par->pll_limits.pll_max = 200;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
par->pll_limits.ecp_max = 80;
par->features = ATI_CHIP_264VT | M64F_MAGIC_POSTDIV;
break;
}
break;
case 0x01:
name = "ATI264VT3 (B1) (Mach64 VT)";
par->pll_limits.pll_max = 200;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
par->pll_limits.ecp_max = 80;
par->features = ATI_CHIP_264VTB;
break;
case 0x02:
name = "ATI264VT3 (B2) (Mach64 VT)";
par->pll_limits.pll_max = 200;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
par->pll_limits.ecp_max = 80;
par->features = ATI_CHIP_264VT3;
break;
}
break;
case PCI_CHIP_MACH64GT:
switch (rev & 0x07) {
case 0x01:
name = "3D RAGE II (Mach64 GT)";
par->pll_limits.pll_max = 170;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
par->pll_limits.ecp_max = 80;
par->features = ATI_CHIP_264GTB;
break;
case 0x02:
name = "3D RAGE II+ (Mach64 GT)";
par->pll_limits.pll_max = 200;
par->pll_limits.mclk = 67;
par->pll_limits.xclk = 67;
par->pll_limits.ecp_max = 100;
par->features = ATI_CHIP_264GTB;
break;
}
break;
#endif
}
PRINTKI("%s [0x%04x rev 0x%02x]\n", name, type, rev);
return 0;
}
static char ram_dram[] __devinitdata = "DRAM";
static char ram_resv[] __devinitdata = "RESV";
#ifdef CONFIG_FB_ATY_GX
static char ram_vram[] __devinitdata = "VRAM";
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
static char ram_edo[] __devinitdata = "EDO";
static char ram_sdram[] __devinitdata = "SDRAM (1:1)";
static char ram_sgram[] __devinitdata = "SGRAM (1:1)";
static char ram_sdram32[] __devinitdata = "SDRAM (2:1) (32-bit)";
static char ram_wram[] __devinitdata = "WRAM";
static char ram_off[] __devinitdata = "OFF";
#endif /* CONFIG_FB_ATY_CT */
#ifdef CONFIG_FB_ATY_GX
static char *aty_gx_ram[8] __devinitdata = {
ram_dram, ram_vram, ram_vram, ram_dram,
ram_dram, ram_vram, ram_vram, ram_resv
};
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
static char *aty_ct_ram[8] __devinitdata = {
ram_off, ram_dram, ram_edo, ram_edo,
ram_sdram, ram_sgram, ram_wram, ram_resv
};
static char *aty_xl_ram[8] __devinitdata = {
ram_off, ram_dram, ram_edo, ram_edo,
ram_sdram, ram_sgram, ram_sdram32, ram_resv
};
#endif /* CONFIG_FB_ATY_CT */
static u32 atyfb_get_pixclock(struct fb_var_screeninfo *var,
struct atyfb_par *par)
{
u32 pixclock = var->pixclock;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
u32 lcd_on_off;
par->pll.ct.xres = 0;
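/* if a panel is present and switched on, its fixed pixel clock
* overrides the requested value */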
if (par->lcd_table != 0) {
lcd_on_off = aty_ld_lcd(LCD_GEN_CNTL, par);
if (lcd_on_off & LCD_ON) {
par->pll.ct.xres = var->xres;
pixclock = par->lcd_pixclock;
}
}
#endif
return pixclock;
}
#if defined(CONFIG_PPC)
/*
* Apple monitor sense
*/
static int __devinit read_aty_sense(const struct atyfb_par *par)
{
int sense, i;
aty_st_le32(GP_IO, 0x31003100, par); /* drive outputs high */
__delay(200);
aty_st_le32(GP_IO, 0, par); /* turn off outputs */
__delay(2000);
i = aty_ld_le32(GP_IO, par); /* get primary sense value */
sense = ((i & 0x3000) >> 3) | (i & 0x100);
/* drive each sense line low in turn and collect the other 2 */
aty_st_le32(GP_IO, 0x20000000, par); /* drive A low */
__delay(2000);
i = aty_ld_le32(GP_IO, par);
sense |= ((i & 0x1000) >> 7) | ((i & 0x100) >> 4);
aty_st_le32(GP_IO, 0x20002000, par); /* drive A high again */
__delay(200);
aty_st_le32(GP_IO, 0x10000000, par); /* drive B low */
__delay(2000);
i = aty_ld_le32(GP_IO, par);
sense |= ((i & 0x2000) >> 10) | ((i & 0x100) >> 6);
aty_st_le32(GP_IO, 0x10001000, par); /* drive B high again */
__delay(200);
aty_st_le32(GP_IO, 0x01000000, par); /* drive C low */
__delay(2000);
sense |= (aty_ld_le32(GP_IO, par) & 0x3000) >> 12;
aty_st_le32(GP_IO, 0, par); /* turn off outputs */
return sense;
}
#endif /* defined(CONFIG_PPC) */
/* ------------------------------------------------------------------------- */
/*
* CRTC programming
*/
static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc)
{
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
if (!M64_HAS(LT_LCD_REGS)) {
crtc->lcd_index = aty_ld_le32(LCD_INDEX, par);
aty_st_le32(LCD_INDEX, crtc->lcd_index, par);
}
crtc->lcd_config_panel = aty_ld_lcd(CNFG_PANEL, par);
crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par);
/* switch to non shadow registers */
aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl &
~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par);
/* save stretching */
crtc->horz_stretching = aty_ld_lcd(HORZ_STRETCHING, par);
crtc->vert_stretching = aty_ld_lcd(VERT_STRETCHING, par);
if (!M64_HAS(LT_LCD_REGS))
crtc->ext_vert_stretch = aty_ld_lcd(EXT_VERT_STRETCH, par);
}
#endif
crtc->h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
crtc->h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
crtc->v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par);
crtc->v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par);
crtc->vline_crnt_vline = aty_ld_le32(CRTC_VLINE_CRNT_VLINE, par);
crtc->off_pitch = aty_ld_le32(CRTC_OFF_PITCH, par);
crtc->gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
/* switch to shadow registers */
aty_st_lcd(LCD_GEN_CNTL, (crtc->lcd_gen_cntl & ~CRTC_RW_SELECT) |
SHADOW_EN | SHADOW_RW_EN, par);
crtc->shadow_h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
crtc->shadow_h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
crtc->shadow_v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par);
crtc->shadow_v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par);
aty_st_le32(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par);
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
}
static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc)
{
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
/* stop CRTC */
aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl &
~(CRTC_EXT_DISP_EN | CRTC_EN), par);
/* update non-shadow registers first */
aty_st_lcd(CNFG_PANEL, crtc->lcd_config_panel, par);
aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl &
~(CRTC_RW_SELECT | SHADOW_EN | SHADOW_RW_EN), par);
/* temporarily disable stretching */
aty_st_lcd(HORZ_STRETCHING, crtc->horz_stretching &
~(HORZ_STRETCH_MODE | HORZ_STRETCH_EN), par);
aty_st_lcd(VERT_STRETCHING, crtc->vert_stretching &
~(VERT_STRETCH_RATIO1 | VERT_STRETCH_RATIO2 |
VERT_STRETCH_USE0 | VERT_STRETCH_EN), par);
}
#endif
/* turn off CRT */
aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl & ~CRTC_EN, par);
DPRINTK("setting up CRTC\n");
DPRINTK("set primary CRT to %ix%i %c%c composite %c\n",
((((crtc->h_tot_disp >> 16) & 0xff) + 1) << 3),
(((crtc->v_tot_disp >> 16) & 0x7ff) + 1),
(crtc->h_sync_strt_wid & 0x200000) ? 'N' : 'P',
(crtc->v_sync_strt_wid & 0x200000) ? 'N' : 'P',
(crtc->gen_cntl & CRTC_CSYNC_EN) ? 'P' : 'N');
DPRINTK("CRTC_H_TOTAL_DISP: %x\n", crtc->h_tot_disp);
DPRINTK("CRTC_H_SYNC_STRT_WID: %x\n", crtc->h_sync_strt_wid);
DPRINTK("CRTC_V_TOTAL_DISP: %x\n", crtc->v_tot_disp);
DPRINTK("CRTC_V_SYNC_STRT_WID: %x\n", crtc->v_sync_strt_wid);
DPRINTK("CRTC_OFF_PITCH: %x\n", crtc->off_pitch);
DPRINTK("CRTC_VLINE_CRNT_VLINE: %x\n", crtc->vline_crnt_vline);
DPRINTK("CRTC_GEN_CNTL: %x\n", crtc->gen_cntl);
aty_st_le32(CRTC_H_TOTAL_DISP, crtc->h_tot_disp, par);
aty_st_le32(CRTC_H_SYNC_STRT_WID, crtc->h_sync_strt_wid, par);
aty_st_le32(CRTC_V_TOTAL_DISP, crtc->v_tot_disp, par);
aty_st_le32(CRTC_V_SYNC_STRT_WID, crtc->v_sync_strt_wid, par);
aty_st_le32(CRTC_OFF_PITCH, crtc->off_pitch, par);
aty_st_le32(CRTC_VLINE_CRNT_VLINE, crtc->vline_crnt_vline, par);
aty_st_le32(CRTC_GEN_CNTL, crtc->gen_cntl, par);
#if 0
FIXME
if (par->accel_flags & FB_ACCELF_TEXT)
aty_init_engine(par, info);
#endif
#ifdef CONFIG_FB_ATY_GENERIC_LCD
/* after setting the CRTC registers we should set the LCD registers. */
if (par->lcd_table != 0) {
/* switch to shadow registers */
aty_st_lcd(LCD_GEN_CNTL, (crtc->lcd_gen_cntl & ~CRTC_RW_SELECT) |
SHADOW_EN | SHADOW_RW_EN, par);
DPRINTK("set shadow CRT to %ix%i %c%c\n",
((((crtc->shadow_h_tot_disp >> 16) & 0xff) + 1) << 3),
(((crtc->shadow_v_tot_disp >> 16) & 0x7ff) + 1),
(crtc->shadow_h_sync_strt_wid & 0x200000) ? 'N' : 'P',
(crtc->shadow_v_sync_strt_wid & 0x200000) ? 'N' : 'P');
DPRINTK("SHADOW CRTC_H_TOTAL_DISP: %x\n",
crtc->shadow_h_tot_disp);
DPRINTK("SHADOW CRTC_H_SYNC_STRT_WID: %x\n",
crtc->shadow_h_sync_strt_wid);
DPRINTK("SHADOW CRTC_V_TOTAL_DISP: %x\n",
crtc->shadow_v_tot_disp);
DPRINTK("SHADOW CRTC_V_SYNC_STRT_WID: %x\n",
crtc->shadow_v_sync_strt_wid);
aty_st_le32(CRTC_H_TOTAL_DISP, crtc->shadow_h_tot_disp, par);
aty_st_le32(CRTC_H_SYNC_STRT_WID, crtc->shadow_h_sync_strt_wid, par);
aty_st_le32(CRTC_V_TOTAL_DISP, crtc->shadow_v_tot_disp, par);
aty_st_le32(CRTC_V_SYNC_STRT_WID, crtc->shadow_v_sync_strt_wid, par);
/* restore CRTC selection & shadow state and enable stretching */
DPRINTK("LCD_GEN_CNTL: %x\n", crtc->lcd_gen_cntl);
DPRINTK("HORZ_STRETCHING: %x\n", crtc->horz_stretching);
DPRINTK("VERT_STRETCHING: %x\n", crtc->vert_stretching);
if (!M64_HAS(LT_LCD_REGS))
DPRINTK("EXT_VERT_STRETCH: %x\n", crtc->ext_vert_stretch);
aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par);
aty_st_lcd(HORZ_STRETCHING, crtc->horz_stretching, par);
aty_st_lcd(VERT_STRETCHING, crtc->vert_stretching, par);
if (!M64_HAS(LT_LCD_REGS)) {
aty_st_lcd(EXT_VERT_STRETCH, crtc->ext_vert_stretch, par);
aty_ld_le32(LCD_INDEX, par);
aty_st_le32(LCD_INDEX, crtc->lcd_index, par);
}
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
}
static u32 calc_line_length(struct atyfb_par *par, u32 vxres, u32 bpp)
{
u32 line_length = vxres * bpp / 8;
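/* SGRAM, and WRAM on chips without XL-style memory, need the
* pitch rounded up to a multiple of 64 bytes */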
if (par->ram_type == SGRAM ||
(!M64_HAS(XL_MEM) && par->ram_type == WRAM))
line_length = (line_length + 63) & ~63;
return line_length;
}
static int aty_var_to_crtc(const struct fb_info *info,
const struct fb_var_screeninfo *var,
struct crtc *crtc)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 xres, yres, vxres, vyres, xoffset, yoffset, bpp;
u32 sync, vmode, vdisplay;
u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol;
u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync;
u32 pix_width, dp_pix_width, dp_chain_mask;
u32 line_length;
/* input */
xres = (var->xres + 7) & ~7;
yres = var->yres;
vxres = (var->xres_virtual + 7) & ~7;
vyres = var->yres_virtual;
xoffset = (var->xoffset + 7) & ~7;
yoffset = var->yoffset;
bpp = var->bits_per_pixel;
if (bpp == 16)
bpp = (var->green.length == 5) ? 15 : 16;
sync = var->sync;
vmode = var->vmode;
/* convert (and round up) and validate */
if (vxres < xres + xoffset)
vxres = xres + xoffset;
h_disp = xres;
if (vyres < yres + yoffset)
vyres = yres + yoffset;
v_disp = yres;
if (bpp <= 8) {
bpp = 8;
pix_width = CRTC_PIX_WIDTH_8BPP;
dp_pix_width = HOST_8BPP | SRC_8BPP | DST_8BPP |
BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_8BPP;
} else if (bpp <= 15) {
bpp = 16;
pix_width = CRTC_PIX_WIDTH_15BPP;
dp_pix_width = HOST_15BPP | SRC_15BPP | DST_15BPP |
BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_15BPP;
} else if (bpp <= 16) {
bpp = 16;
pix_width = CRTC_PIX_WIDTH_16BPP;
dp_pix_width = HOST_16BPP | SRC_16BPP | DST_16BPP |
BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_16BPP;
} else if (bpp <= 24 && M64_HAS(INTEGRATED)) {
bpp = 24;
pix_width = CRTC_PIX_WIDTH_24BPP;
dp_pix_width = HOST_8BPP | SRC_8BPP | DST_8BPP |
BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_24BPP;
} else if (bpp <= 32) {
bpp = 32;
pix_width = CRTC_PIX_WIDTH_32BPP;
dp_pix_width = HOST_32BPP | SRC_32BPP | DST_32BPP |
BYTE_ORDER_LSB_TO_MSB;
dp_chain_mask = DP_CHAIN_32BPP;
} else
FAIL("invalid bpp");
line_length = calc_line_length(par, vxres, bpp);
if (vyres * line_length > info->fix.smem_len)
FAIL("not enough video RAM");
h_sync_pol = sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
v_sync_pol = sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
if ((xres > 1600) || (yres > 1200)) {
FAIL("MACH64 chips are designed for max 1600x1200\n"
"select anoter resolution.");
}
h_sync_strt = h_disp + var->right_margin;
h_sync_end = h_sync_strt + var->hsync_len;
h_sync_dly = var->right_margin & 7;
h_total = h_sync_end + h_sync_dly + var->left_margin;
v_sync_strt = v_disp + var->lower_margin;
v_sync_end = v_sync_strt + var->vsync_len;
v_total = v_sync_end + var->upper_margin;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
if (!M64_HAS(LT_LCD_REGS)) {
u32 lcd_index = aty_ld_le32(LCD_INDEX, par);
crtc->lcd_index = lcd_index &
~(LCD_INDEX_MASK | LCD_DISPLAY_DIS |
LCD_SRC_SEL | CRTC2_DISPLAY_DIS);
aty_st_le32(LCD_INDEX, lcd_index, par);
}
if (!M64_HAS(MOBIL_BUS))
crtc->lcd_index |= CRTC2_DISPLAY_DIS;
crtc->lcd_config_panel = aty_ld_lcd(CNFG_PANEL, par) | 0x4000;
crtc->lcd_gen_cntl = aty_ld_lcd(LCD_GEN_CNTL, par) & ~CRTC_RW_SELECT;
crtc->lcd_gen_cntl &=
~(HORZ_DIVBY2_EN | DIS_HOR_CRT_DIVBY2 | TVCLK_PM_EN |
/*VCLK_DAC_PM_EN | USE_SHADOWED_VEND |*/
USE_SHADOWED_ROWCUR | SHADOW_EN | SHADOW_RW_EN);
crtc->lcd_gen_cntl |= DONT_SHADOW_VPAR | LOCK_8DOT;
if ((crtc->lcd_gen_cntl & LCD_ON) &&
((xres > par->lcd_width) || (yres > par->lcd_height))) {
/*
* We cannot display the mode on the LCD. If the CRT is
* enabled, we can simply turn off the LCD.
* If the CRT is off, it isn't a good idea to switch it
* on; we don't know whether one is connected, so it is
* safer to fail in that case.
*/
if (crtc->lcd_gen_cntl & CRT_ON) {
if (!(var->activate & FB_ACTIVATE_TEST))
PRINTKI("Disable LCD panel, because video mode does not fit.\n");
crtc->lcd_gen_cntl &= ~LCD_ON;
/*aty_st_lcd(LCD_GEN_CNTL, crtc->lcd_gen_cntl, par);*/
} else {
if (!(var->activate & FB_ACTIVATE_TEST))
PRINTKE("Video mode exceeds size of LCD panel.\nConnect this computer to a conventional monitor if you really need this mode.\n");
return -EINVAL;
}
}
}
if ((par->lcd_table != 0) && (crtc->lcd_gen_cntl & LCD_ON)) {
int VScan = 1;
/* bpp -> bytespp, 1,4 -> 0; 8 -> 2; 15,16 -> 1; 24 -> 6; 32 -> 5
const u8 DFP_h_sync_dly_LT[] = { 0, 2, 1, 6, 5 };
const u8 ADD_to_strt_wid_and_dly_LT_DAC[] = { 0, 5, 6, 9, 9, 12, 12 }; */
vmode &= ~(FB_VMODE_DOUBLE | FB_VMODE_INTERLACED);
/*
* This is horror! When we simulate, say, 640x480 on an 800x600
* LCD monitor, the CRTC must be programmed with the 800x600
* values for the non-visible part, but with 640x480 for the
* visible part.
* This code has been tested on a laptop with its 1400x1050 LCD
* panel and a conventional monitor both switched on.
* Tested modes: 1280x1024, 1152x864, 1024x768, 800x600; it also
* works, with minor glitches, in DOUBLESCAN modes.
*/
if (yres < par->lcd_height) {
VScan = par->lcd_height / yres;
if (VScan > 1) {
VScan = 2;
vmode |= FB_VMODE_DOUBLE;
}
}
h_sync_strt = h_disp + par->lcd_right_margin;
h_sync_end = h_sync_strt + par->lcd_hsync_len;
h_sync_dly = /*DFP_h_sync_dly[ ( bpp + 1 ) / 3 ]; */par->lcd_hsync_dly;
h_total = h_disp + par->lcd_hblank_len;
v_sync_strt = v_disp + par->lcd_lower_margin / VScan;
v_sync_end = v_sync_strt + par->lcd_vsync_len / VScan;
v_total = v_disp + par->lcd_vblank_len / VScan;
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
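/*
* The CRTC holds horizontal timings in character-clock units
* (8 pixels) minus one; aty_crtc_to_var() below reverses this
* with (value + 1) * 8.
*/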
h_disp = (h_disp >> 3) - 1;
h_sync_strt = (h_sync_strt >> 3) - 1;
h_sync_end = (h_sync_end >> 3) - 1;
h_total = (h_total >> 3) - 1;
h_sync_wid = h_sync_end - h_sync_strt;
FAIL_MAX("h_disp too large", h_disp, 0xff);
FAIL_MAX("h_sync_strt too large", h_sync_strt, 0x1ff);
/*FAIL_MAX("h_sync_wid too large", h_sync_wid, 0x1f);*/
if (h_sync_wid > 0x1f)
h_sync_wid = 0x1f;
FAIL_MAX("h_total too large", h_total, 0x1ff);
if (vmode & FB_VMODE_DOUBLE) {
v_disp <<= 1;
v_sync_strt <<= 1;
v_sync_end <<= 1;
v_total <<= 1;
}
vdisplay = yres;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if ((par->lcd_table != 0) && (crtc->lcd_gen_cntl & LCD_ON))
vdisplay = par->lcd_height;
#endif
v_disp--;
v_sync_strt--;
v_sync_end--;
v_total--;
v_sync_wid = v_sync_end - v_sync_strt;
FAIL_MAX("v_disp too large", v_disp, 0x7ff);
FAIL_MAX("v_sync_stsrt too large", v_sync_strt, 0x7ff);
/*FAIL_MAX("v_sync_wid too large", v_sync_wid, 0x1f);*/
if (v_sync_wid > 0x1f)
v_sync_wid = 0x1f;
FAIL_MAX("v_total too large", v_total, 0x7ff);
c_sync = sync & FB_SYNC_COMP_HIGH_ACT ? CRTC_CSYNC_EN : 0;
/* output */
crtc->vxres = vxres;
crtc->vyres = vyres;
crtc->xoffset = xoffset;
crtc->yoffset = yoffset;
crtc->bpp = bpp;
crtc->off_pitch =
((yoffset * line_length + xoffset * bpp / 8) / 8) |
((line_length / bpp) << 22);
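/*
* CRTC_OFF_PITCH packing, as implied by the arithmetic above:
* the low bits hold the display start offset in 8-byte (qword)
* units, while bits 22 and up hold the pitch in units of
* 8 pixels (line_length is in bytes, so line_length / bpp is
* pixels / 8).
*/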
crtc->vline_crnt_vline = 0;
crtc->h_tot_disp = h_total | (h_disp << 16);
crtc->h_sync_strt_wid = (h_sync_strt & 0xff) | (h_sync_dly << 8) |
((h_sync_strt & 0x100) << 4) | (h_sync_wid << 16) |
(h_sync_pol << 21);
crtc->v_tot_disp = v_total | (v_disp << 16);
crtc->v_sync_strt_wid = v_sync_strt | (v_sync_wid << 16) |
(v_sync_pol << 21);
/* crtc->gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_PRESERVED_MASK; */
crtc->gen_cntl = CRTC_EXT_DISP_EN | CRTC_EN | pix_width | c_sync;
crtc->gen_cntl |= CRTC_VGA_LINEAR;
/* Enable doublescan mode if requested */
if (vmode & FB_VMODE_DOUBLE)
crtc->gen_cntl |= CRTC_DBL_SCAN_EN;
/* Enable interlaced mode if requested */
if (vmode & FB_VMODE_INTERLACED)
crtc->gen_cntl |= CRTC_INTERLACE_EN;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
vdisplay = yres;
if (vmode & FB_VMODE_DOUBLE)
vdisplay <<= 1;
crtc->gen_cntl &= ~(CRTC2_EN | CRTC2_PIX_WIDTH);
crtc->lcd_gen_cntl &= ~(HORZ_DIVBY2_EN | DIS_HOR_CRT_DIVBY2 |
/*TVCLK_PM_EN | VCLK_DAC_PM_EN |*/
USE_SHADOWED_VEND |
USE_SHADOWED_ROWCUR |
SHADOW_EN | SHADOW_RW_EN);
crtc->lcd_gen_cntl |= DONT_SHADOW_VPAR/* | LOCK_8DOT*/;
/* MOBILITY M1 tested, FIXME: LT */
crtc->horz_stretching = aty_ld_lcd(HORZ_STRETCHING, par);
if (!M64_HAS(LT_LCD_REGS))
crtc->ext_vert_stretch = aty_ld_lcd(EXT_VERT_STRETCH, par) &
~(AUTO_VERT_RATIO | VERT_STRETCH_MODE | VERT_STRETCH_RATIO3);
crtc->horz_stretching &= ~(HORZ_STRETCH_RATIO |
HORZ_STRETCH_LOOP | AUTO_HORZ_RATIO |
HORZ_STRETCH_MODE | HORZ_STRETCH_EN);
if (xres < par->lcd_width && crtc->lcd_gen_cntl & LCD_ON) {
do {
/*
* The horizontal blender misbehaves when
* HDisplay is less than a certain threshold
* (440 for a 1024-wide panel). It doesn't
* stretch such modes enough. Use pixel
* replication instead of blending to stretch
* modes that can be made to exactly fit the
* panel width. The undocumented "NoLCDBlend"
* option allows the pixel-replicated mode to
* be slightly wider or narrower than the
* panel width. It also causes a mode that is
* exactly half as wide as the panel to be
* pixel-replicated, rather than blended.
*/
int HDisplay = xres & ~7;
int nStretch = par->lcd_width / HDisplay;
int Remainder = par->lcd_width % HDisplay;
if ((!Remainder && nStretch > 2) ||
((HDisplay * 16) / par->lcd_width < 7)) {
static const char StretchLoops[] = { 10, 12, 13, 15, 16 };
int horz_stretch_loop = -1, BestRemainder;
int Numerator = HDisplay, Denominator = par->lcd_width;
int Index = 5;
ATIReduceRatio(&Numerator, &Denominator);
BestRemainder = (Numerator * 16) / Denominator;
while (--Index >= 0) {
Remainder = ((Denominator - Numerator) * StretchLoops[Index]) %
Denominator;
if (Remainder < BestRemainder) {
horz_stretch_loop = Index;
if (!(BestRemainder = Remainder))
break;
}
}
if ((horz_stretch_loop >= 0) && !BestRemainder) {
int horz_stretch_ratio = 0, Accumulator = 0;
int reuse_previous = 1;
Index = StretchLoops[horz_stretch_loop];
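/*
* Bresenham-style error accumulation over StretchLoops[loop]
* clocks: each set bit in horz_stretch_ratio marks a clock on
* which the previous pixel is reused, so the fraction of set
* bits approximates (Denominator - Numerator) / Denominator.
*/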
while (--Index >= 0) {
if (Accumulator > 0)
horz_stretch_ratio |= reuse_previous;
else
Accumulator += Denominator;
Accumulator -= Numerator;
reuse_previous <<= 1;
}
crtc->horz_stretching |= (HORZ_STRETCH_EN |
((horz_stretch_loop & HORZ_STRETCH_LOOP) << 16) |
(horz_stretch_ratio & HORZ_STRETCH_RATIO));
break; /* Out of the do { ... } while (0) */
}
}
crtc->horz_stretching |= (HORZ_STRETCH_MODE | HORZ_STRETCH_EN |
(((HDisplay * (HORZ_STRETCH_BLEND + 1)) / par->lcd_width) & HORZ_STRETCH_BLEND));
} while (0);
}
if (vdisplay < par->lcd_height && crtc->lcd_gen_cntl & LCD_ON) {
crtc->vert_stretching = (VERT_STRETCH_USE0 | VERT_STRETCH_EN |
(((vdisplay * (VERT_STRETCH_RATIO0 + 1)) / par->lcd_height) & VERT_STRETCH_RATIO0));
if (!M64_HAS(LT_LCD_REGS) &&
xres <= (M64_HAS(MOBIL_BUS) ? 1024 : 800))
crtc->ext_vert_stretch |= VERT_STRETCH_MODE;
} else {
/*
* Don't use vertical blending if the mode is too wide
* or not vertically stretched.
*/
crtc->vert_stretching = 0;
}
/* copy to shadow crtc */
crtc->shadow_h_tot_disp = crtc->h_tot_disp;
crtc->shadow_h_sync_strt_wid = crtc->h_sync_strt_wid;
crtc->shadow_v_tot_disp = crtc->v_tot_disp;
crtc->shadow_v_sync_strt_wid = crtc->v_sync_strt_wid;
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
if (M64_HAS(MAGIC_FIFO)) {
/* FIXME: display FIFO low watermark values */
crtc->gen_cntl |= (aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_FIFO_LWM);
}
crtc->dp_pix_width = dp_pix_width;
crtc->dp_chain_mask = dp_chain_mask;
return 0;
}
static int aty_crtc_to_var(const struct crtc *crtc,
struct fb_var_screeninfo *var)
{
u32 xres, yres, bpp, left, right, upper, lower, hslen, vslen, sync;
u32 h_total, h_disp, h_sync_strt, h_sync_dly, h_sync_wid, h_sync_pol;
u32 v_total, v_disp, v_sync_strt, v_sync_wid, v_sync_pol, c_sync;
u32 pix_width;
u32 double_scan, interlace;
/* input */
h_total = crtc->h_tot_disp & 0x1ff;
h_disp = (crtc->h_tot_disp >> 16) & 0xff;
h_sync_strt = (crtc->h_sync_strt_wid & 0xff) | ((crtc->h_sync_strt_wid >> 4) & 0x100);
h_sync_dly = (crtc->h_sync_strt_wid >> 8) & 0x7;
h_sync_wid = (crtc->h_sync_strt_wid >> 16) & 0x1f;
h_sync_pol = (crtc->h_sync_strt_wid >> 21) & 0x1;
v_total = crtc->v_tot_disp & 0x7ff;
v_disp = (crtc->v_tot_disp >> 16) & 0x7ff;
v_sync_strt = crtc->v_sync_strt_wid & 0x7ff;
v_sync_wid = (crtc->v_sync_strt_wid >> 16) & 0x1f;
v_sync_pol = (crtc->v_sync_strt_wid >> 21) & 0x1;
c_sync = crtc->gen_cntl & CRTC_CSYNC_EN ? 1 : 0;
pix_width = crtc->gen_cntl & CRTC_PIX_WIDTH_MASK;
double_scan = crtc->gen_cntl & CRTC_DBL_SCAN_EN;
interlace = crtc->gen_cntl & CRTC_INTERLACE_EN;
/* convert */
xres = (h_disp + 1) * 8;
yres = v_disp + 1;
left = (h_total - h_sync_strt - h_sync_wid) * 8 - h_sync_dly;
right = (h_sync_strt - h_disp) * 8 + h_sync_dly;
hslen = h_sync_wid * 8;
upper = v_total - v_sync_strt - v_sync_wid;
lower = v_sync_strt - v_disp;
vslen = v_sync_wid;
sync = (h_sync_pol ? 0 : FB_SYNC_HOR_HIGH_ACT) |
(v_sync_pol ? 0 : FB_SYNC_VERT_HIGH_ACT) |
(c_sync ? FB_SYNC_COMP_HIGH_ACT : 0);
switch (pix_width) {
#if 0
case CRTC_PIX_WIDTH_4BPP:
bpp = 4;
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 0;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
break;
#endif
case CRTC_PIX_WIDTH_8BPP:
bpp = 8;
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 0;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
break;
case CRTC_PIX_WIDTH_15BPP: /* RGB 555 */
bpp = 16;
var->red.offset = 10;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 5;
var->blue.offset = 0;
var->blue.length = 5;
var->transp.offset = 0;
var->transp.length = 0;
break;
case CRTC_PIX_WIDTH_16BPP: /* RGB 565 */
bpp = 16;
var->red.offset = 11;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 6;
var->blue.offset = 0;
var->blue.length = 5;
var->transp.offset = 0;
var->transp.length = 0;
break;
case CRTC_PIX_WIDTH_24BPP: /* RGB 888 */
bpp = 24;
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
break;
case CRTC_PIX_WIDTH_32BPP: /* ARGB 8888 */
bpp = 32;
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
var->transp.offset = 24;
var->transp.length = 8;
break;
default:
PRINTKE("Invalid pixel width\n");
return -EINVAL;
}
/* output */
var->xres = xres;
var->yres = yres;
var->xres_virtual = crtc->vxres;
var->yres_virtual = crtc->vyres;
var->bits_per_pixel = bpp;
var->left_margin = left;
var->right_margin = right;
var->upper_margin = upper;
var->lower_margin = lower;
var->hsync_len = hslen;
var->vsync_len = vslen;
var->sync = sync;
var->vmode = FB_VMODE_NONINTERLACED;
/*
* In double scan mode, the vertical parameters are doubled,
* so we need to halve them to get the right values.
* In interlaced mode the values are already correct,
* so no correction is necessary.
*/
if (interlace)
var->vmode = FB_VMODE_INTERLACED;
if (double_scan) {
var->vmode = FB_VMODE_DOUBLE;
var->yres >>= 1;
var->upper_margin >>= 1;
var->lower_margin >>= 1;
var->vsync_len >>= 1;
}
return 0;
}
/* ------------------------------------------------------------------------- */
static int atyfb_set_par(struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
struct fb_var_screeninfo *var = &info->var;
u32 tmp, pixclock;
int err;
#ifdef DEBUG
struct fb_var_screeninfo debug;
u32 pixclock_in_ps;
#endif
if (par->asleep)
return 0;
err = aty_var_to_crtc(info, var, &par->crtc);
if (err)
return err;
pixclock = atyfb_get_pixclock(var, par);
if (pixclock == 0) {
PRINTKE("Invalid pixclock\n");
return -EINVAL;
} else {
err = par->pll_ops->var_to_pll(info, pixclock,
var->bits_per_pixel, &par->pll);
if (err)
return err;
}
par->accel_flags = var->accel_flags; /* hack */
if (var->accel_flags) {
info->fbops->fb_sync = atyfb_sync;
info->flags &= ~FBINFO_HWACCEL_DISABLED;
} else {
info->fbops->fb_sync = NULL;
info->flags |= FBINFO_HWACCEL_DISABLED;
}
if (par->blitter_may_be_busy)
wait_for_idle(par);
aty_set_crtc(par, &par->crtc);
par->dac_ops->set_dac(info, &par->pll,
var->bits_per_pixel, par->accel_flags);
par->pll_ops->set_pll(info, &par->pll);
#ifdef DEBUG
if (par->pll_ops && par->pll_ops->pll_to_var)
pixclock_in_ps = par->pll_ops->pll_to_var(info, &par->pll);
else
pixclock_in_ps = 0;
if (0 == pixclock_in_ps) {
PRINTKE("ALERT ops->pll_to_var get 0\n");
pixclock_in_ps = pixclock;
}
memset(&debug, 0, sizeof(debug));
if (!aty_crtc_to_var(&par->crtc, &debug)) {
u32 hSync, vRefresh;
u32 h_disp, h_sync_strt, h_sync_end, h_total;
u32 v_disp, v_sync_strt, v_sync_end, v_total;
h_disp = debug.xres;
h_sync_strt = h_disp + debug.right_margin;
h_sync_end = h_sync_strt + debug.hsync_len;
h_total = h_sync_end + debug.left_margin;
v_disp = debug.yres;
v_sync_strt = v_disp + debug.lower_margin;
v_sync_end = v_sync_strt + debug.vsync_len;
v_total = v_sync_end + debug.upper_margin;
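/*
* pixclock_in_ps is the dot-clock period in picoseconds, so
* 10^9 / (pixclock_in_ps * h_total) is the line rate in kHz,
* and scaling by 1000 / v_total yields the frame rate in Hz.
*/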
hSync = 1000000000 / (pixclock_in_ps * h_total);
vRefresh = (hSync * 1000) / v_total;
if (par->crtc.gen_cntl & CRTC_INTERLACE_EN)
vRefresh *= 2;
if (par->crtc.gen_cntl & CRTC_DBL_SCAN_EN)
vRefresh /= 2;
DPRINTK("atyfb_set_par\n");
DPRINTK(" Set Visible Mode to %ix%i-%i\n",
var->xres, var->yres, var->bits_per_pixel);
DPRINTK(" Virtual resolution %ix%i, "
"pixclock_in_ps %i (calculated %i)\n",
var->xres_virtual, var->yres_virtual,
pixclock, pixclock_in_ps);
DPRINTK(" Dot clock: %i MHz\n",
1000000 / pixclock_in_ps);
DPRINTK(" Horizontal sync: %i kHz\n", hSync);
DPRINTK(" Vertical refresh: %i Hz\n", vRefresh);
DPRINTK(" x style: %i.%03i %i %i %i %i %i %i %i %i\n",
1000000 / pixclock_in_ps, 1000000 % pixclock_in_ps,
h_disp, h_sync_strt, h_sync_end, h_total,
v_disp, v_sync_strt, v_sync_end, v_total);
DPRINTK(" fb style: %i %i %i %i %i %i %i %i %i\n",
pixclock_in_ps,
debug.left_margin, h_disp, debug.right_margin, debug.hsync_len,
debug.upper_margin, v_disp, debug.lower_margin, debug.vsync_len);
}
#endif /* DEBUG */
if (!M64_HAS(INTEGRATED)) {
/* Don't forget MEM_CNTL */
tmp = aty_ld_le32(MEM_CNTL, par) & 0xf0ffffff;
switch (var->bits_per_pixel) {
case 8:
tmp |= 0x02000000;
break;
case 16:
tmp |= 0x03000000;
break;
case 32:
tmp |= 0x06000000;
break;
}
aty_st_le32(MEM_CNTL, tmp, par);
} else {
tmp = aty_ld_le32(MEM_CNTL, par) & 0xf00fffff;
if (!M64_HAS(MAGIC_POSTDIV))
tmp |= par->mem_refresh_rate << 20;
switch (var->bits_per_pixel) {
case 8:
case 24:
tmp |= 0x00000000;
break;
case 16:
tmp |= 0x04000000;
break;
case 32:
tmp |= 0x08000000;
break;
}
if (M64_HAS(CT_BUS)) {
aty_st_le32(DAC_CNTL, 0x87010184, par);
aty_st_le32(BUS_CNTL, 0x680000f9, par);
} else if (M64_HAS(VT_BUS)) {
aty_st_le32(DAC_CNTL, 0x87010184, par);
aty_st_le32(BUS_CNTL, 0x680000f9, par);
} else if (M64_HAS(MOBIL_BUS)) {
aty_st_le32(DAC_CNTL, 0x80010102, par);
aty_st_le32(BUS_CNTL, 0x7b33a040 | (par->aux_start ? BUS_APER_REG_DIS : 0), par);
} else {
/* GT */
aty_st_le32(DAC_CNTL, 0x86010102, par);
aty_st_le32(BUS_CNTL, 0x7b23a040 | (par->aux_start ? BUS_APER_REG_DIS : 0), par);
aty_st_le32(EXT_MEM_CNTL, aty_ld_le32(EXT_MEM_CNTL, par) | 0x5000001, par);
}
aty_st_le32(MEM_CNTL, tmp, par);
}
aty_st_8(DAC_MASK, 0xff, par);
info->fix.line_length = calc_line_length(par, var->xres_virtual,
var->bits_per_pixel);
info->fix.visual = var->bits_per_pixel <= 8 ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
/* Initialize the graphics engine */
if (par->accel_flags & FB_ACCELF_TEXT)
aty_init_engine(par, info);
#ifdef CONFIG_BOOTX_TEXT
btext_update_display(info->fix.smem_start,
(((par->crtc.h_tot_disp >> 16) & 0xff) + 1) * 8,
((par->crtc.v_tot_disp >> 16) & 0x7ff) + 1,
var->bits_per_pixel,
par->crtc.vxres * var->bits_per_pixel / 8);
#endif /* CONFIG_BOOTX_TEXT */
#if 0
/* switch to accelerator mode */
if (!(par->crtc.gen_cntl & CRTC_EXT_DISP_EN))
aty_st_le32(CRTC_GEN_CNTL, par->crtc.gen_cntl | CRTC_EXT_DISP_EN, par);
#endif
#ifdef DEBUG
{
/* dump non-shadow CRTC, PLL and LCD registers */
int i; u32 base;
/* CRTC registers */
base = 0x2000;
printk("debug atyfb: Mach64 non-shadow register values:");
for (i = 0; i < 256; i = i+4) {
if (i % 16 == 0)
printk("\ndebug atyfb: 0x%04X: ", base + i);
printk(" %08X", aty_ld_le32(i, par));
}
printk("\n\n");
#ifdef CONFIG_FB_ATY_CT
/* PLL registers */
base = 0x00;
printk("debug atyfb: Mach64 PLL register values:");
for (i = 0; i < 64; i++) {
if (i % 16 == 0)
printk("\ndebug atyfb: 0x%02X: ", base + i);
if (i % 4 == 0)
printk(" ");
printk("%02X", aty_ld_pll_ct(i, par));
}
printk("\n\n");
#endif /* CONFIG_FB_ATY_CT */
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table != 0) {
/* LCD registers */
base = 0x00;
printk("debug atyfb: LCD register values:");
if (M64_HAS(LT_LCD_REGS)) {
for (i = 0; i <= POWER_MANAGEMENT; i++) {
if (i == EXT_VERT_STRETCH)
continue;
printk("\ndebug atyfb: 0x%04X: ",
lt_lcd_regs[i]);
printk(" %08X", aty_ld_lcd(i, par));
}
} else {
for (i = 0; i < 64; i++) {
if (i % 4 == 0)
printk("\ndebug atyfb: 0x%02X: ",
base + i);
printk(" %08X", aty_ld_lcd(i, par));
}
}
printk("\n\n");
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
}
#endif /* DEBUG */
return 0;
}
static int atyfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
int err;
struct crtc crtc;
union aty_pll pll;
u32 pixclock;
memcpy(&pll, &par->pll, sizeof(pll));
err = aty_var_to_crtc(info, var, &crtc);
if (err)
return err;
pixclock = atyfb_get_pixclock(var, par);
if (pixclock == 0) {
if (!(var->activate & FB_ACTIVATE_TEST))
PRINTKE("Invalid pixclock\n");
return -EINVAL;
} else {
err = par->pll_ops->var_to_pll(info, pixclock,
var->bits_per_pixel, &pll);
if (err)
return err;
}
if (var->accel_flags & FB_ACCELF_TEXT)
info->var.accel_flags = FB_ACCELF_TEXT;
else
info->var.accel_flags = 0;
aty_crtc_to_var(&crtc, var);
var->pixclock = par->pll_ops->pll_to_var(info, &pll);
return 0;
}
static void set_off_pitch(struct atyfb_par *par, const struct fb_info *info)
{
u32 xoffset = info->var.xoffset;
u32 yoffset = info->var.yoffset;
u32 line_length = info->fix.line_length;
u32 bpp = info->var.bits_per_pixel;
par->crtc.off_pitch =
((yoffset * line_length + xoffset * bpp / 8) / 8) |
((line_length / bpp) << 22);
}
/*
* Open/Release the frame buffer device
*/
static int atyfb_open(struct fb_info *info, int user)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
if (user) {
par->open++;
#ifdef __sparc__
par->mmaped = 0;
#endif
}
return 0;
}
static irqreturn_t aty_irq(int irq, void *dev_id)
{
struct atyfb_par *par = dev_id;
int handled = 0;
u32 int_cntl;
spin_lock(&par->int_lock);
int_cntl = aty_ld_le32(CRTC_INT_CNTL, par);
if (int_cntl & CRTC_VBLANK_INT) {
/* clear interrupt */
aty_st_le32(CRTC_INT_CNTL, (int_cntl & CRTC_INT_EN_MASK) |
CRTC_VBLANK_INT_AK, par);
par->vblank.count++;
if (par->vblank.pan_display) {
par->vblank.pan_display = 0;
aty_st_le32(CRTC_OFF_PITCH, par->crtc.off_pitch, par);
}
wake_up_interruptible(&par->vblank.wait);
handled = 1;
}
spin_unlock(&par->int_lock);
return IRQ_RETVAL(handled);
}
static int aty_enable_irq(struct atyfb_par *par, int reenable)
{
u32 int_cntl;
if (!test_and_set_bit(0, &par->irq_flags)) {
if (request_irq(par->irq, aty_irq, IRQF_SHARED, "atyfb", par)) {
clear_bit(0, &par->irq_flags);
return -EINVAL;
}
spin_lock_irq(&par->int_lock);
int_cntl = aty_ld_le32(CRTC_INT_CNTL, par) & CRTC_INT_EN_MASK;
/* clear interrupt */
aty_st_le32(CRTC_INT_CNTL, int_cntl | CRTC_VBLANK_INT_AK, par);
/* enable interrupt */
aty_st_le32(CRTC_INT_CNTL, int_cntl | CRTC_VBLANK_INT_EN, par);
spin_unlock_irq(&par->int_lock);
} else if (reenable) {
spin_lock_irq(&par->int_lock);
int_cntl = aty_ld_le32(CRTC_INT_CNTL, par) & CRTC_INT_EN_MASK;
if (!(int_cntl & CRTC_VBLANK_INT_EN)) {
printk("atyfb: someone disabled IRQ [%08x]\n",
int_cntl);
/* re-enable interrupt */
aty_st_le32(CRTC_INT_CNTL, int_cntl |
CRTC_VBLANK_INT_EN, par);
}
spin_unlock_irq(&par->int_lock);
}
return 0;
}
static int aty_disable_irq(struct atyfb_par *par)
{
u32 int_cntl;
if (test_and_clear_bit(0, &par->irq_flags)) {
if (par->vblank.pan_display) {
par->vblank.pan_display = 0;
aty_st_le32(CRTC_OFF_PITCH, par->crtc.off_pitch, par);
}
spin_lock_irq(&par->int_lock);
int_cntl = aty_ld_le32(CRTC_INT_CNTL, par) & CRTC_INT_EN_MASK;
/* disable interrupt */
aty_st_le32(CRTC_INT_CNTL, int_cntl & ~CRTC_VBLANK_INT_EN, par);
spin_unlock_irq(&par->int_lock);
free_irq(par->irq, par);
}
return 0;
}
static int atyfb_release(struct fb_info *info, int user)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
#ifdef __sparc__
int was_mmaped;
#endif
if (!user)
return 0;
par->open--;
mdelay(1);
wait_for_idle(par);
if (par->open)
return 0;
#ifdef __sparc__
was_mmaped = par->mmaped;
par->mmaped = 0;
if (was_mmaped) {
struct fb_var_screeninfo var;
/*
* Now reset the default display config; we have
* no idea what the program(s) which mmap'd the
* chip did to the configuration, nor whether they
* restored it correctly.
*/
var = default_var;
if (noaccel)
var.accel_flags &= ~FB_ACCELF_TEXT;
else
var.accel_flags |= FB_ACCELF_TEXT;
if (var.yres == var.yres_virtual) {
u32 videoram = (info->fix.smem_len - (PAGE_SIZE << 2));
var.yres_virtual =
((videoram * 8) / var.bits_per_pixel) /
var.xres_virtual;
if (var.yres_virtual < var.yres)
var.yres_virtual = var.yres;
}
}
#endif
aty_disable_irq(par);
return 0;
}
/*
* Pan or Wrap the Display
*
* This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
*/
static int atyfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 xres, yres, xoffset, yoffset;
xres = (((par->crtc.h_tot_disp >> 16) & 0xff) + 1) * 8;
yres = ((par->crtc.v_tot_disp >> 16) & 0x7ff) + 1;
if (par->crtc.gen_cntl & CRTC_DBL_SCAN_EN)
yres >>= 1;
xoffset = (var->xoffset + 7) & ~7;
yoffset = var->yoffset;
if (xoffset + xres > par->crtc.vxres ||
yoffset + yres > par->crtc.vyres)
return -EINVAL;
info->var.xoffset = xoffset;
info->var.yoffset = yoffset;
if (par->asleep)
return 0;
set_off_pitch(par, info);
if ((var->activate & FB_ACTIVATE_VBL) && !aty_enable_irq(par, 0)) {
par->vblank.pan_display = 1;
} else {
par->vblank.pan_display = 0;
aty_st_le32(CRTC_OFF_PITCH, par->crtc.off_pitch, par);
}
return 0;
}
static int aty_waitforvblank(struct atyfb_par *par, u32 crtc)
{
struct aty_interrupt *vbl;
unsigned int count;
int ret;
switch (crtc) {
case 0:
vbl = &par->vblank;
break;
default:
return -ENODEV;
}
ret = aty_enable_irq(par, 0);
if (ret)
return ret;
count = vbl->count;
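/*
* Sample the vblank counter, then sleep until the IRQ handler
* increments it, waiting at most HZ/10 jiffies. On timeout,
* aty_enable_irq(par, 1) re-enables the interrupt in case
* something else disabled it behind our back.
*/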
ret = wait_event_interruptible_timeout(vbl->wait,
count != vbl->count, HZ/10);
if (ret < 0)
return ret;
if (ret == 0) {
aty_enable_irq(par, 1);
return -ETIMEDOUT;
}
return 0;
}
#ifdef DEBUG
#define ATYIO_CLKR 0x41545900 /* ATY\00 */
#define ATYIO_CLKW 0x41545901 /* ATY\01 */
struct atyclk {
u32 ref_clk_per;
u8 pll_ref_div;
u8 mclk_fb_div;
u8 mclk_post_div; /* 1,2,3,4,8 */
u8 mclk_fb_mult; /* 2 or 4 */
u8 xclk_post_div; /* 1,2,3,4,8 */
u8 vclk_fb_div;
u8 vclk_post_div; /* 1,2,3,4,6,8,12 */
u32 dsp_xclks_per_row; /* 0-16383 */
u32 dsp_loop_latency; /* 0-15 */
u32 dsp_precision; /* 0-7 */
u32 dsp_on; /* 0-2047 */
u32 dsp_off; /* 0-2047 */
};
#define ATYIO_FEATR 0x41545902 /* ATY\02 */
#define ATYIO_FEATW 0x41545903 /* ATY\03 */
#endif
static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
#ifdef __sparc__
struct fbtype fbtyp;
#endif
switch (cmd) {
#ifdef __sparc__
case FBIOGTYPE:
fbtyp.fb_type = FBTYPE_PCI_GENERIC;
fbtyp.fb_width = par->crtc.vxres;
fbtyp.fb_height = par->crtc.vyres;
fbtyp.fb_depth = info->var.bits_per_pixel;
fbtyp.fb_cmsize = info->cmap.len;
fbtyp.fb_size = info->fix.smem_len;
if (copy_to_user((struct fbtype __user *) arg, &fbtyp,
sizeof(fbtyp)))
return -EFAULT;
break;
#endif /* __sparc__ */
case FBIO_WAITFORVSYNC:
{
u32 crtc;
if (get_user(crtc, (__u32 __user *) arg))
return -EFAULT;
return aty_waitforvblank(par, crtc);
}
break;
#if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
case ATYIO_CLKR:
if (M64_HAS(INTEGRATED)) {
struct atyclk clk;
union aty_pll *pll = &par->pll;
u32 dsp_config = pll->ct.dsp_config;
u32 dsp_on_off = pll->ct.dsp_on_off;
clk.ref_clk_per = par->ref_clk_per;
clk.pll_ref_div = pll->ct.pll_ref_div;
clk.mclk_fb_div = pll->ct.mclk_fb_div;
clk.mclk_post_div = pll->ct.mclk_post_div_real;
clk.mclk_fb_mult = pll->ct.mclk_fb_mult;
clk.xclk_post_div = pll->ct.xclk_post_div_real;
clk.vclk_fb_div = pll->ct.vclk_fb_div;
clk.vclk_post_div = pll->ct.vclk_post_div_real;
clk.dsp_xclks_per_row = dsp_config & 0x3fff;
clk.dsp_loop_latency = (dsp_config >> 16) & 0xf;
clk.dsp_precision = (dsp_config >> 20) & 7;
clk.dsp_off = dsp_on_off & 0x7ff;
clk.dsp_on = (dsp_on_off >> 16) & 0x7ff;
if (copy_to_user((struct atyclk __user *) arg, &clk,
sizeof(clk)))
return -EFAULT;
} else
return -EINVAL;
break;
case ATYIO_CLKW:
if (M64_HAS(INTEGRATED)) {
struct atyclk clk;
union aty_pll *pll = &par->pll;
if (copy_from_user(&clk, (struct atyclk __user *) arg,
sizeof(clk)))
return -EFAULT;
par->ref_clk_per = clk.ref_clk_per;
pll->ct.pll_ref_div = clk.pll_ref_div;
pll->ct.mclk_fb_div = clk.mclk_fb_div;
pll->ct.mclk_post_div_real = clk.mclk_post_div;
pll->ct.mclk_fb_mult = clk.mclk_fb_mult;
pll->ct.xclk_post_div_real = clk.xclk_post_div;
pll->ct.vclk_fb_div = clk.vclk_fb_div;
pll->ct.vclk_post_div_real = clk.vclk_post_div;
pll->ct.dsp_config = (clk.dsp_xclks_per_row & 0x3fff) |
((clk.dsp_loop_latency & 0xf) << 16) |
((clk.dsp_precision & 7) << 20);
pll->ct.dsp_on_off = (clk.dsp_off & 0x7ff) |
((clk.dsp_on & 0x7ff) << 16);
/*aty_calc_pll_ct(info, &pll->ct);*/
aty_set_pll_ct(info, pll);
} else
return -EINVAL;
break;
case ATYIO_FEATR:
if (get_user(par->features, (u32 __user *) arg))
return -EFAULT;
break;
case ATYIO_FEATW:
if (put_user(par->features, (u32 __user *) arg))
return -EFAULT;
break;
#endif /* DEBUG && CONFIG_FB_ATY_CT */
default:
return -EINVAL;
}
return 0;
}
static int atyfb_sync(struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
if (par->blitter_may_be_busy)
wait_for_idle(par);
return 0;
}
#ifdef __sparc__
static int atyfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
unsigned int size, page, map_size = 0;
unsigned long map_offset = 0;
unsigned long off;
int i;
if (!par->mmap_map)
return -ENXIO;
if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
return -EINVAL;
off = vma->vm_pgoff << PAGE_SHIFT;
size = vma->vm_end - vma->vm_start;
/* To stop the swapper from even considering these pages. */
vma->vm_flags |= (VM_IO | VM_RESERVED);
if (((vma->vm_pgoff == 0) && (size == info->fix.smem_len)) ||
((off == info->fix.smem_len) && (size == PAGE_SIZE)))
off += 0x8000000000000000UL;
vma->vm_pgoff = off >> PAGE_SHIFT; /* propagate off changes */
/* For each page, see which map applies */
for (page = 0; page < size;) {
map_size = 0;
for (i = 0; par->mmap_map[i].size; i++) {
unsigned long start = par->mmap_map[i].voff;
unsigned long end = start + par->mmap_map[i].size;
unsigned long offset = off + page;
if (start > offset)
continue;
if (offset >= end)
continue;
map_size = par->mmap_map[i].size - (offset - start);
map_offset = par->mmap_map[i].poff + (offset - start);
break;
}
if (!map_size) {
page += PAGE_SIZE;
continue;
}
if (page + map_size > size)
map_size = size - page;
pgprot_val(vma->vm_page_prot) &= ~(par->mmap_map[i].prot_mask);
pgprot_val(vma->vm_page_prot) |= par->mmap_map[i].prot_flag;
if (remap_pfn_range(vma, vma->vm_start + page,
map_offset >> PAGE_SHIFT, map_size, vma->vm_page_prot))
return -EAGAIN;
page += map_size;
}
if (!map_size)
return -EINVAL;
if (!par->mmaped)
par->mmaped = 1;
return 0;
}
#endif /* __sparc__ */
#if defined(CONFIG_PM) && defined(CONFIG_PCI)
#ifdef CONFIG_PPC_PMAC
/*
* Power management routines. These are used for PowerBook sleep.
*/
static int aty_power_mgmt(int sleep, struct atyfb_par *par)
{
u32 pm;
int timeout;
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
pm = (pm & ~PWR_MGT_MODE_MASK) | PWR_MGT_MODE_REG;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
timeout = 2000;
if (sleep) {
/* Sleep */
pm &= ~PWR_MGT_ON;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
udelay(10);
pm &= ~(PWR_BLON | AUTO_PWR_UP);
pm |= SUSPEND_NOW;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
udelay(10);
pm |= PWR_MGT_ON;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
do {
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
mdelay(1);
if ((--timeout) == 0)
break;
} while ((pm & PWR_MGT_STATUS_MASK) != PWR_MGT_STATUS_SUSPEND);
} else {
/* Wakeup */
pm &= ~PWR_MGT_ON;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
udelay(10);
pm &= ~SUSPEND_NOW;
pm |= (PWR_BLON | AUTO_PWR_UP);
aty_st_lcd(POWER_MANAGEMENT, pm, par);
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
udelay(10);
pm |= PWR_MGT_ON;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
do {
pm = aty_ld_lcd(POWER_MANAGEMENT, par);
mdelay(1);
if ((--timeout) == 0)
break;
} while ((pm & PWR_MGT_STATUS_MASK) != 0);
}
mdelay(500);
return timeout ? 0 : -EIO;
}
#endif /* CONFIG_PPC_PMAC */
static int atyfb_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct atyfb_par *par = (struct atyfb_par *) info->par;
if (state.event == pdev->dev.power.power_state.event)
return 0;
console_lock();
fb_set_suspend(info, 1);
/* Idle & reset engine */
wait_for_idle(par);
aty_reset_engine(par);
/* Blank display and LCD */
atyfb_blank(FB_BLANK_POWERDOWN, info);
par->asleep = 1;
par->lock_blank = 1;
/*
* Because we may change PCI D state ourselves, we need to
* first save the config space content so the core can
* restore it properly on resume.
*/
pci_save_state(pdev);
#ifdef CONFIG_PPC_PMAC
/* Set chip to "suspend" mode */
if (machine_is(powermac) && aty_power_mgmt(1, par)) {
par->asleep = 0;
par->lock_blank = 0;
atyfb_blank(FB_BLANK_UNBLANK, info);
fb_set_suspend(info, 0);
console_unlock();
return -EIO;
}
#else
pci_set_power_state(pdev, pci_choose_state(pdev, state));
#endif
console_unlock();
pdev->dev.power.power_state = state;
return 0;
}
static void aty_resume_chip(struct fb_info *info)
{
struct atyfb_par *par = info->par;
aty_st_le32(MEM_CNTL, par->mem_cntl, par);
if (par->pll_ops->resume_pll)
par->pll_ops->resume_pll(info, &par->pll);
if (par->aux_start)
aty_st_le32(BUS_CNTL,
aty_ld_le32(BUS_CNTL, par) | BUS_APER_REG_DIS, par);
}
static int atyfb_pci_resume(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct atyfb_par *par = (struct atyfb_par *) info->par;
if (pdev->dev.power.power_state.event == PM_EVENT_ON)
return 0;
console_lock();
/*
* PCI state will have been restored by the core, so
* we should be in D0 now with our config space fully
* restored
*/
#ifdef CONFIG_PPC_PMAC
if (machine_is(powermac) &&
pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
aty_power_mgmt(0, par);
#endif
aty_resume_chip(info);
par->asleep = 0;
/* Restore display */
atyfb_set_par(info);
/* Refresh */
fb_set_suspend(info, 0);
/* Unblank */
par->lock_blank = 0;
atyfb_blank(FB_BLANK_UNBLANK, info);
console_unlock();
pdev->dev.power.power_state = PMSG_ON;
return 0;
}
#endif /* defined(CONFIG_PM) && defined(CONFIG_PCI) */
/* Backlight */
#ifdef CONFIG_FB_ATY_BACKLIGHT
#define MAX_LEVEL 0xFF
static int aty_bl_get_level_brightness(struct atyfb_par *par, int level)
{
struct fb_info *info = pci_get_drvdata(par->pdev);
int atylevel;
/* Get and convert the value */
/* No locking of bl_curve since we read a single value */
atylevel = info->bl_curve[level] * FB_BACKLIGHT_MAX / MAX_LEVEL;
if (atylevel < 0)
atylevel = 0;
else if (atylevel > MAX_LEVEL)
atylevel = MAX_LEVEL;
return atylevel;
}
static int aty_bl_update_status(struct backlight_device *bd)
{
struct atyfb_par *par = bl_get_data(bd);
unsigned int reg = aty_ld_lcd(LCD_MISC_CNTL, par);
int level;
if (bd->props.power != FB_BLANK_UNBLANK ||
bd->props.fb_blank != FB_BLANK_UNBLANK)
level = 0;
else
level = bd->props.brightness;
reg |= (BLMOD_EN | BIASMOD_EN);
if (level > 0) {
reg &= ~BIAS_MOD_LEVEL_MASK;
reg |= (aty_bl_get_level_brightness(par, level) << BIAS_MOD_LEVEL_SHIFT);
} else {
reg &= ~BIAS_MOD_LEVEL_MASK;
reg |= (aty_bl_get_level_brightness(par, 0) << BIAS_MOD_LEVEL_SHIFT);
}
aty_st_lcd(LCD_MISC_CNTL, reg, par);
return 0;
}
static int aty_bl_get_brightness(struct backlight_device *bd)
{
return bd->props.brightness;
}
static const struct backlight_ops aty_bl_data = {
.get_brightness = aty_bl_get_brightness,
.update_status = aty_bl_update_status,
};
static void aty_bl_init(struct atyfb_par *par)
{
struct backlight_properties props;
struct fb_info *info = pci_get_drvdata(par->pdev);
struct backlight_device *bd;
char name[12];
#ifdef CONFIG_PMAC_BACKLIGHT
if (!pmac_has_backlight_type("ati"))
return;
#endif
snprintf(name, sizeof(name), "atybl%d", info->node);
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
bd = backlight_device_register(name, info->dev, par, &aty_bl_data,
&props);
if (IS_ERR(bd)) {
info->bl_dev = NULL;
printk(KERN_WARNING "aty: Backlight registration failed\n");
goto error;
}
info->bl_dev = bd;
fb_bl_default_curve(info, 0,
0x3F * FB_BACKLIGHT_MAX / MAX_LEVEL,
0xFF * FB_BACKLIGHT_MAX / MAX_LEVEL);
bd->props.brightness = bd->props.max_brightness;
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
printk("aty: Backlight initialized (%s)\n", name);
return;
error:
return;
}
#ifdef CONFIG_PCI
static void aty_bl_exit(struct backlight_device *bd)
{
backlight_device_unregister(bd);
printk("aty: Backlight unloaded\n");
}
#endif /* CONFIG_PCI */
#endif /* CONFIG_FB_ATY_BACKLIGHT */
static void __devinit aty_calc_mem_refresh(struct atyfb_par *par, int xclk)
{
const int ragepro_tbl[] = {
44, 50, 55, 66, 75, 80, 100
};
const int ragexl_tbl[] = {
50, 66, 75, 83, 90, 95, 100, 105,
110, 115, 120, 125, 133, 143, 166
};
const int *refresh_tbl;
int i, size;
if (M64_HAS(XL_MEM)) {
refresh_tbl = ragexl_tbl;
size = ARRAY_SIZE(ragexl_tbl);
} else {
refresh_tbl = ragepro_tbl;
size = ARRAY_SIZE(ragepro_tbl);
}
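/*
* Pick the index of the first table entry above the XCLK rate
* (or the table size if none is); atyfb_set_par() later shifts
* this index into MEM_CNTL (bits 20 and up) as the DRAM
* refresh rate field on chips without a magic postdivider.
*/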
for (i = 0; i < size; i++) {
if (xclk < refresh_tbl[i])
break;
}
par->mem_refresh_rate = i;
}
/*
* Initialisation
*/
static struct fb_info *fb_list = NULL;
#if defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD)
static int __devinit atyfb_get_timings_from_lcd(struct atyfb_par *par,
struct fb_var_screeninfo *var)
{
int ret = -EINVAL;
if (par->lcd_table != 0 && (aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
*var = default_var;
var->xres = var->xres_virtual = par->lcd_hdisp;
var->right_margin = par->lcd_right_margin;
var->left_margin = par->lcd_hblank_len -
(par->lcd_right_margin + par->lcd_hsync_dly +
par->lcd_hsync_len);
var->hsync_len = par->lcd_hsync_len + par->lcd_hsync_dly;
var->yres = var->yres_virtual = par->lcd_vdisp;
var->lower_margin = par->lcd_lower_margin;
var->upper_margin = par->lcd_vblank_len -
(par->lcd_lower_margin + par->lcd_vsync_len);
var->vsync_len = par->lcd_vsync_len;
var->pixclock = par->lcd_pixclock;
ret = 0;
}
return ret;
}
#endif /* defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD) */
static int __devinit aty_init(struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
const char *ramname = NULL, *xtal;
int gtb_memsize, has_var = 0;
struct fb_var_screeninfo var;
int ret;
init_waitqueue_head(&par->vblank.wait);
spin_lock_init(&par->int_lock);
#ifdef CONFIG_FB_ATY_GX
if (!M64_HAS(INTEGRATED)) {
u32 stat0;
u8 dac_type, dac_subtype, clk_type;
stat0 = aty_ld_le32(CNFG_STAT0, par);
par->bus_type = (stat0 >> 0) & 0x07;
par->ram_type = (stat0 >> 3) & 0x07;
ramname = aty_gx_ram[par->ram_type];
/* FIXME: clockchip/RAMDAC probing? */
dac_type = (aty_ld_le32(DAC_CNTL, par) >> 16) & 0x07;
#ifdef CONFIG_ATARI
clk_type = CLK_ATI18818_1;
dac_type = (stat0 >> 9) & 0x07;
if (dac_type == 0x07)
dac_subtype = DAC_ATT20C408;
else
dac_subtype = (aty_ld_8(SCRATCH_REG1 + 1, par) & 0xF0) | dac_type;
#else
dac_type = DAC_IBMRGB514;
dac_subtype = DAC_IBMRGB514;
clk_type = CLK_IBMRGB514;
#endif
switch (dac_subtype) {
case DAC_IBMRGB514:
par->dac_ops = &aty_dac_ibm514;
break;
#ifdef CONFIG_ATARI
case DAC_ATI68860_B:
case DAC_ATI68860_C:
par->dac_ops = &aty_dac_ati68860b;
break;
case DAC_ATT20C408:
case DAC_ATT21C498:
par->dac_ops = &aty_dac_att21c498;
break;
#endif
default:
PRINTKI("aty_init: DAC type not implemented yet!\n");
par->dac_ops = &aty_dac_unsupported;
break;
}
switch (clk_type) {
#ifdef CONFIG_ATARI
case CLK_ATI18818_1:
par->pll_ops = &aty_pll_ati18818_1;
break;
#else
case CLK_IBMRGB514:
par->pll_ops = &aty_pll_ibm514;
break;
#endif
#if 0 /* dead code */
case CLK_STG1703:
par->pll_ops = &aty_pll_stg1703;
break;
case CLK_CH8398:
par->pll_ops = &aty_pll_ch8398;
break;
case CLK_ATT20C408:
par->pll_ops = &aty_pll_att20c408;
break;
#endif
default:
PRINTKI("aty_init: CLK type not implemented yet!");
par->pll_ops = &aty_pll_unsupported;
break;
}
}
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
if (M64_HAS(INTEGRATED)) {
par->dac_ops = &aty_dac_ct;
par->pll_ops = &aty_pll_ct;
par->bus_type = PCI;
par->ram_type = (aty_ld_le32(CNFG_STAT0, par) & 0x07);
if (M64_HAS(XL_MEM))
ramname = aty_xl_ram[par->ram_type];
else
ramname = aty_ct_ram[par->ram_type];
/* for many chips, the mclk is 67 MHz for SDRAM, 63 MHz otherwise */
if (par->pll_limits.mclk == 67 && par->ram_type < SDRAM)
par->pll_limits.mclk = 63;
/* Mobility + 32bit memory interface need halved XCLK. */
if (M64_HAS(MOBIL_BUS) && par->ram_type == SDRAM32)
par->pll_limits.xclk = (par->pll_limits.xclk + 1) >> 1;
}
#endif
#ifdef CONFIG_PPC_PMAC
/*
* The Apple iBook1 uses non-standard memory frequencies.
* We detect it and set the frequencies manually.
*/
if (of_machine_is_compatible("PowerBook2,1")) {
par->pll_limits.mclk = 70;
par->pll_limits.xclk = 53;
}
#endif
/* Allow command line to override clocks. */
if (pll)
par->pll_limits.pll_max = pll;
if (mclk)
par->pll_limits.mclk = mclk;
if (xclk)
par->pll_limits.xclk = xclk;
aty_calc_mem_refresh(par, par->pll_limits.xclk);
par->pll_per = 1000000/par->pll_limits.pll_max;
par->mclk_per = 1000000/par->pll_limits.mclk;
par->xclk_per = 1000000/par->pll_limits.xclk;
par->ref_clk_per = 1000000000000ULL / 14318180;
xtal = "14.31818";
#ifdef CONFIG_FB_ATY_CT
if (M64_HAS(GTB_DSP)) {
u8 pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
if (pll_ref_div) {
int diff1, diff2;
diff1 = 510 * 14 / pll_ref_div - par->pll_limits.pll_max;
diff2 = 510 * 29 / pll_ref_div - par->pll_limits.pll_max;
if (diff1 < 0)
diff1 = -diff1;
if (diff2 < 0)
diff2 = -diff2;
if (diff2 < diff1) {
par->ref_clk_per = 1000000000000ULL / 29498928;
xtal = "29.498928";
}
}
}
#endif /* CONFIG_FB_ATY_CT */
/* save previous video mode */
aty_get_crtc(par, &par->saved_crtc);
if (par->pll_ops->get_pll)
par->pll_ops->get_pll(info, &par->saved_pll);
par->mem_cntl = aty_ld_le32(MEM_CNTL, par);
gtb_memsize = M64_HAS(GTB_DSP);
if (gtb_memsize)
/* 0xF used instead of MEM_SIZE_ALIAS */
switch (par->mem_cntl & 0xF) {
case MEM_SIZE_512K:
info->fix.smem_len = 0x80000;
break;
case MEM_SIZE_1M:
info->fix.smem_len = 0x100000;
break;
case MEM_SIZE_2M_GTB:
info->fix.smem_len = 0x200000;
break;
case MEM_SIZE_4M_GTB:
info->fix.smem_len = 0x400000;
break;
case MEM_SIZE_6M_GTB:
info->fix.smem_len = 0x600000;
break;
case MEM_SIZE_8M_GTB:
info->fix.smem_len = 0x800000;
break;
default:
info->fix.smem_len = 0x80000;
} else
switch (par->mem_cntl & MEM_SIZE_ALIAS) {
case MEM_SIZE_512K:
info->fix.smem_len = 0x80000;
break;
case MEM_SIZE_1M:
info->fix.smem_len = 0x100000;
break;
case MEM_SIZE_2M:
info->fix.smem_len = 0x200000;
break;
case MEM_SIZE_4M:
info->fix.smem_len = 0x400000;
break;
case MEM_SIZE_6M:
info->fix.smem_len = 0x600000;
break;
case MEM_SIZE_8M:
info->fix.smem_len = 0x800000;
break;
default:
info->fix.smem_len = 0x80000;
}
if (M64_HAS(MAGIC_VRAM_SIZE)) {
if (aty_ld_le32(CNFG_STAT1, par) & 0x40000000)
info->fix.smem_len += 0x400000;
}
if (vram) {
info->fix.smem_len = vram * 1024;
par->mem_cntl &= ~(gtb_memsize ? 0xF : MEM_SIZE_ALIAS);
if (info->fix.smem_len <= 0x80000)
par->mem_cntl |= MEM_SIZE_512K;
else if (info->fix.smem_len <= 0x100000)
par->mem_cntl |= MEM_SIZE_1M;
else if (info->fix.smem_len <= 0x200000)
par->mem_cntl |= gtb_memsize ? MEM_SIZE_2M_GTB : MEM_SIZE_2M;
else if (info->fix.smem_len <= 0x400000)
par->mem_cntl |= gtb_memsize ? MEM_SIZE_4M_GTB : MEM_SIZE_4M;
else if (info->fix.smem_len <= 0x600000)
par->mem_cntl |= gtb_memsize ? MEM_SIZE_6M_GTB : MEM_SIZE_6M;
else
par->mem_cntl |= gtb_memsize ? MEM_SIZE_8M_GTB : MEM_SIZE_8M;
aty_st_le32(MEM_CNTL, par->mem_cntl, par);
}
/*
* Reg Block 0 (CT-compatible block) is at mmio_start
* Reg Block 1 (multimedia extensions) is at mmio_start - 0x400
*/
if (M64_HAS(GX)) {
info->fix.mmio_len = 0x400;
info->fix.accel = FB_ACCEL_ATI_MACH64GX;
} else if (M64_HAS(CT)) {
info->fix.mmio_len = 0x400;
info->fix.accel = FB_ACCEL_ATI_MACH64CT;
} else if (M64_HAS(VT)) {
info->fix.mmio_start -= 0x400;
info->fix.mmio_len = 0x800;
info->fix.accel = FB_ACCEL_ATI_MACH64VT;
} else {/* GT */
info->fix.mmio_start -= 0x400;
info->fix.mmio_len = 0x800;
info->fix.accel = FB_ACCEL_ATI_MACH64GT;
}
PRINTKI("%d%c %s, %s MHz XTAL, %d MHz PLL, %d Mhz MCLK, %d MHz XCLK\n",
info->fix.smem_len == 0x80000 ? 512 : (info->fix.smem_len>>20),
info->fix.smem_len == 0x80000 ? 'K' : 'M', ramname, xtal,
par->pll_limits.pll_max, par->pll_limits.mclk,
par->pll_limits.xclk);
#if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
if (M64_HAS(INTEGRATED)) {
int i;
printk("debug atyfb: BUS_CNTL DAC_CNTL MEM_CNTL "
"EXT_MEM_CNTL CRTC_GEN_CNTL DSP_CONFIG "
"DSP_ON_OFF CLOCK_CNTL\n"
"debug atyfb: %08x %08x %08x "
"%08x %08x %08x "
"%08x %08x\n"
"debug atyfb: PLL",
aty_ld_le32(BUS_CNTL, par),
aty_ld_le32(DAC_CNTL, par),
aty_ld_le32(MEM_CNTL, par),
aty_ld_le32(EXT_MEM_CNTL, par),
aty_ld_le32(CRTC_GEN_CNTL, par),
aty_ld_le32(DSP_CONFIG, par),
aty_ld_le32(DSP_ON_OFF, par),
aty_ld_le32(CLOCK_CNTL, par));
for (i = 0; i < 40; i++)
printk(" %02x", aty_ld_pll_ct(i, par));
printk("\n");
}
#endif
if (par->pll_ops->init_pll)
par->pll_ops->init_pll(info, &par->pll);
if (par->pll_ops->resume_pll)
par->pll_ops->resume_pll(info, &par->pll);
/*
* Last page of 8 MB (4 MB on ISA) aperture is MMIO,
* unless the auxiliary register aperture is used.
*/
if (!par->aux_start &&
(info->fix.smem_len == 0x800000 ||
(par->bus_type == ISA && info->fix.smem_len == 0x400000)))
info->fix.smem_len -= GUI_RESERVE;
/*
* Disable register access through the linear aperture
* if the auxiliary aperture is used so we can access
* the full 8 MB of video RAM on 8 MB boards.
*/
if (par->aux_start)
aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL, par) |
BUS_APER_REG_DIS, par);
#ifdef CONFIG_MTRR
par->mtrr_aper = -1;
par->mtrr_reg = -1;
if (!nomtrr) {
/* Cover the whole resource. */
par->mtrr_aper = mtrr_add(par->res_start, par->res_size,
MTRR_TYPE_WRCOMB, 1);
if (par->mtrr_aper >= 0 && !par->aux_start) {
/* Make a hole for mmio. */
par->mtrr_reg = mtrr_add(par->res_start + 0x800000 -
GUI_RESERVE, GUI_RESERVE,
MTRR_TYPE_UNCACHABLE, 1);
if (par->mtrr_reg < 0) {
mtrr_del(par->mtrr_aper, 0, 0);
par->mtrr_aper = -1;
}
}
}
#endif
info->fbops = &atyfb_ops;
info->pseudo_palette = par->pseudo_palette;
info->flags = FBINFO_DEFAULT |
FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_YPAN;
#ifdef CONFIG_PMAC_BACKLIGHT
if (M64_HAS(G3_PB_1_1) && of_machine_is_compatible("PowerBook1,1")) {
/*
* these bits let the 101 powerbook
* wake up from sleep -- paulus
*/
aty_st_lcd(POWER_MANAGEMENT, aty_ld_lcd(POWER_MANAGEMENT, par) |
USE_F32KHZ | TRISTATE_MEM_EN, par);
} else
#endif
if (M64_HAS(MOBIL_BUS) && backlight) {
#ifdef CONFIG_FB_ATY_BACKLIGHT
aty_bl_init(par);
#endif
}
memset(&var, 0, sizeof(var));
#ifdef CONFIG_PPC
if (machine_is(powermac)) {
/*
* FIXME: The NVRAM stuff should be put in a Mac-specific file,
* as it applies to all Mac video cards
*/
if (mode) {
if (mac_find_mode(&var, info, mode, 8))
has_var = 1;
} else {
if (default_vmode == VMODE_CHOOSE) {
int sense;
if (M64_HAS(G3_PB_1024x768))
/* G3 PowerBook with 1024x768 LCD */
default_vmode = VMODE_1024_768_60;
else if (of_machine_is_compatible("iMac"))
default_vmode = VMODE_1024_768_75;
else if (of_machine_is_compatible("PowerBook2,1"))
/* iBook with 800x600 LCD */
default_vmode = VMODE_800_600_60;
else
default_vmode = VMODE_640_480_67;
sense = read_aty_sense(par);
PRINTKI("monitor sense=%x, mode %d\n",
sense, mac_map_monitor_sense(sense));
}
if (default_vmode <= 0 || default_vmode > VMODE_MAX)
default_vmode = VMODE_640_480_60;
if (default_cmode < CMODE_8 || default_cmode > CMODE_32)
default_cmode = CMODE_8;
if (!mac_vmode_to_var(default_vmode, default_cmode,
&var))
has_var = 1;
}
}
#endif /* CONFIG_PPC */
#if defined(__i386__) && defined(CONFIG_FB_ATY_GENERIC_LCD)
if (!atyfb_get_timings_from_lcd(par, &var))
has_var = 1;
#endif
if (mode && fb_find_mode(&var, info, mode, NULL, 0, &defmode, 8))
has_var = 1;
if (!has_var)
var = default_var;
if (noaccel)
var.accel_flags &= ~FB_ACCELF_TEXT;
else
var.accel_flags |= FB_ACCELF_TEXT;
if (comp_sync != -1) {
if (!comp_sync)
var.sync &= ~FB_SYNC_COMP_HIGH_ACT;
else
var.sync |= FB_SYNC_COMP_HIGH_ACT;
}
if (var.yres == var.yres_virtual) {
u32 videoram = (info->fix.smem_len - (PAGE_SIZE << 2));
var.yres_virtual = ((videoram * 8) / var.bits_per_pixel) / var.xres_virtual;
if (var.yres_virtual < var.yres)
var.yres_virtual = var.yres;
}
ret = atyfb_check_var(&var, info);
if (ret) {
PRINTKE("can't set default video mode\n");
goto aty_init_exit;
}
#ifdef CONFIG_FB_ATY_CT
if (!noaccel && M64_HAS(INTEGRATED))
aty_init_cursor(info);
#endif /* CONFIG_FB_ATY_CT */
info->var = var;
ret = fb_alloc_cmap(&info->cmap, 256, 0);
if (ret < 0)
goto aty_init_exit;
ret = register_framebuffer(info);
if (ret < 0) {
fb_dealloc_cmap(&info->cmap);
goto aty_init_exit;
}
fb_list = info;
PRINTKI("fb%d: %s frame buffer device on %s\n",
info->node, info->fix.id, par->bus_type == ISA ? "ISA" : "PCI");
return 0;
aty_init_exit:
/* restore video mode */
aty_set_crtc(par, &par->saved_crtc);
par->pll_ops->set_pll(info, &par->saved_pll);
#ifdef CONFIG_MTRR
if (par->mtrr_reg >= 0) {
mtrr_del(par->mtrr_reg, 0, 0);
par->mtrr_reg = -1;
}
if (par->mtrr_aper >= 0) {
mtrr_del(par->mtrr_aper, 0, 0);
par->mtrr_aper = -1;
}
#endif
return ret;
}
#if defined(CONFIG_ATARI) && !defined(MODULE)
static int __devinit store_video_par(char *video_str, unsigned char m64_num)
{
char *p;
unsigned long vmembase, size, guiregbase;
PRINTKI("store_video_par() '%s' \n", video_str);
if (!(p = strsep(&video_str, ";")) || !*p)
goto mach64_invalid;
vmembase = simple_strtoul(p, NULL, 0);
if (!(p = strsep(&video_str, ";")) || !*p)
goto mach64_invalid;
size = simple_strtoul(p, NULL, 0);
if (!(p = strsep(&video_str, ";")) || !*p)
goto mach64_invalid;
guiregbase = simple_strtoul(p, NULL, 0);
phys_vmembase[m64_num] = vmembase;
phys_size[m64_num] = size;
phys_guiregbase[m64_num] = guiregbase;
PRINTKI("stored them all: $%08lX $%08lX $%08lX \n", vmembase, size,
guiregbase);
return 0;
mach64_invalid:
phys_vmembase[m64_num] = 0;
return -1;
}
#endif /* CONFIG_ATARI && !MODULE */
/*
* Blank the display.
*/
static int atyfb_blank(int blank, struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 gen_cntl;
if (par->lock_blank || par->asleep)
return 0;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table && blank > FB_BLANK_NORMAL &&
(aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par);
pm &= ~PWR_BLON;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
}
#endif
gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
gen_cntl &= ~0x400004c;
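/*
* Blanking bits, as the cases below show: 0x40 blanks the
* display for every level above UNBLANK, 0x4 additionally kills
* hsync (HSYNC_SUSPEND), 0x8 kills vsync (VSYNC_SUSPEND), both
* are set for POWERDOWN, and bit 26 (0x4000000) accompanies all
* blanking states.
*/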
switch (blank) {
case FB_BLANK_UNBLANK:
break;
case FB_BLANK_NORMAL:
gen_cntl |= 0x4000040;
break;
case FB_BLANK_VSYNC_SUSPEND:
gen_cntl |= 0x4000048;
break;
case FB_BLANK_HSYNC_SUSPEND:
gen_cntl |= 0x4000044;
break;
case FB_BLANK_POWERDOWN:
gen_cntl |= 0x400004c;
break;
}
aty_st_le32(CRTC_GEN_CNTL, gen_cntl, par);
#ifdef CONFIG_FB_ATY_GENERIC_LCD
if (par->lcd_table && blank <= FB_BLANK_NORMAL &&
(aty_ld_lcd(LCD_GEN_CNTL, par) & LCD_ON)) {
u32 pm = aty_ld_lcd(POWER_MANAGEMENT, par);
pm |= PWR_BLON;
aty_st_lcd(POWER_MANAGEMENT, pm, par);
}
#endif
return 0;
}
static void aty_st_pal(u_int regno, u_int red, u_int green, u_int blue,
const struct atyfb_par *par)
{
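/* Indexed RAMDAC write cycle: latch the palette entry number,
* then write the three colour components in R, G, B order. */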
aty_st_8(DAC_W_INDEX, regno, par);
aty_st_8(DAC_DATA, red, par);
aty_st_8(DAC_DATA, green, par);
aty_st_8(DAC_DATA, blue, par);
}
/*
* Set a single color register. The values supplied are already
* rounded down to the hardware's capabilities (according to the
* entries in the var structure). Return != 0 for invalid regno.
* !! 4 & 8 = PSEUDO, > 8 = DIRECTCOLOR
*/
static int atyfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
int i, depth;
u32 *pal = info->pseudo_palette;
depth = info->var.bits_per_pixel;
if (depth == 16)
depth = (info->var.green.length == 5) ? 15 : 16;
if (par->asleep)
return 0;
if (regno > 255 ||
(depth == 16 && regno > 63) ||
(depth == 15 && regno > 31))
return 1;
red >>= 8;
green >>= 8;
blue >>= 8;
par->palette[regno].red = red;
par->palette[regno].green = green;
par->palette[regno].blue = blue;
if (regno < 16) {
switch (depth) {
case 15:
pal[regno] = (regno << 10) | (regno << 5) | regno;
break;
case 16:
pal[regno] = (regno << 11) | (regno << 5) | regno;
break;
case 24:
pal[regno] = (regno << 16) | (regno << 8) | regno;
break;
case 32:
i = (regno << 8) | regno;
pal[regno] = (i << 16) | i;
break;
}
}
i = aty_ld_8(DAC_CNTL, par) & 0xfc;
if (M64_HAS(EXTRA_BRIGHT))
i |= 0x2; /* DAC_CNTL | 0x2 turns off the extra brightness for gt */
aty_st_8(DAC_CNTL, i, par);
aty_st_8(DAC_MASK, 0xff, par);
if (M64_HAS(INTEGRATED)) {
if (depth == 16) {
if (regno < 32)
aty_st_pal(regno << 3, red,
par->palette[regno << 1].green,
blue, par);
red = par->palette[regno >> 1].red;
blue = par->palette[regno >> 1].blue;
regno <<= 2;
} else if (depth == 15) {
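/*
* In 15 bpp each 5-bit component addresses only every 8th DAC
* entry, so mirror the colour into all eight aliased slots.
*/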
regno <<= 3;
for (i = 0; i < 8; i++)
aty_st_pal(regno + i, red, green, blue, par);
}
}
aty_st_pal(regno, red, green, blue, par);
return 0;
}
#ifdef CONFIG_PCI
#ifdef __sparc__
static int __devinit atyfb_setup_sparc(struct pci_dev *pdev,
struct fb_info *info,
unsigned long addr)
{
struct atyfb_par *par = info->par;
struct device_node *dp;
u32 mem, chip_id;
int i, j, ret;
/*
* Map memory-mapped registers.
*/
par->ati_regbase = (void *)addr + 0x7ffc00UL;
info->fix.mmio_start = addr + 0x7ffc00UL;
/*
* Map in big-endian aperture.
*/
info->screen_base = (char *) (addr + 0x800000UL);
info->fix.smem_start = addr + 0x800000UL;
/*
* Figure mmap addresses from PCI config space.
* Split the framebuffer into big- and little-endian halves.
*/
for (i = 0; i < 6 && pdev->resource[i].start; i++)
/* nothing */ ;
j = i + 4;
par->mmap_map = kcalloc(j, sizeof(*par->mmap_map), GFP_ATOMIC);
if (!par->mmap_map) {
PRINTKE("atyfb_setup_sparc() can't alloc mmap_map\n");
return -ENOMEM;
}
for (i = 0, j = 2; i < 6 && pdev->resource[i].start; i++) {
struct resource *rp = &pdev->resource[i];
int io, breg = PCI_BASE_ADDRESS_0 + (i << 2);
unsigned long base;
u32 size, pbase;
base = rp->start;
io = (rp->flags & IORESOURCE_IO);
size = rp->end - base + 1;
pci_read_config_dword(pdev, breg, &pbase);
if (io)
size &= ~1;
/*
* Map the framebuffer a second time, this time without
* the braindead _PAGE_IE setting. This is used by the
* fixed Xserver, but we need to maintain the old mapping
* to stay compatible with older ones...
*/
if (base == addr) {
par->mmap_map[j].voff = (pbase + 0x10000000) & PAGE_MASK;
par->mmap_map[j].poff = base & PAGE_MASK;
par->mmap_map[j].size = (size + ~PAGE_MASK) & PAGE_MASK;
par->mmap_map[j].prot_mask = _PAGE_CACHE;
par->mmap_map[j].prot_flag = _PAGE_E;
j++;
}
/*
* Here comes the old framebuffer mapping with _PAGE_IE
* set for the big endian half of the framebuffer...
*/
if (base == addr) {
par->mmap_map[j].voff = (pbase + 0x800000) & PAGE_MASK;
par->mmap_map[j].poff = (base + 0x800000) & PAGE_MASK;
par->mmap_map[j].size = 0x800000;
par->mmap_map[j].prot_mask = _PAGE_CACHE;
par->mmap_map[j].prot_flag = _PAGE_E | _PAGE_IE;
size -= 0x800000;
j++;
}
par->mmap_map[j].voff = pbase & PAGE_MASK;
par->mmap_map[j].poff = base & PAGE_MASK;
par->mmap_map[j].size = (size + ~PAGE_MASK) & PAGE_MASK;
par->mmap_map[j].prot_mask = _PAGE_CACHE;
par->mmap_map[j].prot_flag = _PAGE_E;
j++;
}
ret = correct_chipset(par);
if (ret)
return ret;
if (IS_XL(pdev->device)) {
/*
* Fix the PROM's idea of the MEM_CNTL settings...
*/
mem = aty_ld_le32(MEM_CNTL, par);
chip_id = aty_ld_le32(CNFG_CHIP_ID, par);
if (((chip_id & CFG_CHIP_TYPE) == VT_CHIP_ID) && !((chip_id >> 24) & 1)) {
switch (mem & 0x0f) {
case 3:
mem = (mem & ~(0x0f)) | 2;
break;
case 7:
mem = (mem & ~(0x0f)) | 3;
break;
case 9:
mem = (mem & ~(0x0f)) | 4;
break;
case 11:
mem = (mem & ~(0x0f)) | 5;
break;
default:
break;
}
if ((aty_ld_le32(CNFG_STAT0, par) & 7) >= SDRAM)
mem &= ~(0x00700000);
}
mem &= ~(0xcf80e000); /* Turn off all undocumented bits. */
aty_st_le32(MEM_CNTL, mem, par);
}
dp = pci_device_to_OF_node(pdev);
if (dp == of_console_device) {
struct fb_var_screeninfo *var = &default_var;
unsigned int N, P, Q, M, T, R;
u32 v_total, h_total;
struct crtc crtc;
u8 pll_regs[16];
u8 clock_cntl;
crtc.vxres = of_getintprop_default(dp, "width", 1024);
crtc.vyres = of_getintprop_default(dp, "height", 768);
var->bits_per_pixel = of_getintprop_default(dp, "depth", 8);
var->xoffset = var->yoffset = 0;
crtc.h_tot_disp = aty_ld_le32(CRTC_H_TOTAL_DISP, par);
crtc.h_sync_strt_wid = aty_ld_le32(CRTC_H_SYNC_STRT_WID, par);
crtc.v_tot_disp = aty_ld_le32(CRTC_V_TOTAL_DISP, par);
crtc.v_sync_strt_wid = aty_ld_le32(CRTC_V_SYNC_STRT_WID, par);
crtc.gen_cntl = aty_ld_le32(CRTC_GEN_CNTL, par);
aty_crtc_to_var(&crtc, var);
h_total = var->xres + var->right_margin + var->hsync_len + var->left_margin;
v_total = var->yres + var->lower_margin + var->vsync_len + var->upper_margin;
/*
* Read the PLL to figure out the actual refresh rate.
*/
clock_cntl = aty_ld_8(CLOCK_CNTL, par);
/* DPRINTK("CLOCK_CNTL %02x\n", clock_cntl); */
for (i = 0; i < 16; i++)
pll_regs[i] = aty_ld_pll_ct(i, par);
/*
* PLL Reference Divider M:
*/
M = pll_regs[2];
/*
* PLL Feedback Divider N (Dependent on CLOCK_CNTL):
*/
N = pll_regs[7 + (clock_cntl & 3)];
/*
* PLL Post Divider P (Dependent on CLOCK_CNTL):
*/
P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
/*
* PLL Divider Q:
*/
Q = N / P;
/*
* Target Frequency:
*
* T * M
* Q = -------
* 2 * R
*
* where R is XTALIN (= 14318 or 29498 kHz).
*/
if (IS_XL(pdev->device))
R = 29498;
else
R = 14318;
T = 2 * Q * R / M;
default_var.pixclock = 1000000000 / T;
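/*
* R is in kHz, so T is the dot clock in kHz; fb_var pixclock
* wants the period in picoseconds: 10^12 / (T * 10^3) = 10^9 / T.
*/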
}
return 0;
}
#else /* __sparc__ */
#ifdef __i386__
#ifdef CONFIG_FB_ATY_GENERIC_LCD
static void __devinit aty_init_lcd(struct atyfb_par *par, u32 bios_base)
{
u32 driv_inf_tab, sig;
u16 lcd_ofs;
/*
* To support an LCD panel, we need to know its dimensions and
* its desired pixel clock.
* There are two ways to find out:
* - Check the startup video mode and calculate the panel
* size from it. This is unreliable.
* - Read it from the driver information table in the video BIOS.
*/
/* Address of driver information table is at offset 0x78. */
driv_inf_tab = bios_base + *((u16 *)(bios_base+0x78));
/* Check for the driver information table signature. */
sig = *(u32 *)driv_inf_tab;
if ((sig == 0x54504c24) || /* Rage LT pro */
(sig == 0x544d5224) || /* Rage mobility */
(sig == 0x54435824) || /* Rage XC */
(sig == 0x544c5824)) { /* Rage XL */
PRINTKI("BIOS contains driver information table.\n");
lcd_ofs = *(u16 *)(driv_inf_tab + 10);
par->lcd_table = 0;
if (lcd_ofs != 0)
par->lcd_table = bios_base + lcd_ofs;
}
if (par->lcd_table != 0) {
char model[24];
char strbuf[16];
char refresh_rates_buf[100];
int id, tech, f, i, m, default_refresh_rate;
char *txtcolour;
char *txtmonitor;
char *txtdual;
char *txtformat;
u16 width, height, panel_type, refresh_rates;
u16 *lcdmodeptr;
u32 format;
u8 lcd_refresh_rates[16] = { 50, 56, 60, 67, 70, 72, 75, 76, 85,
90, 100, 120, 140, 150, 160, 200 };
/*
* The most important information is the panel size at
* offsets 25 and 27, but there is some other useful information
* there as well, which we print to the screen.
*/
id = *(u8 *)par->lcd_table;
strncpy(model, (char *)par->lcd_table+1, 24);
model[23] = 0;
width = par->lcd_width = *(u16 *)(par->lcd_table+25);
height = par->lcd_height = *(u16 *)(par->lcd_table+27);
panel_type = *(u16 *)(par->lcd_table+29);
if (panel_type & 1)
txtcolour = "colour";
else
txtcolour = "monochrome";
if (panel_type & 2)
txtdual = "dual (split) ";
else
txtdual = "";
tech = (panel_type >> 2) & 63;
switch (tech) {
case 0:
txtmonitor = "passive matrix";
break;
case 1:
txtmonitor = "active matrix";
break;
case 2:
txtmonitor = "active addressed STN";
break;
case 3:
txtmonitor = "EL";
break;
case 4:
txtmonitor = "plasma";
break;
default:
txtmonitor = "unknown";
}
format = *(u32 *)(par->lcd_table+57);
if (tech == 0 || tech == 2) {
switch (format & 7) {
case 0:
txtformat = "12 bit interface";
break;
case 1:
txtformat = "16 bit interface";
break;
case 2:
txtformat = "24 bit interface";
break;
default:
txtformat = "unknown format";
}
} else {
switch (format & 7) {
case 0:
txtformat = "8 colours";
break;
case 1:
txtformat = "512 colours";
break;
case 2:
txtformat = "4096 colours";
break;
case 4:
txtformat = "262144 colours (LT mode)";
break;
case 5:
txtformat = "16777216 colours";
break;
case 6:
txtformat = "262144 colours (FDPI-2 mode)";
break;
default:
txtformat = "unknown format";
}
}
PRINTKI("%s%s %s monitor detected: %s\n",
txtdual, txtcolour, txtmonitor, model);
PRINTKI(" id=%d, %dx%d pixels, %s\n",
id, width, height, txtformat);
refresh_rates_buf[0] = 0;
refresh_rates = *(u16 *)(par->lcd_table+62);
m = 1;
f = 0;
for (i = 0; i < 16; i++) {
if (refresh_rates & m) {
if (f == 0) {
sprintf(strbuf, "%d",
lcd_refresh_rates[i]);
f++;
} else {
sprintf(strbuf, ",%d",
lcd_refresh_rates[i]);
}
strcat(refresh_rates_buf, strbuf);
}
m = m << 1;
}
default_refresh_rate = (*(u8 *)(par->lcd_table+61) & 0xf0) >> 4;
PRINTKI(" supports refresh rates [%s], default %d Hz\n",
refresh_rates_buf, lcd_refresh_rates[default_refresh_rate]);
par->lcd_refreshrate = lcd_refresh_rates[default_refresh_rate];
/*
* We now need to determine the crtc parameters for the
* LCD monitor. This is tricky, because they are not stored
* individually in the BIOS. Instead, the BIOS contains a
* table of display modes that work for this monitor.
*
* The idea is that we search for a mode of the same dimensions
* as the dimensions of the LCD monitor. Say our LCD monitor
* is 800x600 pixels; we then search for an 800x600 mode.
* The CRTC parameters we find here are the ones that we need
* to use to simulate other resolutions on the LCD screen.
*/
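/*
 * Layout of one BIOS mode record, as inferred from the loads in
 * the loop below (byte offsets; not from ATI documentation):
 * +0 width, +2 height, +9 pixel clock divisor, +17 h_total,
 * +19 h_disp, +21 h_sync start/delay, +23 h_sync length,
 * +24 v_total, +26 v_disp, +28 v_sync start/length.
 */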
lcdmodeptr = (u16 *)(par->lcd_table + 64);
while (*lcdmodeptr != 0) {
u32 modeptr;
u16 mwidth, mheight, lcd_hsync_start, lcd_vsync_start;
modeptr = bios_base + *lcdmodeptr;
mwidth = *((u16 *)(modeptr+0));
mheight = *((u16 *)(modeptr+2));
if (mwidth == width && mheight == height) {
par->lcd_pixclock = 100000000 / *((u16 *)(modeptr+9));
par->lcd_htotal = *((u16 *)(modeptr+17)) & 511;
par->lcd_hdisp = *((u16 *)(modeptr+19)) & 511;
lcd_hsync_start = *((u16 *)(modeptr+21)) & 511;
par->lcd_hsync_dly = (*((u16 *)(modeptr+21)) >> 9) & 7;
par->lcd_hsync_len = *((u8 *)(modeptr+23)) & 63;
par->lcd_vtotal = *((u16 *)(modeptr+24)) & 2047;
par->lcd_vdisp = *((u16 *)(modeptr+26)) & 2047;
lcd_vsync_start = *((u16 *)(modeptr+28)) & 2047;
par->lcd_vsync_len = (*((u16 *)(modeptr+28)) >> 11) & 31;
par->lcd_htotal = (par->lcd_htotal + 1) * 8;
par->lcd_hdisp = (par->lcd_hdisp + 1) * 8;
lcd_hsync_start = (lcd_hsync_start + 1) * 8;
par->lcd_hsync_len = par->lcd_hsync_len * 8;
par->lcd_vtotal++;
par->lcd_vdisp++;
lcd_vsync_start++;
par->lcd_right_margin = lcd_hsync_start - par->lcd_hdisp;
par->lcd_lower_margin = lcd_vsync_start - par->lcd_vdisp;
par->lcd_hblank_len = par->lcd_htotal - par->lcd_hdisp;
par->lcd_vblank_len = par->lcd_vtotal - par->lcd_vdisp;
break;
}
lcdmodeptr++;
}
if (*lcdmodeptr == 0) {
PRINTKE("LCD monitor CRTC parameters not found!!!\n");
/* To do: Switch to CRT if possible. */
} else {
PRINTKI(" LCD CRTC parameters: %d.%d %d %d %d %d %d %d %d %d\n",
1000000 / par->lcd_pixclock, 1000000 % par->lcd_pixclock,
par->lcd_hdisp,
par->lcd_hdisp + par->lcd_right_margin,
par->lcd_hdisp + par->lcd_right_margin
+ par->lcd_hsync_dly + par->lcd_hsync_len,
par->lcd_htotal,
par->lcd_vdisp,
par->lcd_vdisp + par->lcd_lower_margin,
par->lcd_vdisp + par->lcd_lower_margin + par->lcd_vsync_len,
par->lcd_vtotal);
PRINTKI(" : %d %d %d %d %d %d %d %d %d\n",
par->lcd_pixclock,
par->lcd_hblank_len - (par->lcd_right_margin +
par->lcd_hsync_dly + par->lcd_hsync_len),
par->lcd_hdisp,
par->lcd_right_margin,
par->lcd_hsync_len,
par->lcd_vblank_len - (par->lcd_lower_margin + par->lcd_vsync_len),
par->lcd_vdisp,
par->lcd_lower_margin,
par->lcd_vsync_len);
}
}
}
#endif /* CONFIG_FB_ATY_GENERIC_LCD */
static int __devinit init_from_bios(struct atyfb_par *par)
{
u32 bios_base, rom_addr;
int ret;
rom_addr = 0xc0000 + ((aty_ld_le32(SCRATCH_REG1, par) & 0x7f) << 11);
bios_base = (unsigned long)ioremap(rom_addr, 0x10000);
/* The BIOS starts with 0xaa55. */
if (*((u16 *)bios_base) == 0xaa55) {
u8 *bios_ptr;
u16 rom_table_offset, freq_table_offset;
PLL_BLOCK_MACH64 pll_block;
PRINTKI("Mach64 BIOS is located at %x, mapped at %x.\n", rom_addr, bios_base);
/* check for the frequency table */
bios_ptr = (u8*)bios_base;
rom_table_offset = (u16)(bios_ptr[0x48] | (bios_ptr[0x49] << 8));
freq_table_offset = bios_ptr[rom_table_offset + 16] | (bios_ptr[rom_table_offset + 17] << 8);
memcpy(&pll_block, bios_ptr + freq_table_offset, sizeof(PLL_BLOCK_MACH64));
PRINTKI("BIOS frequency table:\n");
PRINTKI("PCLK_min_freq %d, PCLK_max_freq %d, ref_freq %d, ref_divider %d\n",
pll_block.PCLK_min_freq, pll_block.PCLK_max_freq,
pll_block.ref_freq, pll_block.ref_divider);
PRINTKI("MCLK_pwd %d, MCLK_max_freq %d, XCLK_max_freq %d, SCLK_freq %d\n",
pll_block.MCLK_pwd, pll_block.MCLK_max_freq,
pll_block.XCLK_max_freq, pll_block.SCLK_freq);
par->pll_limits.pll_min = pll_block.PCLK_min_freq/100;
par->pll_limits.pll_max = pll_block.PCLK_max_freq/100;
par->pll_limits.ref_clk = pll_block.ref_freq/100;
par->pll_limits.ref_div = pll_block.ref_divider;
par->pll_limits.sclk = pll_block.SCLK_freq/100;
par->pll_limits.mclk = pll_block.MCLK_max_freq/100;
par->pll_limits.mclk_pm = pll_block.MCLK_pwd/100;
par->pll_limits.xclk = pll_block.XCLK_max_freq/100;
#ifdef CONFIG_FB_ATY_GENERIC_LCD
aty_init_lcd(par, bios_base);
#endif
ret = 0;
} else {
PRINTKE("no BIOS frequency table found, use parameters\n");
ret = -ENXIO;
}
iounmap((void __iomem *)bios_base);
return ret;
}
#endif /* __i386__ */
static int __devinit atyfb_setup_generic(struct pci_dev *pdev,
struct fb_info *info,
unsigned long addr)
{
struct atyfb_par *par = info->par;
u16 tmp;
unsigned long raddr;
struct resource *rrp;
int ret = 0;
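/*
 * Assume the register block lives in the last 4 KB of the low
 * 8 MB of the main aperture (offset 0x7ff000); if BAR2 advertises
 * a separate auxiliary register aperture below, prefer that.
 */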
raddr = addr + 0x7ff000UL;
rrp = &pdev->resource[2];
if ((rrp->flags & IORESOURCE_MEM) && request_mem_region(rrp->start, rrp->end - rrp->start + 1, "atyfb")) {
par->aux_start = rrp->start;
par->aux_size = rrp->end - rrp->start + 1;
raddr = rrp->start;
PRINTKI("using auxiliary register aperture\n");
}
info->fix.mmio_start = raddr;
par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000);
if (par->ati_regbase == NULL)
return -ENOMEM;
info->fix.mmio_start += par->aux_start ? 0x400 : 0xc00;
par->ati_regbase += par->aux_start ? 0x400 : 0xc00;
/*
* Enable memory-space accesses using config-space
* command register.
*/
pci_read_config_word(pdev, PCI_COMMAND, &tmp);
if (!(tmp & PCI_COMMAND_MEMORY)) {
tmp |= PCI_COMMAND_MEMORY;
pci_write_config_word(pdev, PCI_COMMAND, tmp);
}
#ifdef __BIG_ENDIAN
/* Use the big-endian aperture */
addr += 0x800000;
#endif
/* Map in frame buffer */
info->fix.smem_start = addr;
info->screen_base = ioremap(addr, 0x800000);
if (info->screen_base == NULL) {
ret = -ENOMEM;
goto atyfb_setup_generic_fail;
}
ret = correct_chipset(par);
if (ret)
goto atyfb_setup_generic_fail;
#ifdef __i386__
ret = init_from_bios(par);
if (ret)
goto atyfb_setup_generic_fail;
#endif
if (!(aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_EXT_DISP_EN))
par->clk_wr_offset = (inb(R_GENMO) & 0x0CU) >> 2;
else
par->clk_wr_offset = aty_ld_8(CLOCK_CNTL, par) & 0x03U;
/* according to ATI, we should use clock 3 for accelerated mode */
par->clk_wr_offset = 3;
return 0;
atyfb_setup_generic_fail:
iounmap(par->ati_regbase);
par->ati_regbase = NULL;
if (info->screen_base) {
iounmap(info->screen_base);
info->screen_base = NULL;
}
return ret;
}
#endif /* !__sparc__ */
static int __devinit atyfb_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned long addr, res_start, res_size;
struct fb_info *info;
struct resource *rp;
struct atyfb_par *par;
int rc = -ENOMEM;
/* Enable device in PCI config */
if (pci_enable_device(pdev)) {
PRINTKE("Cannot enable PCI device\n");
return -ENXIO;
}
/* Find which resource to use */
rp = &pdev->resource[0];
if (rp->flags & IORESOURCE_IO)
rp = &pdev->resource[1];
addr = rp->start;
if (!addr)
return -ENXIO;
/* Reserve space */
res_start = rp->start;
res_size = rp->end - rp->start + 1;
if (!request_mem_region(res_start, res_size, "atyfb"))
return -EBUSY;
/* Allocate framebuffer */
info = framebuffer_alloc(sizeof(struct atyfb_par), &pdev->dev);
if (!info) {
PRINTKE("atyfb_pci_probe() can't alloc fb_info\n");
return -ENOMEM;
}
par = info->par;
info->fix = atyfb_fix;
info->device = &pdev->dev;
par->pci_id = pdev->device;
par->res_start = res_start;
par->res_size = res_size;
par->irq = pdev->irq;
par->pdev = pdev;
/* Setup "info" structure */
#ifdef __sparc__
rc = atyfb_setup_sparc(pdev, info, addr);
#else
rc = atyfb_setup_generic(pdev, info, addr);
#endif
if (rc)
goto err_release_mem;
pci_set_drvdata(pdev, info);
/* Init chip & register framebuffer */
rc = aty_init(info);
if (rc)
goto err_release_io;
#ifdef __sparc__
/*
* Add /dev/fb mmap values.
*/
par->mmap_map[0].voff = 0x8000000000000000UL;
par->mmap_map[0].poff = (unsigned long) info->screen_base & PAGE_MASK;
par->mmap_map[0].size = info->fix.smem_len;
par->mmap_map[0].prot_mask = _PAGE_CACHE;
par->mmap_map[0].prot_flag = _PAGE_E;
par->mmap_map[1].voff = par->mmap_map[0].voff + info->fix.smem_len;
par->mmap_map[1].poff = (long)par->ati_regbase & PAGE_MASK;
par->mmap_map[1].size = PAGE_SIZE;
par->mmap_map[1].prot_mask = _PAGE_CACHE;
par->mmap_map[1].prot_flag = _PAGE_E;
#endif /* __sparc__ */
mutex_lock(&reboot_lock);
if (!reboot_info)
reboot_info = info;
mutex_unlock(&reboot_lock);
return 0;
err_release_io:
#ifdef __sparc__
kfree(par->mmap_map);
#else
if (par->ati_regbase)
iounmap(par->ati_regbase);
if (info->screen_base)
iounmap(info->screen_base);
#endif
err_release_mem:
if (par->aux_start)
release_mem_region(par->aux_start, par->aux_size);
release_mem_region(par->res_start, par->res_size);
framebuffer_release(info);
return rc;
}
#endif /* CONFIG_PCI */
#ifdef CONFIG_ATARI
static int __init atyfb_atari_probe(void)
{
struct atyfb_par *par;
struct fb_info *info;
int m64_num;
u32 clock_r;
int num_found = 0;
for (m64_num = 0; m64_num < mach64_count; m64_num++) {
if (!phys_vmembase[m64_num] || !phys_size[m64_num] ||
!phys_guiregbase[m64_num]) {
PRINTKI("phys_*[%d] parameters not set => "
"returning early. \n", m64_num);
continue;
}
info = framebuffer_alloc(sizeof(struct atyfb_par), NULL);
if (!info) {
PRINTKE("atyfb_atari_probe() can't alloc fb_info\n");
return -ENOMEM;
}
par = info->par;
info->fix = atyfb_fix;
par->irq = (unsigned int) -1; /* something invalid */
/*
* Map the video memory (physical address given)
* to somewhere in the kernel address space.
*/
info->screen_base = ioremap(phys_vmembase[m64_num], phys_size[m64_num]);
info->fix.smem_start = (unsigned long)info->screen_base; /* Fake! */
par->ati_regbase = ioremap(phys_guiregbase[m64_num], 0x10000) +
0xFC00ul;
info->fix.mmio_start = (unsigned long)par->ati_regbase; /* Fake! */
aty_st_le32(CLOCK_CNTL, 0x12345678, par);
clock_r = aty_ld_le32(CLOCK_CNTL, par);
switch (clock_r & 0x003F) {
case 0x12:
par->clk_wr_offset = 3; /* */
break;
case 0x34:
par->clk_wr_offset = 2; /* Medusa ST-IO ISA Adapter etc. */
break;
case 0x16:
par->clk_wr_offset = 1; /* */
break;
case 0x38:
par->clk_wr_offset = 0; /* Panther 1 ISA Adapter (Gerald) */
break;
}
/* Fake pci_id for correct_chipset() */
switch (aty_ld_le32(CNFG_CHIP_ID, par) & CFG_CHIP_TYPE) {
case 0x00d7:
par->pci_id = PCI_CHIP_MACH64GX;
break;
case 0x0057:
par->pci_id = PCI_CHIP_MACH64CX;
break;
default:
break;
}
if (correct_chipset(par) || aty_init(info)) {
iounmap(info->screen_base);
iounmap(par->ati_regbase);
framebuffer_release(info);
} else {
num_found++;
}
}
return num_found ? 0 : -ENXIO;
}
#endif /* CONFIG_ATARI */
#ifdef CONFIG_PCI
static void __devexit atyfb_remove(struct fb_info *info)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
/* restore video mode */
aty_set_crtc(par, &par->saved_crtc);
par->pll_ops->set_pll(info, &par->saved_pll);
unregister_framebuffer(info);
#ifdef CONFIG_FB_ATY_BACKLIGHT
if (M64_HAS(MOBIL_BUS))
aty_bl_exit(info->bl_dev);
#endif
#ifdef CONFIG_MTRR
if (par->mtrr_reg >= 0) {
mtrr_del(par->mtrr_reg, 0, 0);
par->mtrr_reg = -1;
}
if (par->mtrr_aper >= 0) {
mtrr_del(par->mtrr_aper, 0, 0);
par->mtrr_aper = -1;
}
#endif
#ifndef __sparc__
if (par->ati_regbase)
iounmap(par->ati_regbase);
if (info->screen_base)
iounmap(info->screen_base);
#ifdef __BIG_ENDIAN
if (info->sprite.addr)
iounmap(info->sprite.addr);
#endif
#endif
#ifdef __sparc__
kfree(par->mmap_map);
#endif
if (par->aux_start)
release_mem_region(par->aux_start, par->aux_size);
if (par->res_start)
release_mem_region(par->res_start, par->res_size);
framebuffer_release(info);
}
static void __devexit atyfb_pci_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
mutex_lock(&reboot_lock);
if (reboot_info == info)
reboot_info = NULL;
mutex_unlock(&reboot_lock);
atyfb_remove(info);
}
static struct pci_device_id atyfb_pci_tbl[] = {
#ifdef CONFIG_FB_ATY_GX
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GX) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64CX) },
#endif /* CONFIG_FB_ATY_GX */
#ifdef CONFIG_FB_ATY_CT
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64CT) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64ET) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LT) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64VT) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GT) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64VU) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GU) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LG) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64VV) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GV) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GW) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GY) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GZ) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GB) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GD) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GI) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GP) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GQ) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LB) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LD) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LI) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LP) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LQ) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GM) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GN) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GO) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GL) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GR) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64GS) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LM) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LN) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LR) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_CHIP_MACH64LS) },
#endif /* CONFIG_FB_ATY_CT */
{ }
};
MODULE_DEVICE_TABLE(pci, atyfb_pci_tbl);
static struct pci_driver atyfb_driver = {
.name = "atyfb",
.id_table = atyfb_pci_tbl,
.probe = atyfb_pci_probe,
.remove = __devexit_p(atyfb_pci_remove),
#ifdef CONFIG_PM
.suspend = atyfb_pci_suspend,
.resume = atyfb_pci_resume,
#endif /* CONFIG_PM */
};
#endif /* CONFIG_PCI */
#ifndef MODULE
static int __init atyfb_setup(char *options)
{
char *this_opt;
if (!options || !*options)
return 0;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!strncmp(this_opt, "noaccel", 7)) {
noaccel = 1;
#ifdef CONFIG_MTRR
} else if (!strncmp(this_opt, "nomtrr", 6)) {
nomtrr = 1;
#endif
} else if (!strncmp(this_opt, "vram:", 5))
vram = simple_strtoul(this_opt + 5, NULL, 0);
else if (!strncmp(this_opt, "pll:", 4))
pll = simple_strtoul(this_opt + 4, NULL, 0);
else if (!strncmp(this_opt, "mclk:", 5))
mclk = simple_strtoul(this_opt + 5, NULL, 0);
else if (!strncmp(this_opt, "xclk:", 5))
xclk = simple_strtoul(this_opt+5, NULL, 0);
else if (!strncmp(this_opt, "comp_sync:", 10))
comp_sync = simple_strtoul(this_opt+10, NULL, 0);
else if (!strncmp(this_opt, "backlight:", 10))
backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_PPC
else if (!strncmp(this_opt, "vmode:", 6)) {
unsigned int vmode =
simple_strtoul(this_opt + 6, NULL, 0);
if (vmode > 0 && vmode <= VMODE_MAX)
default_vmode = vmode;
} else if (!strncmp(this_opt, "cmode:", 6)) {
unsigned int cmode =
simple_strtoul(this_opt + 6, NULL, 0);
switch (cmode) {
case 0:
case 8:
default_cmode = CMODE_8;
break;
case 15:
case 16:
default_cmode = CMODE_16;
break;
case 24:
case 32:
default_cmode = CMODE_32;
break;
}
}
#endif
#ifdef CONFIG_ATARI
/*
* Why do we need this silly Mach64 argument?
* We are already here because of mach64= so it's redundant.
*/
else if (MACH_IS_ATARI
&& (!strncmp(this_opt, "Mach64:", 7))) {
static unsigned char m64_num;
static char mach64_str[80];
strlcpy(mach64_str, this_opt + 7, sizeof(mach64_str));
if (!store_video_par(mach64_str, m64_num)) {
m64_num++;
mach64_count = m64_num;
}
}
#endif
else
mode = this_opt;
}
return 0;
}
#endif /* MODULE */
static int atyfb_reboot_notify(struct notifier_block *nb,
unsigned long code, void *unused)
{
struct atyfb_par *par;
if (code != SYS_RESTART)
return NOTIFY_DONE;
mutex_lock(&reboot_lock);
if (!reboot_info)
goto out;
if (!lock_fb_info(reboot_info))
goto out;
par = reboot_info->par;
/*
* HP OmniBook 500's BIOS doesn't like the state of the
* hardware after atyfb has been used. Restore the hardware
* to the original state to allow successful reboots.
*/
aty_set_crtc(par, &par->saved_crtc);
par->pll_ops->set_pll(reboot_info, &par->saved_pll);
unlock_fb_info(reboot_info);
out:
mutex_unlock(&reboot_lock);
return NOTIFY_DONE;
}
static struct notifier_block atyfb_reboot_notifier = {
.notifier_call = atyfb_reboot_notify,
};
static const struct dmi_system_id atyfb_reboot_ids[] = {
{
.ident = "HP OmniBook 500",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP OmniBook PC"),
DMI_MATCH(DMI_PRODUCT_VERSION, "HP OmniBook 500 FA"),
},
},
{ }
};
static int __init atyfb_init(void)
{
int err1 = 1, err2 = 1;
#ifndef MODULE
char *option = NULL;
if (fb_get_options("atyfb", &option))
return -ENODEV;
atyfb_setup(option);
#endif
#ifdef CONFIG_PCI
err1 = pci_register_driver(&atyfb_driver);
#endif
#ifdef CONFIG_ATARI
err2 = atyfb_atari_probe();
#endif
if (err1 && err2)
return -ENODEV;
if (dmi_check_system(atyfb_reboot_ids))
register_reboot_notifier(&atyfb_reboot_notifier);
return 0;
}
static void __exit atyfb_exit(void)
{
if (dmi_check_system(atyfb_reboot_ids))
unregister_reboot_notifier(&atyfb_reboot_notifier);
#ifdef CONFIG_PCI
pci_unregister_driver(&atyfb_driver);
#endif
}
module_init(atyfb_init);
module_exit(atyfb_exit);
MODULE_DESCRIPTION("FBDev driver for ATI Mach64 cards");
MODULE_LICENSE("GPL");
module_param(noaccel, bool, 0);
MODULE_PARM_DESC(noaccel, "bool: disable acceleration");
module_param(vram, int, 0);
MODULE_PARM_DESC(vram, "int: override size of video ram");
module_param(pll, int, 0);
MODULE_PARM_DESC(pll, "int: override video clock");
module_param(mclk, int, 0);
MODULE_PARM_DESC(mclk, "int: override memory clock");
module_param(xclk, int, 0);
MODULE_PARM_DESC(xclk, "int: override accelerated engine clock");
module_param(comp_sync, int, 0);
MODULE_PARM_DESC(comp_sync, "Set composite sync signal to low (0) or high (1)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" ");
#ifdef CONFIG_MTRR
module_param(nomtrr, bool, 0);
MODULE_PARM_DESC(nomtrr, "bool: disable use of MTRR registers");
#endif
| gpl-2.0 |
simone201/neak-gs3-jb2 | drivers/tty/hvc/hvc_console.c | 2374 | 22409 | /*
* Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
* Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
* Copyright (C) 2004 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
* Copyright (C) 2004 IBM Corporation
*
* Additional Author(s):
* Ryan S. Arnold <rsa@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/console.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kbd_kern.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/major.h>
#include <linux/sysrq.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "hvc_console.h"
#define HVC_MAJOR 229
#define HVC_MINOR 0
/*
* Wait this long per iteration while trying to push buffered data to the
* hypervisor before allowing the tty to complete a close operation.
*/
#define HVC_CLOSE_WAIT (HZ/100) /* 1/100 of a second */
/*
* These sizes are most efficient for vio, because they are the
* native transfer size. We could make them selectable in the
* future to better deal with backends that want other buffer sizes.
*/
#define N_OUTBUF 16
#define N_INBUF 16
#define __ALIGNED__ __attribute__((__aligned__(sizeof(long))))
static struct tty_driver *hvc_driver;
static struct task_struct *hvc_task;
/* Picks up late kicks after list walk but before schedule() */
static int hvc_kicked;
static int hvc_init(void);
#ifdef CONFIG_MAGIC_SYSRQ
static int sysrq_pressed;
#endif
/* dynamic list of hvc_struct instances */
static LIST_HEAD(hvc_structs);
/*
* Protect the list of hvc_struct instances from inserts and removals during
* list traversal.
*/
static DEFINE_SPINLOCK(hvc_structs_lock);
/*
* This value is used to assign a tty->index value to an hvc_struct based
* upon order of exposure via hvc_probe(), when we cannot match it to
* a console candidate registered with hvc_instantiate().
*/
static int last_hvc = -1;
/*
* Do not call this function with either the hvc_structs_lock or the hvc_struct
* lock held. If successful, this function increments the kref reference
* count against the target hvc_struct so it should be released when finished.
*/
static struct hvc_struct *hvc_get_by_index(int index)
{
struct hvc_struct *hp;
unsigned long flags;
spin_lock(&hvc_structs_lock);
list_for_each_entry(hp, &hvc_structs, next) {
spin_lock_irqsave(&hp->lock, flags);
if (hp->index == index) {
kref_get(&hp->kref);
spin_unlock_irqrestore(&hp->lock, flags);
spin_unlock(&hvc_structs_lock);
return hp;
}
spin_unlock_irqrestore(&hp->lock, flags);
}
hp = NULL;
spin_unlock(&hvc_structs_lock);
return hp;
}
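/*
 * Typical caller pattern (sketch): every successful lookup must be
 * balanced with a kref_put() against destroy_hvc_struct() once the
 * caller is done with the hvc_struct:
 *
 *	hp = hvc_get_by_index(idx);
 *	if (hp) {
 *		... use hp ...
 *		kref_put(&hp->kref, destroy_hvc_struct);
 *	}
 */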
/*
* Initial console vtermnos for console API usage prior to full console
* initialization. Any vty adapter outside this range will not have usable
* console interfaces but can still be used as a tty device. This has to be
* static because kmalloc will not work during early console init.
*/
static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
{[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
/*
* Console APIs, NOT TTY. These APIs are available immediately when
* hvc_console_setup() finds adapters.
*/
static void hvc_console_print(struct console *co, const char *b,
unsigned count)
{
char c[N_OUTBUF] __ALIGNED__;
unsigned i = 0, n = 0;
int r, donecr = 0, index = co->index;
/* Console access attempt outside of acceptable console range. */
if (index >= MAX_NR_HVC_CONSOLES)
return;
/* This console adapter was removed so it is not usable. */
if (vtermnos[index] == -1)
return;
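/*
 * Drain the message through a small bounce buffer: expand "\n"
 * into "\r\n", then hand chunks to the backend's put_chars(),
 * retrying (and keeping the unsent tail) when the hypervisor
 * accepts only part of the buffer.
 */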
while (count > 0 || i > 0) {
if (count > 0 && i < sizeof(c)) {
if (b[n] == '\n' && !donecr) {
c[i++] = '\r';
donecr = 1;
} else {
c[i++] = b[n++];
donecr = 0;
--count;
}
} else {
r = cons_ops[index]->put_chars(vtermnos[index], c, i);
if (r <= 0) {
/* throw away characters on error
* but spin in case of -EAGAIN */
if (r != -EAGAIN)
i = 0;
} else {
i -= r;
if (i > 0)
memmove(c, c+r, i);
}
}
}
}
static struct tty_driver *hvc_console_device(struct console *c, int *index)
{
if (vtermnos[c->index] == -1)
return NULL;
*index = c->index;
return hvc_driver;
}
static int __init hvc_console_setup(struct console *co, char *options)
{
if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES)
return -ENODEV;
if (vtermnos[co->index] == -1)
return -ENODEV;
return 0;
}
static struct console hvc_console = {
.name = "hvc",
.write = hvc_console_print,
.device = hvc_console_device,
.setup = hvc_console_setup,
.flags = CON_PRINTBUFFER,
.index = -1,
};
/*
* Early console initialization. Precedes driver initialization.
*
* (1) we are first, and the user specified another driver
* -- index will remain -1
* (2) we are first and the user specified no driver
* -- index will be set to 0, then we will fail setup.
* (3) we are first and the user specified our driver
* -- index will be set to user specified driver, and we will fail
* (4) we are after driver, and this initcall will register us
* -- if the user didn't specify a driver then the console will match
*
* Note that for cases 2 and 3, we will match later when the io driver
* calls hvc_instantiate() and call register again.
*/
static int __init hvc_console_init(void)
{
register_console(&hvc_console);
return 0;
}
console_initcall(hvc_console_init);
/* callback when the kobject ref count reaches zero. */
static void destroy_hvc_struct(struct kref *kref)
{
struct hvc_struct *hp = container_of(kref, struct hvc_struct, kref);
unsigned long flags;
spin_lock(&hvc_structs_lock);
spin_lock_irqsave(&hp->lock, flags);
list_del(&(hp->next));
spin_unlock_irqrestore(&hp->lock, flags);
spin_unlock(&hvc_structs_lock);
kfree(hp);
}
/*
* hvc_instantiate() is an early console discovery method which locates
* consoles prior to the vio subsystem discovering them. Hotplugged
* vty adapters do NOT get an hvc_instantiate() callback since they
* appear after early console init.
*/
int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
{
struct hvc_struct *hp;
if (index < 0 || index >= MAX_NR_HVC_CONSOLES)
return -1;
if (vtermnos[index] != -1)
return -1;
/* make sure no tty has been registered at this index */
hp = hvc_get_by_index(index);
if (hp) {
kref_put(&hp->kref, destroy_hvc_struct);
return -1;
}
vtermnos[index] = vtermno;
cons_ops[index] = ops;
/* reserve all indices up to and including this index */
if (last_hvc < index)
last_hvc = index;
/* if this index is what the user requested, then register
* now (setup won't fail at this point). It's ok to just
* call register again if previously .setup failed.
*/
if (index == hvc_console.index)
register_console(&hvc_console);
return 0;
}
EXPORT_SYMBOL_GPL(hvc_instantiate);
/* Wake the sleeping khvcd */
void hvc_kick(void)
{
hvc_kicked = 1;
wake_up_process(hvc_task);
}
EXPORT_SYMBOL_GPL(hvc_kick);
static void hvc_unthrottle(struct tty_struct *tty)
{
hvc_kick();
}
/*
* The TTY interface won't be used until after the vio layer has exposed the vty
* adapter to the kernel.
*/
static int hvc_open(struct tty_struct *tty, struct file * filp)
{
struct hvc_struct *hp;
unsigned long flags;
int rc = 0;
/* Auto increments kref reference if found. */
if (!(hp = hvc_get_by_index(tty->index)))
return -ENODEV;
spin_lock_irqsave(&hp->lock, flags);
/* Check and then increment for fast path open. */
if (hp->count++ > 0) {
tty_kref_get(tty);
spin_unlock_irqrestore(&hp->lock, flags);
hvc_kick();
return 0;
} /* else count == 0 */
tty->driver_data = hp;
hp->tty = tty_kref_get(tty);
spin_unlock_irqrestore(&hp->lock, flags);
if (hp->ops->notifier_add)
rc = hp->ops->notifier_add(hp, hp->data);
/*
* If the notifier fails we return an error. The tty layer
* will call hvc_close() after a failed open but we don't want to clean
* up there so we'll clean up here and clear out the previously set
* tty fields and return the kref reference.
*/
if (rc) {
spin_lock_irqsave(&hp->lock, flags);
hp->tty = NULL;
spin_unlock_irqrestore(&hp->lock, flags);
tty_kref_put(tty);
tty->driver_data = NULL;
kref_put(&hp->kref, destroy_hvc_struct);
printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
}
/* Force wakeup of the polling thread */
hvc_kick();
return rc;
}
static void hvc_close(struct tty_struct *tty, struct file * filp)
{
struct hvc_struct *hp;
unsigned long flags;
if (tty_hung_up_p(filp))
return;
/*
* No driver_data means that this close was issued after a failed
* hvc_open by the tty layer's release_dev() function and we can just
* exit cleanly because the kref reference wasn't made.
*/
if (!tty->driver_data)
return;
hp = tty->driver_data;
spin_lock_irqsave(&hp->lock, flags);
if (--hp->count == 0) {
/* We are done with the tty pointer now. */
hp->tty = NULL;
spin_unlock_irqrestore(&hp->lock, flags);
if (hp->ops->notifier_del)
hp->ops->notifier_del(hp, hp->data);
/* cancel pending tty resize work */
cancel_work_sync(&hp->tty_resize);
/*
* Chain calls chars_in_buffer() and returns immediately if
* there is no buffered data otherwise sleeps on a wait queue
* waking periodically to check chars_in_buffer().
*/
tty_wait_until_sent(tty, HVC_CLOSE_WAIT);
} else {
if (hp->count < 0)
printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
hp->vtermno, hp->count);
spin_unlock_irqrestore(&hp->lock, flags);
}
tty_kref_put(tty);
kref_put(&hp->kref, destroy_hvc_struct);
}
static void hvc_hangup(struct tty_struct *tty)
{
struct hvc_struct *hp = tty->driver_data;
unsigned long flags;
int temp_open_count;
if (!hp)
return;
/* cancel pending tty resize work */
cancel_work_sync(&hp->tty_resize);
spin_lock_irqsave(&hp->lock, flags);
/*
* The N_TTY line discipline has problems such that in a close vs.
* open->hangup race this can be called after the final close, so
* prevent that from happening for now.
*/
if (hp->count <= 0) {
spin_unlock_irqrestore(&hp->lock, flags);
return;
}
temp_open_count = hp->count;
hp->count = 0;
hp->n_outbuf = 0;
hp->tty = NULL;
spin_unlock_irqrestore(&hp->lock, flags);
if (hp->ops->notifier_hangup)
hp->ops->notifier_hangup(hp, hp->data);
while(temp_open_count) {
--temp_open_count;
tty_kref_put(tty);
kref_put(&hp->kref, destroy_hvc_struct);
}
}
/*
* Push buffered characters whether they were just recently buffered or waiting
* on a blocked hypervisor. Call this function with hp->lock held.
*/
static int hvc_push(struct hvc_struct *hp)
{
int n;
n = hp->ops->put_chars(hp->vtermno, hp->outbuf, hp->n_outbuf);
if (n <= 0) {
if (n == 0 || n == -EAGAIN) {
hp->do_wakeup = 1;
return 0;
}
/* throw away output on error; this happens when
there is no session connected to the vterm. */
hp->n_outbuf = 0;
} else
hp->n_outbuf -= n;
if (hp->n_outbuf > 0)
memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf);
else
hp->do_wakeup = 1;
return n;
}
static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
struct hvc_struct *hp = tty->driver_data;
unsigned long flags;
int rsize, written = 0;
/* This write was probably executed during a tty close. */
if (!hp)
return -EPIPE;
if (hp->count <= 0)
return -EIO;
spin_lock_irqsave(&hp->lock, flags);
/* Push pending writes */
if (hp->n_outbuf > 0)
hvc_push(hp);
while (count > 0 && (rsize = hp->outbuf_size - hp->n_outbuf) > 0) {
if (rsize > count)
rsize = count;
memcpy(hp->outbuf + hp->n_outbuf, buf, rsize);
count -= rsize;
buf += rsize;
hp->n_outbuf += rsize;
written += rsize;
hvc_push(hp);
}
spin_unlock_irqrestore(&hp->lock, flags);
/*
* Racy but harmless: kick the thread if there is still pending data.
*/
if (hp->n_outbuf)
hvc_kick();
return written;
}
/**
* hvc_set_winsz() - Resize the hvc tty terminal window.
* @work: work structure.
*
* The routine shall not be called within an atomic context because it
* might sleep.
*
* Locking: hp->lock
*/
static void hvc_set_winsz(struct work_struct *work)
{
struct hvc_struct *hp;
unsigned long hvc_flags;
struct tty_struct *tty;
struct winsize ws;
hp = container_of(work, struct hvc_struct, tty_resize);
spin_lock_irqsave(&hp->lock, hvc_flags);
if (!hp->tty) {
spin_unlock_irqrestore(&hp->lock, hvc_flags);
return;
}
ws = hp->ws;
tty = tty_kref_get(hp->tty);
spin_unlock_irqrestore(&hp->lock, hvc_flags);
tty_do_resize(tty, &ws);
tty_kref_put(tty);
}
/*
* This is actually a contract between the driver and the tty layer outlining
* how much write room the driver can guarantee will be sent OR BUFFERED. This
* driver MUST honor the return value.
*/
static int hvc_write_room(struct tty_struct *tty)
{
struct hvc_struct *hp = tty->driver_data;
if (!hp)
return -1;
return hp->outbuf_size - hp->n_outbuf;
}
static int hvc_chars_in_buffer(struct tty_struct *tty)
{
struct hvc_struct *hp = tty->driver_data;
if (!hp)
return 0;
return hp->n_outbuf;
}
/*
* The timeout will vary between the MIN and MAX values defined here. By
* default, and during console activity, we use MIN_TIMEOUT of 10. When
* the console is idle, we increase the timeout value on each pass through
* msleep until we reach the max. This may be noticeable as a brief (average
* one second) delay on the console before the console responds to input when
* there has been no input for some time.
*/
#define MIN_TIMEOUT (10)
#define MAX_TIMEOUT (2000)
static u32 timeout = MIN_TIMEOUT;
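/*
 * Rough numbers for the backoff in khvcd below: each idle pass does
 * timeout += timeout/64 + 1, so the value grows quasi-geometrically
 * from MIN_TIMEOUT and needs on the order of 200 idle passes to
 * saturate at MAX_TIMEOUT (2 seconds).
 */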
#define HVC_POLL_READ 0x00000001
#define HVC_POLL_WRITE 0x00000002
int hvc_poll(struct hvc_struct *hp)
{
struct tty_struct *tty;
int i, n, poll_mask = 0;
char buf[N_INBUF] __ALIGNED__;
unsigned long flags;
int read_total = 0;
int written_total = 0;
spin_lock_irqsave(&hp->lock, flags);
/* Push pending writes */
if (hp->n_outbuf > 0)
written_total = hvc_push(hp);
/* Reschedule us if still some write pending */
if (hp->n_outbuf > 0) {
poll_mask |= HVC_POLL_WRITE;
/* If hvc_push() was not able to write, sleep a few msecs */
timeout = (written_total) ? 0 : MIN_TIMEOUT;
}
/* No tty attached, just skip */
tty = tty_kref_get(hp->tty);
if (tty == NULL)
goto bail;
/* Now check if we can get data (are we throttled?) */
if (test_bit(TTY_THROTTLED, &tty->flags))
goto throttled;
/* If we aren't notifier driven and aren't throttled, we always
* request a reschedule
*/
if (!hp->irq_requested)
poll_mask |= HVC_POLL_READ;
/* Read data if any */
for (;;) {
int count = tty_buffer_request_room(tty, N_INBUF);
/* If flip is full, just reschedule a later read */
if (count == 0) {
poll_mask |= HVC_POLL_READ;
break;
}
n = hp->ops->get_chars(hp->vtermno, buf, count);
if (n <= 0) {
/* Hangup the tty when disconnected from host */
if (n == -EPIPE) {
spin_unlock_irqrestore(&hp->lock, flags);
tty_hangup(tty);
spin_lock_irqsave(&hp->lock, flags);
} else if ( n == -EAGAIN ) {
/*
* Some back-ends can only ensure a certain minimum
* number of bytes read, which may exceed 'count'.
* Let the tty clear the flip buffer to make room.
*/
poll_mask |= HVC_POLL_READ;
}
break;
}
for (i = 0; i < n; ++i) {
#ifdef CONFIG_MAGIC_SYSRQ
if (hp->index == hvc_console.index) {
/* Handle the SysRq Hack */
/* XXX should support a sequence */
if (buf[i] == '\x0f') { /* ^O */
/* if ^O is pressed again, reset
* sysrq_pressed and flip ^O char */
sysrq_pressed = !sysrq_pressed;
if (sysrq_pressed)
continue;
} else if (sysrq_pressed) {
handle_sysrq(buf[i]);
sysrq_pressed = 0;
continue;
}
}
#endif /* CONFIG_MAGIC_SYSRQ */
tty_insert_flip_char(tty, buf[i], 0);
}
read_total += n;
}
throttled:
/* Wakeup write queue if necessary */
if (hp->do_wakeup) {
hp->do_wakeup = 0;
tty_wakeup(tty);
}
bail:
spin_unlock_irqrestore(&hp->lock, flags);
if (read_total) {
/* Activity is occurring, so reset the polling backoff value to
a minimum for performance. */
timeout = MIN_TIMEOUT;
tty_flip_buffer_push(tty);
}
if (tty)
tty_kref_put(tty);
return poll_mask;
}
EXPORT_SYMBOL_GPL(hvc_poll);
/**
* __hvc_resize() - Update terminal window size information.
* @hp: HVC console pointer
* @ws: Terminal window size structure
*
* Stores the specified window size information in the hvc structure of @hp.
* The function schedules the tty resize update.
*
* Locking: takes no locks itself; MUST be called holding hp->lock
*/
void __hvc_resize(struct hvc_struct *hp, struct winsize ws)
{
hp->ws = ws;
schedule_work(&hp->tty_resize);
}
EXPORT_SYMBOL_GPL(__hvc_resize);
/*
* This kthread is either polling or interrupt driven. Which of the two is
* determined by hvc_poll(), which checks whether the console adapter
* supports interrupts.
*/
static int khvcd(void *unused)
{
int poll_mask;
struct hvc_struct *hp;
set_freezable();
do {
poll_mask = 0;
hvc_kicked = 0;
try_to_freeze();
wmb();
if (!cpus_are_in_xmon()) {
spin_lock(&hvc_structs_lock);
list_for_each_entry(hp, &hvc_structs, next) {
poll_mask |= hvc_poll(hp);
}
spin_unlock(&hvc_structs_lock);
} else
poll_mask |= HVC_POLL_READ;
if (hvc_kicked)
continue;
set_current_state(TASK_INTERRUPTIBLE);
if (!hvc_kicked) {
if (poll_mask == 0)
schedule();
else {
if (timeout < MAX_TIMEOUT)
timeout += (timeout >> 6) + 1;
msleep_interruptible(timeout);
}
}
__set_current_state(TASK_RUNNING);
} while (!kthread_should_stop());
return 0;
}
static const struct tty_operations hvc_ops = {
.open = hvc_open,
.close = hvc_close,
.write = hvc_write,
.hangup = hvc_hangup,
.unthrottle = hvc_unthrottle,
.write_room = hvc_write_room,
.chars_in_buffer = hvc_chars_in_buffer,
};
struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
const struct hv_ops *ops,
int outbuf_size)
{
struct hvc_struct *hp;
int i;
/* We wait until a driver actually comes along */
if (!hvc_driver) {
int err = hvc_init();
if (err)
return ERR_PTR(err);
}
hp = kzalloc(ALIGN(sizeof(*hp), sizeof(long)) + outbuf_size,
GFP_KERNEL);
if (!hp)
return ERR_PTR(-ENOMEM);
hp->vtermno = vtermno;
hp->data = data;
hp->ops = ops;
hp->outbuf_size = outbuf_size;
hp->outbuf = &((char *)hp)[ALIGN(sizeof(*hp), sizeof(long))];
kref_init(&hp->kref);
INIT_WORK(&hp->tty_resize, hvc_set_winsz);
spin_lock_init(&hp->lock);
spin_lock(&hvc_structs_lock);
/*
* find index to use:
* see if this vterm id matches one registered for console.
*/
for (i=0; i < MAX_NR_HVC_CONSOLES; i++)
if (vtermnos[i] == hp->vtermno &&
cons_ops[i] == hp->ops)
break;
/* no matching slot, just use a counter */
if (i >= MAX_NR_HVC_CONSOLES)
i = ++last_hvc;
hp->index = i;
list_add_tail(&(hp->next), &hvc_structs);
spin_unlock(&hvc_structs_lock);
return hp;
}
EXPORT_SYMBOL_GPL(hvc_alloc);
int hvc_remove(struct hvc_struct *hp)
{
unsigned long flags;
struct tty_struct *tty;
spin_lock_irqsave(&hp->lock, flags);
tty = tty_kref_get(hp->tty);
if (hp->index < MAX_NR_HVC_CONSOLES)
vtermnos[hp->index] = -1;
/* Don't whack hp->irq because tty_hangup() will need to free the irq. */
spin_unlock_irqrestore(&hp->lock, flags);
/*
* We 'put' the instance that was grabbed when the kref instance
* was initialized using kref_init(). Let the last holder of this
* kref cause it to be removed, which will probably be the tty_vhangup
* below.
*/
kref_put(&hp->kref, destroy_hvc_struct);
/*
* This call will automatically chain into hvc_hangup().
*/
if (tty) {
tty_vhangup(tty);
tty_kref_put(tty);
}
return 0;
}
EXPORT_SYMBOL_GPL(hvc_remove);
/* Driver initialization: called as soon as someone uses hvc_alloc(). */
static int hvc_init(void)
{
struct tty_driver *drv;
int err;
/* We need more than hvc_count adapters due to hotplug additions. */
drv = alloc_tty_driver(HVC_ALLOC_TTY_ADAPTERS);
if (!drv) {
err = -ENOMEM;
goto out;
}
drv->owner = THIS_MODULE;
drv->driver_name = "hvc";
drv->name = "hvc";
drv->major = HVC_MAJOR;
drv->minor_start = HVC_MINOR;
drv->type = TTY_DRIVER_TYPE_SYSTEM;
drv->init_termios = tty_std_termios;
drv->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS;
tty_set_operations(drv, &hvc_ops);
/* Always start the kthread because there can be hotplug vty adapters
* added later. */
hvc_task = kthread_run(khvcd, NULL, "khvcd");
if (IS_ERR(hvc_task)) {
printk(KERN_ERR "Couldn't create kthread for console.\n");
err = PTR_ERR(hvc_task);
goto put_tty;
}
err = tty_register_driver(drv);
if (err) {
printk(KERN_ERR "Couldn't register hvc console driver\n");
goto stop_thread;
}
/*
* Make sure tty is fully registered before allowing it to be
* found by hvc_console_device.
*/
smp_mb();
hvc_driver = drv;
return 0;
stop_thread:
kthread_stop(hvc_task);
hvc_task = NULL;
put_tty:
put_tty_driver(drv);
out:
return err;
}
/* This isn't particularly necessary due to this being a console driver
* but it is nice to be thorough.
*/
static void __exit hvc_exit(void)
{
if (hvc_driver) {
kthread_stop(hvc_task);
tty_unregister_driver(hvc_driver);
/* return tty_struct instances allocated in hvc_init(). */
put_tty_driver(hvc_driver);
unregister_console(&hvc_console);
}
}
module_exit(hvc_exit);
| gpl-2.0 |
gimmeitorilltell/slim_kernel_samsung_msm8660 | arch/arm/mach-s3c64xx/cpu.c | 3398 | 4102 | /* linux/arch/arm/plat-s3c64xx/cpu.c
*
* Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
* http://armlinux.simtec.co.uk/
*
* S3C64XX CPU Support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sysdev.h>
#include <linux/serial_core.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/map.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <plat/regs-serial.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/clock.h>
#include <mach/s3c6400.h>
#include <mach/s3c6410.h>
/* table of supported CPUs */
static const char name_s3c6400[] = "S3C6400";
static const char name_s3c6410[] = "S3C6410";
static struct cpu_table cpu_ids[] __initdata = {
{
.idcode = 0x36400000,
.idmask = 0xfffff000,
.map_io = s3c6400_map_io,
.init_clocks = s3c6400_init_clocks,
.init_uarts = s3c6400_init_uarts,
.init = s3c6400_init,
.name = name_s3c6400,
}, {
.idcode = 0x36410100,
.idmask = 0xffffff00,
.map_io = s3c6410_map_io,
.init_clocks = s3c6410_init_clocks,
.init_uarts = s3c6410_init_uarts,
.init = s3c6410_init,
.name = name_s3c6410,
},
};
/* minimal IO mapping */
/* see notes on uart map in arch/arm/mach-s3c6400/include/mach/debug-macro.S */
#define UART_OFFS (S3C_PA_UART & 0xfffff)
static struct map_desc s3c_iodesc[] __initdata = {
{
.virtual = (unsigned long)S3C_VA_SYS,
.pfn = __phys_to_pfn(S3C64XX_PA_SYSCON),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S3C_VA_MEM,
.pfn = __phys_to_pfn(S3C64XX_PA_SROM),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)(S3C_VA_UART + UART_OFFS),
.pfn = __phys_to_pfn(S3C_PA_UART),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)VA_VIC0,
.pfn = __phys_to_pfn(S3C64XX_PA_VIC0),
.length = SZ_16K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)VA_VIC1,
.pfn = __phys_to_pfn(S3C64XX_PA_VIC1),
.length = SZ_16K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S3C_VA_TIMER,
.pfn = __phys_to_pfn(S3C_PA_TIMER),
.length = SZ_16K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S3C64XX_VA_GPIO,
.pfn = __phys_to_pfn(S3C64XX_PA_GPIO),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S3C64XX_VA_MODEM,
.pfn = __phys_to_pfn(S3C64XX_PA_MODEM),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S3C_VA_WATCHDOG,
.pfn = __phys_to_pfn(S3C64XX_PA_WATCHDOG),
.length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = (unsigned long)S3C_VA_USB_HSPHY,
.pfn = __phys_to_pfn(S3C64XX_PA_USB_HSPHY),
.length = SZ_1K,
.type = MT_DEVICE,
},
};
struct sysdev_class s3c64xx_sysclass = {
.name = "s3c64xx-core",
};
static struct sys_device s3c64xx_sysdev = {
.cls = &s3c64xx_sysclass,
};
/* uart registration process */
void __init s3c6400_common_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
s3c24xx_init_uartdevs("s3c6400-uart", s3c64xx_uart_resources, cfg, no);
}
/* read cpu identification code */
void __init s3c64xx_init_io(struct map_desc *mach_desc, int size)
{
unsigned long idcode;
/* initialise the io descriptors we need for initialisation */
iotable_init(s3c_iodesc, ARRAY_SIZE(s3c_iodesc));
iotable_init(mach_desc, size);
idcode = __raw_readl(S3C_VA_SYS + 0x118);
if (!idcode) {
/* S3C6400 has the ID register in a different place,
* and needs a write before it can be read. */
__raw_writel(0x0, S3C_VA_SYS + 0xA1C);
idcode = __raw_readl(S3C_VA_SYS + 0xA1C);
}
s3c_init_cpu(idcode, cpu_ids, ARRAY_SIZE(cpu_ids));
}
static __init int s3c64xx_sysdev_init(void)
{
sysdev_class_register(&s3c64xx_sysclass);
return sysdev_register(&s3c64xx_sysdev);
}
core_initcall(s3c64xx_sysdev_init);
| gpl-2.0 |
dorimanx/SAMMY-ICS-SOURCE | crypto/sha1_generic.c | 4166 | 3458 | /*
* Cryptographic API.
*
* SHA1 Secure Hash Algorithm.
*
* Derived from cryptoapi implementation, adapted for in-place
* scatterlist interface.
*
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
static int sha1_init(struct shash_desc *desc)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
*sctx = (struct sha1_state){
.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};
return 0;
}
static int sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial, done;
const u8 *src;
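/*
 * Buffering strategy: 'partial' counts the bytes of an incomplete
 * 64-byte block already queued in sctx->buffer. Complete blocks
 * (the topped-up partial block, then whole 64-byte runs straight
 * from 'data') are fed to sha_transform(); whatever remains is
 * stashed in sctx->buffer for the next call.
 */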
partial = sctx->count & 0x3f;
sctx->count += len;
done = 0;
src = data;
if ((partial + len) > 63) {
u32 temp[SHA_WORKSPACE_WORDS];
if (partial) {
done = -partial;
memcpy(sctx->buffer + partial, data, done + 64);
src = sctx->buffer;
}
do {
sha_transform(sctx->state, src, temp);
done += 64;
src = data + done;
} while (done + 63 < len);
memset(temp, 0, sizeof(temp));
partial = 0;
}
memcpy(sctx->buffer + partial, src, len - done);
return 0;
}
/* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
__be32 *dst = (__be32 *)out;
u32 i, index, padlen;
__be64 bits;
static const u8 padding[64] = { 0x80, };
bits = cpu_to_be64(sctx->count << 3);
/* Pad out to 56 mod 64 */
index = sctx->count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
sha1_update(desc, padding, padlen);
/* Append length */
sha1_update(desc, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_be32(sctx->state[i]);
/* Wipe context */
memset(sctx, 0, sizeof *sctx);
return 0;
}
static int sha1_export(struct shash_desc *desc, void *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
static int sha1_import(struct shash_desc *desc, const void *in)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
return 0;
}
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_init,
.update = sha1_update,
.final = sha1_final,
.export = sha1_export,
.import = sha1_import,
.descsize = sizeof(struct sha1_state),
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init sha1_generic_mod_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit sha1_generic_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
module_init(sha1_generic_mod_init);
module_exit(sha1_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
MODULE_ALIAS("sha1");
| gpl-2.0 |
caplio/HTC-DNA | lib/mpi/mpi-mpow.c | 4934 | 3074 | /* mpi-mpow.c - MPI functions
* Copyright (C) 1998, 1999, 2000 Free Software Foundation, Inc.
*
* This file is part of GnuPG.
*
* GnuPG is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* GnuPG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include "mpi-internal.h"
#include "longlong.h"
static int build_index(const MPI *exparray, int k, int i, int t)
{
int j, bitno;
int index = 0;
bitno = t - i;
for (j = k - 1; j >= 0; j--) {
index <<= 1;
if (mpi_test_bit(exparray[j], bitno))
index |= 1;
}
return index;
}
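/*
 * Example (hypothetical bit values): for k = 2 exponents,
 * build_index() packs exparray[1]'s bit 'bitno' into index bit 1
 * and exparray[0]'s into bit 0, so bits (1, 0) give index 2.
 * mpi_mulpowm() below uses this column of exponent bits to select
 * a precomputed product - simultaneous exponentiation, also known
 * as Shamir's trick.
 */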
/****************
* RES = (BASE[0] ^ EXP[0]) * (BASE[1] ^ EXP[1]) * ... mod M
*/
int mpi_mulpowm(MPI res, MPI *basearray, MPI *exparray, MPI m)
{
int rc = -ENOMEM;
int k; /* number of elements */
int t; /* bit size of largest exponent */
int i, j, idx;
MPI *G = NULL; /* table with precomputed values of size 2^k */
MPI tmp = NULL;
for (k = 0; basearray[k]; k++)
;
if (!k) {
pr_emerg("mpi_mulpowm: assert(k) failed\n");
BUG();
}
for (t = 0, i = 0; (tmp = exparray[i]); i++) {
j = mpi_get_nbits(tmp);
if (j > t)
t = j;
}
if (i != k) {
pr_emerg("mpi_mulpowm: assert(i==k) failed\n");
BUG();
}
if (!t) {
pr_emerg("mpi_mulpowm: assert(t) failed\n");
BUG();
}
if (k >= 10) {
pr_emerg("mpi_mulpowm: assert(k<10) failed\n");
BUG();
}
G = kzalloc((1 << k) * sizeof *G, GFP_KERNEL);
if (!G)
goto err_out;
/* and calculate */
tmp = mpi_alloc(mpi_get_nlimbs(m) + 1);
if (!tmp)
goto nomem;
if (mpi_set_ui(res, 1) < 0)
goto nomem;
for (i = 1; i <= t; i++) {
if (mpi_mulm(tmp, res, res, m) < 0)
goto nomem;
idx = build_index(exparray, k, i, t);
if (!(idx >= 0 && idx < (1 << k))) {
pr_emerg("mpi_mulpowm: assert(idx >= 0 && idx < (1<<k)) failed\n");
BUG();
}
if (!G[idx]) {
if (!idx) {
G[0] = mpi_alloc_set_ui(1);
if (!G[0])
goto nomem;
} else {
for (j = 0; j < k; j++) {
if ((idx & (1 << j))) {
if (!G[idx]) {
if (mpi_copy
(&G[idx],
basearray[j]) < 0)
goto nomem;
} else {
if (mpi_mulm
(G[idx], G[idx],
basearray[j],
m) < 0)
goto nomem;
}
}
}
if (!G[idx]) {
G[idx] = mpi_alloc(0);
if (!G[idx])
goto nomem;
}
}
}
if (mpi_mulm(res, tmp, G[idx], m) < 0)
goto nomem;
}
rc = 0;
nomem:
/* cleanup */
mpi_free(tmp);
for (i = 0; i < (1 << k); i++)
mpi_free(G[i]);
kfree(G);
err_out:
return rc;
}
| gpl-2.0 |
LiquidSmooth-Devices/android_kernel_samsung_hlte | lib/lzo/lzo1x_compress.c | 6214 | 6295 | /*
* LZO1X Compressor from LZO
*
* Copyright (C) 1996-2012 Markus F.X.J. Oberhumer <markus@oberhumer.com>
*
* The full LZO package can be found at:
* http://www.oberhumer.com/opensource/lzo/
*
* Changed for Linux kernel use by:
* Nitin Gupta <nitingupta910@gmail.com>
* Richard Purdie <rpurdie@openedhand.com>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <linux/lzo.h>
#include "lzodefs.h"
static noinline size_t
lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len,
size_t ti, void *wrkmem)
{
const unsigned char *ip;
unsigned char *op;
const unsigned char * const in_end = in + in_len;
const unsigned char * const ip_end = in + in_len - 20;
const unsigned char *ii;
lzo_dict_t * const dict = (lzo_dict_t *) wrkmem;
op = out;
ip = in;
ii = ip;
ip += ti < 4 ? 4 - ti : 0;
for (;;) {
const unsigned char *m_pos;
size_t t, m_len, m_off;
u32 dv;
literal:
ip += 1 + ((ip - ii) >> 5);
next:
if (unlikely(ip >= ip_end))
break;
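/*
 * Multiplicative hash of the next four input bytes picks a
 * dictionary slot; a hit is only a candidate match and is
 * verified against 'dv' below before being used.
 */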
dv = get_unaligned_le32(ip);
t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
m_pos = in + dict[t];
dict[t] = (lzo_dict_t) (ip - in);
if (unlikely(dv != get_unaligned_le32(m_pos)))
goto literal;
ii -= ti;
ti = 0;
t = ip - ii;
if (t != 0) {
if (t <= 3) {
op[-2] |= t;
COPY4(op, ii);
op += t;
} else if (t <= 16) {
*op++ = (t - 3);
COPY8(op, ii);
COPY8(op + 8, ii + 8);
op += t;
} else {
if (t <= 18) {
*op++ = (t - 3);
} else {
size_t tt = t - 18;
*op++ = 0;
while (unlikely(tt > 255)) {
tt -= 255;
*op++ = 0;
}
*op++ = tt;
}
do {
COPY8(op, ii);
COPY8(op + 8, ii + 8);
op += 16;
ii += 16;
t -= 16;
} while (t >= 16);
if (t > 0) do {
*op++ = *ii++;
} while (--t > 0);
}
}
m_len = 4;
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64)
u64 v;
v = get_unaligned((const u64 *) (ip + m_len)) ^
get_unaligned((const u64 *) (m_pos + m_len));
if (unlikely(v == 0)) {
do {
m_len += 8;
v = get_unaligned((const u64 *) (ip + m_len)) ^
get_unaligned((const u64 *) (m_pos + m_len));
if (unlikely(ip + m_len >= ip_end))
goto m_len_done;
} while (v == 0);
}
# if defined(__LITTLE_ENDIAN)
m_len += (unsigned) __builtin_ctzll(v) / 8;
# elif defined(__BIG_ENDIAN)
m_len += (unsigned) __builtin_clzll(v) / 8;
# else
# error "missing endian definition"
# endif
#elif defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ32)
u32 v;
v = get_unaligned((const u32 *) (ip + m_len)) ^
get_unaligned((const u32 *) (m_pos + m_len));
if (unlikely(v == 0)) {
do {
m_len += 4;
v = get_unaligned((const u32 *) (ip + m_len)) ^
get_unaligned((const u32 *) (m_pos + m_len));
if (v != 0)
break;
m_len += 4;
v = get_unaligned((const u32 *) (ip + m_len)) ^
get_unaligned((const u32 *) (m_pos + m_len));
if (unlikely(ip + m_len >= ip_end))
goto m_len_done;
} while (v == 0);
}
# if defined(__LITTLE_ENDIAN)
m_len += (unsigned) __builtin_ctz(v) / 8;
# elif defined(__BIG_ENDIAN)
m_len += (unsigned) __builtin_clz(v) / 8;
# else
# error "missing endian definition"
# endif
#else
if (unlikely(ip[m_len] == m_pos[m_len])) {
do {
m_len += 1;
if (ip[m_len] != m_pos[m_len])
break;
m_len += 1;
if (ip[m_len] != m_pos[m_len])
break;
m_len += 1;
if (ip[m_len] != m_pos[m_len])
break;
m_len += 1;
if (ip[m_len] != m_pos[m_len])
break;
m_len += 1;
if (ip[m_len] != m_pos[m_len])
break;
m_len += 1;
if (ip[m_len] != m_pos[m_len])
break;
m_len += 1;
if (ip[m_len] != m_pos[m_len])
break;
m_len += 1;
if (unlikely(ip + m_len >= ip_end))
goto m_len_done;
} while (ip[m_len] == m_pos[m_len]);
}
#endif
}
m_len_done:
m_off = ip - m_pos;
ip += m_len;
ii = ip;
if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) {
m_off -= 1;
*op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2));
*op++ = (m_off >> 3);
} else if (m_off <= M3_MAX_OFFSET) {
m_off -= 1;
if (m_len <= M3_MAX_LEN)
*op++ = (M3_MARKER | (m_len - 2));
else {
m_len -= M3_MAX_LEN;
*op++ = M3_MARKER | 0;
while (unlikely(m_len > 255)) {
m_len -= 255;
*op++ = 0;
}
*op++ = (m_len);
}
*op++ = (m_off << 2);
*op++ = (m_off >> 6);
} else {
m_off -= 0x4000;
if (m_len <= M4_MAX_LEN)
*op++ = (M4_MARKER | ((m_off >> 11) & 8)
| (m_len - 2));
else {
m_len -= M4_MAX_LEN;
*op++ = (M4_MARKER | ((m_off >> 11) & 8));
while (unlikely(m_len > 255)) {
m_len -= 255;
*op++ = 0;
}
*op++ = (m_len);
}
*op++ = (m_off << 2);
*op++ = (m_off >> 6);
}
goto next;
}
*out_len = op - out;
return in_end - (ii - ti);
}
int lzo1x_1_compress(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len,
void *wrkmem)
{
const unsigned char *ip = in;
unsigned char *op = out;
size_t l = in_len;
size_t t = 0;
while (l > 20) {
size_t ll = l <= (M4_MAX_OFFSET + 1) ? l : (M4_MAX_OFFSET + 1);
uintptr_t ll_end = (uintptr_t) ip + ll;
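/* stop if the block (plus the 1/32 slack) would wrap the address space */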
if ((ll_end + ((t + ll) >> 5)) <= ll_end)
break;
BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t));
t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem);
ip += ll;
op += *out_len;
l -= ll;
}
t += l;
if (t > 0) {
const unsigned char *ii = in + in_len - t;
if (op == out && t <= 238) {
*op++ = (17 + t);
} else if (t <= 3) {
op[-2] |= t;
} else if (t <= 18) {
*op++ = (t - 3);
} else {
size_t tt = t - 18;
*op++ = 0;
while (tt > 255) {
tt -= 255;
*op++ = 0;
}
*op++ = tt;
}
if (t >= 16) do {
COPY8(op, ii);
COPY8(op + 8, ii + 8);
op += 16;
ii += 16;
t -= 16;
} while (t >= 16);
if (t > 0) do {
*op++ = *ii++;
} while (--t > 0);
}
*op++ = M4_MARKER | 1;
*op++ = 0;
*op++ = 0;
*out_len = op - out;
return LZO_E_OK;
}
EXPORT_SYMBOL_GPL(lzo1x_1_compress);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO1X-1 Compressor");
| gpl-2.0 |
robcore/machinex_kernel | drivers/media/rc/keymaps/rc-twinhan1027.c | 9542 | 1854 | #include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table twinhan_vp1027[] = {
{ 0x16, KEY_POWER2 },
{ 0x17, KEY_FAVORITES },
{ 0x0f, KEY_TEXT },
{ 0x48, KEY_INFO },
{ 0x1c, KEY_EPG },
{ 0x04, KEY_LIST },
{ 0x03, KEY_1 },
{ 0x01, KEY_2 },
{ 0x06, KEY_3 },
{ 0x09, KEY_4 },
{ 0x1d, KEY_5 },
{ 0x1f, KEY_6 },
{ 0x0d, KEY_7 },
{ 0x19, KEY_8 },
{ 0x1b, KEY_9 },
{ 0x15, KEY_0 },
{ 0x0c, KEY_CANCEL },
{ 0x4a, KEY_CLEAR },
{ 0x13, KEY_BACKSPACE },
{ 0x00, KEY_TAB },
{ 0x4b, KEY_UP },
{ 0x51, KEY_DOWN },
{ 0x4e, KEY_LEFT },
{ 0x52, KEY_RIGHT },
{ 0x4f, KEY_ENTER },
{ 0x1e, KEY_VOLUMEUP },
{ 0x0a, KEY_VOLUMEDOWN },
{ 0x02, KEY_CHANNELDOWN },
{ 0x05, KEY_CHANNELUP },
{ 0x11, KEY_RECORD },
{ 0x14, KEY_PLAY },
{ 0x4c, KEY_PAUSE },
{ 0x1a, KEY_STOP },
{ 0x40, KEY_REWIND },
{ 0x12, KEY_FASTFORWARD },
{ 0x41, KEY_PREVIOUSSONG },
{ 0x42, KEY_NEXTSONG },
{ 0x54, KEY_SAVE },
{ 0x50, KEY_LANGUAGE },
{ 0x47, KEY_MEDIA },
{ 0x4d, KEY_SCREEN },
{ 0x43, KEY_SUBTITLE },
{ 0x10, KEY_MUTE },
{ 0x49, KEY_AUDIO },
{ 0x07, KEY_SLEEP },
{ 0x08, KEY_VIDEO },
{ 0x0e, KEY_AGAIN },
{ 0x45, KEY_EQUAL },
{ 0x46, KEY_MINUS },
{ 0x18, KEY_RED },
{ 0x53, KEY_GREEN },
{ 0x5e, KEY_YELLOW },
{ 0x5f, KEY_BLUE },
};
static struct rc_map_list twinhan_vp1027_map = {
.map = {
.scan = twinhan_vp1027,
.size = ARRAY_SIZE(twinhan_vp1027),
.rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
.name = RC_MAP_TWINHAN_VP1027_DVBS,
}
};
static int __init init_rc_map_twinhan_vp1027(void)
{
return rc_map_register(&twinhan_vp1027_map);
}
static void __exit exit_rc_map_twinhan_vp1027(void)
{
rc_map_unregister(&twinhan_vp1027_map);
}
module_init(init_rc_map_twinhan_vp1027)
module_exit(exit_rc_map_twinhan_vp1027)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sergey Ivanov <123kash@gmail.com>");
| gpl-2.0 |
XileForce/Vindicator-S6 | drivers/pci/hotplug/shpchp_hpc.c | 9798 | 28849 | /*
* Standard PCI Hot Plug Driver
*
* Copyright (C) 1995,2001 Compaq Computer Corporation
* Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2001 IBM Corp.
* Copyright (C) 2003-2004 Intel Corporation
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com>
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "shpchp.h"
/* Slot Available Register I field definition */
#define SLOT_33MHZ 0x0000001f
#define SLOT_66MHZ_PCIX 0x00001f00
#define SLOT_100MHZ_PCIX 0x001f0000
#define SLOT_133MHZ_PCIX 0x1f000000
/* Slot Available Register II field definition */
#define SLOT_66MHZ 0x0000001f
#define SLOT_66MHZ_PCIX_266 0x00000f00
#define SLOT_100MHZ_PCIX_266 0x0000f000
#define SLOT_133MHZ_PCIX_266 0x000f0000
#define SLOT_66MHZ_PCIX_533 0x00f00000
#define SLOT_100MHZ_PCIX_533 0x0f000000
#define SLOT_133MHZ_PCIX_533 0xf0000000
/* Slot Configuration */
#define SLOT_NUM 0x0000001F
#define FIRST_DEV_NUM 0x00001F00
#define PSN 0x07FF0000
#define UPDOWN 0x20000000
#define MRLSENSOR 0x40000000
#define ATTN_BUTTON 0x80000000
/*
* Interrupt Locator Register definitions
*/
#define CMD_INTR_PENDING (1 << 0)
#define SLOT_INTR_PENDING(i) (1 << (i + 1))
/*
* Controller SERR-INT Register
*/
#define GLOBAL_INTR_MASK (1 << 0)
#define GLOBAL_SERR_MASK (1 << 1)
#define COMMAND_INTR_MASK (1 << 2)
#define ARBITER_SERR_MASK (1 << 3)
#define COMMAND_DETECTED (1 << 16)
#define ARBITER_DETECTED (1 << 17)
#define SERR_INTR_RSVDZ_MASK 0xfffc0000
/*
* Logical Slot Register definitions
*/
#define SLOT_REG(i) (SLOT1 + (4 * i))
#define SLOT_STATE_SHIFT (0)
#define SLOT_STATE_MASK (3 << 0)
#define SLOT_STATE_PWRONLY (1)
#define SLOT_STATE_ENABLED (2)
#define SLOT_STATE_DISABLED (3)
#define PWR_LED_STATE_SHIFT (2)
#define PWR_LED_STATE_MASK (3 << 2)
#define ATN_LED_STATE_SHIFT (4)
#define ATN_LED_STATE_MASK (3 << 4)
#define ATN_LED_STATE_ON (1)
#define ATN_LED_STATE_BLINK (2)
#define ATN_LED_STATE_OFF (3)
#define POWER_FAULT (1 << 6)
#define ATN_BUTTON (1 << 7)
#define MRL_SENSOR (1 << 8)
#define MHZ66_CAP (1 << 9)
#define PRSNT_SHIFT (10)
#define PRSNT_MASK (3 << 10)
#define PCIX_CAP_SHIFT (12)
#define PCIX_CAP_MASK_PI1 (3 << 12)
#define PCIX_CAP_MASK_PI2 (7 << 12)
#define PRSNT_CHANGE_DETECTED (1 << 16)
#define ISO_PFAULT_DETECTED (1 << 17)
#define BUTTON_PRESS_DETECTED (1 << 18)
#define MRL_CHANGE_DETECTED (1 << 19)
#define CON_PFAULT_DETECTED (1 << 20)
#define PRSNT_CHANGE_INTR_MASK (1 << 24)
#define ISO_PFAULT_INTR_MASK (1 << 25)
#define BUTTON_PRESS_INTR_MASK (1 << 26)
#define MRL_CHANGE_INTR_MASK (1 << 27)
#define CON_PFAULT_INTR_MASK (1 << 28)
#define MRL_CHANGE_SERR_MASK (1 << 29)
#define CON_PFAULT_SERR_MASK (1 << 30)
#define SLOT_REG_RSVDZ_MASK ((1 << 15) | (7 << 21))
/*
* SHPC Command Code definitions
*
* Slot Operation 00h - 3Fh
* Set Bus Segment Speed/Mode A 40h - 47h
* Power-Only All Slots 48h
* Enable All Slots 49h
* Set Bus Segment Speed/Mode B (PI=2) 50h - 5Fh
* Reserved Command Codes 60h - BFh
* Vendor Specific Commands C0h - FFh
*/
#define SET_SLOT_PWR 0x01 /* Slot Operation */
#define SET_SLOT_ENABLE 0x02
#define SET_SLOT_DISABLE 0x03
#define SET_PWR_ON 0x04
#define SET_PWR_BLINK 0x08
#define SET_PWR_OFF 0x0c
#define SET_ATTN_ON 0x10
#define SET_ATTN_BLINK 0x20
#define SET_ATTN_OFF 0x30
#define SETA_PCI_33MHZ 0x40 /* Set Bus Segment Speed/Mode A */
#define SETA_PCI_66MHZ 0x41
#define SETA_PCIX_66MHZ 0x42
#define SETA_PCIX_100MHZ 0x43
#define SETA_PCIX_133MHZ 0x44
#define SETA_RESERVED1 0x45
#define SETA_RESERVED2 0x46
#define SETA_RESERVED3 0x47
#define SET_PWR_ONLY_ALL 0x48 /* Power-Only All Slots */
#define SET_ENABLE_ALL 0x49 /* Enable All Slots */
#define SETB_PCI_33MHZ 0x50 /* Set Bus Segment Speed/Mode B */
#define SETB_PCI_66MHZ 0x51
#define SETB_PCIX_66MHZ_PM 0x52
#define SETB_PCIX_100MHZ_PM 0x53
#define SETB_PCIX_133MHZ_PM 0x54
#define SETB_PCIX_66MHZ_EM 0x55
#define SETB_PCIX_100MHZ_EM 0x56
#define SETB_PCIX_133MHZ_EM 0x57
#define SETB_PCIX_66MHZ_266 0x58
#define SETB_PCIX_100MHZ_266 0x59
#define SETB_PCIX_133MHZ_266 0x5a
#define SETB_PCIX_66MHZ_533 0x5b
#define SETB_PCIX_100MHZ_533 0x5c
#define SETB_PCIX_133MHZ_533 0x5d
#define SETB_RESERVED1 0x5e
#define SETB_RESERVED2 0x5f
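/*
* Illustrative encoding, derived from shpc_write_cmd() below: the CMD
* register takes the 1-based slot number in the upper byte and the
* command code in the lower byte, so enabling hp_slot 0 with
* SET_SLOT_ENABLE alone would write ((0 + 1) << 8) | 0x02 = 0x0102.
*/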
/*
* SHPC controller command error code
*/
#define SWITCH_OPEN 0x1
#define INVALID_CMD 0x2
#define INVALID_SPEED_MODE 0x4
/*
* For accessing SHPC Working Register Set via PCI Configuration Space
*/
#define DWORD_SELECT 0x2
#define DWORD_DATA 0x4
/* Field Offset in Logical Slot Register - byte boundary */
#define SLOT_EVENT_LATCH 0x2
#define SLOT_SERR_INT_MASK 0x3
static irqreturn_t shpc_isr(int irq, void *dev_id);
static void start_int_poll_timer(struct controller *ctrl, int sec);
static int hpc_check_cmd_status(struct controller *ctrl);
static inline u8 shpc_readb(struct controller *ctrl, int reg)
{
return readb(ctrl->creg + reg);
}
static inline void shpc_writeb(struct controller *ctrl, int reg, u8 val)
{
writeb(val, ctrl->creg + reg);
}
static inline u16 shpc_readw(struct controller *ctrl, int reg)
{
return readw(ctrl->creg + reg);
}
static inline void shpc_writew(struct controller *ctrl, int reg, u16 val)
{
writew(val, ctrl->creg + reg);
}
static inline u32 shpc_readl(struct controller *ctrl, int reg)
{
return readl(ctrl->creg + reg);
}
static inline void shpc_writel(struct controller *ctrl, int reg, u32 val)
{
writel(val, ctrl->creg + reg);
}
static inline int shpc_indirect_read(struct controller *ctrl, int index,
u32 *value)
{
int rc;
u32 cap_offset = ctrl->cap_offset;
struct pci_dev *pdev = ctrl->pci_dev;
rc = pci_write_config_byte(pdev, cap_offset + DWORD_SELECT, index);
if (rc)
return rc;
return pci_read_config_dword(pdev, cap_offset + DWORD_DATA, value);
}
/*
* This is the interrupt polling timeout function.
*/
static void int_poll_timeout(unsigned long data)
{
struct controller *ctrl = (struct controller *)data;
/* Poll for interrupt events. regs == NULL => polling */
shpc_isr(0, ctrl);
init_timer(&ctrl->poll_timer);
if (!shpchp_poll_time)
shpchp_poll_time = 2; /* default polling interval is 2 sec */
start_int_poll_timer(ctrl, shpchp_poll_time);
}
/*
* This function starts the interrupt polling timer.
*/
static void start_int_poll_timer(struct controller *ctrl, int sec)
{
/* Clamp to sane value */
if ((sec <= 0) || (sec > 60))
sec = 2;
ctrl->poll_timer.function = &int_poll_timeout;
ctrl->poll_timer.data = (unsigned long)ctrl;
ctrl->poll_timer.expires = jiffies + sec * HZ;
add_timer(&ctrl->poll_timer);
}
static inline int is_ctrl_busy(struct controller *ctrl)
{
u16 cmd_status = shpc_readw(ctrl, CMD_STATUS);
return cmd_status & 0x1;
}
/*
* Returns 1 if SHPC finishes executing a command within 1 sec,
* otherwise returns 0.
*/
static inline int shpc_poll_ctrl_busy(struct controller *ctrl)
{
int i;
if (!is_ctrl_busy(ctrl))
return 1;
/* Check every 0.1 sec for a total of 1 sec */
for (i = 0; i < 10; i++) {
msleep(100);
if (!is_ctrl_busy(ctrl))
return 1;
}
return 0;
}
static inline int shpc_wait_cmd(struct controller *ctrl)
{
int retval = 0;
unsigned long timeout = msecs_to_jiffies(1000);
int rc;
if (shpchp_poll_mode)
rc = shpc_poll_ctrl_busy(ctrl);
else
rc = wait_event_interruptible_timeout(ctrl->queue,
!is_ctrl_busy(ctrl), timeout);
if (!rc && is_ctrl_busy(ctrl)) {
retval = -EIO;
ctrl_err(ctrl, "Command not completed in 1000 msec\n");
} else if (rc < 0) {
retval = -EINTR;
ctrl_info(ctrl, "Command was interrupted by a signal\n");
}
return retval;
}
static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd)
{
struct controller *ctrl = slot->ctrl;
u16 cmd_status;
int retval = 0;
u16 temp_word;
mutex_lock(&slot->ctrl->cmd_lock);
if (!shpc_poll_ctrl_busy(ctrl)) {
/* After 1 sec the controller is still busy */
ctrl_err(ctrl, "Controller is still busy after 1 sec\n");
retval = -EBUSY;
goto out;
}
++t_slot;
temp_word = (t_slot << 8) | (cmd & 0xFF);
ctrl_dbg(ctrl, "%s: t_slot %x cmd %x\n", __func__, t_slot, cmd);
/* The poll above ensured the Controller Busy bit is 0 before we send
* out the command.
*/
shpc_writew(ctrl, CMD, temp_word);
/*
* Wait for command completion.
*/
retval = shpc_wait_cmd(slot->ctrl);
if (retval)
goto out;
cmd_status = hpc_check_cmd_status(slot->ctrl);
if (cmd_status) {
ctrl_err(ctrl,
"Failed to issued command 0x%x (error code = %d)\n",
cmd, cmd_status);
retval = -EIO;
}
out:
mutex_unlock(&slot->ctrl->cmd_lock);
return retval;
}
static int hpc_check_cmd_status(struct controller *ctrl)
{
int retval = 0;
u16 cmd_status = shpc_readw(ctrl, CMD_STATUS) & 0x000F;
switch (cmd_status >> 1) {
case 0:
retval = 0;
break;
case 1:
retval = SWITCH_OPEN;
ctrl_err(ctrl, "Switch opened!\n");
break;
case 2:
retval = INVALID_CMD;
ctrl_err(ctrl, "Invalid HPC command!\n");
break;
case 4:
retval = INVALID_SPEED_MODE;
ctrl_err(ctrl, "Invalid bus speed/mode!\n");
break;
default:
retval = cmd_status;
}
return retval;
}
static int hpc_get_attention_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
u8 state = (slot_reg & ATN_LED_STATE_MASK) >> ATN_LED_STATE_SHIFT;
switch (state) {
case ATN_LED_STATE_ON:
*status = 1; /* On */
break;
case ATN_LED_STATE_BLINK:
*status = 2; /* Blink */
break;
case ATN_LED_STATE_OFF:
*status = 0; /* Off */
break;
default:
*status = 0xFF; /* Reserved */
break;
}
return 0;
}
static int hpc_get_power_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
u8 state = (slot_reg & SLOT_STATE_MASK) >> SLOT_STATE_SHIFT;
switch (state) {
case SLOT_STATE_PWRONLY:
*status = 2; /* Powered only */
break;
case SLOT_STATE_ENABLED:
*status = 1; /* Enabled */
break;
case SLOT_STATE_DISABLED:
*status = 0; /* Disabled */
break;
default:
*status = 0xFF; /* Reserved */
break;
}
return 0;
}
static int hpc_get_latch_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
*status = !!(slot_reg & MRL_SENSOR); /* 0 -> close; 1 -> open */
return 0;
}
static int hpc_get_adapter_status(struct slot *slot, u8 *status)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
u8 state = (slot_reg & PRSNT_MASK) >> PRSNT_SHIFT;
*status = (state != 0x3) ? 1 : 0;
return 0;
}
static int hpc_get_prog_int(struct slot *slot, u8 *prog_int)
{
struct controller *ctrl = slot->ctrl;
*prog_int = shpc_readb(ctrl, PROG_INTERFACE);
return 0;
}
static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value)
{
int retval = 0;
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
u8 m66_cap = !!(slot_reg & MHZ66_CAP);
u8 pi, pcix_cap;
if ((retval = hpc_get_prog_int(slot, &pi)))
return retval;
switch (pi) {
case 1:
pcix_cap = (slot_reg & PCIX_CAP_MASK_PI1) >> PCIX_CAP_SHIFT;
break;
case 2:
pcix_cap = (slot_reg & PCIX_CAP_MASK_PI2) >> PCIX_CAP_SHIFT;
break;
default:
return -ENODEV;
}
ctrl_dbg(ctrl, "%s: slot_reg = %x, pcix_cap = %x, m66_cap = %x\n",
__func__, slot_reg, pcix_cap, m66_cap);
switch (pcix_cap) {
case 0x0:
*value = m66_cap ? PCI_SPEED_66MHz : PCI_SPEED_33MHz;
break;
case 0x1:
*value = PCI_SPEED_66MHz_PCIX;
break;
case 0x3:
*value = PCI_SPEED_133MHz_PCIX;
break;
case 0x4:
*value = PCI_SPEED_133MHz_PCIX_266;
break;
case 0x5:
*value = PCI_SPEED_133MHz_PCIX_533;
break;
case 0x2:
default:
*value = PCI_SPEED_UNKNOWN;
retval = -ENODEV;
break;
}
ctrl_dbg(ctrl, "Adapter speed = %d\n", *value);
return retval;
}
static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode)
{
int retval = 0;
struct controller *ctrl = slot->ctrl;
u16 sec_bus_status = shpc_readw(ctrl, SEC_BUS_CONFIG);
u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
if (pi == 2) {
*mode = (sec_bus_status & 0x0100) >> 8;
} else {
retval = -1;
}
ctrl_dbg(ctrl, "Mode 1 ECC cap = %d\n", *mode);
return retval;
}
static int hpc_query_power_fault(struct slot *slot)
{
struct controller *ctrl = slot->ctrl;
u32 slot_reg = shpc_readl(ctrl, SLOT_REG(slot->hp_slot));
/* Note: Logic 0 => fault */
return !(slot_reg & POWER_FAULT);
}
static int hpc_set_attention_status(struct slot *slot, u8 value)
{
u8 slot_cmd = 0;
switch (value) {
case 0 :
slot_cmd = SET_ATTN_OFF; /* OFF */
break;
case 1:
slot_cmd = SET_ATTN_ON; /* ON */
break;
case 2:
slot_cmd = SET_ATTN_BLINK; /* BLINK */
break;
default:
return -1;
}
return shpc_write_cmd(slot, slot->hp_slot, slot_cmd);
}
static void hpc_set_green_led_on(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_ON);
}
static void hpc_set_green_led_off(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_OFF);
}
static void hpc_set_green_led_blink(struct slot *slot)
{
shpc_write_cmd(slot, slot->hp_slot, SET_PWR_BLINK);
}
static void hpc_release_ctlr(struct controller *ctrl)
{
int i;
u32 slot_reg, serr_int;
/*
* Mask event interrupts and SERRs of all slots
*/
for (i = 0; i < ctrl->num_slots; i++) {
slot_reg = shpc_readl(ctrl, SLOT_REG(i));
slot_reg |= (PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK |
BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK |
CON_PFAULT_INTR_MASK | MRL_CHANGE_SERR_MASK |
CON_PFAULT_SERR_MASK);
slot_reg &= ~SLOT_REG_RSVDZ_MASK;
shpc_writel(ctrl, SLOT_REG(i), slot_reg);
}
cleanup_slots(ctrl);
/*
* Mask SERR and System Interrupt generation
*/
serr_int = shpc_readl(ctrl, SERR_INTR_ENABLE);
serr_int |= (GLOBAL_INTR_MASK | GLOBAL_SERR_MASK |
COMMAND_INTR_MASK | ARBITER_SERR_MASK);
serr_int &= ~SERR_INTR_RSVDZ_MASK;
shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
if (shpchp_poll_mode)
del_timer(&ctrl->poll_timer);
else {
free_irq(ctrl->pci_dev->irq, ctrl);
pci_disable_msi(ctrl->pci_dev);
}
iounmap(ctrl->creg);
release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
}
static int hpc_power_on_slot(struct slot *slot)
{
int retval;
retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_PWR);
if (retval)
ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__);
return retval;
}
static int hpc_slot_enable(struct slot *slot)
{
int retval;
/* Slot - Enable, Power Indicator - Blink, Attention Indicator - Off */
retval = shpc_write_cmd(slot, slot->hp_slot,
SET_SLOT_ENABLE | SET_PWR_BLINK | SET_ATTN_OFF);
if (retval)
ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__);
return retval;
}
static int hpc_slot_disable(struct slot *slot)
{
int retval;
/* Slot - Disable, Power Indicator - Off, Attention Indicator - On */
retval = shpc_write_cmd(slot, slot->hp_slot,
SET_SLOT_DISABLE | SET_PWR_OFF | SET_ATTN_ON);
if (retval)
ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__);
return retval;
}
static int shpc_get_cur_bus_speed(struct controller *ctrl)
{
int retval = 0;
struct pci_bus *bus = ctrl->pci_dev->subordinate;
enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN;
u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG);
u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7);
if ((pi == 1) && (speed_mode > 4)) {
retval = -ENODEV;
goto out;
}
switch (speed_mode) {
case 0x0:
bus_speed = PCI_SPEED_33MHz;
break;
case 0x1:
bus_speed = PCI_SPEED_66MHz;
break;
case 0x2:
bus_speed = PCI_SPEED_66MHz_PCIX;
break;
case 0x3:
bus_speed = PCI_SPEED_100MHz_PCIX;
break;
case 0x4:
bus_speed = PCI_SPEED_133MHz_PCIX;
break;
case 0x5:
bus_speed = PCI_SPEED_66MHz_PCIX_ECC;
break;
case 0x6:
bus_speed = PCI_SPEED_100MHz_PCIX_ECC;
break;
case 0x7:
bus_speed = PCI_SPEED_133MHz_PCIX_ECC;
break;
case 0x8:
bus_speed = PCI_SPEED_66MHz_PCIX_266;
break;
case 0x9:
bus_speed = PCI_SPEED_100MHz_PCIX_266;
break;
case 0xa:
bus_speed = PCI_SPEED_133MHz_PCIX_266;
break;
case 0xb:
bus_speed = PCI_SPEED_66MHz_PCIX_533;
break;
case 0xc:
bus_speed = PCI_SPEED_100MHz_PCIX_533;
break;
case 0xd:
bus_speed = PCI_SPEED_133MHz_PCIX_533;
break;
default:
retval = -ENODEV;
break;
}
out:
bus->cur_bus_speed = bus_speed;
dbg("Current bus speed = %d\n", bus_speed);
return retval;
}
static int hpc_set_bus_speed_mode(struct slot *slot, enum pci_bus_speed value)
{
int retval;
struct controller *ctrl = slot->ctrl;
u8 pi, cmd;
pi = shpc_readb(ctrl, PROG_INTERFACE);
if ((pi == 1) && (value > PCI_SPEED_133MHz_PCIX))
return -EINVAL;
switch (value) {
case PCI_SPEED_33MHz:
cmd = SETA_PCI_33MHZ;
break;
case PCI_SPEED_66MHz:
cmd = SETA_PCI_66MHZ;
break;
case PCI_SPEED_66MHz_PCIX:
cmd = SETA_PCIX_66MHZ;
break;
case PCI_SPEED_100MHz_PCIX:
cmd = SETA_PCIX_100MHZ;
break;
case PCI_SPEED_133MHz_PCIX:
cmd = SETA_PCIX_133MHZ;
break;
case PCI_SPEED_66MHz_PCIX_ECC:
cmd = SETB_PCIX_66MHZ_EM;
break;
case PCI_SPEED_100MHz_PCIX_ECC:
cmd = SETB_PCIX_100MHZ_EM;
break;
case PCI_SPEED_133MHz_PCIX_ECC:
cmd = SETB_PCIX_133MHZ_EM;
break;
case PCI_SPEED_66MHz_PCIX_266:
cmd = SETB_PCIX_66MHZ_266;
break;
case PCI_SPEED_100MHz_PCIX_266:
cmd = SETB_PCIX_100MHZ_266;
break;
case PCI_SPEED_133MHz_PCIX_266:
cmd = SETB_PCIX_133MHZ_266;
break;
case PCI_SPEED_66MHz_PCIX_533:
cmd = SETB_PCIX_66MHZ_533;
break;
case PCI_SPEED_100MHz_PCIX_533:
cmd = SETB_PCIX_100MHZ_533;
break;
case PCI_SPEED_133MHz_PCIX_533:
cmd = SETB_PCIX_133MHZ_533;
break;
default:
return -EINVAL;
}
retval = shpc_write_cmd(slot, 0, cmd);
if (retval)
ctrl_err(ctrl, "%s: Write command failed!\n", __func__);
else
shpc_get_cur_bus_speed(ctrl);
return retval;
}
static irqreturn_t shpc_isr(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
u32 serr_int, slot_reg, intr_loc, intr_loc2;
int hp_slot;
/* Check to see if it was our interrupt */
intr_loc = shpc_readl(ctrl, INTR_LOC);
if (!intr_loc)
return IRQ_NONE;
ctrl_dbg(ctrl, "%s: intr_loc = %x\n", __func__, intr_loc);
if (!shpchp_poll_mode) {
/*
* Mask Global Interrupt Mask - see implementation
* note on p. 139 of SHPC spec rev 1.0
*/
serr_int = shpc_readl(ctrl, SERR_INTR_ENABLE);
serr_int |= GLOBAL_INTR_MASK;
serr_int &= ~SERR_INTR_RSVDZ_MASK;
shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
intr_loc2 = shpc_readl(ctrl, INTR_LOC);
ctrl_dbg(ctrl, "%s: intr_loc2 = %x\n", __func__, intr_loc2);
}
if (intr_loc & CMD_INTR_PENDING) {
/*
* Command Complete Interrupt Pending
* RO only - clear by writing 1 to the Command Completion
* Detect bit in Controller SERR-INT register
*/
serr_int = shpc_readl(ctrl, SERR_INTR_ENABLE);
serr_int &= ~SERR_INTR_RSVDZ_MASK;
shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
wake_up_interruptible(&ctrl->queue);
}
if (!(intr_loc & ~CMD_INTR_PENDING))
goto out;
for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) {
/* To find out which slot has interrupt pending */
if (!(intr_loc & SLOT_INTR_PENDING(hp_slot)))
continue;
slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot));
ctrl_dbg(ctrl, "Slot %x with intr, slot register = %x\n",
hp_slot, slot_reg);
if (slot_reg & MRL_CHANGE_DETECTED)
shpchp_handle_switch_change(hp_slot, ctrl);
if (slot_reg & BUTTON_PRESS_DETECTED)
shpchp_handle_attention_button(hp_slot, ctrl);
if (slot_reg & PRSNT_CHANGE_DETECTED)
shpchp_handle_presence_change(hp_slot, ctrl);
if (slot_reg & (ISO_PFAULT_DETECTED | CON_PFAULT_DETECTED))
shpchp_handle_power_fault(hp_slot, ctrl);
/* Clear all slot events */
slot_reg &= ~SLOT_REG_RSVDZ_MASK;
shpc_writel(ctrl, SLOT_REG(hp_slot), slot_reg);
}
out:
if (!shpchp_poll_mode) {
/* Unmask Global Interrupt Mask */
serr_int = shpc_readl(ctrl, SERR_INTR_ENABLE);
serr_int &= ~(GLOBAL_INTR_MASK | SERR_INTR_RSVDZ_MASK);
shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int);
}
return IRQ_HANDLED;
}
static int shpc_get_max_bus_speed(struct controller *ctrl)
{
int retval = 0;
struct pci_bus *bus = ctrl->pci_dev->subordinate;
enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN;
u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1);
u32 slot_avail2 = shpc_readl(ctrl, SLOT_AVAIL2);
if (pi == 2) {
if (slot_avail2 & SLOT_133MHZ_PCIX_533)
bus_speed = PCI_SPEED_133MHz_PCIX_533;
else if (slot_avail2 & SLOT_100MHZ_PCIX_533)
bus_speed = PCI_SPEED_100MHz_PCIX_533;
else if (slot_avail2 & SLOT_66MHZ_PCIX_533)
bus_speed = PCI_SPEED_66MHz_PCIX_533;
else if (slot_avail2 & SLOT_133MHZ_PCIX_266)
bus_speed = PCI_SPEED_133MHz_PCIX_266;
else if (slot_avail2 & SLOT_100MHZ_PCIX_266)
bus_speed = PCI_SPEED_100MHz_PCIX_266;
else if (slot_avail2 & SLOT_66MHZ_PCIX_266)
bus_speed = PCI_SPEED_66MHz_PCIX_266;
}
if (bus_speed == PCI_SPEED_UNKNOWN) {
if (slot_avail1 & SLOT_133MHZ_PCIX)
bus_speed = PCI_SPEED_133MHz_PCIX;
else if (slot_avail1 & SLOT_100MHZ_PCIX)
bus_speed = PCI_SPEED_100MHz_PCIX;
else if (slot_avail1 & SLOT_66MHZ_PCIX)
bus_speed = PCI_SPEED_66MHz_PCIX;
else if (slot_avail2 & SLOT_66MHZ)
bus_speed = PCI_SPEED_66MHz;
else if (slot_avail1 & SLOT_33MHZ)
bus_speed = PCI_SPEED_33MHz;
else
retval = -ENODEV;
}
bus->max_bus_speed = bus_speed;
ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed);
return retval;
}
static struct hpc_ops shpchp_hpc_ops = {
.power_on_slot = hpc_power_on_slot,
.slot_enable = hpc_slot_enable,
.slot_disable = hpc_slot_disable,
.set_bus_speed_mode = hpc_set_bus_speed_mode,
.set_attention_status = hpc_set_attention_status,
.get_power_status = hpc_get_power_status,
.get_attention_status = hpc_get_attention_status,
.get_latch_status = hpc_get_latch_status,
.get_adapter_status = hpc_get_adapter_status,
.get_adapter_speed = hpc_get_adapter_speed,
.get_mode1_ECC_cap = hpc_get_mode1_ECC_cap,
.get_prog_int = hpc_get_prog_int,
.query_power_fault = hpc_query_power_fault,
.green_led_on = hpc_set_green_led_on,
.green_led_off = hpc_set_green_led_off,
.green_led_blink = hpc_set_green_led_blink,
.release_ctlr = hpc_release_ctlr,
};
int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
{
int rc = -1, num_slots = 0;
u8 hp_slot;
u32 shpc_base_offset;
u32 tempdword, slot_reg, slot_config;
u8 i;
ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
ctrl_dbg(ctrl, "Hotplug Controller:\n");
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) {
/* amd shpc driver doesn't use Base Offset; assume 0 */
ctrl->mmio_base = pci_resource_start(pdev, 0);
ctrl->mmio_size = pci_resource_len(pdev, 0);
} else {
ctrl->cap_offset = pci_find_capability(pdev, PCI_CAP_ID_SHPC);
if (!ctrl->cap_offset) {
ctrl_err(ctrl, "Cannot find PCI capability\n");
goto abort;
}
ctrl_dbg(ctrl, " cap_offset = %x\n", ctrl->cap_offset);
rc = shpc_indirect_read(ctrl, 0, &shpc_base_offset);
if (rc) {
ctrl_err(ctrl, "Cannot read base_offset\n");
goto abort;
}
rc = shpc_indirect_read(ctrl, 3, &tempdword);
if (rc) {
ctrl_err(ctrl, "Cannot read slot config\n");
goto abort;
}
num_slots = tempdword & SLOT_NUM;
ctrl_dbg(ctrl, " num_slots (indirect) %x\n", num_slots);
for (i = 0; i < 9 + num_slots; i++) {
rc = shpc_indirect_read(ctrl, i, &tempdword);
if (rc) {
ctrl_err(ctrl,
"Cannot read creg (index = %d)\n", i);
goto abort;
}
ctrl_dbg(ctrl, " offset %d: value %x\n", i, tempdword);
}
ctrl->mmio_base =
pci_resource_start(pdev, 0) + shpc_base_offset;
ctrl->mmio_size = 0x24 + 0x4 * num_slots;
}
ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
pdev->vendor, pdev->device, pdev->subsystem_vendor,
pdev->subsystem_device);
rc = pci_enable_device(pdev);
if (rc) {
ctrl_err(ctrl, "pci_enable_device failed\n");
goto abort;
}
if (!request_mem_region(ctrl->mmio_base, ctrl->mmio_size, MY_NAME)) {
ctrl_err(ctrl, "Cannot reserve MMIO region\n");
rc = -1;
goto abort;
}
ctrl->creg = ioremap(ctrl->mmio_base, ctrl->mmio_size);
if (!ctrl->creg) {
ctrl_err(ctrl, "Cannot remap MMIO region %lx @ %lx\n",
ctrl->mmio_size, ctrl->mmio_base);
release_mem_region(ctrl->mmio_base, ctrl->mmio_size);
rc = -1;
goto abort;
}
ctrl_dbg(ctrl, "ctrl->creg %p\n", ctrl->creg);
mutex_init(&ctrl->crit_sect);
mutex_init(&ctrl->cmd_lock);
/* Setup wait queue */
init_waitqueue_head(&ctrl->queue);
ctrl->hpc_ops = &shpchp_hpc_ops;
/* Return PCI Controller Info */
slot_config = shpc_readl(ctrl, SLOT_CONFIG);
ctrl->slot_device_offset = (slot_config & FIRST_DEV_NUM) >> 8;
ctrl->num_slots = slot_config & SLOT_NUM;
ctrl->first_slot = (slot_config & PSN) >> 16;
ctrl->slot_num_inc = ((slot_config & UPDOWN) >> 29) ? 1 : -1;
/* Mask Global Interrupt Mask & Command Complete Interrupt Mask */
tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE);
ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword);
tempdword |= (GLOBAL_INTR_MASK | GLOBAL_SERR_MASK |
COMMAND_INTR_MASK | ARBITER_SERR_MASK);
tempdword &= ~SERR_INTR_RSVDZ_MASK;
shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword);
tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE);
ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword);
/* Mask each slot's event interrupts and SERR generation in its
* Logical Slot Register, and clear any existing events
*/
for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) {
slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot));
ctrl_dbg(ctrl, "Default Logical Slot Register %d value %x\n",
hp_slot, slot_reg);
slot_reg |= (PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK |
BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK |
CON_PFAULT_INTR_MASK | MRL_CHANGE_SERR_MASK |
CON_PFAULT_SERR_MASK);
slot_reg &= ~SLOT_REG_RSVDZ_MASK;
shpc_writel(ctrl, SLOT_REG(hp_slot), slot_reg);
}
if (shpchp_poll_mode) {
/* Install interrupt polling timer. Start with 10 sec delay */
init_timer(&ctrl->poll_timer);
start_int_poll_timer(ctrl, 10);
} else {
/* Installs the interrupt handler */
rc = pci_enable_msi(pdev);
if (rc) {
ctrl_info(ctrl,
"Can't get msi for the hotplug controller\n");
ctrl_info(ctrl,
"Use INTx for the hotplug controller\n");
}
rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
MY_NAME, (void *)ctrl);
ctrl_dbg(ctrl, "request_irq %d (returns %d)\n",
ctrl->pci_dev->irq, rc);
if (rc) {
ctrl_err(ctrl, "Can't get irq %d for the hotplug "
"controller\n", ctrl->pci_dev->irq);
goto abort_iounmap;
}
}
ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq);
shpc_get_max_bus_speed(ctrl);
shpc_get_cur_bus_speed(ctrl);
/*
* Unmask all event interrupts of all slots
*/
for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) {
slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot));
ctrl_dbg(ctrl, "Default Logical Slot Register %d value %x\n",
hp_slot, slot_reg);
slot_reg &= ~(PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK |
BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK |
CON_PFAULT_INTR_MASK | SLOT_REG_RSVDZ_MASK);
shpc_writel(ctrl, SLOT_REG(hp_slot), slot_reg);
}
if (!shpchp_poll_mode) {
/* Unmask all general input interrupts and SERR */
tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE);
tempdword &= ~(GLOBAL_INTR_MASK | COMMAND_INTR_MASK |
SERR_INTR_RSVDZ_MASK);
shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword);
tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE);
ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword);
}
return 0;
/* We end up here for the many possible ways to fail this API. */
abort_iounmap:
iounmap(ctrl->creg);
abort:
return rc;
}
| gpl-2.0 |
StephenRJ/cm12_kernel_moto_shamu | arch/arm/mach-msm/restart.c | 71 | 8786 | /* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/reboot.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/mfd/pmic8058.h>
#include <linux/mfd/pmic8901.h>
#include <linux/mfd/pm8xxx/misc.h>
#include <linux/qpnp/power-on.h>
#include <linux/of_address.h>
#include <soc/qcom/scm.h>
#include <asm/cacheflush.h>
#include <mach/msm_iomap.h>
#include <mach/restart.h>
#include <soc/qcom/socinfo.h>
#include <mach/irqs.h>
#include "msm_watchdog.h"
#include "timer.h"
#include "wdog_debug.h"
#define WDT0_RST 0x38
#define WDT0_EN 0x40
#define WDT0_BARK_TIME 0x4C
#define WDT0_BITE_TIME 0x5C
#define PSHOLD_CTL_SU (MSM_TLMM_BASE + 0x820)
#define RESTART_REASON_ADDR 0x65C
#define DLOAD_MODE_ADDR 0x0
#define EMERGENCY_DLOAD_MODE_ADDR 0xFE0
#define EMERGENCY_DLOAD_MAGIC1 0x322A4F99
#define EMERGENCY_DLOAD_MAGIC2 0xC67E4350
#define EMERGENCY_DLOAD_MAGIC3 0x77777777
#define SCM_IO_DISABLE_PMIC_ARBITER 1
static int restart_mode;
void *restart_reason;
int pmic_reset_irq;
static void __iomem *msm_tmr0_base;
#ifdef CONFIG_MSM_DLOAD_MODE
#define EDL_MODE_PROP "qcom,msm-imem-emergency_download_mode"
#define DL_MODE_PROP "qcom,msm-imem-download_mode"
static int in_panic;
static void *dload_mode_addr;
static bool dload_mode_enabled;
static void *emergency_dload_mode_addr;
/* Download mode master kill-switch */
static int dload_set(const char *val, struct kernel_param *kp);
static int download_mode = 1;
module_param_call(download_mode, dload_set, param_get_int,
&download_mode, 0644);
static int panic_prep_restart(struct notifier_block *this,
unsigned long event, void *ptr)
{
in_panic = 1;
return NOTIFY_DONE;
}
static struct notifier_block panic_blk = {
.notifier_call = panic_prep_restart,
};
static void set_dload_mode(int on)
{
if (dload_mode_addr) {
__raw_writel(on ? 0xE47B337D : 0, dload_mode_addr);
__raw_writel(on ? 0xCE14091A : 0,
dload_mode_addr + sizeof(unsigned int));
mb();
dload_mode_enabled = on;
}
}
static bool get_dload_mode(void)
{
return dload_mode_enabled;
}
static void enable_emergency_dload_mode(void)
{
if (emergency_dload_mode_addr) {
__raw_writel(EMERGENCY_DLOAD_MAGIC1,
emergency_dload_mode_addr);
__raw_writel(EMERGENCY_DLOAD_MAGIC2,
emergency_dload_mode_addr +
sizeof(unsigned int));
__raw_writel(EMERGENCY_DLOAD_MAGIC3,
emergency_dload_mode_addr +
(2 * sizeof(unsigned int)));
/* The PMIC watchdog must be disabled so that emergency dload
* mode does not auto-reset. */
qpnp_pon_wd_config(0);
mb();
}
}
static int dload_set(const char *val, struct kernel_param *kp)
{
int ret;
int old_val = download_mode;
ret = param_set_int(val, kp);
if (ret)
return ret;
/* Only 0 and 1 are valid; restore the old value and reject others. */
if (download_mode >> 1) {
download_mode = old_val;
return -EINVAL;
}
set_dload_mode(download_mode);
return 0;
}
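/*
* Runtime usage sketch (the sysfs path below is an assumption based on
* KBUILD_MODNAME for this file and may differ per build):
*
*	echo 0 > /sys/module/restart/parameters/download_mode
*
* lands in dload_set() above and clears the download-mode cookies via
* set_dload_mode(0).
*/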
#else
#define set_dload_mode(x) do {} while (0)
static void enable_emergency_dload_mode(void)
{
printk(KERN_ERR "dload mode is not enabled on target\n");
}
static bool get_dload_mode(void)
{
return false;
}
#endif
void msm_set_restart_mode(int mode)
{
restart_mode = mode;
}
EXPORT_SYMBOL(msm_set_restart_mode);
static bool scm_pmic_arbiter_disable_supported;
/*
* Force the SPMI PMIC arbiter to shutdown so that no more SPMI transactions
* are sent from the MSM to the PMIC. This is required in order to avoid an
* SPMI lockup on certain PMIC chips if PS_HOLD is lowered in the middle of
* an SPMI transaction.
*/
static void halt_spmi_pmic_arbiter(void)
{
if (scm_pmic_arbiter_disable_supported) {
pr_crit("Calling SCM to disable SPMI PMIC arbiter\n");
scm_call_atomic1(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER, 0);
}
}
static void __msm_power_off(int lower_pshold)
{
printk(KERN_CRIT "Powering off the SoC\n");
#ifdef CONFIG_MSM_DLOAD_MODE
set_dload_mode(0);
#endif
pm8xxx_reset_pwr_off(0);
qpnp_pon_system_pwr_off(PON_POWER_OFF_SHUTDOWN);
if (lower_pshold) {
halt_spmi_pmic_arbiter();
__raw_writel(0, MSM_MPM2_PSHOLD_BASE);
mdelay(10000);
printk(KERN_ERR "Powering off has failed\n");
}
return;
}
static void msm_power_off(void)
{
/* MSM initiated power off, lower ps_hold */
__msm_power_off(1);
}
static void msm_restart_prepare(const char *cmd)
{
#ifdef CONFIG_MSM_DLOAD_MODE
/* This looks like a normal reboot at this point. */
set_dload_mode(0);
/* Write download mode flags if we're panicking */
set_dload_mode(in_panic);
/* Write download mode flags if restart_mode says so */
if (restart_mode == RESTART_DLOAD)
set_dload_mode(1);
/* Kill download mode if master-kill switch is set */
if (!download_mode)
set_dload_mode(0);
#endif
pm8xxx_reset_pwr_off(1);
/* Hard reset the PMIC unless memory contents must be maintained. */
if (get_dload_mode() || (cmd != NULL && cmd[0] != '\0') || in_panic)
qpnp_pon_system_pwr_off(PON_POWER_OFF_WARM_RESET);
else
qpnp_pon_system_pwr_off(PON_POWER_OFF_HARD_RESET);
if (cmd != NULL) {
if (!strncmp(cmd, "bootloader", 10)) {
__raw_writel(0x77665500, restart_reason);
/* set reboot_bl flag in PMIC for cold reset */
qpnp_pon_store_extra_reset_info(RESET_EXTRA_REBOOT_BL_REASON,
RESET_EXTRA_REBOOT_BL_REASON);
/*
* Force a cold reboot here to avoid interference from the
* modem double-reboot workaround.
*/
qpnp_pon_system_pwr_off(PON_POWER_OFF_HARD_RESET);
} else if (!strncmp(cmd, "recovery", 8)) {
__raw_writel(0x77665502, restart_reason);
} else if (!strcmp(cmd, "rtc")) {
__raw_writel(0x77665503, restart_reason);
} else if (!strncmp(cmd, "oem-", 4)) {
unsigned long code;
code = simple_strtoul(cmd + 4, NULL, 16) & 0xff;
__raw_writel(0x6f656d00 | code, restart_reason);
} else if (!strncmp(cmd, "edl", 3)) {
enable_emergency_dload_mode();
} else {
__raw_writel(0x77665501, restart_reason);
}
} else if (in_panic == 1) {
__raw_writel(0x77665505, restart_reason);
qpnp_pon_store_extra_reset_info(RESET_EXTRA_PANIC_REASON, 1);
} else {
__raw_writel(0x77665501, restart_reason);
}
flush_cache_all();
outer_flush_all();
}
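/*
* For reference, the restart-reason cookies written above (values taken
* directly from msm_restart_prepare()):
*
*	0x77665500	"bootloader"
*	0x77665501	normal reboot / unrecognized command
*	0x77665502	"recovery"
*	0x77665503	"rtc"
*	0x77665505	panic
*	0x6f656dXX	"oem-XX" (low byte parsed from the hex argument)
*/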
void msm_restart(char mode, const char *cmd)
{
printk(KERN_NOTICE "Going down for restart now\n");
msm_restart_prepare(cmd);
/* Needed to bypass debug image on some chips */
msm_disable_wdog_debug();
halt_spmi_pmic_arbiter();
__raw_writel(0, MSM_MPM2_PSHOLD_BASE);
mdelay(10000);
printk(KERN_ERR "Restarting has failed\n");
}
static int __init msm_restart_init(void)
{
struct device_node *np;
int ret = 0;
#ifdef CONFIG_MSM_DLOAD_MODE
atomic_notifier_chain_register(&panic_notifier_list, &panic_blk);
np = of_find_compatible_node(NULL, NULL, DL_MODE_PROP);
if (!np) {
pr_err("unable to find DT imem download mode node\n");
ret = -ENODEV;
goto err_dl_mode;
}
dload_mode_addr = of_iomap(np, 0);
if (!dload_mode_addr) {
pr_err("unable to map imem download model offset\n");
ret = -ENOMEM;
goto err_dl_mode;
}
np = of_find_compatible_node(NULL, NULL, EDL_MODE_PROP);
if (!np) {
pr_err("unable to find DT imem emergency download mode node\n");
ret = -ENODEV;
goto err_edl_mode;
}
emergency_dload_mode_addr = of_iomap(np, 0);
if (!emergency_dload_mode_addr) {
pr_err("unable to map imem emergency download model offset\n");
ret = -ENOMEM;
goto err_edl_mode;
}
set_dload_mode(download_mode);
#endif
msm_tmr0_base = msm_timer_get_timer0_base();
np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-restart_reason");
if (!np) {
pr_err("unable to find DT imem restart reason node\n");
ret = -ENODEV;
goto err_restart_reason;
}
restart_reason = of_iomap(np, 0);
if (!restart_reason) {
pr_err("unable to map imem restart reason offset\n");
ret = -ENOMEM;
goto err_restart_reason;
}
pm_power_off = msm_power_off;
if (scm_is_call_available(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER) > 0)
scm_pmic_arbiter_disable_supported = true;
return 0;
err_restart_reason:
#ifdef CONFIG_MSM_DLOAD_MODE
iounmap(emergency_dload_mode_addr);
err_edl_mode:
iounmap(dload_mode_addr);
err_dl_mode:
#endif
return ret;
}
early_initcall(msm_restart_init);
| gpl-2.0 |
percy-g2/Novathor_xperia_u8500 | 6.1.1.B.0.253/external/qemu/distrib/sdl-1.2.12/src/stdlib/SDL_qsort.c | 71 | 14685 | /* qsort.c
* (c) 1998 Gareth McCaughan
*
* This is a drop-in replacement for the C library's |qsort()| routine.
*
* Features:
* - Median-of-three pivoting (and more)
* - Truncation and final polishing by a single insertion sort
* - Early truncation when no swaps needed in pivoting step
* - Explicit recursion, guaranteed not to overflow
* - A few little wrinkles stolen from the GNU |qsort()|.
* - separate code for non-aligned / aligned / word-size objects
*
* This code may be reproduced freely provided
* - this file is retained unaltered apart from minor
* changes for portability and efficiency
* - no changes are made to this comment
* - any changes that *are* made are clearly flagged
* - the _ID string below is altered by inserting, after
* the date, the string " altered" followed at your option
* by other material. (Exceptions: you may change the name
* of the exported routine without changing the ID string.
* You may change the values of the macros TRUNC_* and
* PIVOT_THRESHOLD without changing the ID string, provided
* they remain constants with TRUNC_nonaligned, TRUNC_aligned
* and TRUNC_words/WORD_BYTES between 8 and 24, and
* PIVOT_THRESHOLD between 32 and 200.)
*
* You may use it in anything you like; you may make money
* out of it; you may distribute it in object form or as
* part of an executable without including source code;
* you don't have to credit me. (But it would be nice if
* you did.)
*
* If you find problems with this code, or find ways of
* making it significantly faster, please let me know!
* My e-mail address, valid as of early 1998 and certainly
* OK for at least the next 18 months, is
* gjm11@dpmms.cam.ac.uk
* Thanks!
*
* Gareth McCaughan Peterhouse Cambridge 1998
*/
#include "SDL_config.h"
/*
#include <assert.h>
#include <stdlib.h>
#include <string.h>
*/
#include "SDL_stdinc.h"
#define assert(X)
#define malloc SDL_malloc
#define free SDL_free
#define memcpy SDL_memcpy
#define memmove SDL_memmove
#define qsort SDL_qsort
#ifndef HAVE_QSORT
static char _ID[]="<qsort.c gjm 1.12 1998-03-19>";
/* How many bytes are there per word? (Must be a power of 2,
* and must in fact equal sizeof(int).)
*/
#define WORD_BYTES sizeof(int)
/* How big does our stack need to be? Answer: one entry per
* bit in a |size_t|.
*/
#define STACK_SIZE (8*sizeof(size_t))
/* Different situations have slightly different requirements,
* and we make life epsilon easier by using different truncation
* points for the three different cases.
* So far, I have tuned TRUNC_words and guessed that the same
* value might work well for the other two cases. Of course
* what works well on my machine might work badly on yours.
*/
#define TRUNC_nonaligned 12
#define TRUNC_aligned 12
#define TRUNC_words 12*WORD_BYTES /* nb different meaning */
/* We use a simple pivoting algorithm for shortish sub-arrays
* and a more complicated one for larger ones. The threshold
* is PIVOT_THRESHOLD.
*/
#define PIVOT_THRESHOLD 40
typedef struct { char * first; char * last; } stack_entry;
#define pushLeft {stack[stacktop].first=ffirst;stack[stacktop++].last=last;}
#define pushRight {stack[stacktop].first=first;stack[stacktop++].last=llast;}
#define doLeft {first=ffirst;llast=last;continue;}
#define doRight {ffirst=first;last=llast;continue;}
#define pop {if (--stacktop<0) break;\
first=ffirst=stack[stacktop].first;\
last=llast=stack[stacktop].last;\
continue;}
/* Some comments on the implementation.
* 1. When we finish partitioning the array into "low"
* and "high", we forget entirely about short subarrays,
* because they'll be done later by insertion sort.
* Doing lots of little insertion sorts might be a win
* on large datasets for locality-of-reference reasons,
* but it makes the code much nastier and increases
* bookkeeping overhead.
* 2. We always save the shorter and get to work on the
* longer. This guarantees that every time we push
* an item onto the stack its size is <= 1/2 of that
* of its parent; so the stack can't need more than
* log_2(max-array-size) entries.
* 3. We choose a pivot by looking at the first, last
* and middle elements. We arrange them into order
* because it's easy to do that in conjunction with
* choosing the pivot, and it makes things a little
* easier in the partitioning step. Anyway, the pivot
* is the middle of these three. It's still possible
* to construct datasets where the algorithm takes
* time of order n^2, but it simply never happens in
* practice.
* 3' Newsflash: On further investigation I find that
* it's easy to construct datasets where median-of-3
* simply isn't good enough. So on large-ish subarrays
* we do a more sophisticated pivoting: we take three
* sets of 3 elements, find their medians, and then
* take the median of those.
* 4. We copy the pivot element to a separate place
* because that way we can always do our comparisons
* directly against a pointer to that separate place,
* and don't have to wonder "did we move the pivot
* element?". This makes the inner loop better.
* 5. It's possible to make the pivoting even more
* reliable by looking at more candidates when n
* is larger. (Taking this to its logical conclusion
* results in a variant of quicksort that doesn't
* have that n^2 worst case.) However, the overhead
* from the extra bookkeeping means that it's just
* not worth while.
* 6. This is pretty clean and portable code. Here are
* all the potential portability pitfalls and problems
* I know of:
* - In one place (the insertion sort) I construct
* a pointer that points just past the end of the
* supplied array, and assume that (a) it won't
* compare equal to any pointer within the array,
* and (b) it will compare equal to a pointer
* obtained by stepping off the end of the array.
* These might fail on some segmented architectures.
* - I assume that there are 8 bits in a |char| when
* computing the size of stack needed. This would
* fail on machines with 9-bit or 16-bit bytes.
* - I assume that if |((int)base&(sizeof(int)-1))==0|
* and |(size&(sizeof(int)-1))==0| then it's safe to
* get at array elements via |int*|s, and that if
* actually |size==sizeof(int)| as well then it's
* safe to treat the elements as |int|s. This might
* fail on systems that convert pointers to integers
* in non-standard ways.
* - I assume that |8*sizeof(size_t)<=INT_MAX|. This
* would be false on a machine with 8-bit |char|s,
* 16-bit |int|s and 4096-bit |size_t|s. :-)
*/
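/* A worked example of note 3 above: with first=9, mid=1, last=5 the
* Pivot step swaps first/mid (giving 1,9,5) and then mid/last (giving
* 1,5,9), so the ends become sentinels and the pivot is the median 5.
*/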
/* The recursion logic is the same in each case: */
#define Recurse(Trunc) \
{ size_t l=last-ffirst,r=llast-first; \
if (l<Trunc) { \
if (r>=Trunc) doRight \
else pop \
} \
else if (l<=r) { pushLeft; doRight } \
else if (r>=Trunc) { pushRight; doLeft }\
else doLeft \
}
/* and so is the pivoting logic: */
#define Pivot(swapper,sz) \
if ((size_t)(last-first)>PIVOT_THRESHOLD*sz) mid=pivot_big(first,mid,last,sz,compare);\
else { \
if (compare(first,mid)<0) { \
if (compare(mid,last)>0) { \
swapper(mid,last); \
if (compare(first,mid)>0) swapper(first,mid);\
} \
} \
else { \
if (compare(mid,last)>0) swapper(first,last)\
else { \
swapper(first,mid); \
if (compare(mid,last)>0) swapper(mid,last);\
} \
} \
first+=sz; last-=sz; \
}
#ifdef DEBUG_QSORT
#include <stdio.h>
#endif
/* and so is the partitioning logic: */
#define Partition(swapper,sz) { \
int swapped=0; \
do { \
while (compare(first,pivot)<0) first+=sz; \
while (compare(pivot,last)<0) last-=sz; \
if (first<last) { \
swapper(first,last); swapped=1; \
first+=sz; last-=sz; } \
else if (first==last) { first+=sz; last-=sz; break; }\
} while (first<=last); \
if (!swapped) pop \
}
/* and so is the pre-insertion-sort operation of putting
* the smallest element into place as a sentinel.
* Doing this makes the inner loop nicer. I got this
* idea from the GNU implementation of qsort().
*/
#define PreInsertion(swapper,limit,sz) \
first=base; \
last=first + (nmemb>limit ? limit : nmemb-1)*sz;\
while (last!=base) { \
if (compare(first,last)>0) first=last; \
last-=sz; } \
if (first!=base) swapper(first,(char*)base);
/* and so is the insertion sort, in the first two cases: */
#define Insertion(swapper) \
last=((char*)base)+nmemb*size; \
for (first=((char*)base)+size;first!=last;first+=size) { \
char *test; \
/* Find the right place for |first|. \
* My apologies for var reuse. */ \
for (test=first-size;compare(test,first)>0;test-=size) ; \
test+=size; \
if (test!=first) { \
/* Shift everything in [test,first) \
* up by one, and place |first| \
* where |test| is. */ \
memcpy(pivot,first,size); \
memmove(test+size,test,first-test); \
memcpy(test,pivot,size); \
} \
}
#define SWAP_nonaligned(a,b) { \
register char *aa=(a),*bb=(b); \
register size_t sz=size; \
do { register char t=*aa; *aa++=*bb; *bb++=t; } while (--sz); }
#define SWAP_aligned(a,b) { \
register int *aa=(int*)(a),*bb=(int*)(b); \
register size_t sz=size; \
do { register int t=*aa;*aa++=*bb; *bb++=t; } while (sz-=WORD_BYTES); }
#define SWAP_words(a,b) { \
register int t=*((int*)a); *((int*)a)=*((int*)b); *((int*)b)=t; }
/* ---------------------------------------------------------------------- */
static char * pivot_big(char *first, char *mid, char *last, size_t size,
int compare(const void *, const void *)) {
size_t d=(((last-first)/size)>>3)*size;
char *m1,*m2,*m3;
{ char *a=first, *b=first+d, *c=first+2*d;
#ifdef DEBUG_QSORT
fprintf(stderr,"< %d %d %d\n",*(int*)a,*(int*)b,*(int*)c);
#endif
m1 = compare(a,b)<0 ?
(compare(b,c)<0 ? b : (compare(a,c)<0 ? c : a))
: (compare(a,c)<0 ? a : (compare(b,c)<0 ? c : b));
}
{ char *a=mid-d, *b=mid, *c=mid+d;
#ifdef DEBUG_QSORT
fprintf(stderr,". %d %d %d\n",*(int*)a,*(int*)b,*(int*)c);
#endif
m2 = compare(a,b)<0 ?
(compare(b,c)<0 ? b : (compare(a,c)<0 ? c : a))
: (compare(a,c)<0 ? a : (compare(b,c)<0 ? c : b));
}
{ char *a=last-2*d, *b=last-d, *c=last;
#ifdef DEBUG_QSORT
fprintf(stderr,"> %d %d %d\n",*(int*)a,*(int*)b,*(int*)c);
#endif
m3 = compare(a,b)<0 ?
(compare(b,c)<0 ? b : (compare(a,c)<0 ? c : a))
: (compare(a,c)<0 ? a : (compare(b,c)<0 ? c : b));
}
#ifdef DEBUG_QSORT
fprintf(stderr,"-> %d %d %d\n",*(int*)m1,*(int*)m2,*(int*)m3);
#endif
return compare(m1,m2)<0 ?
(compare(m2,m3)<0 ? m2 : (compare(m1,m3)<0 ? m3 : m1))
: (compare(m1,m3)<0 ? m1 : (compare(m2,m3)<0 ? m3 : m2));
}
/* ---------------------------------------------------------------------- */
static void qsort_nonaligned(void *base, size_t nmemb, size_t size,
int (*compare)(const void *, const void *)) {
stack_entry stack[STACK_SIZE];
int stacktop=0;
char *first,*last;
char *pivot=malloc(size);
size_t trunc=TRUNC_nonaligned*size;
assert(pivot!=0);
first=(char*)base; last=first+(nmemb-1)*size;
if ((size_t)(last-first)>trunc) {
char *ffirst=first, *llast=last;
while (1) {
/* Select pivot */
{ char * mid=first+size*((last-first)/size >> 1);
Pivot(SWAP_nonaligned,size);
memcpy(pivot,mid,size);
}
/* Partition. */
Partition(SWAP_nonaligned,size);
/* Prepare to recurse/iterate. */
Recurse(trunc)
}
}
PreInsertion(SWAP_nonaligned,TRUNC_nonaligned,size);
Insertion(SWAP_nonaligned);
free(pivot);
}
static void qsort_aligned(void *base, size_t nmemb, size_t size,
int (*compare)(const void *, const void *)) {
stack_entry stack[STACK_SIZE];
int stacktop=0;
char *first,*last;
char *pivot=malloc(size);
size_t trunc=TRUNC_aligned*size;
assert(pivot!=0);
first=(char*)base; last=first+(nmemb-1)*size;
if ((size_t)(last-first)>trunc) {
char *ffirst=first,*llast=last;
while (1) {
/* Select pivot */
{ char * mid=first+size*((last-first)/size >> 1);
Pivot(SWAP_aligned,size);
memcpy(pivot,mid,size);
}
/* Partition. */
Partition(SWAP_aligned,size);
/* Prepare to recurse/iterate. */
Recurse(trunc)
}
}
PreInsertion(SWAP_aligned,TRUNC_aligned,size);
Insertion(SWAP_aligned);
free(pivot);
}
static void qsort_words(void *base, size_t nmemb,
int (*compare)(const void *, const void *)) {
stack_entry stack[STACK_SIZE];
int stacktop=0;
char *first,*last;
char *pivot=malloc(WORD_BYTES);
assert(pivot!=0);
first=(char*)base; last=first+(nmemb-1)*WORD_BYTES;
if (last-first>TRUNC_words) {
char *ffirst=first, *llast=last;
while (1) {
#ifdef DEBUG_QSORT
fprintf(stderr,"Doing %d:%d: ",
(first-(char*)base)/WORD_BYTES,
(last-(char*)base)/WORD_BYTES);
#endif
/* Select pivot */
{ char * mid=first+WORD_BYTES*((last-first) / (2*WORD_BYTES));
Pivot(SWAP_words,WORD_BYTES);
*(int*)pivot=*(int*)mid;
}
#ifdef DEBUG_QSORT
fprintf(stderr,"pivot=%d\n",*(int*)pivot);
#endif
/* Partition. */
Partition(SWAP_words,WORD_BYTES);
/* Prepare to recurse/iterate. */
Recurse(TRUNC_words)
}
}
PreInsertion(SWAP_words,(TRUNC_words/WORD_BYTES),WORD_BYTES);
/* Now do insertion sort. */
last=((char*)base)+nmemb*WORD_BYTES;
for (first=((char*)base)+WORD_BYTES;first!=last;first+=WORD_BYTES) {
/* Find the right place for |first|. My apologies for var reuse */
int *pl=(int*)(first-WORD_BYTES),*pr=(int*)first;
*(int*)pivot=*(int*)first;
for (;compare(pl,pivot)>0;pr=pl,--pl) {
*pr=*pl; }
if (pr!=(int*)first) *pr=*(int*)pivot;
}
free(pivot);
}
/* ---------------------------------------------------------------------- */
void qsort(void *base, size_t nmemb, size_t size,
int (*compare)(const void *, const void *)) {
if (nmemb<=1) return;
if (((uintptr_t)base|size)&(WORD_BYTES-1))
qsort_nonaligned(base,nmemb,size,compare);
else if (size!=WORD_BYTES)
qsort_aligned(base,nmemb,size,compare);
else
qsort_words(base,nmemb,compare);
}
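/* Usage sketch (standard qsort() contract; names are hypothetical):
*
*	static int cmp_int(const void *a, const void *b) {
*	    int x = *(const int *)a, y = *(const int *)b;
*	    return (x > y) - (x < y);
*	}
*
*	int v[] = { 3, 1, 2 };
*	qsort(v, 3, sizeof v[0], cmp_int);	sorts v to {1, 2, 3}
*/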
#endif /* !HAVE_QSORT */
| gpl-2.0 |
m2mselect/owrt | DLpatch/linux-3.18.29/mm/rmap.c | 71 | 51497 | /*
* mm/rmap.c - physical to virtual reverse mappings
*
* Copyright 2001, Rik van Riel <riel@conectiva.com.br>
* Released under the General Public License (GPL).
*
* Simple, low overhead reverse mapping scheme.
* Please try to keep this thing as modular as possible.
*
* Provides methods for unmapping each kind of mapped page:
* the anon methods track anonymous pages, and
* the file methods track pages belonging to an inode.
*
* Original design by Rik van Riel <riel@conectiva.com.br> 2001
* File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
* Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
* Contributions by Hugh Dickins 2003, 2004
*/
/*
* Lock ordering in mm:
*
* inode->i_mutex (while writing or truncating, not reading or faulting)
* mm->mmap_sem
* page->flags PG_locked (lock_page)
* mapping->i_mmap_mutex
* anon_vma->rwsem
* mm->page_table_lock or pte_lock
* zone->lru_lock (in mark_page_accessed, isolate_lru_page)
* swap_lock (in swap_duplicate, swap_info_get)
* mmlist_lock (in mmput, drain_mmlist and others)
* mapping->private_lock (in __set_page_dirty_buffers)
* inode->i_lock (in set_page_dirty's __mark_inode_dirty)
* bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
* sb_lock (within inode_lock in fs/fs-writeback.c)
* mapping->tree_lock (widely used, in set_page_dirty,
* in arch-dependent flush_dcache_mmap_lock,
* within bdi.wb->list_lock in __sync_single_inode)
*
* anon_vma->rwsem,mapping->i_mutex (memory_failure, collect_procs_anon)
* ->tasklist_lock
* pte map lock
*/
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>
#include <asm/tlbflush.h>
#include "internal.h"
static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;
static inline struct anon_vma *anon_vma_alloc(void)
{
struct anon_vma *anon_vma;
anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
if (anon_vma) {
atomic_set(&anon_vma->refcount, 1);
anon_vma->degree = 1; /* Reference for first vma */
anon_vma->parent = anon_vma;
/*
* Initialise the anon_vma root to point to itself. If called
* from fork, the root will be reset to the parent's anon_vma.
*/
anon_vma->root = anon_vma;
}
return anon_vma;
}
static inline void anon_vma_free(struct anon_vma *anon_vma)
{
VM_BUG_ON(atomic_read(&anon_vma->refcount));
/*
* Synchronize against page_lock_anon_vma_read() such that
* we can safely hold the lock without the anon_vma getting
* freed.
*
* Relies on the full mb implied by the atomic_dec_and_test() from
* put_anon_vma() against the acquire barrier implied by
* down_read_trylock() from page_lock_anon_vma_read(). This orders:
*
* page_lock_anon_vma_read() VS put_anon_vma()
* down_read_trylock() atomic_dec_and_test()
* LOCK MB
* atomic_read() rwsem_is_locked()
*
* LOCK should suffice since the actual taking of the lock must
* happen _before_ what follows.
*/
might_sleep();
if (rwsem_is_locked(&anon_vma->root->rwsem)) {
anon_vma_lock_write(anon_vma);
anon_vma_unlock_write(anon_vma);
}
kmem_cache_free(anon_vma_cachep, anon_vma);
}
static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}
static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}
static void anon_vma_chain_link(struct vm_area_struct *vma,
struct anon_vma_chain *avc,
struct anon_vma *anon_vma)
{
avc->vma = vma;
avc->anon_vma = anon_vma;
list_add(&avc->same_vma, &vma->anon_vma_chain);
anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}
/**
* anon_vma_prepare - attach an anon_vma to a memory region
* @vma: the memory region in question
*
* This makes sure the memory mapping described by 'vma' has
* an 'anon_vma' attached to it, so that we can associate the
* anonymous pages mapped into it with that anon_vma.
*
* The common case will be that we already have one, but if
* not we either need to find an adjacent mapping that we
* can re-use the anon_vma from (very common when the only
* reason for splitting a vma has been mprotect()), or we
* allocate a new one.
*
* Anon-vma allocations are very subtle, because we may have
* optimistically looked up an anon_vma in page_lock_anon_vma_read()
* and that may actually touch the spinlock even in the newly
* allocated vma (it depends on RCU to make sure that the
* anon_vma isn't actually destroyed).
*
* As a result, we need to do proper anon_vma locking even
* for the new allocation. At the same time, we do not want
* to do any locking for the common case of already having
* an anon_vma.
*
* This must be called with the mmap_sem held for reading.
*/
int anon_vma_prepare(struct vm_area_struct *vma)
{
struct anon_vma *anon_vma = vma->anon_vma;
struct anon_vma_chain *avc;
might_sleep();
if (unlikely(!anon_vma)) {
struct mm_struct *mm = vma->vm_mm;
struct anon_vma *allocated;
avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto out_enomem;
anon_vma = find_mergeable_anon_vma(vma);
allocated = NULL;
if (!anon_vma) {
anon_vma = anon_vma_alloc();
if (unlikely(!anon_vma))
goto out_enomem_free_avc;
allocated = anon_vma;
}
anon_vma_lock_write(anon_vma);
/* page_table_lock to protect against threads */
spin_lock(&mm->page_table_lock);
if (likely(!vma->anon_vma)) {
vma->anon_vma = anon_vma;
anon_vma_chain_link(vma, avc, anon_vma);
/* vma reference or self-parent link for new root */
anon_vma->degree++;
allocated = NULL;
avc = NULL;
}
spin_unlock(&mm->page_table_lock);
anon_vma_unlock_write(anon_vma);
if (unlikely(allocated))
put_anon_vma(allocated);
if (unlikely(avc))
anon_vma_chain_free(avc);
}
return 0;
out_enomem_free_avc:
anon_vma_chain_free(avc);
out_enomem:
return -ENOMEM;
}
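/*
* Illustrative sketch, not part of this file: the typical caller in a
* fault path prepares the anon_vma before mapping a new anonymous page
* and treats failure as OOM, e.g.:
*
*	if (unlikely(anon_vma_prepare(vma)))
*		return VM_FAULT_OOM;
*	page = alloc_zeroed_user_highpage_movable(vma, address);
*
* mmap_sem must be held for reading across the call, as noted in the
* kernel-doc above.
*/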
/*
* This is a useful helper function for locking the anon_vma root as
* we traverse the vma->anon_vma_chain, looping over anon_vma's that
* have the same vma.
*
* Such anon_vmas should all have the same root, so you'd expect to take
* the root rwsem just once for the whole traversal.
*/
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
struct anon_vma *new_root = anon_vma->root;
if (new_root != root) {
if (WARN_ON_ONCE(root))
up_write(&root->rwsem);
root = new_root;
down_write(&root->rwsem);
}
return root;
}
static inline void unlock_anon_vma_root(struct anon_vma *root)
{
if (root)
up_write(&root->rwsem);
}
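/*
* Illustrative sketch, not part of this file: the intended traversal
* pattern threads the root through the loop, so the common same-root
* case takes the rwsem only once:
*
*	struct anon_vma *root = NULL;
*	struct anon_vma_chain *avc;
*
*	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
*		root = lock_anon_vma_root(root, avc->anon_vma);
*	unlock_anon_vma_root(root);
*
* anon_vma_clone() and unlink_anon_vmas() below follow exactly this
* shape.
*/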
/*
* Attach the anon_vmas from src to dst.
* Returns 0 on success, -ENOMEM on failure.
*
* If dst->anon_vma is NULL, this function tries to find and reuse an existing
* anon_vma which has no vmas and only one child anon_vma. This prevents the
* anon_vma hierarchy from degrading into an endless linear chain when a task
* forks repeatedly. On the other hand, an anon_vma with more than one child
* isn't reused even if no live vma remains, so the rmap walker has a good
* chance of avoiding a scan of the whole hierarchy when it searches for where
* a page is mapped.
*/
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
struct anon_vma_chain *avc, *pavc;
struct anon_vma *root = NULL;
list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma;
avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
if (unlikely(!avc)) {
unlock_anon_vma_root(root);
root = NULL;
avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto enomem_failure;
}
anon_vma = pavc->anon_vma;
root = lock_anon_vma_root(root, anon_vma);
anon_vma_chain_link(dst, avc, anon_vma);
/*
* Reuse the existing anon_vma if its degree is lower than two,
* which means it has no vma and only one anon_vma child.
*
* Do not choose the parent anon_vma, otherwise the first child
* will always reuse it. The root anon_vma is never reused:
* it has a self-parent reference and at least one child.
*/
if (!dst->anon_vma && anon_vma != src->anon_vma &&
anon_vma->degree < 2)
dst->anon_vma = anon_vma;
}
if (dst->anon_vma)
dst->anon_vma->degree++;
unlock_anon_vma_root(root);
return 0;
enomem_failure:
/*
* dst->anon_vma must be dropped here, otherwise its degree would be
* incorrectly decremented in unlink_anon_vmas().
* We can safely do this because callers of anon_vma_clone() don't care
* about dst->anon_vma if anon_vma_clone() failed.
*/
dst->anon_vma = NULL;
unlink_anon_vmas(dst);
return -ENOMEM;
}
/*
* Attach vma to its own anon_vma, as well as to the anon_vmas that
* the corresponding VMA in the parent process is attached to.
* Returns 0 on success, non-zero on failure.
*/
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
struct anon_vma_chain *avc;
struct anon_vma *anon_vma;
int error;
/* Don't bother if the parent process has no anon_vma here. */
if (!pvma->anon_vma)
return 0;
/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
vma->anon_vma = NULL;
/*
* First, attach the new VMA to the parent VMA's anon_vmas,
* so rmap can find non-COWed pages in child processes.
*/
error = anon_vma_clone(vma, pvma);
if (error)
return error;
/* An existing anon_vma has been reused, all done then. */
if (vma->anon_vma)
return 0;
/* Then add our own anon_vma. */
anon_vma = anon_vma_alloc();
if (!anon_vma)
goto out_error;
avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto out_error_free_anon_vma;
/*
* The root anon_vma's rwsem is the lock actually used when we
* lock any of the anon_vmas in this anon_vma tree.
*/
anon_vma->root = pvma->anon_vma->root;
anon_vma->parent = pvma->anon_vma;
/*
* With refcounts, an anon_vma can stay around longer than the
* process it belongs to. The root anon_vma needs to be pinned until
* this anon_vma is freed, because the lock lives in the root.
*/
get_anon_vma(anon_vma->root);
/* Mark this anon_vma as the one where our new (COWed) pages go. */
vma->anon_vma = anon_vma;
anon_vma_lock_write(anon_vma);
anon_vma_chain_link(vma, avc, anon_vma);
anon_vma->parent->degree++;
anon_vma_unlock_write(anon_vma);
return 0;
out_error_free_anon_vma:
put_anon_vma(anon_vma);
out_error:
unlink_anon_vmas(vma);
return -ENOMEM;
}
void unlink_anon_vmas(struct vm_area_struct *vma)
{
struct anon_vma_chain *avc, *next;
struct anon_vma *root = NULL;
/*
* Unlink each anon_vma chained to the VMA. This list is ordered
* from newest to oldest, ensuring the root anon_vma gets freed last.
*/
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;
root = lock_anon_vma_root(root, anon_vma);
anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
/*
* Leave empty anon_vmas on the list - we'll need
* to free them outside the lock.
*/
if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
anon_vma->parent->degree--;
continue;
}
list_del(&avc->same_vma);
anon_vma_chain_free(avc);
}
if (vma->anon_vma)
vma->anon_vma->degree--;
unlock_anon_vma_root(root);
/*
* Iterate the list once more; it now contains only empty and unlinked
* anon_vmas, so destroy them. This could not be done earlier because
* __put_anon_vma() may need to write-acquire the anon_vma->root->rwsem.
*/
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;
BUG_ON(anon_vma->degree);
put_anon_vma(anon_vma);
list_del(&avc->same_vma);
anon_vma_chain_free(avc);
}
}
static void anon_vma_ctor(void *data)
{
struct anon_vma *anon_vma = data;
init_rwsem(&anon_vma->rwsem);
atomic_set(&anon_vma->refcount, 0);
anon_vma->rb_root = RB_ROOT;
}
void __init anon_vma_init(void)
{
anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}
/*
* Getting a lock on a stable anon_vma from a page off the LRU is tricky!
*
* Since there is no serialization whatsoever against page_remove_rmap(),
* the best this function can do is return a locked anon_vma that might
* have been relevant to this page.
*
* The page might have been remapped to a different anon_vma or the anon_vma
* returned may already be freed (and even reused).
*
* In case it was remapped to a different anon_vma, the new anon_vma will be a
* child of the old anon_vma, and the anon_vma lifetime rules will therefore
* ensure that any anon_vma obtained from the page will still be valid for as
* long as we observe page_mapped() [ hence all those page_mapped() tests ].
*
* All users of this function must be very careful when walking the anon_vma
* chain and verify that the page in question is indeed mapped in it
* [ something equivalent to page_mapped_in_vma() ].
*
* Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
* that the anon_vma pointer from page->mapping is valid if there is a
* mapcount, we can dereference the anon_vma after observing those.
*/
struct anon_vma *page_get_anon_vma(struct page *page)
{
struct anon_vma *anon_vma = NULL;
unsigned long anon_mapping;
rcu_read_lock();
anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
goto out;
if (!page_mapped(page))
goto out;
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
if (!atomic_inc_not_zero(&anon_vma->refcount)) {
anon_vma = NULL;
goto out;
}
/*
* If this page is still mapped, then its anon_vma cannot have been
* freed. But if it has been unmapped, we have no security against the
* anon_vma structure being freed and reused (for another anon_vma:
* SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
* above cannot corrupt).
*/
if (!page_mapped(page)) {
rcu_read_unlock();
put_anon_vma(anon_vma);
return NULL;
}
out:
rcu_read_unlock();
return anon_vma;
}
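/*
* Illustrative sketch, not part of this file: callers pair the returned
* reference with put_anon_vma(); no lock is held on return:
*
*	struct anon_vma *av = page_get_anon_vma(page);
*	if (av) {
*		... inspect the anon_vma while the refcount pins it ...
*		put_anon_vma(av);
*	}
*/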
/*
* Similar to page_get_anon_vma() except it locks the anon_vma.
*
* It's a little more complex as it tries to keep the fast path to a single
* atomic op -- the trylock. If we fail the trylock, we fall back to getting a
* reference like with page_get_anon_vma() and then block on the rwsem.
*/
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
struct anon_vma *anon_vma = NULL;
struct anon_vma *root_anon_vma;
unsigned long anon_mapping;
rcu_read_lock();
anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
goto out;
if (!page_mapped(page))
goto out;
anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
root_anon_vma = ACCESS_ONCE(anon_vma->root);
if (down_read_trylock(&root_anon_vma->rwsem)) {
/*
* If the page is still mapped, then this anon_vma is still
* its anon_vma, and holding the rwsem ensures that it will
* not go away, see anon_vma_free().
*/
if (!page_mapped(page)) {
up_read(&root_anon_vma->rwsem);
anon_vma = NULL;
}
goto out;
}
/* trylock failed, we have to sleep */
if (!atomic_inc_not_zero(&anon_vma->refcount)) {
anon_vma = NULL;
goto out;
}
if (!page_mapped(page)) {
rcu_read_unlock();
put_anon_vma(anon_vma);
return NULL;
}
/* we pinned the anon_vma, it's safe to sleep */
rcu_read_unlock();
anon_vma_lock_read(anon_vma);
if (atomic_dec_and_test(&anon_vma->refcount)) {
/*
* Oops, we held the last refcount, release the lock
* and bail -- can't simply use put_anon_vma() because
* we'll deadlock on the anon_vma_lock_write() recursion.
*/
anon_vma_unlock_read(anon_vma);
__put_anon_vma(anon_vma);
anon_vma = NULL;
}
return anon_vma;
out:
rcu_read_unlock();
return anon_vma;
}
void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
anon_vma_unlock_read(anon_vma);
}
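/*
* Illustrative sketch, not part of this file: unlike page_get_anon_vma(),
* the _read variant returns with the root rwsem held for read, so the
* pairing is lock/unlock rather than get/put:
*
*	struct anon_vma *av = page_lock_anon_vma_read(page);
*	if (av) {
*		... walk the interval tree under the read lock ...
*		page_unlock_anon_vma_read(av);
*	}
*/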
/*
* At what user virtual address is page expected in @vma?
*/
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
pgoff_t pgoff = page_to_pgoff(page);
return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}
inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
unsigned long address = __vma_address(page, vma);
/* page should be within @vma mapping range */
VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
return address;
}
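/*
* Worked example with hypothetical numbers: for vm_start == 0x40000000,
* vm_pgoff == 0x10 and a page whose pgoff is 0x12, __vma_address()
* yields 0x40000000 + ((0x12 - 0x10) << PAGE_SHIFT), i.e. 0x40002000
* with 4 KiB pages -- two pages past the start of the mapping.
*/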
/*
* At what user virtual address is page expected in vma?
* Caller should check the page is actually part of the vma.
*/
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
unsigned long address;
if (PageAnon(page)) {
struct anon_vma *page__anon_vma = page_anon_vma(page);
/*
* Note: swapoff's unuse_vma() is more efficient with this
* check, and needs it to match anon_vma when KSM is active.
*/
if (!vma->anon_vma || !page__anon_vma ||
vma->anon_vma->root != page__anon_vma->root)
return -EFAULT;
} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
if (!vma->vm_file ||
vma->vm_file->f_mapping != page->mapping)
return -EFAULT;
} else
return -EFAULT;
address = __vma_address(page, vma);
if (unlikely(address < vma->vm_start || address >= vma->vm_end))
return -EFAULT;
return address;
}
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd = NULL;
pmd_t pmde;
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto out;
pud = pud_offset(pgd, address);
if (!pud_present(*pud))
goto out;
pmd = pmd_offset(pud, address);
/*
* Some THP functions use the sequence pmdp_clear_flush(), set_pmd_at()
* without holding anon_vma lock for write. So when looking for a
* genuine pmde (in which to find pte), test present and !THP together.
*/
pmde = ACCESS_ONCE(*pmd);
if (!pmd_present(pmde) || pmd_trans_huge(pmde))
pmd = NULL;
out:
return pmd;
}
/*
* Check that @page is mapped at @address into @mm.
*
* If @sync is false, page_check_address may perform a racy check to avoid
* the page table lock when the pte is not present (helpful when reclaiming
* highly shared pages).
*
* On success returns with pte mapped and locked.
*/
pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
unsigned long address, spinlock_t **ptlp, int sync)
{
pmd_t *pmd;
pte_t *pte;
spinlock_t *ptl;
if (unlikely(PageHuge(page))) {
/* when pud is not present, pte will be NULL */
pte = huge_pte_offset(mm, address);
if (!pte)
return NULL;
ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
goto check;
}
pmd = mm_find_pmd(mm, address);
if (!pmd)
return NULL;
pte = pte_offset_map(pmd, address);
/* Make a quick check before getting the lock */
if (!sync && !pte_present(*pte)) {
pte_unmap(pte);
return NULL;
}
ptl = pte_lockptr(mm, pmd);
check:
spin_lock(ptl);
if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
*ptlp = ptl;
return pte;
}
pte_unmap_unlock(pte, ptl);
return NULL;
}
/**
* page_mapped_in_vma - check whether a page is really mapped in a VMA
* @page: the page to test
* @vma: the VMA to test
*
* Returns 1 if the page is mapped into the page tables of the VMA, 0
* if the page is not mapped into the page tables of this VMA. Only
* valid for normal file or anonymous VMAs.
*/
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
unsigned long address;
pte_t *pte;
spinlock_t *ptl;
address = __vma_address(page, vma);
if (unlikely(address < vma->vm_start || address >= vma->vm_end))
return 0;
pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
if (!pte) /* the page is not in this mm */
return 0;
pte_unmap_unlock(pte, ptl);
return 1;
}
struct page_referenced_arg {
int mapcount;
int referenced;
unsigned long vm_flags;
struct mem_cgroup *memcg;
};
/*
* @arg: a page_referenced_arg is passed in this argument
*/
static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
spinlock_t *ptl;
int referenced = 0;
struct page_referenced_arg *pra = arg;
if (unlikely(PageTransHuge(page))) {
pmd_t *pmd;
/*
* rmap might return false positives; we must filter
* these out using page_check_address_pmd().
*/
pmd = page_check_address_pmd(page, mm, address,
PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
if (!pmd)
return SWAP_AGAIN;
if (vma->vm_flags & VM_LOCKED) {
spin_unlock(ptl);
pra->vm_flags |= VM_LOCKED;
return SWAP_FAIL; /* To break the loop */
}
/* go ahead even if the pmd is pmd_trans_splitting() */
if (pmdp_clear_flush_young_notify(vma, address, pmd))
referenced++;
spin_unlock(ptl);
} else {
pte_t *pte;
/*
* rmap might return false positives; we must filter
* these out using page_check_address().
*/
pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte)
return SWAP_AGAIN;
if (vma->vm_flags & VM_LOCKED) {
pte_unmap_unlock(pte, ptl);
pra->vm_flags |= VM_LOCKED;
return SWAP_FAIL; /* To break the loop */
}
if (ptep_clear_flush_young_notify(vma, address, pte)) {
/*
* Don't treat a reference through a sequentially read
* mapping as such. If the page has been used in
* another mapping, we will catch it; if this other
* mapping is already gone, the unmap path will have
* set PG_referenced or activated the page.
*/
if (likely(!(vma->vm_flags & VM_SEQ_READ)))
referenced++;
}
pte_unmap_unlock(pte, ptl);
}
if (referenced) {
pra->referenced++;
pra->vm_flags |= vma->vm_flags;
}
pra->mapcount--;
if (!pra->mapcount)
return SWAP_SUCCESS; /* To break the loop */
return SWAP_AGAIN;
}
static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
struct page_referenced_arg *pra = arg;
struct mem_cgroup *memcg = pra->memcg;
if (!mm_match_cgroup(vma->vm_mm, memcg))
return true;
return false;
}
/**
* page_referenced - test if the page was referenced
* @page: the page to test
* @is_locked: caller holds lock on the page
* @memcg: target memory cgroup
* @vm_flags: collect the vm_flags of the vmas which actually referenced the page
*
* Quick test_and_clear_referenced for all mappings to a page,
* returns the number of ptes which referenced the page.
*/
int page_referenced(struct page *page,
int is_locked,
struct mem_cgroup *memcg,
unsigned long *vm_flags)
{
int ret;
int we_locked = 0;
struct page_referenced_arg pra = {
.mapcount = page_mapcount(page),
.memcg = memcg,
};
struct rmap_walk_control rwc = {
.rmap_one = page_referenced_one,
.arg = (void *)&pra,
.anon_lock = page_lock_anon_vma_read,
};
*vm_flags = 0;
if (!page_mapped(page))
return 0;
if (!page_rmapping(page))
return 0;
if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
we_locked = trylock_page(page);
if (!we_locked)
return 1;
}
/*
* If we are reclaiming on behalf of a cgroup, skip
* counting on behalf of references from different
* cgroups
*/
if (memcg) {
rwc.invalid_vma = invalid_page_referenced_vma;
}
ret = rmap_walk(page, &rwc);
*vm_flags = pra.vm_flags;
if (we_locked)
unlock_page(page);
return pra.referenced;
}
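/*
* Illustrative sketch, not part of this file: reclaim-style callers
* check the collected vm_flags as well as the count, roughly:
*
*	unsigned long vm_flags;
*	int referenced = page_referenced(page, 1, NULL, &vm_flags);
*
*	if (vm_flags & VM_LOCKED)
*		... treat the page as unevictable ...
*	else if (referenced)
*		... keep the page active ...
*/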
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *pte;
spinlock_t *ptl;
int ret = 0;
int *cleaned = arg;
pte = page_check_address(page, mm, address, &ptl, 1);
if (!pte)
goto out;
if (pte_dirty(*pte) || pte_write(*pte)) {
pte_t entry;
flush_cache_page(vma, address, pte_pfn(*pte));
entry = ptep_clear_flush(vma, address, pte);
entry = pte_wrprotect(entry);
entry = pte_mkclean(entry);
set_pte_at(mm, address, pte, entry);
ret = 1;
}
pte_unmap_unlock(pte, ptl);
if (ret) {
mmu_notifier_invalidate_page(mm, address);
(*cleaned)++;
}
out:
return SWAP_AGAIN;
}
static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
if (vma->vm_flags & VM_SHARED)
return false;
return true;
}
int page_mkclean(struct page *page)
{
int cleaned = 0;
struct address_space *mapping;
struct rmap_walk_control rwc = {
.arg = (void *)&cleaned,
.rmap_one = page_mkclean_one,
.invalid_vma = invalid_mkclean_vma,
};
BUG_ON(!PageLocked(page));
if (!page_mapped(page))
return 0;
mapping = page_mapping(page);
if (!mapping)
return 0;
rmap_walk(page, &rwc);
return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);
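/*
* Illustrative sketch, not part of this file: writeback uses
* page_mkclean() to write-protect all ptes before cleaning the page, so
* a later store re-dirties it through a fault. The real caller is
* clear_page_dirty_for_io(), which does roughly:
*
*	if (page_mkclean(page))
*		set_page_dirty(page);
*/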
/**
* page_move_anon_rmap - move a page to our anon_vma
* @page: the page to move to our anon_vma
* @vma: the vma the page belongs to
* @address: the user virtual address mapped
*
* When a page belongs exclusively to one process after a COW event,
* that page can be moved into the anon_vma that belongs to just that
* process, so the rmap code will not search the parent or sibling
* processes.
*/
void page_move_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
struct anon_vma *anon_vma = vma->anon_vma;
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_VMA(!anon_vma, vma);
VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
}
/**
* __page_set_anon_rmap - set up new anonymous rmap
* @page: Page to add to rmap
* @vma: VM area to add page to.
* @address: User virtual address of the mapping
* @exclusive: the page is exclusively owned by the current process
*/
static void __page_set_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address, int exclusive)
{
struct anon_vma *anon_vma = vma->anon_vma;
BUG_ON(!anon_vma);
if (PageAnon(page))
return;
/*
* If the page isn't exclusively mapped into this vma,
* we must use the _oldest_ possible anon_vma for the
* page mapping!
*/
if (!exclusive)
anon_vma = anon_vma->root;
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
page->index = linear_page_index(vma, address);
}
/**
* __page_check_anon_rmap - sanity check anonymous rmap addition
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*/
static void __page_check_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
/*
* The page's anon-rmap details (mapping and index) are guaranteed to
* be set up correctly at this point.
*
* We have exclusion against page_add_anon_rmap because the caller
* always holds the page locked, except if called from page_dup_rmap,
* in which case the page is already known to be setup.
*
* We have exclusion against page_add_new_anon_rmap because those pages
* are initially only visible via the pagetables, and the pte is locked
* over the call to page_add_new_anon_rmap.
*/
BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
BUG_ON(page->index != linear_page_index(vma, address));
#endif
}
/**
* page_add_anon_rmap - add pte mapping to an anonymous page
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
* The caller needs to hold the pte lock, and the page must be locked in
* the anon_vma case: to serialize mapping/index checking after setting,
* and to ensure that PageAnon is not being upgraded racily to PageKsm
* (but PageKsm is never downgraded to PageAnon).
*/
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
do_page_add_anon_rmap(page, vma, address, 0);
}
/*
* Special version of the above for do_swap_page, which often runs
* into pages that are exclusively owned by the current process.
* Everybody else should continue to use page_add_anon_rmap above.
*/
void do_page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address, int exclusive)
{
int first = atomic_inc_and_test(&page->_mapcount);
if (first) {
/*
* We use the irq-unsafe __{inc|mod}_zone_page_stat because
* these counters are not modified in interrupt context, and
* pte lock (a spinlock) is held, which implies preemption
* disabled.
*/
if (PageTransHuge(page))
__inc_zone_page_state(page,
NR_ANON_TRANSPARENT_HUGEPAGES);
__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
hpage_nr_pages(page));
}
if (unlikely(PageKsm(page)))
return;
VM_BUG_ON_PAGE(!PageLocked(page), page);
/* address might be in next vma when migration races vma_adjust */
if (first)
__page_set_anon_rmap(page, vma, address, exclusive);
else
__page_check_anon_rmap(page, vma, address);
}
/**
* page_add_new_anon_rmap - add pte mapping to a new anonymous page
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
* Same as page_add_anon_rmap but must only be called on *new* pages.
* This means the inc-and-test can be bypassed.
* Page does not have to be locked.
*/
void page_add_new_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
SetPageSwapBacked(page);
atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
if (PageTransHuge(page))
__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
hpage_nr_pages(page));
__page_set_anon_rmap(page, vma, address, 1);
}
/**
* page_add_file_rmap - add pte mapping to a file page
* @page: the page to add the mapping to
*
* The caller needs to hold the pte lock.
*/
void page_add_file_rmap(struct page *page)
{
struct mem_cgroup *memcg;
unsigned long flags;
bool locked;
memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
if (atomic_inc_and_test(&page->_mapcount)) {
__inc_zone_page_state(page, NR_FILE_MAPPED);
mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
}
mem_cgroup_end_page_stat(memcg, locked, flags);
}
static void page_remove_file_rmap(struct page *page)
{
struct mem_cgroup *memcg;
unsigned long flags;
bool locked;
memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
/* page still mapped by someone else? */
if (!atomic_add_negative(-1, &page->_mapcount))
goto out;
/* Hugepages are not counted in NR_FILE_MAPPED for now. */
if (unlikely(PageHuge(page)))
goto out;
/*
* We use the irq-unsafe __{inc|mod}_zone_page_stat because
* these counters are not modified in interrupt context, and
* pte lock (a spinlock) is held, which implies preemption disabled.
*/
__dec_zone_page_state(page, NR_FILE_MAPPED);
mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
if (unlikely(PageMlocked(page)))
clear_page_mlock(page);
out:
mem_cgroup_end_page_stat(memcg, locked, flags);
}
/**
* page_remove_rmap - take down pte mapping from a page
* @page: page to remove mapping from
*
* The caller needs to hold the pte lock.
*/
void page_remove_rmap(struct page *page)
{
if (!PageAnon(page)) {
page_remove_file_rmap(page);
return;
}
/* page still mapped by someone else? */
if (!atomic_add_negative(-1, &page->_mapcount))
return;
/* Hugepages are not counted in NR_ANON_PAGES for now. */
if (unlikely(PageHuge(page)))
return;
/*
* We use the irq-unsafe __{inc|mod}_zone_page_stat because
* these counters are not modified in interrupt context, and
* pte lock (a spinlock) is held, which implies preemption disabled.
*/
if (PageTransHuge(page))
__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
-hpage_nr_pages(page));
if (unlikely(PageMlocked(page)))
clear_page_mlock(page);
/*
* It would be tidy to reset the PageAnon mapping here,
* but that might overwrite a racing page_add_anon_rmap
* which increments mapcount after us but sets mapping
* before us: so leave the reset to free_hot_cold_page,
* and remember that it's only reliable while mapped.
* Leaving it set also helps swapoff to reinstate ptes
* faster for those pages still in swapcache.
*/
}
/*
* @arg: an enum ttu_flags value is passed in this argument
*/
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *pte;
pte_t pteval;
spinlock_t *ptl;
int ret = SWAP_AGAIN;
enum ttu_flags flags = (enum ttu_flags)arg;
pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte)
goto out;
/*
* If the page is mlock()d, we cannot swap it out.
* If it's recently referenced (perhaps page_referenced
* skipped over this mm) then we should reactivate it.
*/
if (!(flags & TTU_IGNORE_MLOCK)) {
if (vma->vm_flags & VM_LOCKED)
goto out_mlock;
if (flags & TTU_MUNLOCK)
goto out_unmap;
}
if (!(flags & TTU_IGNORE_ACCESS)) {
if (ptep_clear_flush_young_notify(vma, address, pte)) {
ret = SWAP_FAIL;
goto out_unmap;
}
}
/* Nuke the page table entry. */
flush_cache_page(vma, address, page_to_pfn(page));
pteval = ptep_clear_flush(vma, address, pte);
/* Move the dirty bit to the physical page now the pte is gone. */
if (pte_dirty(pteval))
set_page_dirty(page);
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
if (!PageHuge(page)) {
if (PageAnon(page))
dec_mm_counter(mm, MM_ANONPAGES);
else
dec_mm_counter(mm, MM_FILEPAGES);
}
set_pte_at(mm, address, pte,
swp_entry_to_pte(make_hwpoison_entry(page)));
} else if (pte_unused(pteval)) {
/*
* The guest indicated that the page content is of no
* interest anymore. Simply discard the pte, vmscan
* will take care of the rest.
*/
if (PageAnon(page))
dec_mm_counter(mm, MM_ANONPAGES);
else
dec_mm_counter(mm, MM_FILEPAGES);
} else if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(page) };
pte_t swp_pte;
if (PageSwapCache(page)) {
/*
* Store the swap location in the pte.
* See handle_pte_fault() ...
*/
if (swap_duplicate(entry) < 0) {
set_pte_at(mm, address, pte, pteval);
ret = SWAP_FAIL;
goto out_unmap;
}
if (list_empty(&mm->mmlist)) {
spin_lock(&mmlist_lock);
if (list_empty(&mm->mmlist))
list_add(&mm->mmlist, &init_mm.mmlist);
spin_unlock(&mmlist_lock);
}
dec_mm_counter(mm, MM_ANONPAGES);
inc_mm_counter(mm, MM_SWAPENTS);
} else if (IS_ENABLED(CONFIG_MIGRATION)) {
/*
* Store the pfn of the page in a special migration
* pte. do_swap_page() will wait until the migration
* pte is removed and then restart fault handling.
*/
BUG_ON(!(flags & TTU_MIGRATION));
entry = make_migration_entry(page, pte_write(pteval));
}
swp_pte = swp_entry_to_pte(entry);
if (pte_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
set_pte_at(mm, address, pte, swp_pte);
BUG_ON(pte_file(*pte));
} else if (IS_ENABLED(CONFIG_MIGRATION) &&
(flags & TTU_MIGRATION)) {
/* Establish migration entry for a file page */
swp_entry_t entry;
entry = make_migration_entry(page, pte_write(pteval));
set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
} else
dec_mm_counter(mm, MM_FILEPAGES);
page_remove_rmap(page);
page_cache_release(page);
out_unmap:
pte_unmap_unlock(pte, ptl);
if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK))
mmu_notifier_invalidate_page(mm, address);
out:
return ret;
out_mlock:
pte_unmap_unlock(pte, ptl);
/*
* We need mmap_sem locking; otherwise the VM_LOCKED check is racy
* and gives an unstable result. We also can't wait here, because
* we now hold anon_vma->rwsem or mapping->i_mmap_mutex.
* If the trylock fails, the page remains on the evictable lru and
* vmscan may later retry moving it to the unevictable lru if the
* page is actually mlocked.
*/
if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
if (vma->vm_flags & VM_LOCKED) {
mlock_vma_page(page);
ret = SWAP_MLOCK;
}
up_read(&vma->vm_mm->mmap_sem);
}
return ret;
}
/*
* objrmap doesn't work for nonlinear VMAs because the assumption that
* offset-into-file correlates with offset-into-virtual-addresses does not hold.
* Consequently, given a particular page and its ->index, we cannot locate the
* ptes which are mapping that page without an exhaustive linear search.
*
* So what this code does is a mini "virtual scan" of each nonlinear VMA which
* maps the file to which the target page belongs. The ->vm_private_data field
* holds the current cursor into that scan. Successive searches will circulate
* around the vma's virtual address space.
*
* So as more replacement pressure is applied to the pages in a nonlinear VMA,
* more scanning pressure is placed against them as well. Eventually pages
* will become fully unmapped and are eligible for eviction.
*
* For very sparsely populated VMAs this is a little inefficient - chances are
* there won't be many ptes located within the scan cluster. In this case
* maybe we could scan further - to the end of the pte page, perhaps.
*
* Mlocked pages: check VM_LOCKED under mmap_sem held for read, if we can
* acquire it without blocking. If the vma is VM_LOCKED, mlock the pages in
* the cluster rather than unmapping them. If we encounter the "check_page"
* that vmscan is trying to unmap, return SWAP_MLOCK, else default to SWAP_AGAIN.
*/
#define CLUSTER_SIZE min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK (~(CLUSTER_SIZE - 1))
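/*
* Worked example: with 4 KiB pages and a 2 MiB PMD_SIZE, CLUSTER_SIZE is
* min(128 KiB, 2 MiB) == 128 KiB (0x20000), so CLUSTER_MASK == ~0x1ffff
* and each try_to_unmap_cluster() call below scans 32 ptes starting at
* (vma->vm_start + cursor) rounded down to a 128 KiB boundary.
*/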
static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
struct vm_area_struct *vma, struct page *check_page)
{
struct mm_struct *mm = vma->vm_mm;
pmd_t *pmd;
pte_t *pte;
pte_t pteval;
spinlock_t *ptl;
struct page *page;
unsigned long address;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
unsigned long end;
int ret = SWAP_AGAIN;
int locked_vma = 0;
address = (vma->vm_start + cursor) & CLUSTER_MASK;
end = address + CLUSTER_SIZE;
if (address < vma->vm_start)
address = vma->vm_start;
if (end > vma->vm_end)
end = vma->vm_end;
pmd = mm_find_pmd(mm, address);
if (!pmd)
return ret;
mmun_start = address;
mmun_end = end;
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
/*
* If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
* keep the sem while scanning the cluster for mlocking pages.
*/
if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
locked_vma = (vma->vm_flags & VM_LOCKED);
if (!locked_vma)
up_read(&vma->vm_mm->mmap_sem); /* don't need it */
}
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
for (; address < end; pte++, address += PAGE_SIZE) {
if (!pte_present(*pte))
continue;
page = vm_normal_page(vma, address, *pte);
BUG_ON(!page || PageAnon(page));
if (locked_vma) {
if (page == check_page) {
/* we know we have check_page locked */
mlock_vma_page(page);
ret = SWAP_MLOCK;
} else if (trylock_page(page)) {
/*
* If we can lock the page, perform mlock.
* Otherwise leave the page alone, it will be
* eventually encountered again later.
*/
mlock_vma_page(page);
unlock_page(page);
}
continue; /* don't unmap */
}
/*
* No need for _notify because we're within an
* mmu_notifier_invalidate_range_{start|end} scope.
*/
if (ptep_clear_flush_young(vma, address, pte))
continue;
/* Nuke the page table entry. */
flush_cache_page(vma, address, pte_pfn(*pte));
pteval = ptep_clear_flush(vma, address, pte);
/* If nonlinear, store the file page offset in the pte. */
if (page->index != linear_page_index(vma, address)) {
pte_t ptfile = pgoff_to_pte(page->index);
if (pte_soft_dirty(pteval))
ptfile = pte_file_mksoft_dirty(ptfile);
set_pte_at(mm, address, pte, ptfile);
}
/* Move the dirty bit to the physical page now the pte is gone. */
if (pte_dirty(pteval))
set_page_dirty(page);
page_remove_rmap(page);
page_cache_release(page);
dec_mm_counter(mm, MM_FILEPAGES);
(*mapcount)--;
}
pte_unmap_unlock(pte - 1, ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
if (locked_vma)
up_read(&vma->vm_mm->mmap_sem);
return ret;
}
static int try_to_unmap_nonlinear(struct page *page,
struct address_space *mapping, void *arg)
{
struct vm_area_struct *vma;
int ret = SWAP_AGAIN;
unsigned long cursor;
unsigned long max_nl_cursor = 0;
unsigned long max_nl_size = 0;
unsigned int mapcount;
list_for_each_entry(vma,
&mapping->i_mmap_nonlinear, shared.nonlinear) {
cursor = (unsigned long) vma->vm_private_data;
if (cursor > max_nl_cursor)
max_nl_cursor = cursor;
cursor = vma->vm_end - vma->vm_start;
if (cursor > max_nl_size)
max_nl_size = cursor;
}
if (max_nl_size == 0) { /* all nonlinears locked or reserved ? */
return SWAP_FAIL;
}
/*
* We don't try to search for this page in the nonlinear vmas,
* and page_referenced wouldn't have found it anyway. Instead
* just walk the nonlinear vmas trying to age and unmap some.
* The mapcount of the page we came in with is irrelevant,
* but even so we use it as a guide to how hard we should try.
*/
mapcount = page_mapcount(page);
if (!mapcount)
return ret;
cond_resched();
max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
if (max_nl_cursor == 0)
max_nl_cursor = CLUSTER_SIZE;
do {
list_for_each_entry(vma,
&mapping->i_mmap_nonlinear, shared.nonlinear) {
cursor = (unsigned long) vma->vm_private_data;
while (cursor < max_nl_cursor &&
cursor < vma->vm_end - vma->vm_start) {
if (try_to_unmap_cluster(cursor, &mapcount,
vma, page) == SWAP_MLOCK)
ret = SWAP_MLOCK;
cursor += CLUSTER_SIZE;
vma->vm_private_data = (void *) cursor;
if ((int)mapcount <= 0)
return ret;
}
vma->vm_private_data = (void *) max_nl_cursor;
}
cond_resched();
max_nl_cursor += CLUSTER_SIZE;
} while (max_nl_cursor <= max_nl_size);
/*
* Don't loop forever (perhaps all the remaining pages are
* in locked vmas). Reset cursor on all unreserved nonlinear
* vmas, now forgetting on which ones it had fallen behind.
*/
list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
vma->vm_private_data = NULL;
return ret;
}
bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
if (!maybe_stack)
return false;
if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
VM_STACK_INCOMPLETE_SETUP)
return true;
return false;
}
static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
return is_vma_temporary_stack(vma);
}
static int page_not_mapped(struct page *page)
{
return !page_mapped(page);
};
/**
* try_to_unmap - try to remove all page table mappings to a page
* @page: the page to get unmapped
* @flags: action and flags
*
* Tries to remove all the page table entries which are mapping this
* page, used in the pageout path. Caller must hold the page lock.
* Return values are:
*
* SWAP_SUCCESS - we succeeded in removing all mappings
* SWAP_AGAIN - we missed a mapping, try again later
* SWAP_FAIL - the page is unswappable
* SWAP_MLOCK - page is mlocked.
*/
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
int ret;
struct rmap_walk_control rwc = {
.rmap_one = try_to_unmap_one,
.arg = (void *)flags,
.done = page_not_mapped,
.file_nonlinear = try_to_unmap_nonlinear,
.anon_lock = page_lock_anon_vma_read,
};
VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
/*
* During exec, a temporary VMA is setup and later moved.
* The VMA is moved under the anon_vma lock but not the
* page tables leading to a race where migration cannot
* find the migration ptes. Rather than increasing the
* locking requirements of exec(), migration skips
* temporary VMAs until after exec() completes.
*/
if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
rwc.invalid_vma = invalid_migration_vma;
ret = rmap_walk(page, &rwc);
if (ret != SWAP_MLOCK && !page_mapped(page))
ret = SWAP_SUCCESS;
return ret;
}
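/*
* Illustrative sketch, not part of this file: the pageout path switches
* on the return value, roughly:
*
*	switch (try_to_unmap(page, TTU_UNMAP)) {
*	case SWAP_FAIL:
*		... activate the page, it is unswappable ...
*	case SWAP_AGAIN:
*		... keep it on the list and retry later ...
*	case SWAP_MLOCK:
*		... cull the page to the unevictable list ...
*	case SWAP_SUCCESS:
*		... proceed with pageout ...
*	}
*/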
/**
* try_to_munlock - try to munlock a page
* @page: the page to be munlocked
*
* Called from munlock code. Checks all of the VMAs mapping the page
* to make sure nobody else has this page mlocked. The page will be
* returned with PG_mlocked cleared if no other vmas have it mlocked.
*
* Return values are:
*
* SWAP_AGAIN - no vma is holding page mlocked, or,
* SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem
* SWAP_FAIL - page cannot be located at present
* SWAP_MLOCK - page is now mlocked.
*/
int try_to_munlock(struct page *page)
{
int ret;
struct rmap_walk_control rwc = {
.rmap_one = try_to_unmap_one,
.arg = (void *)TTU_MUNLOCK,
.done = page_not_mapped,
/*
* We don't bother to try to find the munlocked page in
* nonlinears. It's costly. Instead, later, page reclaim logic
* may call try_to_unmap() and recover PG_mlocked lazily.
*/
.file_nonlinear = NULL,
.anon_lock = page_lock_anon_vma_read,
};
VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
ret = rmap_walk(page, &rwc);
return ret;
}
void __put_anon_vma(struct anon_vma *anon_vma)
{
struct anon_vma *root = anon_vma->root;
anon_vma_free(anon_vma);
if (root != anon_vma && atomic_dec_and_test(&root->refcount))
anon_vma_free(root);
}
static struct anon_vma *rmap_walk_anon_lock(struct page *page,
struct rmap_walk_control *rwc)
{
struct anon_vma *anon_vma;
if (rwc->anon_lock)
return rwc->anon_lock(page);
/*
* Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
* because that depends on page_mapped(); but not all its usages
* are holding mmap_sem. Users without mmap_sem are required to
* take a reference count to prevent the anon_vma from disappearing.
*/
anon_vma = page_anon_vma(page);
if (!anon_vma)
return NULL;
anon_vma_lock_read(anon_vma);
return anon_vma;
}
/*
* rmap_walk_anon - do something to anonymous page using the object-based
* rmap method
* @page: the page to be handled
* @rwc: control variable according to each walk type
*
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the anon_vma struct it points to.
*
* When called from try_to_munlock(), the mmap_sem of the mm containing the vma
* where the page was found will be held for write. So, we won't recheck
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* VM_LOCKED.
*/
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
{
struct anon_vma *anon_vma;
pgoff_t pgoff = page_to_pgoff(page);
struct anon_vma_chain *avc;
int ret = SWAP_AGAIN;
anon_vma = rmap_walk_anon_lock(page, rwc);
if (!anon_vma)
return ret;
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(page, vma);
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
ret = rwc->rmap_one(page, vma, address, rwc->arg);
if (ret != SWAP_AGAIN)
break;
if (rwc->done && rwc->done(page))
break;
}
anon_vma_unlock_read(anon_vma);
return ret;
}
/*
* rmap_walk_file - do something to file page using the object-based rmap method
* @page: the page to be handled
* @rwc: control variable according to each walk type
*
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the address_space struct it points to.
*
* When called from try_to_munlock(), the mmap_sem of the mm containing the vma
* where the page was found will be held for write. So, we won't recheck
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* VM_LOCKED.
*/
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
{
struct address_space *mapping = page->mapping;
pgoff_t pgoff = page_to_pgoff(page);
struct vm_area_struct *vma;
int ret = SWAP_AGAIN;
/*
* The page lock not only makes sure that page->mapping cannot
* suddenly be NULLified by truncation, it makes sure that the
* structure at mapping cannot be freed and reused yet,
* so we can safely take mapping->i_mmap_mutex.
*/
VM_BUG_ON_PAGE(!PageLocked(page), page);
if (!mapping)
return ret;
mutex_lock(&mapping->i_mmap_mutex);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
unsigned long address = vma_address(page, vma);
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
ret = rwc->rmap_one(page, vma, address, rwc->arg);
if (ret != SWAP_AGAIN)
goto done;
if (rwc->done && rwc->done(page))
goto done;
}
if (!rwc->file_nonlinear)
goto done;
if (list_empty(&mapping->i_mmap_nonlinear))
goto done;
ret = rwc->file_nonlinear(page, mapping, rwc->arg);
done:
mutex_unlock(&mapping->i_mmap_mutex);
return ret;
}
int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
if (unlikely(PageKsm(page)))
return rmap_walk_ksm(page, rwc);
else if (PageAnon(page))
return rmap_walk_anon(page, rwc);
else
return rmap_walk_file(page, rwc);
}
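/*
* Illustrative sketch, not part of this file: a new walker only needs to
* fill in an rmap_walk_control; my_rmap_one below is a hypothetical
* callback, everything else is optional:
*
*	static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
*			unsigned long address, void *arg)
*	{
*		... per-mapping work ...
*		return SWAP_AGAIN;	(keep walking)
*	}
*
*	struct rmap_walk_control rwc = {
*		.rmap_one = my_rmap_one,
*		.anon_lock = page_lock_anon_vma_read,
*	};
*	rmap_walk(page, &rwc);
*/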
#ifdef CONFIG_HUGETLB_PAGE
/*
* The following three functions are for anonymous (private mapped) hugepages.
* Unlike common anonymous pages, anonymous hugepages have no accounting code
* and no lru code, because we handle hugepages differently from common pages.
*/
static void __hugepage_set_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address, int exclusive)
{
struct anon_vma *anon_vma = vma->anon_vma;
BUG_ON(!anon_vma);
if (PageAnon(page))
return;
if (!exclusive)
anon_vma = anon_vma->root;
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
page->mapping = (struct address_space *) anon_vma;
page->index = linear_page_index(vma, address);
}
void hugepage_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
struct anon_vma *anon_vma = vma->anon_vma;
int first;
BUG_ON(!PageLocked(page));
BUG_ON(!anon_vma);
/* address might be in next vma when migration races vma_adjust */
first = atomic_inc_and_test(&page->_mapcount);
if (first)
__hugepage_set_anon_rmap(page, vma, address, 0);
}
void hugepage_add_new_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
BUG_ON(address < vma->vm_start || address >= vma->vm_end);
atomic_set(&page->_mapcount, 0);
__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */
| gpl-2.0 |
mi4i-dev/android_kernel_xiaomi_ferrari | arch/metag/mm/init.c | 839 | 10166 | /*
* Copyright (C) 2005,2006,2007,2008,2009,2010 Imagination Technologies
*
*/
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/user_gateway.h>
#include <asm/mmzone.h>
#include <asm/fixmap.h>
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;
unsigned long empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
extern char __user_gateway_start;
extern char __user_gateway_end;
void *gateway_page;
/*
* Insert the gateway page into a set of page tables, creating the
* page tables if necessary.
*/
static void insert_gateway_page(pgd_t *pgd, unsigned long address)
{
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
BUG_ON(!pgd_present(*pgd));
pud = pud_offset(pgd, address);
BUG_ON(!pud_present(*pud));
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd)) {
pte = alloc_bootmem_pages(PAGE_SIZE);
set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
}
pte = pte_offset_kernel(pmd, address);
set_pte(pte, pfn_pte(__pa(gateway_page) >> PAGE_SHIFT, PAGE_READONLY));
}
/* Alloc and map a page in a known location accessible to userspace. */
static void __init user_gateway_init(void)
{
unsigned long address = USER_GATEWAY_PAGE;
int offset = pgd_index(address);
pgd_t *pgd;
gateway_page = alloc_bootmem_pages(PAGE_SIZE);
pgd = swapper_pg_dir + offset;
insert_gateway_page(pgd, address);
#ifdef CONFIG_METAG_META12
/*
* Insert the gateway page into our current page tables even
* though we've already inserted it into our reference page
* table (swapper_pg_dir). This is because with a META1 mmu we
* copy just the user address range and not the gateway page
* entry on context switch, see switch_mmu().
*/
pgd = (pgd_t *)mmu_get_base() + offset;
insert_gateway_page(pgd, address);
#endif /* CONFIG_METAG_META12 */
BUG_ON((&__user_gateway_end - &__user_gateway_start) > PAGE_SIZE);
gateway_page += (address & ~PAGE_MASK);
memcpy(gateway_page, &__user_gateway_start,
&__user_gateway_end - &__user_gateway_start);
/*
* We don't need to flush the TLB here, there should be no mapping
* present at boot for this address and only valid mappings are in
* the TLB (apart from on Meta 1.x, but those cached invalid
* mappings should be impossible to hit here).
*
* We don't flush the code cache here even though we have written
* code through the data cache and they may not be coherent. At
* this point we assume there is no stale data in the code cache
* for this address so there is no need to flush.
*/
}
static void __init allocate_pgdat(unsigned int nid)
{
unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
unsigned long phys;
#endif
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
#ifdef CONFIG_NEED_MULTIPLE_NODES
phys = __memblock_alloc_base(sizeof(struct pglist_data),
SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
/* Retry with all of system memory */
if (!phys)
phys = __memblock_alloc_base(sizeof(struct pglist_data),
SMP_CACHE_BYTES,
memblock_end_of_DRAM());
if (!phys)
panic("Can't allocate pgdat for node %d\n", nid);
NODE_DATA(nid) = __va(phys);
memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
static void __init bootmem_init_one_node(unsigned int nid)
{
unsigned long total_pages, paddr;
unsigned long end_pfn;
struct pglist_data *p;
p = NODE_DATA(nid);
/* Nothing to do.. */
if (!p->node_spanned_pages)
return;
end_pfn = p->node_start_pfn + p->node_spanned_pages;
#ifdef CONFIG_HIGHMEM
if (end_pfn > max_low_pfn)
end_pfn = max_low_pfn;
#endif
total_pages = bootmem_bootmap_pages(end_pfn - p->node_start_pfn);
paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
if (!paddr)
panic("Can't allocate bootmap for nid[%d]\n", nid);
init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
free_bootmem_with_active_regions(nid, end_pfn);
/*
* XXX Handle initial reservations for the system memory node
* only for the moment, we'll refactor this later for handling
* reservations in other nodes.
*/
if (nid == 0) {
struct memblock_region *reg;
/* Reserve the sections we're already using. */
for_each_memblock(reserved, reg) {
unsigned long size = reg->size;
#ifdef CONFIG_HIGHMEM
/* ...but not highmem */
if (PFN_DOWN(reg->base) >= highstart_pfn)
continue;
if (PFN_UP(reg->base + size) > highstart_pfn)
size = (highstart_pfn - PFN_DOWN(reg->base))
<< PAGE_SHIFT;
#endif
reserve_bootmem(reg->base, size, BOOTMEM_DEFAULT);
}
}
sparse_memory_present_with_active_regions(nid);
}
static void __init do_init_bootmem(void)
{
struct memblock_region *reg;
int i;
/* Add active regions with valid PFNs. */
for_each_memblock(memory, reg) {
unsigned long start_pfn, end_pfn;
start_pfn = memblock_region_memory_base_pfn(reg);
end_pfn = memblock_region_memory_end_pfn(reg);
memblock_set_node(PFN_PHYS(start_pfn),
PFN_PHYS(end_pfn - start_pfn), 0);
}
/* All of system RAM sits in node 0 for the non-NUMA case */
allocate_pgdat(0);
node_set_online(0);
soc_mem_setup();
for_each_online_node(i)
bootmem_init_one_node(i);
sparse_init();
}
extern char _heap_start[];
static void __init init_and_reserve_mem(void)
{
unsigned long start_pfn, heap_start;
u64 base = min_low_pfn << PAGE_SHIFT;
u64 size = (max_low_pfn << PAGE_SHIFT) - base;
heap_start = (unsigned long) &_heap_start;
memblock_add(base, size);
/*
* Partially used pages are not usable - thus
* we are rounding upwards:
*/
start_pfn = PFN_UP(__pa(heap_start));
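/*
* Worked example with a hypothetical address: with 4 KiB pages,
* __pa(heap_start) == 0x40123456 gives PFN_UP() == 0x40124, so the
* partially used page at PFN 0x40123 stays reserved below.
*/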
/*
* Reserve the kernel text.
*/
memblock_reserve(base, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - base);
#ifdef CONFIG_HIGHMEM
/*
* Add & reserve highmem, so page structures are initialised.
*/
base = highstart_pfn << PAGE_SHIFT;
size = (highend_pfn << PAGE_SHIFT) - base;
if (size) {
memblock_add(base, size);
memblock_reserve(base, size);
}
#endif
}
#ifdef CONFIG_HIGHMEM
/*
* Ensure we have allocated page tables in swapper_pg_dir for the
* fixed mappings range from 'start' to 'end'.
*/
static void __init allocate_pgtables(unsigned long start, unsigned long end)
{
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
int i, j;
unsigned long vaddr;
vaddr = start;
i = pgd_index(vaddr);
j = pmd_index(vaddr);
pgd = swapper_pg_dir + i;
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
pmd = (pmd_t *)pgd;
for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
vaddr += PMD_SIZE;
if (!pmd_none(*pmd))
continue;
pte = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, pte);
}
j = 0;
}
}
static void __init fixedrange_init(void)
{
unsigned long vaddr, end;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
/*
* Fixed mappings:
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
allocate_pgtables(vaddr, end);
/*
* Permanent kmaps:
*/
vaddr = PKMAP_BASE;
allocate_pgtables(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP);
pgd = swapper_pg_dir + pgd_index(vaddr);
pud = pud_offset(pgd, vaddr);
pmd = pmd_offset(pud, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
pkmap_page_table = pte;
}
#endif /* CONFIG_HIGHMEM */
/*
* paging_init() continues the virtual memory environment setup which
* was begun by the code in arch/metag/kernel/setup.c.
*/
void __init paging_init(unsigned long mem_end)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
int nid;
init_and_reserve_mem();
memblock_allow_resize();
memblock_dump_all();
nodes_clear(node_online_map);
init_new_context(&init_task, &init_mm);
memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
do_init_bootmem();
mmu_init(mem_end);
#ifdef CONFIG_HIGHMEM
fixedrange_init();
kmap_init();
#endif
/* Initialize the zero page to a bootmem page, already zeroed. */
empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
user_gateway_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
unsigned long low, start_pfn;
start_pfn = pgdat->bdata->node_min_pfn;
low = pgdat->bdata->node_low_pfn;
if (max_zone_pfns[ZONE_NORMAL] < low)
max_zone_pfns[ZONE_NORMAL] = low;
#ifdef CONFIG_HIGHMEM
max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif
pr_info("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
nid, start_pfn, low);
}
free_area_init_nodes(max_zone_pfns);
}
void __init mem_init(void)
{
int nid;
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
/*
* Explicitly reset zone->managed_pages because highmem pages are
* freed before calling free_all_bootmem_node();
*/
reset_all_zones_managed_pages();
for (tmp = highstart_pfn; tmp < highend_pfn; tmp++)
free_highmem_page(pfn_to_page(tmp));
num_physpages += totalhigh_pages;
#endif /* CONFIG_HIGHMEM */
for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
num_physpages += pgdat->node_present_pages;
if (pgdat->node_spanned_pages)
free_all_bootmem_node(pgdat);
}
pr_info("Memory: %luk/%luk available\n",
(unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
num_physpages << (PAGE_SHIFT - 10));
show_mem(0);
return;
}
void free_initmem(void)
{
free_initmem_default(POISON_FREE_INITMEM);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
free_reserved_area(start, end, POISON_FREE_INITMEM, "initrd");
}
#endif
#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
pr_err("%s(%llx, %llx)\n",
__func__, start, end);
}
#endif /* CONFIG_OF_FLATTREE */
| gpl-2.0 |
W4TCH0UT/zz_lettuce | drivers/clk/clk-u300.c | 2375 | 21878 | /*
* U300 clock implementation
* Copyright (C) 2007-2012 ST-Ericsson AB
* License terms: GNU General Public License (GPL) version 2
* Author: Linus Walleij <linus.walleij@stericsson.com>
* Author: Jonas Aaberg <jonas.aberg@stericsson.com>
*/
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/spinlock.h>
#include <mach/syscon.h>
/*
* The clocking hierarchy currently looks like this.
* NOTE: the idea is NOT to show how the clocks are routed on the chip!
* The idea is to show dependencies, so a clock higher up in the
* hierarchy has to be on in order for another clock to be on. Now,
* both CPU and DMA can actually be on top of the hierarchy, and that
* is not modeled currently. Instead we have the backbone AMBA bus on
* top. This bus cannot be programmed in any way but conceptually it
* needs to be active for the bridges and devices to transport data.
*
* Please be aware that a few clocks are hw controlled, which mean that
* the hw itself can turn on/off or change the rate of the clock when
* needed!
*
* AMBA bus
* |
* +- CPU
* +- FSMC NANDIF NAND Flash interface
* +- SEMI Shared Memory interface
* +- ISP Image Signal Processor (U335 only)
* +- CDS (U335 only)
* +- DMA Direct Memory Access Controller
* +- AAIF APP/ACC Interface (Mobile Scalable Link, MSL)
* +- APEX
* +- VIDEO_ENC AVE2/3 Video Encoder
* +- XGAM Graphics Accelerator Controller
* +- AHB
* |
* +- ahb:0 AHB Bridge
* | |
* | +- ahb:1 INTCON Interrupt controller
* | +- ahb:3 MSPRO Memory Stick Pro controller
* | +- ahb:4 EMIF External Memory interface
* |
* +- fast:0 FAST bridge
* | |
* | +- fast:1 MMCSD MMC/SD card reader controller
* | +- fast:2 I2S0 PCM I2S channel 0 controller
* | +- fast:3 I2S1 PCM I2S channel 1 controller
* | +- fast:4 I2C0 I2C channel 0 controller
* | +- fast:5 I2C1 I2C channel 1 controller
* | +- fast:6 SPI SPI controller
* | +- fast:7 UART1 Secondary UART (U335 only)
* |
* +- slow:0 SLOW bridge
* |
* +- slow:1 SYSCON (not possible to control)
* +- slow:2 WDOG Watchdog
* +- slow:3 UART0 primary UART
* +- slow:4 TIMER_APP Application timer - used in Linux
* +- slow:5 KEYPAD controller
* +- slow:6 GPIO controller
* +- slow:7 RTC controller
* +- slow:8 BT Bus Tracer (not used currently)
* +- slow:9 EH Event Handler (not used currently)
* +- slow:a TIMER_ACC Access style timer (not used currently)
* +- slow:b PPM (U335 only, what is that?)
*/
/* Global syscon virtual base */
static void __iomem *syscon_vbase;
/**
* struct clk_syscon - U300 syscon clock
* @hw: corresponding clock hardware entry
* @hw_ctrld: whether this clock is hardware controlled (for refcount etc)
* and does not need any magic pokes to be enabled/disabled
* @reset: state holder, whether this block's reset line is asserted or not
* @res_reg: reset line enable/disable flag register
* @res_bit: bit for resetting or taking this consumer out of reset
* @en_reg: clock line enable/disable flag register
* @en_bit: bit for enabling/disabling this consumer clock line
* @clk_val: magic value to poke in the register to enable/disable
* this one clock
*/
struct clk_syscon {
struct clk_hw hw;
bool hw_ctrld;
bool reset;
void __iomem *res_reg;
u8 res_bit;
void __iomem *en_reg;
u8 en_bit;
u16 clk_val;
};
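/*
* Illustrative sketch, not part of this file: registration code is
* expected to fill one of these in before handing &sclk->hw to the clk
* core; the res_bit value below is hypothetical, the clk_val is one of
* the SBCER constants used elsewhere in this file:
*
*	struct clk_syscon *sclk = kzalloc(sizeof(*sclk), GFP_KERNEL);
*
*	sclk->hw_ctrld = false;
*	sclk->reset = true;	(block starts out held in reset)
*	sclk->res_bit = 1;
*	sclk->en_reg = NULL;	(no enable flag: reads back as always-on)
*	sclk->clk_val = U300_SYSCON_SBCER_MMC_CLK_EN;
*/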
#define to_syscon(_hw) container_of(_hw, struct clk_syscon, hw)
static DEFINE_SPINLOCK(syscon_resetreg_lock);
/*
* Reset control functions. We remember if a block has been
* taken out of reset and don't remove the reset assertion again
* and vice versa. Both directions are used here: unpreparing a
* clock puts its block back into reset, see syscon_clk_unprepare().
*/
static void syscon_block_reset_enable(struct clk_syscon *sclk)
{
unsigned long iflags;
u16 val;
/* Not all blocks support resetting */
if (!sclk->res_reg)
return;
spin_lock_irqsave(&syscon_resetreg_lock, iflags);
val = readw(sclk->res_reg);
val |= BIT(sclk->res_bit);
writew(val, sclk->res_reg);
spin_unlock_irqrestore(&syscon_resetreg_lock, iflags);
sclk->reset = true;
}
static void syscon_block_reset_disable(struct clk_syscon *sclk)
{
unsigned long iflags;
u16 val;
/* Not all blocks support resetting */
if (!sclk->res_reg)
return;
spin_lock_irqsave(&syscon_resetreg_lock, iflags);
val = readw(sclk->res_reg);
val &= ~BIT(sclk->res_bit);
writew(val, sclk->res_reg);
spin_unlock_irqrestore(&syscon_resetreg_lock, iflags);
sclk->reset = false;
}
static int syscon_clk_prepare(struct clk_hw *hw)
{
struct clk_syscon *sclk = to_syscon(hw);
/* If the block is in reset, bring it out */
if (sclk->reset)
syscon_block_reset_disable(sclk);
return 0;
}
static void syscon_clk_unprepare(struct clk_hw *hw)
{
struct clk_syscon *sclk = to_syscon(hw);
/* Please don't force the console into reset */
if (sclk->clk_val == U300_SYSCON_SBCER_UART_CLK_EN)
return;
/* When unpreparing, force block into reset */
if (!sclk->reset)
syscon_block_reset_enable(sclk);
}
static int syscon_clk_enable(struct clk_hw *hw)
{
struct clk_syscon *sclk = to_syscon(hw);
/* Don't touch the hardware controlled clocks */
if (sclk->hw_ctrld)
return 0;
/* These cannot be controlled */
if (sclk->clk_val == 0xFFFFU)
return 0;
writew(sclk->clk_val, syscon_vbase + U300_SYSCON_SBCER);
return 0;
}
static void syscon_clk_disable(struct clk_hw *hw)
{
struct clk_syscon *sclk = to_syscon(hw);
/* Don't touch the hardware controlled clocks */
if (sclk->hw_ctrld)
return;
if (sclk->clk_val == 0xFFFFU)
return;
/* Please don't disable the console port */
if (sclk->clk_val == U300_SYSCON_SBCER_UART_CLK_EN)
return;
writew(sclk->clk_val, syscon_vbase + U300_SYSCON_SBCDR);
}
static int syscon_clk_is_enabled(struct clk_hw *hw)
{
struct clk_syscon *sclk = to_syscon(hw);
u16 val;
/* If no enable register defined, it's always-on */
if (!sclk->en_reg)
return 1;
val = readw(sclk->en_reg);
val &= BIT(sclk->en_bit);
return val ? 1 : 0;
}
static u16 syscon_get_perf(void)
{
u16 val;
val = readw(syscon_vbase + U300_SYSCON_CCR);
val &= U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
return val;
}
static unsigned long
syscon_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_syscon *sclk = to_syscon(hw);
u16 perf = syscon_get_perf();
switch (sclk->clk_val) {
case U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN:
case U300_SYSCON_SBCER_I2C0_CLK_EN:
case U300_SYSCON_SBCER_I2C1_CLK_EN:
case U300_SYSCON_SBCER_MMC_CLK_EN:
case U300_SYSCON_SBCER_SPI_CLK_EN:
/* The FAST clocks have one progression */
switch (perf) {
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
return 13000000;
default:
return parent_rate; /* 26 MHz */
}
case U300_SYSCON_SBCER_DMAC_CLK_EN:
case U300_SYSCON_SBCER_NANDIF_CLK_EN:
case U300_SYSCON_SBCER_XGAM_CLK_EN:
/* AMBA interconnect peripherals */
switch (perf) {
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
return 6500000;
case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
return 26000000;
default:
return parent_rate; /* 52 MHz */
}
case U300_SYSCON_SBCER_SEMI_CLK_EN:
case U300_SYSCON_SBCER_EMIF_CLK_EN:
/* EMIF speeds */
switch (perf) {
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
return 13000000;
case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
return 52000000;
default:
return 104000000;
}
case U300_SYSCON_SBCER_CPU_CLK_EN:
/* And the fast CPU clock */
switch (perf) {
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
return 13000000;
case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
return 52000000;
case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
return 104000000;
default:
return parent_rate; /* 208 MHz */
}
default:
/*
* The SLOW clocks and default just inherit the rate of
* their parent (typically PLL13 13 MHz).
*/
return parent_rate;
}
}
static long
syscon_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_syscon *sclk = to_syscon(hw);
if (sclk->clk_val != U300_SYSCON_SBCER_CPU_CLK_EN)
return *prate;
/* We really only support setting the rate of the CPU clock */
if (rate <= 13000000)
return 13000000;
if (rate <= 52000000)
return 52000000;
if (rate <= 104000000)
return 104000000;
return 208000000;
}
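/*
 * Worked example for the rounding above: asking for 60 MHz on the
 * CPU clock falls through the 13 MHz and 52 MHz thresholds and is
 * rounded up to 104 MHz; any other syscon clock simply reports its
 * parent rate.
 */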
static int syscon_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_syscon *sclk = to_syscon(hw);
u16 val;
/* We only support setting the rate of the CPU clock */
if (sclk->clk_val != U300_SYSCON_SBCER_CPU_CLK_EN)
return -EINVAL;
switch (rate) {
case 13000000:
val = U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER;
break;
case 52000000:
val = U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE;
break;
case 104000000:
val = U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH;
break;
case 208000000:
val = U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST;
break;
default:
return -EINVAL;
}
val |= readw(syscon_vbase + U300_SYSCON_CCR) &
~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
writew(val, syscon_vbase + U300_SYSCON_CCR);
return 0;
}
static const struct clk_ops syscon_clk_ops = {
.prepare = syscon_clk_prepare,
.unprepare = syscon_clk_unprepare,
.enable = syscon_clk_enable,
.disable = syscon_clk_disable,
.is_enabled = syscon_clk_is_enabled,
.recalc_rate = syscon_clk_recalc_rate,
.round_rate = syscon_clk_round_rate,
.set_rate = syscon_clk_set_rate,
};
static struct clk * __init
syscon_clk_register(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
bool hw_ctrld,
void __iomem *res_reg, u8 res_bit,
void __iomem *en_reg, u8 en_bit,
u16 clk_val)
{
struct clk *clk;
struct clk_syscon *sclk;
struct clk_init_data init;
sclk = kzalloc(sizeof(struct clk_syscon), GFP_KERNEL);
if (!sclk) {
pr_err("could not allocate syscon clock %s\n",
name);
return ERR_PTR(-ENOMEM);
}
init.name = name;
init.ops = &syscon_clk_ops;
init.flags = flags;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
sclk->hw.init = &init;
sclk->hw_ctrld = hw_ctrld;
/* Assume the block is in reset at registration */
sclk->reset = true;
sclk->res_reg = res_reg;
sclk->res_bit = res_bit;
sclk->en_reg = en_reg;
sclk->en_bit = en_bit;
sclk->clk_val = clk_val;
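/*
 * Note: the common clk core copies what it needs out of the init data
 * during clk_register(), so the stack-allocated struct clk_init_data
 * above does not have to outlive this function.
 */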
clk = clk_register(dev, &sclk->hw);
if (IS_ERR(clk))
kfree(sclk);
return clk;
}
/**
* struct clk_mclk - U300 MCLK clock (MMC/SD clock)
* @hw: corresponding clock hardware entry
* @is_mspro: if this is the memory stick clock rather than MMC/SD
*/
struct clk_mclk {
struct clk_hw hw;
bool is_mspro;
};
#define to_mclk(_hw) container_of(_hw, struct clk_mclk, hw)
static int mclk_clk_prepare(struct clk_hw *hw)
{
struct clk_mclk *mclk = to_mclk(hw);
u16 val;
/* The MMC and MSPRO clocks need some special set-up */
if (!mclk->is_mspro) {
/* Set default MMC clock divisor to 18.9 MHz */
writew(0x0054U, syscon_vbase + U300_SYSCON_MMF0R);
val = readw(syscon_vbase + U300_SYSCON_MMCR);
/* Disable the MMC feedback clock */
val &= ~U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE;
/* Disable MSPRO frequency */
val &= ~U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE;
writew(val, syscon_vbase + U300_SYSCON_MMCR);
} else {
val = readw(syscon_vbase + U300_SYSCON_MMCR);
/* Disable the MMC feedback clock */
val &= ~U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE;
/* Enable MSPRO frequency */
val |= U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE;
writew(val, syscon_vbase + U300_SYSCON_MMCR);
}
return 0;
}
static unsigned long
mclk_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
u16 perf = syscon_get_perf();
switch (perf) {
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
/*
* Here, the 208 MHz PLL gets shut down and the always
* on 13 MHz PLL used for RTC etc kicks into use
* instead.
*/
return 13000000;
case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
case U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST:
{
/*
* This clock is under program control. The register is
* divided in two nybbles, bit 7-4 gives cycles-1 to count
* high, bit 3-0 gives cycles-1 to count low. Distribute
* these with no more than 1 cycle difference between
* low and high and add low and high to get the actual
* divisor. The base PLL is 208 MHz. Writing 0x00 will
* divide by 1 and 1 so the highest frequency possible
* is 104 MHz.
*
* e.g. 0x54 =>
* f = 208 / ((5+1) + (4+1)) = 208 / 11 = 18.9 MHz
*/
u16 val = readw(syscon_vbase + U300_SYSCON_MMF0R) &
U300_SYSCON_MMF0R_MASK;
switch (val) {
case 0x0054:
return 18900000;
case 0x0044:
return 20800000;
case 0x0043:
return 23100000;
case 0x0033:
return 26000000;
case 0x0032:
return 29700000;
case 0x0022:
return 34700000;
case 0x0021:
return 41600000;
case 0x0011:
return 52000000;
case 0x0000:
return 104000000;
default:
break;
}
}
default:
break;
}
return parent_rate;
}
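/*
 * Illustrative sketch only (not called by the driver): the rate table
 * above follows directly from the nybble arithmetic described in the
 * comment. For a masked MMF0R value the divisor is the sum of the two
 * (cycles-1) fields plus one each, applied to the 208 MHz PLL, e.g.
 * 0x54 -> 208000000 / ((5 + 1) + (4 + 1)) = 18909090, which the table
 * rounds to the nominal 18.9 MHz.
 */
static inline unsigned long mclk_rate_from_mmf0r(u16 val)
{
	unsigned int high = (val >> 4) & 0xf;	/* cycles-1 counted high */
	unsigned int low = val & 0xf;		/* cycles-1 counted low */

	return 208000000UL / ((high + 1) + (low + 1));
}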
static long
mclk_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
if (rate <= 18900000)
return 18900000;
if (rate <= 20800000)
return 20800000;
if (rate <= 23100000)
return 23100000;
if (rate <= 26000000)
return 26000000;
if (rate <= 29700000)
return 29700000;
if (rate <= 34700000)
return 34700000;
if (rate <= 41600000)
return 41600000;
/* Highest rate */
return 52000000;
}
static int mclk_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
u16 val;
u16 reg;
switch (rate) {
case 18900000:
val = 0x0054;
break;
case 20800000:
val = 0x0044;
break;
case 23100000:
val = 0x0043;
break;
case 26000000:
val = 0x0033;
break;
case 29700000:
val = 0x0032;
break;
case 34700000:
val = 0x0022;
break;
case 41600000:
val = 0x0021;
break;
case 52000000:
val = 0x0011;
break;
case 104000000:
val = 0x0000;
break;
default:
return -EINVAL;
}
reg = readw(syscon_vbase + U300_SYSCON_MMF0R) &
~U300_SYSCON_MMF0R_MASK;
writew(reg | val, syscon_vbase + U300_SYSCON_MMF0R);
return 0;
}
static const struct clk_ops mclk_ops = {
.prepare = mclk_clk_prepare,
.recalc_rate = mclk_clk_recalc_rate,
.round_rate = mclk_clk_round_rate,
.set_rate = mclk_clk_set_rate,
};
static struct clk * __init
mclk_clk_register(struct device *dev, const char *name,
const char *parent_name, bool is_mspro)
{
struct clk *clk;
struct clk_mclk *mclk;
struct clk_init_data init;
mclk = kzalloc(sizeof(struct clk_mclk), GFP_KERNEL);
if (!mclk) {
pr_err("could not allocate MMC/SD clock %s\n",
name);
return ERR_PTR(-ENOMEM);
}
init.name = name;
init.ops = &mclk_ops;
init.flags = 0;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
mclk->hw.init = &init;
mclk->is_mspro = is_mspro;
clk = clk_register(dev, &mclk->hw);
if (IS_ERR(clk))
kfree(mclk);
return clk;
}
void __init u300_clk_init(void __iomem *base)
{
u16 val;
struct clk *clk;
syscon_vbase = base;
/* Set system to run at PLL208, max performance, a known state. */
val = readw(syscon_vbase + U300_SYSCON_CCR);
val &= ~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
writew(val, syscon_vbase + U300_SYSCON_CCR);
/* Wait for the PLL208 to lock if not locked in yet */
while (!(readw(syscon_vbase + U300_SYSCON_CSR) &
U300_SYSCON_CSR_PLL208_LOCK_IND))
; /* deliberately empty: spin until PLL208 locks */
/* Power management enable */
val = readw(syscon_vbase + U300_SYSCON_PMCR);
val |= U300_SYSCON_PMCR_PWR_MGNT_ENABLE;
writew(val, syscon_vbase + U300_SYSCON_PMCR);
/* These are always available (RTC and PLL13) */
clk = clk_register_fixed_rate(NULL, "app_32_clk", NULL,
CLK_IS_ROOT, 32768);
/* The watchdog sits directly on the 32 kHz clock */
clk_register_clkdev(clk, NULL, "coh901327_wdog");
clk = clk_register_fixed_rate(NULL, "pll13", NULL,
CLK_IS_ROOT, 13000000);
/* These derive from PLL208 */
clk = clk_register_fixed_rate(NULL, "pll208", NULL,
CLK_IS_ROOT, 208000000);
clk = clk_register_fixed_factor(NULL, "app_208_clk", "pll208",
0, 1, 1);
clk = clk_register_fixed_factor(NULL, "app_104_clk", "pll208",
0, 1, 2);
clk = clk_register_fixed_factor(NULL, "app_52_clk", "pll208",
0, 1, 4);
/* The 52 MHz is divided down to 26 MHz */
clk = clk_register_fixed_factor(NULL, "app_26_clk", "app_52_clk",
0, 1, 2);
/* Directly on the AMBA interconnect */
clk = syscon_clk_register(NULL, "cpu_clk", "app_208_clk", 0, true,
syscon_vbase + U300_SYSCON_RRR, 3,
syscon_vbase + U300_SYSCON_CERR, 3,
U300_SYSCON_SBCER_CPU_CLK_EN);
clk = syscon_clk_register(NULL, "dmac_clk", "app_52_clk", 0, true,
syscon_vbase + U300_SYSCON_RRR, 4,
syscon_vbase + U300_SYSCON_CERR, 4,
U300_SYSCON_SBCER_DMAC_CLK_EN);
clk_register_clkdev(clk, NULL, "dma");
clk = syscon_clk_register(NULL, "fsmc_clk", "app_52_clk", 0, false,
syscon_vbase + U300_SYSCON_RRR, 6,
syscon_vbase + U300_SYSCON_CERR, 6,
U300_SYSCON_SBCER_NANDIF_CLK_EN);
clk_register_clkdev(clk, NULL, "fsmc-nand");
clk = syscon_clk_register(NULL, "xgam_clk", "app_52_clk", 0, true,
syscon_vbase + U300_SYSCON_RRR, 8,
syscon_vbase + U300_SYSCON_CERR, 8,
U300_SYSCON_SBCER_XGAM_CLK_EN);
clk_register_clkdev(clk, NULL, "xgam");
clk = syscon_clk_register(NULL, "semi_clk", "app_104_clk", 0, false,
syscon_vbase + U300_SYSCON_RRR, 9,
syscon_vbase + U300_SYSCON_CERR, 9,
U300_SYSCON_SBCER_SEMI_CLK_EN);
clk_register_clkdev(clk, NULL, "semi");
/* AHB bridge clocks */
clk = syscon_clk_register(NULL, "ahb_subsys_clk", "app_52_clk", 0, true,
syscon_vbase + U300_SYSCON_RRR, 10,
syscon_vbase + U300_SYSCON_CERR, 10,
U300_SYSCON_SBCER_AHB_SUBSYS_BRIDGE_CLK_EN);
clk = syscon_clk_register(NULL, "intcon_clk", "ahb_subsys_clk", 0, false,
syscon_vbase + U300_SYSCON_RRR, 12,
syscon_vbase + U300_SYSCON_CERR, 12,
/* Cannot be enabled, just taken out of reset */
0xFFFFU);
clk_register_clkdev(clk, NULL, "intcon");
clk = syscon_clk_register(NULL, "emif_clk", "ahb_subsys_clk", 0, false,
syscon_vbase + U300_SYSCON_RRR, 5,
syscon_vbase + U300_SYSCON_CERR, 5,
U300_SYSCON_SBCER_EMIF_CLK_EN);
clk_register_clkdev(clk, NULL, "pl172");
/* FAST bridge clocks */
clk = syscon_clk_register(NULL, "fast_clk", "app_26_clk", 0, true,
syscon_vbase + U300_SYSCON_RFR, 0,
syscon_vbase + U300_SYSCON_CEFR, 0,
U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN);
clk = syscon_clk_register(NULL, "i2c0_p_clk", "fast_clk", 0, false,
syscon_vbase + U300_SYSCON_RFR, 1,
syscon_vbase + U300_SYSCON_CEFR, 1,
U300_SYSCON_SBCER_I2C0_CLK_EN);
clk_register_clkdev(clk, NULL, "stu300.0");
clk = syscon_clk_register(NULL, "i2c1_p_clk", "fast_clk", 0, false,
syscon_vbase + U300_SYSCON_RFR, 2,
syscon_vbase + U300_SYSCON_CEFR, 2,
U300_SYSCON_SBCER_I2C1_CLK_EN);
clk_register_clkdev(clk, NULL, "stu300.1");
clk = syscon_clk_register(NULL, "mmc_p_clk", "fast_clk", 0, false,
syscon_vbase + U300_SYSCON_RFR, 5,
syscon_vbase + U300_SYSCON_CEFR, 5,
U300_SYSCON_SBCER_MMC_CLK_EN);
clk_register_clkdev(clk, "apb_pclk", "mmci");
clk = syscon_clk_register(NULL, "spi_p_clk", "fast_clk", 0, false,
syscon_vbase + U300_SYSCON_RFR, 6,
syscon_vbase + U300_SYSCON_CEFR, 6,
U300_SYSCON_SBCER_SPI_CLK_EN);
/* The SPI has no external clock for the outward bus, uses the pclk */
clk_register_clkdev(clk, NULL, "pl022");
clk_register_clkdev(clk, "apb_pclk", "pl022");
/* SLOW bridge clocks */
clk = syscon_clk_register(NULL, "slow_clk", "pll13", 0, true,
syscon_vbase + U300_SYSCON_RSR, 0,
syscon_vbase + U300_SYSCON_CESR, 0,
U300_SYSCON_SBCER_SLOW_BRIDGE_CLK_EN);
clk = syscon_clk_register(NULL, "uart0_clk", "slow_clk", 0, false,
syscon_vbase + U300_SYSCON_RSR, 1,
syscon_vbase + U300_SYSCON_CESR, 1,
U300_SYSCON_SBCER_UART_CLK_EN);
/* Same clock is used for APB and outward bus */
clk_register_clkdev(clk, NULL, "uart0");
clk_register_clkdev(clk, "apb_pclk", "uart0");
clk = syscon_clk_register(NULL, "gpio_clk", "slow_clk", 0, false,
syscon_vbase + U300_SYSCON_RSR, 4,
syscon_vbase + U300_SYSCON_CESR, 4,
U300_SYSCON_SBCER_GPIO_CLK_EN);
clk_register_clkdev(clk, NULL, "u300-gpio");
clk = syscon_clk_register(NULL, "keypad_clk", "slow_clk", 0, false,
syscon_vbase + U300_SYSCON_RSR, 5,
syscon_vbase + U300_SYSCON_CESR, 6,
U300_SYSCON_SBCER_KEYPAD_CLK_EN);
clk_register_clkdev(clk, NULL, "coh901461-keypad");
clk = syscon_clk_register(NULL, "rtc_clk", "slow_clk", 0, true,
syscon_vbase + U300_SYSCON_RSR, 6,
/* No clock enable register bit */
NULL, 0, 0xFFFFU);
clk_register_clkdev(clk, NULL, "rtc-coh901331");
clk = syscon_clk_register(NULL, "app_tmr_clk", "slow_clk", 0, false,
syscon_vbase + U300_SYSCON_RSR, 7,
syscon_vbase + U300_SYSCON_CESR, 7,
U300_SYSCON_SBCER_APP_TMR_CLK_EN);
clk_register_clkdev(clk, NULL, "apptimer");
clk = syscon_clk_register(NULL, "acc_tmr_clk", "slow_clk", 0, false,
syscon_vbase + U300_SYSCON_RSR, 8,
syscon_vbase + U300_SYSCON_CESR, 8,
U300_SYSCON_SBCER_ACC_TMR_CLK_EN);
clk_register_clkdev(clk, NULL, "timer");
/* Then this special MMC/SD clock */
clk = mclk_clk_register(NULL, "mmc_clk", "mmc_p_clk", false);
clk_register_clkdev(clk, NULL, "mmci");
}
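/*
 * Hedged consumer-side sketch (illustration only, not part of this
 * driver): with the clkdev entries registered above, a driver bound
 * to a matching device obtains and enables its clock through the
 * generic clk API. The probe function name below is hypothetical;
 * clk_get(), clk_prepare() and clk_enable() are the real calls.
 */
#if 0	/* illustration only, never compiled */
static int example_consumer_probe(struct device *dev)
{
	struct clk *clk = clk_get(dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	ret = clk_prepare(clk);	/* takes the block out of reset, see above */
	if (!ret)
		ret = clk_enable(clk);	/* pokes U300_SYSCON_SBCER */
	return ret;
}
#endif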
| gpl-2.0 |
mauelsha/linux | drivers/hwmon/lm73.c | 2375 | 7805 | /*
* LM73 Sensor driver
* Based on LM75
*
* Copyright (C) 2007, CenoSYS (www.cenosys.com).
* Copyright (C) 2009, Bollore telecom (www.bolloretelecom.eu).
*
* Guillaume Ligneul <guillaume.ligneul@gmail.com>
* Adrien Demarez <adrien.demarez@bolloretelecom.eu>
* Jeremy Laine <jeremy.laine@bolloretelecom.eu>
* Chris Verges <kg4ysn@gmail.com>
*
* This software program is licensed subject to the GNU General Public License
* (GPL), Version 2, June 1991, available at
* http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
/* Addresses scanned */
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c,
0x4d, 0x4e, I2C_CLIENT_END };
/* LM73 registers */
#define LM73_REG_INPUT 0x00
#define LM73_REG_CONF 0x01
#define LM73_REG_MAX 0x02
#define LM73_REG_MIN 0x03
#define LM73_REG_CTRL 0x04
#define LM73_REG_ID 0x07
#define LM73_ID 0x9001 /* 0x0190, byte-swapped */
#define DRVNAME "lm73"
#define LM73_TEMP_MIN (-256000 / 250)
#define LM73_TEMP_MAX (255750 / 250)
#define LM73_CTRL_RES_SHIFT 5
#define LM73_CTRL_RES_MASK (BIT(5) | BIT(6))
#define LM73_CTRL_TO_MASK BIT(7)
#define LM73_CTRL_HI_SHIFT 2
#define LM73_CTRL_LO_SHIFT 1
static const unsigned short lm73_convrates[] = {
14, /* 11-bits (0.25000 C/LSB): RES1 Bit = 0, RES0 Bit = 0 */
28, /* 12-bits (0.12500 C/LSB): RES1 Bit = 0, RES0 Bit = 1 */
56, /* 13-bits (0.06250 C/LSB): RES1 Bit = 1, RES0 Bit = 0 */
112, /* 14-bits (0.03125 C/LSB): RES1 Bit = 1, RES0 Bit = 1 */
};
struct lm73_data {
struct i2c_client *client;
struct mutex lock;
u8 ctrl; /* control register value */
};
/*-----------------------------------------------------------------------*/
static ssize_t set_temp(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm73_data *data = dev_get_drvdata(dev);
long temp;
short value;
s32 err;
int status = kstrtol(buf, 10, &temp);
if (status < 0)
return status;
/* Write value */
value = clamp_val(temp / 250, LM73_TEMP_MIN, LM73_TEMP_MAX) << 5;
err = i2c_smbus_write_word_swapped(data->client, attr->index, value);
return (err < 0) ? err : count;
}
static ssize_t show_temp(struct device *dev, struct device_attribute *da,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm73_data *data = dev_get_drvdata(dev);
int temp;
s32 err = i2c_smbus_read_word_swapped(data->client, attr->index);
if (err < 0)
return err;
/*
 * Use integer division instead of the equivalent right shift to
 * guarantee an arithmetic shift and preserve the sign.
 */
temp = (((s16) err) * 250) / 32;
return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
}
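/*
 * Hedged illustration (not used by the driver): the temperature
 * register is a left-justified signed word where 32 raw counts equal
 * 250 mdegC, so 25 degC (25000 in sysfs) round-trips as
 * 25000 * 32 / 250 = 0x0c80 raw and 0x0c80 * 250 / 32 = 25000 back.
 * The helper names below are hypothetical; they merely restate the
 * conversions used in show_temp() and set_temp().
 */
static inline int lm73_reg_to_mc(s16 reg)
{
	return ((int)reg * 250) / 32;
}

static inline s16 lm73_mc_to_limit_reg(long mc)
{
	/* the limit registers only use the upper 11 bits, hence << 5 */
	return clamp_val(mc / 250, LM73_TEMP_MIN, LM73_TEMP_MAX) << 5;
}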
static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct lm73_data *data = dev_get_drvdata(dev);
unsigned long convrate;
s32 err;
int res = 0;
err = kstrtoul(buf, 10, &convrate);
if (err < 0)
return err;
/*
* Convert the desired conversion rate into register bits.
* res is already initialized, and everything past the second-to-last
* value in the array is treated as belonging to the last value
* in the array.
*/
while (res < (ARRAY_SIZE(lm73_convrates) - 1) &&
convrate > lm73_convrates[res])
res++;
mutex_lock(&data->lock);
data->ctrl &= LM73_CTRL_TO_MASK;
data->ctrl |= res << LM73_CTRL_RES_SHIFT;
err = i2c_smbus_write_byte_data(data->client, LM73_REG_CTRL,
data->ctrl);
mutex_unlock(&data->lock);
if (err < 0)
return err;
return count;
}
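/*
 * Worked example for the mapping above: writing 60 walks the table
 * past 14, 28 and 56 and stops at index 3, i.e. the 112 ms / 14-bit
 * setting; any request larger than 56 maps to the last (highest
 * resolution) entry.
 */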
static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
char *buf)
{
struct lm73_data *data = dev_get_drvdata(dev);
int res;
res = (data->ctrl & LM73_CTRL_RES_MASK) >> LM73_CTRL_RES_SHIFT;
return scnprintf(buf, PAGE_SIZE, "%hu\n", lm73_convrates[res]);
}
static ssize_t show_maxmin_alarm(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm73_data *data = dev_get_drvdata(dev);
s32 ctrl;
mutex_lock(&data->lock);
ctrl = i2c_smbus_read_byte_data(data->client, LM73_REG_CTRL);
if (ctrl < 0)
goto abort;
data->ctrl = ctrl;
mutex_unlock(&data->lock);
return scnprintf(buf, PAGE_SIZE, "%d\n", (ctrl >> attr->index) & 1);
abort:
mutex_unlock(&data->lock);
return ctrl;
}
/*-----------------------------------------------------------------------*/
/* sysfs attributes for hwmon */
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
show_temp, set_temp, LM73_REG_MAX);
static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
show_temp, set_temp, LM73_REG_MIN);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
show_temp, NULL, LM73_REG_INPUT);
static SENSOR_DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO,
show_convrate, set_convrate, 0);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
show_maxmin_alarm, NULL, LM73_CTRL_HI_SHIFT);
static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
show_maxmin_alarm, NULL, LM73_CTRL_LO_SHIFT);
static struct attribute *lm73_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_update_interval.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
NULL
};
ATTRIBUTE_GROUPS(lm73);
/*-----------------------------------------------------------------------*/
/* device probe and removal */
static int
lm73_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm73_data *data;
int ctrl;
data = devm_kzalloc(dev, sizeof(struct lm73_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = client;
mutex_init(&data->lock);
ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
if (ctrl < 0)
return ctrl;
data->ctrl = ctrl;
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, lm73_groups);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
dev_info(dev, "sensor '%s'\n", client->name);
return 0;
}
static const struct i2c_device_id lm73_ids[] = {
{ "lm73", 0 },
{ /* LIST END */ }
};
MODULE_DEVICE_TABLE(i2c, lm73_ids);
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm73_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
int id, ctrl, conf;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
/*
* Do as much detection as possible with byte reads first, as word
* reads can confuse other devices.
*/
ctrl = i2c_smbus_read_byte_data(new_client, LM73_REG_CTRL);
if (ctrl < 0 || (ctrl & 0x10))
return -ENODEV;
conf = i2c_smbus_read_byte_data(new_client, LM73_REG_CONF);
if (conf < 0 || (conf & 0x0c))
return -ENODEV;
id = i2c_smbus_read_byte_data(new_client, LM73_REG_ID);
if (id < 0 || id != (LM73_ID & 0xff))
return -ENODEV;
/* Check device ID */
id = i2c_smbus_read_word_data(new_client, LM73_REG_ID);
if (id < 0 || id != LM73_ID)
return -ENODEV;
strlcpy(info->type, "lm73", I2C_NAME_SIZE);
return 0;
}
static struct i2c_driver lm73_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm73",
},
.probe = lm73_probe,
.id_table = lm73_ids,
.detect = lm73_detect,
.address_list = normal_i2c,
};
module_i2c_driver(lm73_driver);
MODULE_AUTHOR("Guillaume Ligneul <guillaume.ligneul@gmail.com>");
MODULE_DESCRIPTION("LM73 driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
kgp700/exyroid-sgs2-nics | drivers/usb/host/ehci-pmcmsp.c | 2375 | 9237 | /*
* PMC MSP EHCI (Host Controller Driver) for USB.
*
* (C) Copyright 2006-2010 PMC-Sierra Inc
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
*/
/* includes */
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/usb.h>
#include <msp_usb.h>
/* stream disable */
#define USB_CTRL_MODE_STREAM_DISABLE 0x10
/* threshold */
#define USB_CTRL_FIFO_THRESH 0x00300000
/* register offset for usb_mode */
#define USB_EHCI_REG_USB_MODE 0x68
/* register offset for usb fifo */
#define USB_EHCI_REG_USB_FIFO 0x24
/* register offset for usb status */
#define USB_EHCI_REG_USB_STATUS 0x44
/* serial/parallel transceiver */
#define USB_EHCI_REG_BIT_STAT_STS (1<<29)
/* TWI USB0 host device pin */
#define MSP_PIN_USB0_HOST_DEV 49
/* TWI USB1 host device pin */
#define MSP_PIN_USB1_HOST_DEV 50
static void usb_hcd_tdi_set_mode(struct ehci_hcd *ehci)
{
u8 *base;
u8 *statreg;
u8 *fiforeg;
u32 val;
struct ehci_regs *reg_base = ehci->regs;
/* get register base */
base = (u8 *)reg_base + USB_EHCI_REG_USB_MODE;
statreg = (u8 *)reg_base + USB_EHCI_REG_USB_STATUS;
fiforeg = (u8 *)reg_base + USB_EHCI_REG_USB_FIFO;
/* Disable controller mode stream */
val = ehci_readl(ehci, (u32 *)base);
ehci_writel(ehci, (val | USB_CTRL_MODE_STREAM_DISABLE),
(u32 *)base);
/* clear STS to select parallel transceiver interface */
val = ehci_readl(ehci, (u32 *)statreg);
val = val & ~USB_EHCI_REG_BIT_STAT_STS;
ehci_writel(ehci, val, (u32 *)statreg);
/* write to set the proper fifo threshold */
ehci_writel(ehci, USB_CTRL_FIFO_THRESH, (u32 *)fiforeg);
/* set TWI GPIO USB_HOST_DEV pin high */
gpio_direction_output(MSP_PIN_USB0_HOST_DEV, 1);
#ifdef CONFIG_MSP_HAS_DUAL_USB
gpio_direction_output(MSP_PIN_USB1_HOST_DEV, 1);
#endif
}
/* called during probe() after chip reset completes */
static int ehci_msp_setup(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int retval;
ehci->big_endian_mmio = 1;
ehci->big_endian_desc = 1;
ehci->caps = hcd->regs;
ehci->regs = hcd->regs +
HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
dbg_hcs_params(ehci, "reset");
dbg_hcc_params(ehci, "reset");
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
hcd->has_tt = 1;
retval = ehci_halt(ehci);
if (retval)
return retval;
ehci_reset(ehci);
/* data structure init */
retval = ehci_init(hcd);
if (retval)
return retval;
usb_hcd_tdi_set_mode(ehci);
ehci_port_power(ehci, 0);
return retval;
}
/*
 * Configure so an HC device and id are always provided.
 * Always called with process context; sleeping is OK.
 */
static int usb_hcd_msp_map_regs(struct mspusb_device *dev)
{
struct resource *res;
struct platform_device *pdev = &dev->dev;
u32 res_len;
int retval;
/* MAB register space */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res == NULL)
return -ENOMEM;
res_len = res->end - res->start + 1;
if (!request_mem_region(res->start, res_len, "mab regs"))
return -EBUSY;
dev->mab_regs = ioremap_nocache(res->start, res_len);
if (dev->mab_regs == NULL) {
retval = -ENOMEM;
goto err1;
}
/* MSP USB register space */
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
if (res == NULL) {
retval = -ENOMEM;
goto err2;
}
res_len = res->end - res->start + 1;
if (!request_mem_region(res->start, res_len, "usbid regs")) {
retval = -EBUSY;
goto err2;
}
dev->usbid_regs = ioremap_nocache(res->start, res_len);
if (dev->usbid_regs == NULL) {
retval = -ENOMEM;
goto err3;
}
return 0;
err3:
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
res_len = res->end - res->start + 1;
release_mem_region(res->start, res_len);
err2:
iounmap(dev->mab_regs);
err1:
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
res_len = res->end - res->start + 1;
release_mem_region(res->start, res_len);
dev_err(&pdev->dev, "Failed to map non-EHCI regs.\n");
return retval;
}
/**
* usb_hcd_msp_probe - initialize PMC MSP-based HCDs
* Context: !in_interrupt()
*
* Allocates basic resources for this USB host controller, and
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*
*/
int usb_hcd_msp_probe(const struct hc_driver *driver,
struct platform_device *dev)
{
int retval;
struct usb_hcd *hcd;
struct resource *res;
struct ehci_hcd *ehci;
hcd = usb_create_hcd(driver, &dev->dev, "pmcmsp");
if (!hcd)
return -ENOMEM;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
if (res == NULL) {
pr_debug("No IOMEM resource info for %s.\n", dev->name);
retval = -ENOMEM;
goto err1;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = res->end - res->start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, dev->name)) {
retval = -EBUSY;
goto err1;
}
hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
pr_debug("ioremap failed");
retval = -ENOMEM;
goto err2;
}
res = platform_get_resource(dev, IORESOURCE_IRQ, 0);
if (res == NULL) {
dev_err(&dev->dev, "No IRQ resource info for %s.\n", dev->name);
retval = -ENOMEM;
goto err3;
}
/* Map non-EHCI register spaces */
retval = usb_hcd_msp_map_regs(to_mspusb_device(dev));
if (retval != 0)
goto err3;
ehci = hcd_to_ehci(hcd);
ehci->big_endian_mmio = 1;
ehci->big_endian_desc = 1;
retval = usb_add_hcd(hcd, res->start, IRQF_SHARED);
if (retval == 0)
return 0;
usb_remove_hcd(hcd);
err3:
iounmap(hcd->regs);
err2:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
usb_put_hcd(hcd);
return retval;
}
/**
* usb_hcd_msp_remove - shutdown processing for PMC MSP-based HCDs
* @dev: USB Host Controller being removed
* Context: !in_interrupt()
*
* Reverses the effect of usb_hcd_msp_probe(), first invoking
* the HCD's stop() method. It is always called from a thread
* context, normally "rmmod", "apmd", or something similar.
*
* may be called without controller electrically present
* may be called with controller, bus, and devices active
*/
void usb_hcd_msp_remove(struct usb_hcd *hcd, struct platform_device *dev)
{
usb_remove_hcd(hcd);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
}
#ifdef CONFIG_MSP_HAS_DUAL_USB
/*
* Wrapper around the main ehci_irq. Since both USB host controllers are
* sharing the same IRQ, need to first determine whether we're the intended
* recipient of this interrupt.
*/
static irqreturn_t ehci_msp_irq(struct usb_hcd *hcd)
{
u32 int_src;
struct device *dev = hcd->self.controller;
struct platform_device *pdev;
struct mspusb_device *mdev;
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
/* need to reverse-map a couple of containers to get our device */
pdev = to_platform_device(dev);
mdev = to_mspusb_device(pdev);
/* Check to see if this interrupt is for this host controller */
int_src = ehci_readl(ehci, &mdev->mab_regs->int_stat);
if (int_src & (1 << pdev->id))
return ehci_irq(hcd);
/* Not for this device */
return IRQ_NONE;
}
#endif /* DUAL_USB */
static const struct hc_driver ehci_msp_hc_driver = {
.description = hcd_name,
.product_desc = "PMC MSP EHCI",
.hcd_priv_size = sizeof(struct ehci_hcd),
/*
* generic hardware linkage
*/
#ifdef CONFIG_MSP_HAS_DUAL_USB
.irq = ehci_msp_irq,
#else
.irq = ehci_irq,
#endif
.flags = HCD_MEMORY | HCD_USB2,
/*
* basic lifecycle operations
*/
.reset = ehci_msp_setup,
.start = ehci_run,
.shutdown = ehci_shutdown,
.stop = ehci_stop,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
.endpoint_reset = ehci_endpoint_reset,
/*
* scheduling support
*/
.get_frame_number = ehci_get_frame,
/*
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
};
static int ehci_hcd_msp_drv_probe(struct platform_device *pdev)
{
int ret;
pr_debug("In ehci_hcd_msp_drv_probe");
if (usb_disabled())
return -ENODEV;
gpio_request(MSP_PIN_USB0_HOST_DEV, "USB0_HOST_DEV_GPIO");
#ifdef CONFIG_MSP_HAS_DUAL_USB
gpio_request(MSP_PIN_USB1_HOST_DEV, "USB1_HOST_DEV_GPIO");
#endif
ret = usb_hcd_msp_probe(&ehci_msp_hc_driver, pdev);
return ret;
}
static int ehci_hcd_msp_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
usb_hcd_msp_remove(hcd, pdev);
/* free TWI GPIO USB_HOST_DEV pin */
gpio_free(MSP_PIN_USB0_HOST_DEV);
#ifdef CONFIG_MSP_HAS_DUAL_USB
gpio_free(MSP_PIN_USB1_HOST_DEV);
#endif
return 0;
}
MODULE_ALIAS("pmcmsp-ehci");
static struct platform_driver ehci_hcd_msp_driver = {
.probe = ehci_hcd_msp_drv_probe,
.remove = ehci_hcd_msp_drv_remove,
.driver = {
.name = "pmcmsp-ehci",
.owner = THIS_MODULE,
},
};
| gpl-2.0 |
andi34/kernel_samsung_espresso | drivers/mmc/host/sdhci-spear.c | 2887 | 7476 | /*
* drivers/mmc/host/sdhci-spear.c
*
* Support of SDHCI platform devices for the SPEAr SoC family
*
* Copyright (C) 2010 ST Microelectronics
* Viresh Kumar<viresh.kumar@st.com>
*
* Inspired by sdhci-pltfm.c
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdhci-spear.h>
#include <linux/io.h>
#include "sdhci.h"
struct spear_sdhci {
struct clk *clk;
struct sdhci_plat_data *data;
};
/* sdhci ops */
static struct sdhci_ops sdhci_pltfm_ops = {
/* Nothing to do for now. */
};
/* gpio card detection interrupt handler */
static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
unsigned long gpio_irq_type;
int val;
val = gpio_get_value(sdhci->data->card_int_gpio);
/* val == 1 -> card removed, val == 0 -> card inserted */
/* if card removed - set irq for low level, else vice versa */
gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH;
irq_set_irq_type(irq, gpio_irq_type);
if (sdhci->data->card_power_gpio >= 0) {
if (!sdhci->data->power_always_enb) {
/* if card inserted, give power, otherwise remove it */
val = sdhci->data->power_active_high ? !val : val;
gpio_set_value(sdhci->data->card_power_gpio, val);
}
}
/* inform sdhci driver about card insertion/removal */
tasklet_schedule(&host->card_tasklet);
return IRQ_HANDLED;
}
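/*
 * Design note: flipping the trigger level on each interrupt makes a
 * plain level-triggered GPIO behave like a both-edge detector: after
 * an insertion we re-arm for the removal level and vice versa, so no
 * card event is lost on controllers without dual-edge support.
 */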
static int __devinit sdhci_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct resource *iomem;
struct spear_sdhci *sdhci;
int ret;
BUG_ON(pdev == NULL);
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem) {
ret = -ENOMEM;
dev_dbg(&pdev->dev, "memory resource not defined\n");
goto err;
}
if (!request_mem_region(iomem->start, resource_size(iomem),
"spear-sdhci")) {
ret = -EBUSY;
dev_dbg(&pdev->dev, "cannot request region\n");
goto err;
}
sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL);
if (!sdhci) {
ret = -ENOMEM;
dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
goto err_kzalloc;
}
/* clk enable */
sdhci->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(sdhci->clk)) {
ret = PTR_ERR(sdhci->clk);
dev_dbg(&pdev->dev, "Error getting clock\n");
goto err_clk_get;
}
ret = clk_enable(sdhci->clk);
if (ret) {
dev_dbg(&pdev->dev, "Error enabling clock\n");
goto err_clk_enb;
}
/* overwrite platform_data */
sdhci->data = dev_get_platdata(&pdev->dev);
pdev->dev.platform_data = sdhci;
if (pdev->dev.parent)
host = sdhci_alloc_host(pdev->dev.parent, 0);
else
host = sdhci_alloc_host(&pdev->dev, 0);
if (IS_ERR(host)) {
ret = PTR_ERR(host);
dev_dbg(&pdev->dev, "error allocating host\n");
goto err_alloc_host;
}
host->hw_name = "sdhci";
host->ops = &sdhci_pltfm_ops;
host->irq = platform_get_irq(pdev, 0);
host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
host->ioaddr = ioremap(iomem->start, resource_size(iomem));
if (!host->ioaddr) {
ret = -ENOMEM;
dev_dbg(&pdev->dev, "failed to remap registers\n");
goto err_ioremap;
}
ret = sdhci_add_host(host);
if (ret) {
dev_dbg(&pdev->dev, "error adding host\n");
goto err_add_host;
}
platform_set_drvdata(pdev, host);
/*
* It is optional to use GPIOs for sdhci power control and card
* insertion interrupt detection. If sdhci->data is NULL, the native
* sdhci lines are used, otherwise the GPIO lines.
* If GPIO is selected for power control, then power should be disabled
* after card removal and should be enabled when card insertion
* interrupt occurs
*/
if (!sdhci->data)
return 0;
if (sdhci->data->card_power_gpio >= 0) {
int val = 0;
ret = gpio_request(sdhci->data->card_power_gpio, "sdhci");
if (ret < 0) {
dev_dbg(&pdev->dev, "gpio request fail: %d\n",
sdhci->data->card_power_gpio);
goto err_pgpio_request;
}
if (sdhci->data->power_always_enb)
val = sdhci->data->power_active_high;
else
val = !sdhci->data->power_active_high;
ret = gpio_direction_output(sdhci->data->card_power_gpio, val);
if (ret) {
dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
sdhci->data->card_power_gpio);
goto err_pgpio_direction;
}
gpio_set_value(sdhci->data->card_power_gpio, 1);
}
if (sdhci->data->card_int_gpio >= 0) {
ret = gpio_request(sdhci->data->card_int_gpio, "sdhci");
if (ret < 0) {
dev_dbg(&pdev->dev, "gpio request fail: %d\n",
sdhci->data->card_int_gpio);
goto err_igpio_request;
}
ret = gpio_direction_input(sdhci->data->card_int_gpio);
if (ret) {
dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
sdhci->data->card_int_gpio);
goto err_igpio_direction;
}
ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio),
sdhci_gpio_irq, IRQF_TRIGGER_LOW,
mmc_hostname(host->mmc), pdev);
if (ret) {
dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
sdhci->data->card_int_gpio);
goto err_igpio_request_irq;
}
}
return 0;
err_igpio_request_irq:
err_igpio_direction:
if (sdhci->data->card_int_gpio >= 0)
gpio_free(sdhci->data->card_int_gpio);
err_igpio_request:
err_pgpio_direction:
if (sdhci->data->card_power_gpio >= 0)
gpio_free(sdhci->data->card_power_gpio);
err_pgpio_request:
platform_set_drvdata(pdev, NULL);
sdhci_remove_host(host, 1);
err_add_host:
iounmap(host->ioaddr);
err_ioremap:
sdhci_free_host(host);
err_alloc_host:
clk_disable(sdhci->clk);
err_clk_enb:
clk_put(sdhci->clk);
err_clk_get:
kfree(sdhci);
err_kzalloc:
release_mem_region(iomem->start, resource_size(iomem));
err:
dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
return ret;
}
static int __devexit sdhci_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
int dead;
u32 scratch;
if (sdhci->data) {
if (sdhci->data->card_int_gpio >= 0) {
free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
gpio_free(sdhci->data->card_int_gpio);
}
if (sdhci->data->card_power_gpio >= 0)
gpio_free(sdhci->data->card_power_gpio);
}
platform_set_drvdata(pdev, NULL);
dead = 0;
scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
if (scratch == (u32)-1)
dead = 1;
sdhci_remove_host(host, dead);
iounmap(host->ioaddr);
sdhci_free_host(host);
clk_disable(sdhci->clk);
clk_put(sdhci->clk);
kfree(sdhci);
if (iomem)
release_mem_region(iomem->start, resource_size(iomem));
return 0;
}
static struct platform_driver sdhci_driver = {
.driver = {
.name = "sdhci",
.owner = THIS_MODULE,
},
.probe = sdhci_probe,
.remove = __devexit_p(sdhci_remove),
};
static int __init sdhci_init(void)
{
return platform_driver_register(&sdhci_driver);
}
module_init(sdhci_init);
static void __exit sdhci_exit(void)
{
platform_driver_unregister(&sdhci_driver);
}
module_exit(sdhci_exit);
MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
VegaDevTeam/android_kernel_n9009 | drivers/leds/leds-msm-pdm.c | 3399 | 5732 | /* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
/* Early-suspend level */
#define LED_SUSPEND_LEVEL 1
#endif
#define PDM_DUTY_MAXVAL BIT(16)
#define PDM_DUTY_REFVAL BIT(15)
struct pdm_led_data {
struct led_classdev cdev;
void __iomem *perph_base;
int pdm_offset;
#ifdef CONFIG_HAS_EARLYSUSPEND
struct early_suspend early_suspend;
#endif
};
static void msm_led_brightness_set_percent(struct pdm_led_data *led,
int duty_per)
{
u16 duty_val;
duty_val = PDM_DUTY_REFVAL - ((PDM_DUTY_MAXVAL * duty_per) / 100);
if (!duty_per)
duty_val--;
writel_relaxed(duty_val, led->perph_base + led->pdm_offset);
}
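/*
 * Worked example of the arithmetic above (pure constant math): with
 * PDM_DUTY_MAXVAL = 0x10000 and PDM_DUTY_REFVAL = 0x8000, 0 percent
 * yields 0x8000 which the decrement turns into 0x7fff, 50 percent
 * yields 0x0000, and 100 percent wraps in the u16 to 0x8000; the
 * register thus appears to take a signed duty value centred on zero.
 */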
static void msm_led_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct pdm_led_data *led =
container_of(led_cdev, struct pdm_led_data, cdev);
msm_led_brightness_set_percent(led, (value * 100) / LED_FULL);
}
#ifdef CONFIG_PM_SLEEP
static int msm_led_pdm_suspend(struct device *dev)
{
struct pdm_led_data *led = dev_get_drvdata(dev);
msm_led_brightness_set_percent(led, 0);
return 0;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
static void msm_led_pdm_early_suspend(struct early_suspend *h)
{
struct pdm_led_data *led = container_of(h,
struct pdm_led_data, early_suspend);
msm_led_pdm_suspend(led->cdev.dev->parent);
}
#endif
static const struct dev_pm_ops msm_led_pdm_pm_ops = {
#ifndef CONFIG_HAS_EARLYSUSPEND
.suspend = msm_led_pdm_suspend,
#endif
};
#endif
static int __devinit msm_pdm_led_probe(struct platform_device *pdev)
{
const struct led_info *pdata = pdev->dev.platform_data;
struct pdm_led_data *led;
struct resource *res, *ioregion;
u32 tcxo_pdm_ctl;
int rc;
if (!pdata) {
pr_err("platform data is invalid\n");
return -EINVAL;
}
if (pdev->id > 2) {
pr_err("pdm id is invalid\n");
return -EINVAL;
}
led = kzalloc(sizeof(struct pdm_led_data), GFP_KERNEL);
if (!led)
return -ENOMEM;
/* Enable runtime PM ops, start in ACTIVE mode */
rc = pm_runtime_set_active(&pdev->dev);
if (rc < 0)
dev_dbg(&pdev->dev, "unable to set runtime pm state\n");
pm_runtime_enable(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
pr_err("get resource failed\n");
rc = -EINVAL;
goto err_get_res;
}
ioregion = request_mem_region(res->start, resource_size(res),
pdev->name);
if (!ioregion) {
pr_err("request for mem region failed\n");
rc = -ENOMEM;
goto err_get_res;
}
led->perph_base = ioremap(res->start, resource_size(res));
if (!led->perph_base) {
pr_err("ioremap failed\n");
rc = -ENOMEM;
goto err_ioremap;
}
/*
 * Pulse Density Modulation (PDM) ids start at 0 and
 * every PDM register takes 4 bytes.
 */
led->pdm_offset = ((pdev->id) + 1) * 4;
/* program tcxo_pdm_ctl register to enable pdm */
tcxo_pdm_ctl = readl_relaxed(led->perph_base);
tcxo_pdm_ctl |= (1 << pdev->id);
writel_relaxed(tcxo_pdm_ctl, led->perph_base);
/* Start with LED in off state */
msm_led_brightness_set_percent(led, 0);
led->cdev.brightness_set = msm_led_brightness_set;
led->cdev.name = pdata->name ? : "leds-msm-pdm";
rc = led_classdev_register(&pdev->dev, &led->cdev);
if (rc) {
pr_err("led class registration failed\n");
goto err_led_reg;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
led->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN +
LED_SUSPEND_LEVEL;
led->early_suspend.suspend = msm_led_pdm_early_suspend;
register_early_suspend(&led->early_suspend);
#endif
platform_set_drvdata(pdev, led);
return 0;
err_led_reg:
iounmap(led->perph_base);
err_ioremap:
release_mem_region(res->start, resource_size(res));
err_get_res:
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
kfree(led);
return rc;
}
static int __devexit msm_pdm_led_remove(struct platform_device *pdev)
{
struct pdm_led_data *led = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
#ifdef CONFIG_HAS_EARLYSUSPEND
unregister_early_suspend(&led->early_suspend);
#endif
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_disable(&pdev->dev);
led_classdev_unregister(&led->cdev);
msm_led_brightness_set_percent(led, 0);
iounmap(led->perph_base);
release_mem_region(res->start, resource_size(res));
kfree(led);
return 0;
}
static struct platform_driver msm_pdm_led_driver = {
.probe = msm_pdm_led_probe,
.remove = __devexit_p(msm_pdm_led_remove),
.driver = {
.name = "leds-msm-pdm",
.owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
.pm = &msm_led_pdm_pm_ops,
#endif
},
};
static int __init msm_pdm_led_init(void)
{
return platform_driver_register(&msm_pdm_led_driver);
}
module_init(msm_pdm_led_init);
static void __exit msm_pdm_led_exit(void)
{
platform_driver_unregister(&msm_pdm_led_driver);
}
module_exit(msm_pdm_led_exit);
MODULE_DESCRIPTION("MSM PDM LEDs driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:leds-msm-pdm");
| gpl-2.0 |
xInterlopeRx/android_kernel_samsung_lt02ltespr | arch/mips/kernel/unaligned.c | 4679 | 13820 | /*
* Handle unaligned accesses by emulation.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
*
* This file contains exception handler for address error exception with the
* special capability to execute faulting instructions in software. The
* handler does not try to handle the case when the program counter points
* to an address not aligned to a word boundary.
*
* Putting data to unaligned addresses is a bad practice even on Intel where
* only the performance is affected. Much worse is that such code is non-
* portable. Due to several programs that die on MIPS due to alignment
* problems I decided to implement this handler anyway though I originally
* didn't intend to do this at all for user code.
*
* For now I enable fixing of address errors by default to make life easier.
* I however intend to disable this sometime in the future when the alignment
* problems with user programs have been fixed. For programmers this is the
* right way to go.
*
* Fixing address errors is a per process option. The option is inherited
* across fork(2) and execve(2) calls. If you really want to use the
* option in your user programs - I discourage the use of the software
* emulation strongly - use the following code in your userland stuff:
*
* #include <sys/sysmips.h>
*
* ...
* sysmips(MIPS_FIXADE, x);
* ...
*
* The argument x is 0 for disabling software emulation, enabled otherwise.
*
* Below a little program to play around with this feature.
*
* #include <stdio.h>
* #include <sys/sysmips.h>
*
* struct foo {
* unsigned char bar[8];
* };
*
* main(int argc, char *argv[])
* {
* struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
* unsigned int *p = (unsigned int *) (x.bar + 3);
* int i;
*
* if (argc > 1)
* sysmips(MIPS_FIXADE, atoi(argv[1]));
*
* printf("*p = %08lx\n", *p);
*
* *p = 0xdeadface;
*
* for(i = 0; i <= 7; i++)
* printf("%02x ", x.bar[i]);
* printf("\n");
* }
*
* Coprocessor loads are not supported; I think this case is unimportant
* in practice.
*
* TODO: Handle ndc (attempted store to doubleword in uncached memory)
* exception for the R6000.
* A store crossing a page boundary might be executed only partially.
* Undo the partial store in this case.
*/
#include <linux/mm.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cop2.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
#define STR(x) __STR(x)
#define __STR(x) #x
enum {
UNALIGNED_ACTION_QUIET,
UNALIGNED_ACTION_SIGNAL,
UNALIGNED_ACTION_SHOW,
};
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);
static void emulate_load_store_insn(struct pt_regs *regs,
void __user *addr, unsigned int __user *pc)
{
union mips_instruction insn;
unsigned long value;
unsigned int res;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
/*
* This load never faults.
*/
__get_user(insn.word, pc);
switch (insn.i_format.opcode) {
/*
* These are instructions that a compiler doesn't generate. We
* can assume therefore that the code is MIPS-aware and
* really buggy. Emulating these instructions would break the
* semantics anyway.
*/
case ll_op:
case lld_op:
case sc_op:
case scd_op:
/*
* For these instructions the only way to create an address
* error is an attempted access to kernel/supervisor address
* space.
*/
case ldl_op:
case ldr_op:
case lwl_op:
case lwr_op:
case sdl_op:
case sdr_op:
case swl_op:
case swr_op:
case lb_op:
case lbu_op:
case sb_op:
goto sigbus;
/*
* The remaining opcodes are the ones that are really of interest.
*/
case lh_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
__asm__ __volatile__ (".set\tnoat\n"
#ifdef __BIG_ENDIAN
"1:\tlb\t%0, 0(%2)\n"
"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlb\t%0, 1(%2)\n"
"2:\tlbu\t$1, 0(%2)\n\t"
#endif
"sll\t%0, 0x8\n\t"
"or\t%0, $1\n\t"
"li\t%1, 0\n"
"3:\t.set\tat\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lw_op:
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tlwl\t%0, (%2)\n"
"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlwl\t%0, 3(%2)\n"
"2:\tlwr\t%0, (%2)\n\t"
#endif
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lhu_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
__asm__ __volatile__ (
".set\tnoat\n"
#ifdef __BIG_ENDIAN
"1:\tlbu\t%0, 0(%2)\n"
"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlbu\t%0, 1(%2)\n"
"2:\tlbu\t$1, 0(%2)\n\t"
#endif
"sll\t%0, 0x8\n\t"
"or\t%0, $1\n\t"
"li\t%1, 0\n"
"3:\t.set\tat\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lwu_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tlwl\t%0, (%2)\n"
"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlwl\t%0, 3(%2)\n"
"2:\tlwr\t%0, (%2)\n\t"
#endif
"dsll\t%0, %0, 32\n\t"
"dsrl\t%0, %0, 32\n\t"
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case ld_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_READ, addr, 8))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tldl\t%0, (%2)\n"
"2:\tldr\t%0, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tldl\t%0, 7(%2)\n"
"2:\tldr\t%0, (%2)\n\t"
#endif
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case sh_op:
if (!access_ok(VERIFY_WRITE, addr, 2))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
".set\tnoat\n"
"1:\tsb\t%1, 1(%2)\n\t"
"srl\t$1, %1, 0x8\n"
"2:\tsb\t$1, 0(%2)\n\t"
".set\tat\n\t"
#endif
#ifdef __LITTLE_ENDIAN
".set\tnoat\n"
"1:\tsb\t%1, 0(%2)\n\t"
"srl\t$1,%1, 0x8\n"
"2:\tsb\t$1, 1(%2)\n\t"
".set\tat\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
case sw_op:
if (!access_ok(VERIFY_WRITE, addr, 4))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tswl\t%1,(%2)\n"
"2:\tswr\t%1, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tswl\t%1, 3(%2)\n"
"2:\tswr\t%1, (%2)\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
case sd_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_WRITE, addr, 8))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tsdl\t%1,(%2)\n"
"2:\tsdr\t%1, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tsdl\t%1, 7(%2)\n"
"2:\tsdr\t%1, (%2)\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case lwc1_op:
case ldc1_op:
case swc1_op:
case sdc1_op:
/*
* I herewith declare: this does not happen. So send SIGBUS.
*/
goto sigbus;
/*
* COP2 is available to implementor for application specific use.
* It's up to applications to register a notifier chain and do
* whatever they have to do, including possible sending of signals.
*/
case lwc2_op:
cu2_notifier_call_chain(CU2_LWC2_OP, regs);
break;
case ldc2_op:
cu2_notifier_call_chain(CU2_LDC2_OP, regs);
break;
case swc2_op:
cu2_notifier_call_chain(CU2_SWC2_OP, regs);
break;
case sdc2_op:
cu2_notifier_call_chain(CU2_SDC2_OP, regs);
break;
default:
/*
* Pheeee... We encountered a yet unknown instruction or
* cache coherence problem. Die sucker, die ...
*/
goto sigill;
}
#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
#endif
return;
fault:
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
return;
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGSEGV, current);
return;
sigbus:
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGBUS, current);
return;
sigill:
die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
force_sig(SIGILL, current);
}
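/*
 * Minimal sketch (not part of the original handler) of how a
 * coprocessor-2 owner could hook the notifier chain called above. It
 * assumes register_cu2_notifier() and the CU2_*_OP action codes from
 * <asm/cop2.h>; the handler and its name are made up for illustration,
 * which is why the block is compiled out with #if 0.
 */
#if 0
static int example_cu2_handler(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct pt_regs *regs = data;

	switch (action) {
	case CU2_LWC2_OP:
	case CU2_SWC2_OP:
		/* emulate or fix up the access here, then resume */
		compute_return_epc(regs);
		return NOTIFY_STOP;	/* handled, stop the chain */
	default:
		return NOTIFY_OK;	/* not ours, let others look */
	}
}

static struct notifier_block example_cu2_nb = {
	.notifier_call = example_cu2_handler,
};

static int __init example_cu2_init(void)
{
	return register_cu2_notifier(&example_cu2_nb);
}
#endif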
asmlinkage void do_ade(struct pt_regs *regs)
{
unsigned int __user *pc;
mm_segment_t seg;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1, regs, regs->cp0_badvaddr);
/*
* Did we catch a fault trying to load an instruction?
* Or are we running in MIPS16 mode?
*/
if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
goto sigbus;
pc = (unsigned int __user *) exception_epc(regs);
if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
goto sigbus;
if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
goto sigbus;
else if (unaligned_action == UNALIGNED_ACTION_SHOW)
show_registers(regs);
/*
* Do branch emulation only if we didn't forward the exception.
* This is all so very ugly ...
*/
seg = get_fs();
if (!user_mode(regs))
set_fs(KERNEL_DS);
emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
set_fs(seg);
return;
sigbus:
die_if_kernel("Kernel unaligned instruction access", regs);
force_sig(SIGBUS, current);
/*
* XXX On return from the signal handler we should advance the epc
*/
}
#ifdef CONFIG_DEBUG_FS
extern struct dentry *mips_debugfs_dir;
static int __init debugfs_unaligned(void)
{
struct dentry *d;
if (!mips_debugfs_dir)
return -ENODEV;
d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
mips_debugfs_dir, &unaligned_instructions);
if (!d)
return -ENOMEM;
d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
mips_debugfs_dir, &unaligned_action);
if (!d)
return -ENOMEM;
return 0;
}
__initcall(debugfs_unaligned);
#endif
| gpl-2.0 |
N3uTr0nRom/N3uTr0n_kernel | drivers/hwmon/w83793.c | 4935 | 62242 | /*
* w83793.c - Linux kernel driver for hardware monitoring
* Copyright (C) 2006 Winbond Electronics Corp.
* Yuan Mu
* Rudolf Marek <r.marek@assembler.cz>
* Copyright (C) 2009-2010 Sven Anders <anders@anduras.de>, ANDURAS AG.
* Watchdog driver part
* (Based partially on fschmd driver,
* Copyright 2007-2008 by Hans de Goede)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation - version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301 USA.
*/
/*
* Supports following chips:
*
* Chip #vin #fanin #pwm #temp wchipid vendid i2c ISA
* w83793 10 12 8 6 0x7b 0x5ca3 yes no
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-vid.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/watchdog.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/kref.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
/* Default values */
#define WATCHDOG_TIMEOUT 2 /* 2 minute default timeout */
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
I2C_CLIENT_END };
/* Insmod parameters */
static unsigned short force_subclients[4];
module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
"{bus, clientaddr, subclientaddr1, subclientaddr2}");
static bool reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");
static int timeout = WATCHDOG_TIMEOUT; /* default timeout in minutes */
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in minutes. 2<= timeout <=255 (default="
__MODULE_STRING(WATCHDOG_TIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout,
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
/*
* Address 0x00, 0x0d, 0x0e, 0x0f in all three banks are reserved
* as ID, Bank Select registers
*/
#define W83793_REG_BANKSEL 0x00
#define W83793_REG_VENDORID 0x0d
#define W83793_REG_CHIPID 0x0e
#define W83793_REG_DEVICEID 0x0f
#define W83793_REG_CONFIG 0x40
#define W83793_REG_MFC 0x58
#define W83793_REG_FANIN_CTRL 0x5c
#define W83793_REG_FANIN_SEL 0x5d
#define W83793_REG_I2C_ADDR 0x0b
#define W83793_REG_I2C_SUBADDR 0x0c
#define W83793_REG_VID_INA 0x05
#define W83793_REG_VID_INB 0x06
#define W83793_REG_VID_LATCHA 0x07
#define W83793_REG_VID_LATCHB 0x08
#define W83793_REG_VID_CTRL 0x59
#define W83793_REG_WDT_LOCK 0x01
#define W83793_REG_WDT_ENABLE 0x02
#define W83793_REG_WDT_STATUS 0x03
#define W83793_REG_WDT_TIMEOUT 0x04
static u16 W83793_REG_TEMP_MODE[2] = { 0x5e, 0x5f };
#define TEMP_READ 0
#define TEMP_CRIT 1
#define TEMP_CRIT_HYST 2
#define TEMP_WARN 3
#define TEMP_WARN_HYST 4
/*
* only crit and crit_hyst affect real-time alarm status
* current crit crit_hyst warn warn_hyst
*/
static u16 W83793_REG_TEMP[][5] = {
{0x1c, 0x78, 0x79, 0x7a, 0x7b},
{0x1d, 0x7c, 0x7d, 0x7e, 0x7f},
{0x1e, 0x80, 0x81, 0x82, 0x83},
{0x1f, 0x84, 0x85, 0x86, 0x87},
{0x20, 0x88, 0x89, 0x8a, 0x8b},
{0x21, 0x8c, 0x8d, 0x8e, 0x8f},
};
#define W83793_REG_TEMP_LOW_BITS 0x22
#define W83793_REG_BEEP(index) (0x53 + (index))
#define W83793_REG_ALARM(index) (0x4b + (index))
#define W83793_REG_CLR_CHASSIS 0x4a /* SMI MASK4 */
#define W83793_REG_IRQ_CTRL 0x50
#define W83793_REG_OVT_CTRL 0x51
#define W83793_REG_OVT_BEEP 0x52
#define IN_READ 0
#define IN_MAX 1
#define IN_LOW 2
static const u16 W83793_REG_IN[][3] = {
/* Current, High, Low */
{0x10, 0x60, 0x61}, /* Vcore A */
{0x11, 0x62, 0x63}, /* Vcore B */
{0x12, 0x64, 0x65}, /* Vtt */
{0x14, 0x6a, 0x6b}, /* VSEN1 */
{0x15, 0x6c, 0x6d}, /* VSEN2 */
{0x16, 0x6e, 0x6f}, /* +3VSEN */
{0x17, 0x70, 0x71}, /* +12VSEN */
{0x18, 0x72, 0x73}, /* 5VDD */
{0x19, 0x74, 0x75}, /* 5VSB */
{0x1a, 0x76, 0x77}, /* VBAT */
};
/* Low Bits of Vcore A/B Vtt Read/High/Low */
static const u16 W83793_REG_IN_LOW_BITS[] = { 0x1b, 0x68, 0x69 };
static u8 scale_in[] = { 2, 2, 2, 16, 16, 16, 8, 24, 24, 16 };
static u8 scale_in_add[] = { 0, 0, 0, 0, 0, 0, 0, 150, 150, 0 };
#define W83793_REG_FAN(index) (0x23 + 2 * (index)) /* High byte */
#define W83793_REG_FAN_MIN(index) (0x90 + 2 * (index)) /* High byte */
#define W83793_REG_PWM_DEFAULT 0xb2
#define W83793_REG_PWM_ENABLE 0x207
#define W83793_REG_PWM_UPTIME 0xc3 /* Unit in 0.1 second */
#define W83793_REG_PWM_DOWNTIME 0xc4 /* Unit in 0.1 second */
#define W83793_REG_TEMP_CRITICAL 0xc5
#define PWM_DUTY 0
#define PWM_START 1
#define PWM_NONSTOP 2
#define PWM_STOP_TIME 3
#define W83793_REG_PWM(index, nr) (((nr) == 0 ? 0xb3 : \
(nr) == 1 ? 0x220 : 0x218) + (index))
/* bit field, fan1 is bit0, fan2 is bit1 ... */
#define W83793_REG_TEMP_FAN_MAP(index) (0x201 + (index))
#define W83793_REG_TEMP_TOL(index) (0x208 + (index))
#define W83793_REG_TEMP_CRUISE(index) (0x210 + (index))
#define W83793_REG_PWM_STOP_TIME(index) (0x228 + (index))
#define W83793_REG_SF2_TEMP(index, nr) (0x230 + ((index) << 4) + (nr))
#define W83793_REG_SF2_PWM(index, nr) (0x238 + ((index) << 4) + (nr))
static inline unsigned long FAN_FROM_REG(u16 val)
{
if ((val >= 0xfff) || (val == 0))
return 0;
return 1350000UL / val;
}
static inline u16 FAN_TO_REG(long rpm)
{
if (rpm <= 0)
return 0x0fff;
return SENSORS_LIMIT((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
}
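/*
 * Worked example: a register count of 500 (0x1f4) reads back as
 * 1350000 / 500 = 2700 RPM, and FAN_TO_REG(2700) gives
 * (1350000 + 1350) / 2700 = 500 again, so the round trip is exact here.
 */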
static inline unsigned long TIME_FROM_REG(u8 reg)
{
return reg * 100;
}
static inline u8 TIME_TO_REG(unsigned long val)
{
return SENSORS_LIMIT((val + 50) / 100, 0, 0xff);
}
static inline long TEMP_FROM_REG(s8 reg)
{
return reg * 1000;
}
static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
{
return SENSORS_LIMIT((val + (val < 0 ? -500 : 500)) / 1000, min, max);
}
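/*
 * Worked example: TEMP_TO_REG() rounds half away from zero, so 1499
 * (in millidegrees) maps to (1499 + 500) / 1000 = 1 and -1500 maps to
 * (-1500 - 500) / 1000 = -2, with the result clamped to [min, max].
 */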
struct w83793_data {
struct i2c_client *lm75[2];
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
unsigned long last_nonvolatile; /* In jiffies, last time we updated the
* nonvolatile registers
*/
u8 bank;
u8 vrm;
u8 vid[2];
u8 in[10][3]; /* Register value, read/high/low */
u8 in_low_bits[3]; /* Additional resolution for VCore A/B Vtt */
u16 has_fan; /* Only fan1-fan5 have their own pins */
u16 fan[12]; /* Register value combine */
u16 fan_min[12]; /* Register value combine */
s8 temp[6][5]; /* current, crit, crit_hyst,warn, warn_hyst */
u8 temp_low_bits; /* Additional resolution TD1-TD4 */
u8 temp_mode[2]; /* byte 0: Temp D1-D4 mode each has 2 bits
* byte 1: Temp R1,R2 mode, each has 1 bit
*/
u8 temp_critical; /* If reached, all fans will run at full speed */
u8 temp_fan_map[6]; /* Temp controls which pwm fan, bit field */
u8 has_pwm;
u8 has_temp;
u8 has_vid;
u8 pwm_enable; /* Register value, each Temp has 1 bit */
u8 pwm_uptime; /* Register value */
u8 pwm_downtime; /* Register value */
u8 pwm_default; /* Default pwm for all fans, valid at next power-on */
u8 pwm[8][3]; /* Register value */
u8 pwm_stop_time[8];
u8 temp_cruise[6];
u8 alarms[5]; /* realtime status registers */
u8 beeps[5];
u8 beep_enable;
u8 tolerance[3]; /* Temp tolerance(Smart Fan I/II) */
u8 sf2_pwm[6][7]; /* Smart FanII: Fan duty cycle */
u8 sf2_temp[6][7]; /* Smart FanII: Temp level point */
/* watchdog */
struct i2c_client *client;
struct mutex watchdog_lock;
struct list_head list; /* member of the watchdog_data_list */
struct kref kref;
struct miscdevice watchdog_miscdev;
unsigned long watchdog_is_open;
char watchdog_expect_close;
char watchdog_name[10]; /* must be unique to avoid sysfs conflict */
unsigned int watchdog_caused_reboot;
int watchdog_timeout; /* watchdog timeout in minutes */
};
/*
* Somewhat ugly :( global data pointer list with all devices, so that
* we can find our device data when using misc_register; there is no
* other method to get to one's device data from the open file-op or
* from the reboot notifier callback.
*/
static LIST_HEAD(watchdog_data_list);
/* Note this lock not only protects list access, but also data.kref access */
static DEFINE_MUTEX(watchdog_data_mutex);
/*
* Release our data struct when we're detached from the i2c client *and* all
* references to our watchdog device are released
*/
static void w83793_release_resources(struct kref *ref)
{
struct w83793_data *data = container_of(ref, struct w83793_data, kref);
kfree(data);
}
static u8 w83793_read_value(struct i2c_client *client, u16 reg);
static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value);
static int w83793_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int w83793_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int w83793_remove(struct i2c_client *client);
static void w83793_init_client(struct i2c_client *client);
static void w83793_update_nonvolatile(struct device *dev);
static struct w83793_data *w83793_update_device(struct device *dev);
static const struct i2c_device_id w83793_id[] = {
{ "w83793", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83793_id);
static struct i2c_driver w83793_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "w83793",
},
.probe = w83793_probe,
.remove = w83793_remove,
.id_table = w83793_id,
.detect = w83793_detect,
.address_list = normal_i2c,
};
static ssize_t
show_vrm(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83793_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", data->vrm);
}
static ssize_t
show_vid(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83793_data *data = w83793_update_device(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index;
return sprintf(buf, "%d\n", vid_from_reg(data->vid[index], data->vrm));
}
static ssize_t
store_vrm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83793_data *data = dev_get_drvdata(dev);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
data->vrm = val;
return count;
}
#define ALARM_STATUS 0
#define BEEP_ENABLE 1
static ssize_t
show_alarm_beep(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83793_data *data = w83793_update_device(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index >> 3;
int bit = sensor_attr->index & 0x07;
u8 val;
if (nr == ALARM_STATUS) {
val = (data->alarms[index] >> (bit)) & 1;
} else { /* BEEP_ENABLE */
val = (data->beeps[index] >> (bit)) & 1;
}
return sprintf(buf, "%u\n", val);
}
static ssize_t
store_beep(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index >> 3;
int shift = sensor_attr->index & 0x07;
u8 beep_bit = 1 << shift;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
if (val > 1)
return -EINVAL;
mutex_lock(&data->update_lock);
data->beeps[index] = w83793_read_value(client, W83793_REG_BEEP(index));
data->beeps[index] &= ~beep_bit;
data->beeps[index] |= val << shift;
w83793_write_value(client, W83793_REG_BEEP(index), data->beeps[index]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_beep_enable(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83793_data *data = w83793_update_device(dev);
return sprintf(buf, "%u\n", (data->beep_enable >> 1) & 0x01);
}
static ssize_t
store_beep_enable(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
if (val > 1)
return -EINVAL;
mutex_lock(&data->update_lock);
data->beep_enable = w83793_read_value(client, W83793_REG_OVT_BEEP)
& 0xfd;
data->beep_enable |= val << 1;
w83793_write_value(client, W83793_REG_OVT_BEEP, data->beep_enable);
mutex_unlock(&data->update_lock);
return count;
}
/* Write any value to clear chassis alarm */
static ssize_t
store_chassis_clear_legacy(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
u8 val;
dev_warn(dev, "Attribute chassis is deprecated, "
"use intrusion0_alarm instead\n");
mutex_lock(&data->update_lock);
val = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
val |= 0x80;
w83793_write_value(client, W83793_REG_CLR_CHASSIS, val);
mutex_unlock(&data->update_lock);
return count;
}
/* Write 0 to clear chassis alarm */
static ssize_t
store_chassis_clear(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
unsigned long val;
u8 reg;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
if (val)
return -EINVAL;
mutex_lock(&data->update_lock);
reg = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
w83793_write_value(client, W83793_REG_CLR_CHASSIS, reg | 0x80);
data->valid = 0; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return count;
}
#define FAN_INPUT 0
#define FAN_MIN 1
static ssize_t
show_fan(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83793_data *data = w83793_update_device(dev);
u16 val;
if (nr == FAN_INPUT)
val = data->fan[index] & 0x0fff;
else
val = data->fan_min[index] & 0x0fff;
return sprintf(buf, "%lu\n", FAN_FROM_REG(val));
}
static ssize_t
store_fan_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
val = FAN_TO_REG(val);
mutex_lock(&data->update_lock);
data->fan_min[index] = val;
w83793_write_value(client, W83793_REG_FAN_MIN(index),
(val >> 8) & 0xff);
w83793_write_value(client, W83793_REG_FAN_MIN(index) + 1, val & 0xff);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_pwm(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
struct w83793_data *data = w83793_update_device(dev);
u16 val;
int nr = sensor_attr->nr;
int index = sensor_attr->index;
if (nr == PWM_STOP_TIME)
val = TIME_FROM_REG(data->pwm_stop_time[index]);
else
val = (data->pwm[index][nr] & 0x3f) << 2;
return sprintf(buf, "%d\n", val);
}
static ssize_t
store_pwm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
if (nr == PWM_STOP_TIME) {
val = TIME_TO_REG(val);
data->pwm_stop_time[index] = val;
w83793_write_value(client, W83793_REG_PWM_STOP_TIME(index),
val);
} else {
val = SENSORS_LIMIT(val, 0, 0xff) >> 2;
data->pwm[index][nr] =
w83793_read_value(client, W83793_REG_PWM(index, nr)) & 0xc0;
data->pwm[index][nr] |= val;
w83793_write_value(client, W83793_REG_PWM(index, nr),
data->pwm[index][nr]);
}
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_temp(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83793_data *data = w83793_update_device(dev);
long temp = TEMP_FROM_REG(data->temp[index][nr]);
if (nr == TEMP_READ && index < 4) { /* Only TD1-TD4 have low bits */
int low = ((data->temp_low_bits >> (index * 2)) & 0x03) * 250;
temp += temp > 0 ? low : -low;
}
return sprintf(buf, "%ld\n", temp);
}
static ssize_t
store_temp(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
long tmp;
int err;
err = kstrtol(buf, 10, &tmp);
if (err)
return err;
mutex_lock(&data->update_lock);
data->temp[index][nr] = TEMP_TO_REG(tmp, -128, 127);
w83793_write_value(client, W83793_REG_TEMP[index][nr],
data->temp[index][nr]);
mutex_unlock(&data->update_lock);
return count;
}
/*
* TD1-TD4
* each has 4 modes (2 bits):
* 0: Stop monitoring
* 1: Use internal temp sensor (default)
* 2: Reserved
* 3: Use sensor in Intel CPU and get result by PECI
*
* TR1-TR2
* each has 2 modes (1 bit):
* 0: Disable temp sensor monitoring
* 1: Enable temp sensor monitoring
*/
/* 0 disable, 6 PECI */
static u8 TO_TEMP_MODE[] = { 0, 0, 0, 6 };
static ssize_t
show_temp_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83793_data *data = w83793_update_device(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index;
u8 mask = (index < 4) ? 0x03 : 0x01;
u8 shift = (index < 4) ? (2 * index) : (index - 4);
u8 tmp;
index = (index < 4) ? 0 : 1;
tmp = (data->temp_mode[index] >> shift) & mask;
/* for the internal sensor, find out if it is a diode or thermistor */
if (tmp == 1)
tmp = index == 0 ? 3 : 4;
else
tmp = TO_TEMP_MODE[tmp];
return sprintf(buf, "%d\n", tmp);
}
static ssize_t
store_temp_mode(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index;
u8 mask = (index < 4) ? 0x03 : 0x01;
u8 shift = (index < 4) ? (2 * index) : (index - 4);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
/* transform the sysfs interface values into the table above */
if ((val == 6) && (index < 4)) {
val -= 3;
} else if ((val == 3 && index < 4)
|| (val == 4 && index >= 4)) {
/* transform diode or thermistor into internal enable */
val = !!val;
} else {
return -EINVAL;
}
index = (index < 4) ? 0 : 1;
mutex_lock(&data->update_lock);
data->temp_mode[index] =
w83793_read_value(client, W83793_REG_TEMP_MODE[index]);
data->temp_mode[index] &= ~(mask << shift);
data->temp_mode[index] |= val << shift;
w83793_write_value(client, W83793_REG_TEMP_MODE[index],
data->temp_mode[index]);
mutex_unlock(&data->update_lock);
return count;
}
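/*
 * Worked example: writing 6 (PECI) to temp1_type takes the first
 * branch (val becomes 3) and sets TD1's two mode bits to 3; writing
 * 3 (diode, valid for TD1-TD4) collapses to 1, i.e. "internal sensor
 * enabled", matching the mode table above.
 */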
#define SETUP_PWM_DEFAULT 0
#define SETUP_PWM_UPTIME 1 /* Unit in 0.1s */
#define SETUP_PWM_DOWNTIME 2 /* Unit in 0.1s */
#define SETUP_TEMP_CRITICAL 3
static ssize_t
show_sf_setup(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
struct w83793_data *data = w83793_update_device(dev);
u32 val = 0;
if (nr == SETUP_PWM_DEFAULT)
val = (data->pwm_default & 0x3f) << 2;
else if (nr == SETUP_PWM_UPTIME)
val = TIME_FROM_REG(data->pwm_uptime);
else if (nr == SETUP_PWM_DOWNTIME)
val = TIME_FROM_REG(data->pwm_downtime);
else if (nr == SETUP_TEMP_CRITICAL)
val = TEMP_FROM_REG(data->temp_critical & 0x7f);
return sprintf(buf, "%d\n", val);
}
static ssize_t
store_sf_setup(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
if (nr == SETUP_PWM_DEFAULT) {
data->pwm_default =
w83793_read_value(client, W83793_REG_PWM_DEFAULT) & 0xc0;
data->pwm_default |= SENSORS_LIMIT(val, 0, 0xff) >> 2;
w83793_write_value(client, W83793_REG_PWM_DEFAULT,
data->pwm_default);
} else if (nr == SETUP_PWM_UPTIME) {
data->pwm_uptime = TIME_TO_REG(val);
data->pwm_uptime += data->pwm_uptime == 0 ? 1 : 0;
w83793_write_value(client, W83793_REG_PWM_UPTIME,
data->pwm_uptime);
} else if (nr == SETUP_PWM_DOWNTIME) {
data->pwm_downtime = TIME_TO_REG(val);
data->pwm_downtime += data->pwm_downtime == 0 ? 1 : 0;
w83793_write_value(client, W83793_REG_PWM_DOWNTIME,
data->pwm_downtime);
} else { /* SETUP_TEMP_CRITICAL */
data->temp_critical =
w83793_read_value(client, W83793_REG_TEMP_CRITICAL) & 0x80;
data->temp_critical |= TEMP_TO_REG(val, 0, 0x7f);
w83793_write_value(client, W83793_REG_TEMP_CRITICAL,
data->temp_critical);
}
mutex_unlock(&data->update_lock);
return count;
}
/*
* Temp SmartFan control
* TEMP_FAN_MAP
* Temp channel controls which pwm fan, bitfield, bit 0 indicates pwm1...
* It's possible for two or more temp channels to control the same fan;
* the w83793 always picks the most critical request and applies it to
* the related fan.
* It's possible that one fan is not in any mapping of the 6 temp
* channels; this means the fan is in manual mode.
*
* TEMP_PWM_ENABLE
* Each temp channel has its own SmartFan mode, and each temp channel
* controls the fans that are set by TEMP_FAN_MAP
* 0: SmartFanII mode
* 1: Thermal Cruise Mode
*
* TEMP_CRUISE
* Target temperature in Thermal Cruise mode; the w83793 will adjust
* fan speed to keep the temperature of the target device around this
* value.
*
* TEMP_TOLERANCE
* If the temp is higher or lower than the target by more than this
* tolerance, the w83793 will take action to speed up or slow down the
* fan to keep the temperature within the tolerance range.
*/
#define TEMP_FAN_MAP 0
#define TEMP_PWM_ENABLE 1
#define TEMP_CRUISE 2
#define TEMP_TOLERANCE 3
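/*
 * Example sysfs usage (attribute names as created by SENSOR_ATTR_TEMP()
 * below, paths relative to the hwmon device directory):
 *   echo 3     > temp1_auto_channels_pwm   (temp1 drives pwm1 + pwm2)
 *   echo 3     > temp1_pwm_enable          (Thermal Cruise mode)
 *   echo 45000 > thermal_cruise1           (target 45 degrees C)
 *   echo 3000  > tolerance1                (+/- 3 degrees C tolerance)
 */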
static ssize_t
show_sf_ctrl(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83793_data *data = w83793_update_device(dev);
u32 val;
if (nr == TEMP_FAN_MAP) {
val = data->temp_fan_map[index];
} else if (nr == TEMP_PWM_ENABLE) {
/* +2 to transform into 2 and 3 to conform with the sysfs intf */
val = ((data->pwm_enable >> index) & 0x01) + 2;
} else if (nr == TEMP_CRUISE) {
val = TEMP_FROM_REG(data->temp_cruise[index] & 0x7f);
} else { /* TEMP_TOLERANCE */
val = data->tolerance[index >> 1] >> ((index & 0x01) ? 4 : 0);
val = TEMP_FROM_REG(val & 0x0f);
}
return sprintf(buf, "%d\n", val);
}
static ssize_t
store_sf_ctrl(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
if (nr == TEMP_FAN_MAP) {
val = SENSORS_LIMIT(val, 0, 255);
w83793_write_value(client, W83793_REG_TEMP_FAN_MAP(index), val);
data->temp_fan_map[index] = val;
} else if (nr == TEMP_PWM_ENABLE) {
if (val == 2 || val == 3) {
data->pwm_enable =
w83793_read_value(client, W83793_REG_PWM_ENABLE);
if (val - 2)
data->pwm_enable |= 1 << index;
else
data->pwm_enable &= ~(1 << index);
w83793_write_value(client, W83793_REG_PWM_ENABLE,
data->pwm_enable);
} else {
mutex_unlock(&data->update_lock);
return -EINVAL;
}
} else if (nr == TEMP_CRUISE) {
data->temp_cruise[index] =
w83793_read_value(client, W83793_REG_TEMP_CRUISE(index));
data->temp_cruise[index] &= 0x80;
data->temp_cruise[index] |= TEMP_TO_REG(val, 0, 0x7f);
w83793_write_value(client, W83793_REG_TEMP_CRUISE(index),
data->temp_cruise[index]);
} else { /* TEMP_TOLERANCE */
int i = index >> 1;
u8 shift = (index & 0x01) ? 4 : 0;
data->tolerance[i] =
w83793_read_value(client, W83793_REG_TEMP_TOL(i));
data->tolerance[i] &= ~(0x0f << shift);
data->tolerance[i] |= TEMP_TO_REG(val, 0, 0x0f) << shift;
w83793_write_value(client, W83793_REG_TEMP_TOL(i),
data->tolerance[i]);
}
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_sf2_pwm(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83793_data *data = w83793_update_device(dev);
return sprintf(buf, "%d\n", (data->sf2_pwm[index][nr] & 0x3f) << 2);
}
static ssize_t
store_sf2_pwm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
val = SENSORS_LIMIT(val, 0, 0xff) >> 2;
mutex_lock(&data->update_lock);
data->sf2_pwm[index][nr] =
w83793_read_value(client, W83793_REG_SF2_PWM(index, nr)) & 0xc0;
data->sf2_pwm[index][nr] |= val;
w83793_write_value(client, W83793_REG_SF2_PWM(index, nr),
data->sf2_pwm[index][nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_sf2_temp(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83793_data *data = w83793_update_device(dev);
return sprintf(buf, "%ld\n",
TEMP_FROM_REG(data->sf2_temp[index][nr] & 0x7f));
}
static ssize_t
store_sf2_temp(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
val = TEMP_TO_REG(val, 0, 0x7f);
mutex_lock(&data->update_lock);
data->sf2_temp[index][nr] =
w83793_read_value(client, W83793_REG_SF2_TEMP(index, nr)) & 0x80;
data->sf2_temp[index][nr] |= val;
w83793_write_value(client, W83793_REG_SF2_TEMP(index, nr),
data->sf2_temp[index][nr]);
mutex_unlock(&data->update_lock);
return count;
}
/* only Vcore A/B and Vtt have 2 additional bits of precision */
static ssize_t
show_in(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83793_data *data = w83793_update_device(dev);
u16 val = data->in[index][nr];
if (index < 3) {
val <<= 2;
val += (data->in_low_bits[nr] >> (index * 2)) & 0x3;
}
/* voltage inputs 5VDD and 5VSB need a 150mV offset */
val = val * scale_in[index] + scale_in_add[index];
return sprintf(buf, "%d\n", val);
}
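/*
 * Worked example: for in7 (5VDD, scale 24, offset 150) a raw register
 * value of 200 reads as 200 * 24 + 150 = 4950 mV; for in0 (Vcore A)
 * the two extra low bits are appended first, so a 10-bit value of 600
 * reads as 600 * 2 = 1200 mV.
 */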
static ssize_t
store_in(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
val = (val + scale_in[index] / 2) / scale_in[index];
mutex_lock(&data->update_lock);
if (index > 2) {
/* adjust the limit values of 5VDD and 5VSB for the ALARM mechanism */
if (nr == 1 || nr == 2)
val -= scale_in_add[index] / scale_in[index];
val = SENSORS_LIMIT(val, 0, 255);
} else {
val = SENSORS_LIMIT(val, 0, 0x3FF);
data->in_low_bits[nr] =
w83793_read_value(client, W83793_REG_IN_LOW_BITS[nr]);
data->in_low_bits[nr] &= ~(0x03 << (2 * index));
data->in_low_bits[nr] |= (val & 0x03) << (2 * index);
w83793_write_value(client, W83793_REG_IN_LOW_BITS[nr],
data->in_low_bits[nr]);
val >>= 2;
}
data->in[index][nr] = val;
w83793_write_value(client, W83793_REG_IN[index][nr],
data->in[index][nr]);
mutex_unlock(&data->update_lock);
return count;
}
#define NOT_USED -1
#define SENSOR_ATTR_IN(index) \
SENSOR_ATTR_2(in##index##_input, S_IRUGO, show_in, NULL, \
IN_READ, index), \
SENSOR_ATTR_2(in##index##_max, S_IRUGO | S_IWUSR, show_in, \
store_in, IN_MAX, index), \
SENSOR_ATTR_2(in##index##_min, S_IRUGO | S_IWUSR, show_in, \
store_in, IN_LOW, index), \
SENSOR_ATTR_2(in##index##_alarm, S_IRUGO, show_alarm_beep, \
NULL, ALARM_STATUS, index + ((index > 2) ? 1 : 0)), \
SENSOR_ATTR_2(in##index##_beep, S_IWUSR | S_IRUGO, \
show_alarm_beep, store_beep, BEEP_ENABLE, \
index + ((index > 2) ? 1 : 0))
#define SENSOR_ATTR_FAN(index) \
SENSOR_ATTR_2(fan##index##_alarm, S_IRUGO, show_alarm_beep, \
NULL, ALARM_STATUS, index + 17), \
SENSOR_ATTR_2(fan##index##_beep, S_IWUSR | S_IRUGO, \
show_alarm_beep, store_beep, BEEP_ENABLE, index + 17), \
SENSOR_ATTR_2(fan##index##_input, S_IRUGO, show_fan, \
NULL, FAN_INPUT, index - 1), \
SENSOR_ATTR_2(fan##index##_min, S_IWUSR | S_IRUGO, \
show_fan, store_fan_min, FAN_MIN, index - 1)
#define SENSOR_ATTR_PWM(index) \
SENSOR_ATTR_2(pwm##index, S_IWUSR | S_IRUGO, show_pwm, \
store_pwm, PWM_DUTY, index - 1), \
SENSOR_ATTR_2(pwm##index##_nonstop, S_IWUSR | S_IRUGO, \
show_pwm, store_pwm, PWM_NONSTOP, index - 1), \
SENSOR_ATTR_2(pwm##index##_start, S_IWUSR | S_IRUGO, \
show_pwm, store_pwm, PWM_START, index - 1), \
SENSOR_ATTR_2(pwm##index##_stop_time, S_IWUSR | S_IRUGO, \
show_pwm, store_pwm, PWM_STOP_TIME, index - 1)
#define SENSOR_ATTR_TEMP(index) \
SENSOR_ATTR_2(temp##index##_type, S_IRUGO | S_IWUSR, \
show_temp_mode, store_temp_mode, NOT_USED, index - 1), \
SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \
NULL, TEMP_READ, index - 1), \
SENSOR_ATTR_2(temp##index##_max, S_IRUGO | S_IWUSR, show_temp, \
store_temp, TEMP_CRIT, index - 1), \
SENSOR_ATTR_2(temp##index##_max_hyst, S_IRUGO | S_IWUSR, \
show_temp, store_temp, TEMP_CRIT_HYST, index - 1), \
SENSOR_ATTR_2(temp##index##_warn, S_IRUGO | S_IWUSR, show_temp, \
store_temp, TEMP_WARN, index - 1), \
SENSOR_ATTR_2(temp##index##_warn_hyst, S_IRUGO | S_IWUSR, \
show_temp, store_temp, TEMP_WARN_HYST, index - 1), \
SENSOR_ATTR_2(temp##index##_alarm, S_IRUGO, \
show_alarm_beep, NULL, ALARM_STATUS, index + 11), \
SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \
show_alarm_beep, store_beep, BEEP_ENABLE, index + 11), \
SENSOR_ATTR_2(temp##index##_auto_channels_pwm, \
S_IRUGO | S_IWUSR, show_sf_ctrl, store_sf_ctrl, \
TEMP_FAN_MAP, index - 1), \
SENSOR_ATTR_2(temp##index##_pwm_enable, S_IWUSR | S_IRUGO, \
show_sf_ctrl, store_sf_ctrl, TEMP_PWM_ENABLE, \
index - 1), \
SENSOR_ATTR_2(thermal_cruise##index, S_IRUGO | S_IWUSR, \
show_sf_ctrl, store_sf_ctrl, TEMP_CRUISE, index - 1), \
SENSOR_ATTR_2(tolerance##index, S_IRUGO | S_IWUSR, show_sf_ctrl,\
store_sf_ctrl, TEMP_TOLERANCE, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point1_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 0, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point2_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 1, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point3_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 2, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point4_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 3, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point5_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 4, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point6_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 5, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point7_pwm, S_IRUGO | S_IWUSR, \
show_sf2_pwm, store_sf2_pwm, 6, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point1_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 0, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point2_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 1, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point3_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 2, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point4_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 3, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point5_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 4, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point6_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 5, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point7_temp, S_IRUGO | S_IWUSR,\
show_sf2_temp, store_sf2_temp, 6, index - 1)
static struct sensor_device_attribute_2 w83793_sensor_attr_2[] = {
SENSOR_ATTR_IN(0),
SENSOR_ATTR_IN(1),
SENSOR_ATTR_IN(2),
SENSOR_ATTR_IN(3),
SENSOR_ATTR_IN(4),
SENSOR_ATTR_IN(5),
SENSOR_ATTR_IN(6),
SENSOR_ATTR_IN(7),
SENSOR_ATTR_IN(8),
SENSOR_ATTR_IN(9),
SENSOR_ATTR_FAN(1),
SENSOR_ATTR_FAN(2),
SENSOR_ATTR_FAN(3),
SENSOR_ATTR_FAN(4),
SENSOR_ATTR_FAN(5),
SENSOR_ATTR_PWM(1),
SENSOR_ATTR_PWM(2),
SENSOR_ATTR_PWM(3),
};
static struct sensor_device_attribute_2 w83793_temp[] = {
SENSOR_ATTR_TEMP(1),
SENSOR_ATTR_TEMP(2),
SENSOR_ATTR_TEMP(3),
SENSOR_ATTR_TEMP(4),
SENSOR_ATTR_TEMP(5),
SENSOR_ATTR_TEMP(6),
};
/* Fan6-Fan12 */
static struct sensor_device_attribute_2 w83793_left_fan[] = {
SENSOR_ATTR_FAN(6),
SENSOR_ATTR_FAN(7),
SENSOR_ATTR_FAN(8),
SENSOR_ATTR_FAN(9),
SENSOR_ATTR_FAN(10),
SENSOR_ATTR_FAN(11),
SENSOR_ATTR_FAN(12),
};
/* Pwm4-Pwm8 */
static struct sensor_device_attribute_2 w83793_left_pwm[] = {
SENSOR_ATTR_PWM(4),
SENSOR_ATTR_PWM(5),
SENSOR_ATTR_PWM(6),
SENSOR_ATTR_PWM(7),
SENSOR_ATTR_PWM(8),
};
static struct sensor_device_attribute_2 w83793_vid[] = {
SENSOR_ATTR_2(cpu0_vid, S_IRUGO, show_vid, NULL, NOT_USED, 0),
SENSOR_ATTR_2(cpu1_vid, S_IRUGO, show_vid, NULL, NOT_USED, 1),
};
static DEVICE_ATTR(vrm, S_IWUSR | S_IRUGO, show_vrm, store_vrm);
static struct sensor_device_attribute_2 sda_single_files[] = {
SENSOR_ATTR_2(chassis, S_IWUSR | S_IRUGO, show_alarm_beep,
store_chassis_clear_legacy, ALARM_STATUS, 30),
SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep,
store_chassis_clear, ALARM_STATUS, 30),
SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_beep_enable,
store_beep_enable, NOT_USED, NOT_USED),
SENSOR_ATTR_2(pwm_default, S_IWUSR | S_IRUGO, show_sf_setup,
store_sf_setup, SETUP_PWM_DEFAULT, NOT_USED),
SENSOR_ATTR_2(pwm_uptime, S_IWUSR | S_IRUGO, show_sf_setup,
store_sf_setup, SETUP_PWM_UPTIME, NOT_USED),
SENSOR_ATTR_2(pwm_downtime, S_IWUSR | S_IRUGO, show_sf_setup,
store_sf_setup, SETUP_PWM_DOWNTIME, NOT_USED),
SENSOR_ATTR_2(temp_critical, S_IWUSR | S_IRUGO, show_sf_setup,
store_sf_setup, SETUP_TEMP_CRITICAL, NOT_USED),
};
static void w83793_init_client(struct i2c_client *client)
{
if (reset)
w83793_write_value(client, W83793_REG_CONFIG, 0x80);
/* Start monitoring */
w83793_write_value(client, W83793_REG_CONFIG,
w83793_read_value(client, W83793_REG_CONFIG) | 0x01);
}
/*
* Watchdog routines
*/
static int watchdog_set_timeout(struct w83793_data *data, int timeout)
{
int ret, mtimeout;
mtimeout = DIV_ROUND_UP(timeout, 60);
if (mtimeout > 255)
return -EINVAL;
mutex_lock(&data->watchdog_lock);
if (!data->client) {
ret = -ENODEV;
goto leave;
}
data->watchdog_timeout = mtimeout;
/* Set Timeout value (in Minutes) */
w83793_write_value(data->client, W83793_REG_WDT_TIMEOUT,
data->watchdog_timeout);
ret = mtimeout * 60;
leave:
mutex_unlock(&data->watchdog_lock);
return ret;
}
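/*
 * Worked example: the chip counts in whole minutes, so a
 * WDIOC_SETTIMEOUT request of 90 seconds is rounded up to 2 minutes
 * and the effective timeout, 120 seconds, is reported back to user
 * space via the ioctl return path.
 */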
static int watchdog_get_timeout(struct w83793_data *data)
{
int timeout;
mutex_lock(&data->watchdog_lock);
timeout = data->watchdog_timeout * 60;
mutex_unlock(&data->watchdog_lock);
return timeout;
}
static int watchdog_trigger(struct w83793_data *data)
{
int ret = 0;
mutex_lock(&data->watchdog_lock);
if (!data->client) {
ret = -ENODEV;
goto leave;
}
/* Set Timeout value (in Minutes) */
w83793_write_value(data->client, W83793_REG_WDT_TIMEOUT,
data->watchdog_timeout);
leave:
mutex_unlock(&data->watchdog_lock);
return ret;
}
static int watchdog_enable(struct w83793_data *data)
{
int ret = 0;
mutex_lock(&data->watchdog_lock);
if (!data->client) {
ret = -ENODEV;
goto leave;
}
/* Set initial timeout */
w83793_write_value(data->client, W83793_REG_WDT_TIMEOUT,
data->watchdog_timeout);
/* Enable Soft Watchdog */
w83793_write_value(data->client, W83793_REG_WDT_LOCK, 0x55);
leave:
mutex_unlock(&data->watchdog_lock);
return ret;
}
static int watchdog_disable(struct w83793_data *data)
{
int ret = 0;
mutex_lock(&data->watchdog_lock);
if (!data->client) {
ret = -ENODEV;
goto leave;
}
/* Disable Soft Watchdog */
w83793_write_value(data->client, W83793_REG_WDT_LOCK, 0xAA);
leave:
mutex_unlock(&data->watchdog_lock);
return ret;
}
static int watchdog_open(struct inode *inode, struct file *filp)
{
struct w83793_data *pos, *data = NULL;
int watchdog_is_open;
/*
* We get called from drivers/char/misc.c with misc_mtx held, and we
* call misc_register() from w83793_probe() with watchdog_data_mutex
* held; as misc_register() takes the misc_mtx lock, this is a possible
* deadlock, so we use mutex_trylock here.
*/
if (!mutex_trylock(&watchdog_data_mutex))
return -ERESTARTSYS;
list_for_each_entry(pos, &watchdog_data_list, list) {
if (pos->watchdog_miscdev.minor == iminor(inode)) {
data = pos;
break;
}
}
/* Check if the device is already open */
watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
/*
* Increase data reference counter (if not already done).
* Note that data is always found here, so we don't check for NULL
*/
if (!watchdog_is_open)
kref_get(&data->kref);
mutex_unlock(&watchdog_data_mutex);
/* If the device was already open, return an error */
if (watchdog_is_open)
return -EBUSY;
/* Enable Soft Watchdog */
watchdog_enable(data);
/* Store pointer to data into filp's private data */
filp->private_data = data;
return nonseekable_open(inode, filp);
}
static int watchdog_close(struct inode *inode, struct file *filp)
{
struct w83793_data *data = filp->private_data;
if (data->watchdog_expect_close) {
watchdog_disable(data);
data->watchdog_expect_close = 0;
} else {
watchdog_trigger(data);
dev_crit(&data->client->dev,
"unexpected close, not stopping watchdog!\n");
}
clear_bit(0, &data->watchdog_is_open);
/* Decrease data reference counter */
mutex_lock(&watchdog_data_mutex);
kref_put(&data->kref, w83793_release_resources);
mutex_unlock(&watchdog_data_mutex);
return 0;
}
static ssize_t watchdog_write(struct file *filp, const char __user *buf,
size_t count, loff_t *offset)
{
ssize_t ret;
struct w83793_data *data = filp->private_data;
if (count) {
if (!nowayout) {
size_t i;
/* Clear it in case it was set with a previous write */
data->watchdog_expect_close = 0;
for (i = 0; i != count; i++) {
char c;
if (get_user(c, buf + i))
return -EFAULT;
if (c == 'V')
data->watchdog_expect_close = 1;
}
}
ret = watchdog_trigger(data);
if (ret < 0)
return ret;
}
return count;
}
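/*
 * Usage note: with nowayout disabled, writing a string containing the
 * magic character 'V' arms the "expect close" flag, so a subsequent
 * close() stops the watchdog instead of complaining; any other write
 * merely pings (triggers) it.
 */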
static long watchdog_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct watchdog_info ident = {
.options = WDIOF_KEEPALIVEPING |
WDIOF_SETTIMEOUT |
WDIOF_CARDRESET,
.identity = "w83793 watchdog"
};
int val, ret = 0;
struct w83793_data *data = filp->private_data;
switch (cmd) {
case WDIOC_GETSUPPORT:
if (!nowayout)
ident.options |= WDIOF_MAGICCLOSE;
if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
ret = -EFAULT;
break;
case WDIOC_GETSTATUS:
val = data->watchdog_caused_reboot ? WDIOF_CARDRESET : 0;
ret = put_user(val, (int __user *)arg);
break;
case WDIOC_GETBOOTSTATUS:
ret = put_user(0, (int __user *)arg);
break;
case WDIOC_KEEPALIVE:
ret = watchdog_trigger(data);
break;
case WDIOC_GETTIMEOUT:
val = watchdog_get_timeout(data);
ret = put_user(val, (int __user *)arg);
break;
case WDIOC_SETTIMEOUT:
if (get_user(val, (int __user *)arg)) {
ret = -EFAULT;
break;
}
ret = watchdog_set_timeout(data, val);
if (ret > 0)
ret = put_user(ret, (int __user *)arg);
break;
case WDIOC_SETOPTIONS:
if (get_user(val, (int __user *)arg)) {
ret = -EFAULT;
break;
}
if (val & WDIOS_DISABLECARD)
ret = watchdog_disable(data);
else if (val & WDIOS_ENABLECARD)
ret = watchdog_enable(data);
else
ret = -EINVAL;
break;
default:
ret = -ENOTTY;
}
return ret;
}
static const struct file_operations watchdog_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.open = watchdog_open,
.release = watchdog_close,
.write = watchdog_write,
.unlocked_ioctl = watchdog_ioctl,
};
/*
* Notifier for system down
*/
static int watchdog_notify_sys(struct notifier_block *this, unsigned long code,
void *unused)
{
struct w83793_data *data = NULL;
if (code == SYS_DOWN || code == SYS_HALT) {
/* Disable each registered watchdog */
mutex_lock(&watchdog_data_mutex);
list_for_each_entry(data, &watchdog_data_list, list) {
if (data->watchdog_miscdev.minor)
watchdog_disable(data);
}
mutex_unlock(&watchdog_data_mutex);
}
return NOTIFY_DONE;
}
/*
* The WDT needs to learn about soft shutdowns in order to
* turn the timebomb registers off.
*/
static struct notifier_block watchdog_notifier = {
.notifier_call = watchdog_notify_sys,
};
/*
* Init / remove routines
*/
static int w83793_remove(struct i2c_client *client)
{
struct w83793_data *data = i2c_get_clientdata(client);
struct device *dev = &client->dev;
int i, tmp;
/* Unregister the watchdog (if registered) */
if (data->watchdog_miscdev.minor) {
misc_deregister(&data->watchdog_miscdev);
if (data->watchdog_is_open) {
dev_warn(&client->dev,
"i2c client detached with watchdog open! "
"Stopping watchdog.\n");
watchdog_disable(data);
}
mutex_lock(&watchdog_data_mutex);
list_del(&data->list);
mutex_unlock(&watchdog_data_mutex);
/* Tell the watchdog code the client is gone */
mutex_lock(&data->watchdog_lock);
data->client = NULL;
mutex_unlock(&data->watchdog_lock);
}
/* Reset Configuration Register to Disable Watch Dog Registers */
tmp = w83793_read_value(client, W83793_REG_CONFIG);
w83793_write_value(client, W83793_REG_CONFIG, tmp & ~0x04);
unregister_reboot_notifier(&watchdog_notifier);
hwmon_device_unregister(data->hwmon_dev);
for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++)
device_remove_file(dev,
&w83793_sensor_attr_2[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(sda_single_files); i++)
device_remove_file(dev, &sda_single_files[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(w83793_vid); i++)
device_remove_file(dev, &w83793_vid[i].dev_attr);
device_remove_file(dev, &dev_attr_vrm);
for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++)
device_remove_file(dev, &w83793_left_fan[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++)
device_remove_file(dev, &w83793_left_pwm[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
device_remove_file(dev, &w83793_temp[i].dev_attr);
if (data->lm75[0] != NULL)
i2c_unregister_device(data->lm75[0]);
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
/* Decrease data reference counter */
mutex_lock(&watchdog_data_mutex);
kref_put(&data->kref, w83793_release_resources);
mutex_unlock(&watchdog_data_mutex);
return 0;
}
static int
w83793_detect_subclients(struct i2c_client *client)
{
int i, id, err;
int address = client->addr;
u8 tmp;
struct i2c_adapter *adapter = client->adapter;
struct w83793_data *data = i2c_get_clientdata(client);
id = i2c_adapter_id(adapter);
if (force_subclients[0] == id && force_subclients[1] == address) {
for (i = 2; i <= 3; i++) {
if (force_subclients[i] < 0x48
|| force_subclients[i] > 0x4f) {
dev_err(&client->dev,
"invalid subclient "
"address %d; must be 0x48-0x4f\n",
force_subclients[i]);
err = -EINVAL;
goto ERROR_SC_0;
}
}
w83793_write_value(client, W83793_REG_I2C_SUBADDR,
(force_subclients[2] & 0x07) |
((force_subclients[3] & 0x07) << 4));
}
tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
if (!(tmp & 0x08))
data->lm75[0] = i2c_new_dummy(adapter, 0x48 + (tmp & 0x7));
if (!(tmp & 0x80)) {
if ((data->lm75[0] != NULL)
&& ((tmp & 0x7) == ((tmp >> 4) & 0x7))) {
dev_err(&client->dev,
"duplicate addresses 0x%x, "
"use force_subclients\n", data->lm75[0]->addr);
err = -ENODEV;
goto ERROR_SC_1;
}
data->lm75[1] = i2c_new_dummy(adapter,
0x48 + ((tmp >> 4) & 0x7));
}
return 0;
/* Undo inits in case of errors */
ERROR_SC_1:
if (data->lm75[0] != NULL)
i2c_unregister_device(data->lm75[0]);
ERROR_SC_0:
return err;
}
/* Return 0 if detection is successful, -ENODEV otherwise */
static int w83793_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
u8 tmp, bank, chip_id;
struct i2c_adapter *adapter = client->adapter;
unsigned short address = client->addr;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
bank = i2c_smbus_read_byte_data(client, W83793_REG_BANKSEL);
tmp = bank & 0x80 ? 0x5c : 0xa3;
/* Check the Winbond vendor ID; bank bit 7 selects the high or low byte of 0x5ca3 */
if (tmp != i2c_smbus_read_byte_data(client, W83793_REG_VENDORID)) {
pr_debug("w83793: Detection failed at check vendor id\n");
return -ENODEV;
}
/*
* If it is a Winbond chip, the chip's address and W83793_REG_I2C_ADDR
* should match
*/
if ((bank & 0x07) == 0
&& i2c_smbus_read_byte_data(client, W83793_REG_I2C_ADDR) !=
(address << 1)) {
pr_debug("w83793: Detection failed at check i2c addr\n");
return -ENODEV;
}
/* Determine the chip type now */
chip_id = i2c_smbus_read_byte_data(client, W83793_REG_CHIPID);
if (chip_id != 0x7b)
return -ENODEV;
strlcpy(info->type, "w83793", I2C_NAME_SIZE);
return 0;
}
static int w83793_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
struct w83793_data *data;
int i, tmp, val, err;
int files_fan = ARRAY_SIZE(w83793_left_fan) / 7;
int files_pwm = ARRAY_SIZE(w83793_left_pwm) / 5;
int files_temp = ARRAY_SIZE(w83793_temp) / 6;
data = kzalloc(sizeof(struct w83793_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(client, data);
data->bank = i2c_smbus_read_byte_data(client, W83793_REG_BANKSEL);
mutex_init(&data->update_lock);
mutex_init(&data->watchdog_lock);
INIT_LIST_HEAD(&data->list);
kref_init(&data->kref);
/*
* Store client pointer in our data struct for watchdog usage
* (where the client is found through a data ptr instead of the
* other way around)
*/
data->client = client;
err = w83793_detect_subclients(client);
if (err)
goto free_mem;
/* Initialize the chip */
w83793_init_client(client);
/*
* Only fans 1-5 have their own input pins;
* pwm 1-3 have their own pins
*/
data->has_fan = 0x1f;
data->has_pwm = 0x07;
tmp = w83793_read_value(client, W83793_REG_MFC);
val = w83793_read_value(client, W83793_REG_FANIN_CTRL);
/* check the function of pins 49-56 */
if (tmp & 0x80) {
data->has_vid |= 0x2; /* has VIDB */
} else {
data->has_pwm |= 0x18; /* pwm 4,5 */
if (val & 0x01) { /* fan 6 */
data->has_fan |= 0x20;
data->has_pwm |= 0x20;
}
if (val & 0x02) { /* fan 7 */
data->has_fan |= 0x40;
data->has_pwm |= 0x40;
}
if (!(tmp & 0x40) && (val & 0x04)) { /* fan 8 */
data->has_fan |= 0x80;
data->has_pwm |= 0x80;
}
}
/* check the function of pins 37-40 */
if (!(tmp & 0x29))
data->has_vid |= 0x1; /* has VIDA */
if (0x08 == (tmp & 0x0c)) {
if (val & 0x08) /* fan 9 */
data->has_fan |= 0x100;
if (val & 0x10) /* fan 10 */
data->has_fan |= 0x200;
}
if (0x20 == (tmp & 0x30)) {
if (val & 0x20) /* fan 11 */
data->has_fan |= 0x400;
if (val & 0x40) /* fan 12 */
data->has_fan |= 0x800;
}
if ((tmp & 0x01) && (val & 0x04)) { /* fan 8, second location */
data->has_fan |= 0x80;
data->has_pwm |= 0x80;
}
tmp = w83793_read_value(client, W83793_REG_FANIN_SEL);
if ((tmp & 0x01) && (val & 0x08)) { /* fan 9, second location */
data->has_fan |= 0x100;
}
if ((tmp & 0x02) && (val & 0x10)) { /* fan 10, second location */
data->has_fan |= 0x200;
}
if ((tmp & 0x04) && (val & 0x20)) { /* fan 11, second location */
data->has_fan |= 0x400;
}
if ((tmp & 0x08) && (val & 0x40)) { /* fan 12, second location */
data->has_fan |= 0x800;
}
/* check the temp1-6 mode, ignore former AMDSI selected inputs */
tmp = w83793_read_value(client, W83793_REG_TEMP_MODE[0]);
if (tmp & 0x01)
data->has_temp |= 0x01;
if (tmp & 0x04)
data->has_temp |= 0x02;
if (tmp & 0x10)
data->has_temp |= 0x04;
if (tmp & 0x40)
data->has_temp |= 0x08;
tmp = w83793_read_value(client, W83793_REG_TEMP_MODE[1]);
if (tmp & 0x01)
data->has_temp |= 0x10;
if (tmp & 0x02)
data->has_temp |= 0x20;
/* Register sysfs hooks */
for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++) {
err = device_create_file(dev,
&w83793_sensor_attr_2[i].dev_attr);
if (err)
goto exit_remove;
}
for (i = 0; i < ARRAY_SIZE(w83793_vid); i++) {
if (!(data->has_vid & (1 << i)))
continue;
err = device_create_file(dev, &w83793_vid[i].dev_attr);
if (err)
goto exit_remove;
}
if (data->has_vid) {
data->vrm = vid_which_vrm();
err = device_create_file(dev, &dev_attr_vrm);
if (err)
goto exit_remove;
}
for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) {
err = device_create_file(dev, &sda_single_files[i].dev_attr);
if (err)
goto exit_remove;
}
for (i = 0; i < 6; i++) {
int j;
if (!(data->has_temp & (1 << i)))
continue;
for (j = 0; j < files_temp; j++) {
err = device_create_file(dev,
&w83793_temp[(i) * files_temp
+ j].dev_attr);
if (err)
goto exit_remove;
}
}
for (i = 5; i < 12; i++) {
int j;
if (!(data->has_fan & (1 << i)))
continue;
for (j = 0; j < files_fan; j++) {
err = device_create_file(dev,
&w83793_left_fan[(i - 5) * files_fan
+ j].dev_attr);
if (err)
goto exit_remove;
}
}
for (i = 3; i < 8; i++) {
int j;
if (!(data->has_pwm & (1 << i)))
continue;
for (j = 0; j < files_pwm; j++) {
err = device_create_file(dev,
&w83793_left_pwm[(i - 3) * files_pwm
+ j].dev_attr);
if (err)
goto exit_remove;
}
}
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
/* Watchdog initialization */
/* Register boot notifier */
err = register_reboot_notifier(&watchdog_notifier);
if (err != 0) {
dev_err(&client->dev,
"cannot register reboot notifier (err=%d)\n", err);
goto exit_devunreg;
}
/*
* Enable Watchdog registers.
* Set Configuration Register to Enable Watch Dog Registers
* (Bit 2) = XXXX, X1XX.
*/
tmp = w83793_read_value(client, W83793_REG_CONFIG);
w83793_write_value(client, W83793_REG_CONFIG, tmp | 0x04);
/* Set the default watchdog timeout */
data->watchdog_timeout = timeout;
/* Check if the last reboot was caused by the watchdog */
data->watchdog_caused_reboot =
w83793_read_value(data->client, W83793_REG_WDT_STATUS) & 0x01;
/* Disable Soft Watchdog during initialization */
watchdog_disable(data);
/*
* We take the watchdog_data_mutex lock early so that watchdog_open()
* cannot run after misc_register() has completed but before we've
* added our data to the watchdog_data_list (and set the default
* timeout)
*/
mutex_lock(&watchdog_data_mutex);
for (i = 0; i < ARRAY_SIZE(watchdog_minors); i++) {
/* Register our watchdog part */
snprintf(data->watchdog_name, sizeof(data->watchdog_name),
"watchdog%c", (i == 0) ? '\0' : ('0' + i));
data->watchdog_miscdev.name = data->watchdog_name;
data->watchdog_miscdev.fops = &watchdog_fops;
data->watchdog_miscdev.minor = watchdog_minors[i];
err = misc_register(&data->watchdog_miscdev);
if (err == -EBUSY)
continue;
if (err) {
data->watchdog_miscdev.minor = 0;
dev_err(&client->dev,
"Registering watchdog chardev: %d\n", err);
break;
}
list_add(&data->list, &watchdog_data_list);
dev_info(&client->dev,
"Registered watchdog chardev major 10, minor: %d\n",
watchdog_minors[i]);
break;
}
if (i == ARRAY_SIZE(watchdog_minors)) {
data->watchdog_miscdev.minor = 0;
dev_warn(&client->dev, "Couldn't register watchdog chardev "
"(due to no free minor)\n");
}
mutex_unlock(&watchdog_data_mutex);
return 0;
/* Unregister hwmon device */
exit_devunreg:
hwmon_device_unregister(data->hwmon_dev);
/* Unregister sysfs hooks */
exit_remove:
for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++)
device_remove_file(dev, &w83793_sensor_attr_2[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(sda_single_files); i++)
device_remove_file(dev, &sda_single_files[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(w83793_vid); i++)
device_remove_file(dev, &w83793_vid[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++)
device_remove_file(dev, &w83793_left_fan[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(w83793_left_pwm); i++)
device_remove_file(dev, &w83793_left_pwm[i].dev_attr);
for (i = 0; i < ARRAY_SIZE(w83793_temp); i++)
device_remove_file(dev, &w83793_temp[i].dev_attr);
if (data->lm75[0] != NULL)
i2c_unregister_device(data->lm75[0]);
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
free_mem:
kfree(data);
exit:
return err;
}
static void w83793_update_nonvolatile(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
int i, j;
/*
* These are somewhat "stable" registers; updating them on every poll
* takes so much time that it's just not worth it. Update them only at
* a long interval instead.
*/
if (!(time_after(jiffies, data->last_nonvolatile + HZ * 300)
|| !data->valid))
return;
/* update voltage limits */
for (i = 1; i < 3; i++) {
for (j = 0; j < ARRAY_SIZE(data->in); j++) {
data->in[j][i] =
w83793_read_value(client, W83793_REG_IN[j][i]);
}
data->in_low_bits[i] =
w83793_read_value(client, W83793_REG_IN_LOW_BITS[i]);
}
for (i = 0; i < ARRAY_SIZE(data->fan_min); i++) {
/* Update the Fan measured value and limits */
if (!(data->has_fan & (1 << i)))
continue;
data->fan_min[i] =
w83793_read_value(client, W83793_REG_FAN_MIN(i)) << 8;
data->fan_min[i] |=
w83793_read_value(client, W83793_REG_FAN_MIN(i) + 1);
}
for (i = 0; i < ARRAY_SIZE(data->temp_fan_map); i++) {
if (!(data->has_temp & (1 << i)))
continue;
data->temp_fan_map[i] =
w83793_read_value(client, W83793_REG_TEMP_FAN_MAP(i));
for (j = 1; j < 5; j++) {
data->temp[i][j] =
w83793_read_value(client, W83793_REG_TEMP[i][j]);
}
data->temp_cruise[i] =
w83793_read_value(client, W83793_REG_TEMP_CRUISE(i));
for (j = 0; j < 7; j++) {
data->sf2_pwm[i][j] =
w83793_read_value(client, W83793_REG_SF2_PWM(i, j));
data->sf2_temp[i][j] =
w83793_read_value(client,
W83793_REG_SF2_TEMP(i, j));
}
}
for (i = 0; i < ARRAY_SIZE(data->temp_mode); i++)
data->temp_mode[i] =
w83793_read_value(client, W83793_REG_TEMP_MODE[i]);
for (i = 0; i < ARRAY_SIZE(data->tolerance); i++) {
data->tolerance[i] =
w83793_read_value(client, W83793_REG_TEMP_TOL(i));
}
for (i = 0; i < ARRAY_SIZE(data->pwm); i++) {
if (!(data->has_pwm & (1 << i)))
continue;
data->pwm[i][PWM_NONSTOP] =
w83793_read_value(client, W83793_REG_PWM(i, PWM_NONSTOP));
data->pwm[i][PWM_START] =
w83793_read_value(client, W83793_REG_PWM(i, PWM_START));
data->pwm_stop_time[i] =
w83793_read_value(client, W83793_REG_PWM_STOP_TIME(i));
}
data->pwm_default = w83793_read_value(client, W83793_REG_PWM_DEFAULT);
data->pwm_enable = w83793_read_value(client, W83793_REG_PWM_ENABLE);
data->pwm_uptime = w83793_read_value(client, W83793_REG_PWM_UPTIME);
data->pwm_downtime = w83793_read_value(client, W83793_REG_PWM_DOWNTIME);
data->temp_critical =
w83793_read_value(client, W83793_REG_TEMP_CRITICAL);
data->beep_enable = w83793_read_value(client, W83793_REG_OVT_BEEP);
for (i = 0; i < ARRAY_SIZE(data->beeps); i++)
data->beeps[i] = w83793_read_value(client, W83793_REG_BEEP(i));
data->last_nonvolatile = jiffies;
}
static struct w83793_data *w83793_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83793_data *data = i2c_get_clientdata(client);
int i;
mutex_lock(&data->update_lock);
if (!(time_after(jiffies, data->last_updated + HZ * 2)
|| !data->valid))
goto END;
/* Update the voltages measured value and limits */
for (i = 0; i < ARRAY_SIZE(data->in); i++)
data->in[i][IN_READ] =
w83793_read_value(client, W83793_REG_IN[i][IN_READ]);
data->in_low_bits[IN_READ] =
w83793_read_value(client, W83793_REG_IN_LOW_BITS[IN_READ]);
for (i = 0; i < ARRAY_SIZE(data->fan); i++) {
if (!(data->has_fan & (1 << i)))
continue;
data->fan[i] =
w83793_read_value(client, W83793_REG_FAN(i)) << 8;
data->fan[i] |=
w83793_read_value(client, W83793_REG_FAN(i) + 1);
}
for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
if (!(data->has_temp & (1 << i)))
continue;
data->temp[i][TEMP_READ] =
w83793_read_value(client, W83793_REG_TEMP[i][TEMP_READ]);
}
data->temp_low_bits =
w83793_read_value(client, W83793_REG_TEMP_LOW_BITS);
for (i = 0; i < ARRAY_SIZE(data->pwm); i++) {
if (data->has_pwm & (1 << i))
data->pwm[i][PWM_DUTY] =
w83793_read_value(client,
W83793_REG_PWM(i, PWM_DUTY));
}
for (i = 0; i < ARRAY_SIZE(data->alarms); i++)
data->alarms[i] =
w83793_read_value(client, W83793_REG_ALARM(i));
if (data->has_vid & 0x01)
data->vid[0] = w83793_read_value(client, W83793_REG_VID_INA);
if (data->has_vid & 0x02)
data->vid[1] = w83793_read_value(client, W83793_REG_VID_INB);
w83793_update_nonvolatile(dev);
data->last_updated = jiffies;
data->valid = 1;
END:
mutex_unlock(&data->update_lock);
return data;
}
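/*
* The HZ * 2 test above rate-limits the volatile reads to once every
* two seconds, while !data->valid forces a full refresh on the very
* first call. The non-volatile limit registers are refreshed far less
* often, inside w83793_update_nonvolatile() with its HZ * 300 (five
* minute) window.
*/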
/*
* Ignore the possibility that somebody changes the bank outside the driver
* Must be called with data->update_lock held, except during initialization
*/
static u8 w83793_read_value(struct i2c_client *client, u16 reg)
{
struct w83793_data *data = i2c_get_clientdata(client);
u8 res = 0xff;
u8 new_bank = reg >> 8;
new_bank |= data->bank & 0xfc;
if (data->bank != new_bank) {
if (i2c_smbus_write_byte_data
(client, W83793_REG_BANKSEL, new_bank) >= 0)
data->bank = new_bank;
else {
dev_err(&client->dev,
"set bank to %d failed, fall back "
"to bank %d, read reg 0x%x error\n",
new_bank, data->bank, reg);
res = 0x0; /* read 0x0 from the chip */
goto END;
}
}
res = i2c_smbus_read_byte_data(client, reg & 0xff);
END:
return res;
}
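/*
* A worked example of the bank arithmetic above (hypothetical register
* address 0x1a3):
*   new_bank = 0x1a3 >> 8 = 0x01   - the high byte selects the bank
*   new_bank |= data->bank & 0xfc  - the non-bank control bits survive
*   reg & 0xff = 0xa3              - the low byte is the in-bank offset
* The bank-select write is only issued when the cached bank differs,
* which saves one SMBus transaction on most accesses.
*/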
/* Must be called with data->update_lock held, except during initialization */
static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value)
{
struct w83793_data *data = i2c_get_clientdata(client);
int res;
u8 new_bank = reg >> 8;
new_bank |= data->bank & 0xfc;
if (data->bank != new_bank) {
res = i2c_smbus_write_byte_data(client, W83793_REG_BANKSEL,
new_bank);
if (res < 0) {
dev_err(&client->dev,
"set bank to %d failed, fall back "
"to bank %d, write reg 0x%x error\n",
new_bank, data->bank, reg);
goto END;
}
data->bank = new_bank;
}
res = i2c_smbus_write_byte_data(client, reg & 0xff, value);
END:
return res;
}
module_i2c_driver(w83793_driver);
MODULE_AUTHOR("Yuan Mu, Sven Anders");
MODULE_DESCRIPTION("w83793 driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ench0/android_kernel_samsung_hltet | drivers/block/ub.c | 4935 | 62716 | /*
* The low performance USB storage driver (ub).
*
* Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
* Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
*
* This work is a part of Linux kernel, is derived from it,
* and is not licensed separately. See file COPYING for details.
*
* TODO (sorted by decreasing priority)
* -- Return sense now that rq allows it (we always auto-sense anyway).
* -- set readonly flag for CDs, set removable flag for CF readers
* -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
* -- verify the 13 conditions and do bulk resets
* -- highmem
* -- move top_sense and work_bcs into separate allocations (if they survive)
* for cache purists and esoteric architectures.
* -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
* -- prune comments, they are too voluminous
* -- Resolve XXX's
* -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <scsi/scsi.h>
#define DRV_NAME "ub"
#define UB_MAJOR 180
/*
* The command state machine is the key model for understanding of this driver.
*
* The general rule is that all transitions are done towards the bottom
* of the diagram, thus preventing any loops.
*
* An exception to that is how the STAT state is handled. A counter allows it
* to be re-entered along the path marked with [C].
*
* +--------+
* ! INIT !
* +--------+
* !
* ub_scsi_cmd_start fails ->--------------------------------------\
* ! !
* V !
* +--------+ !
* ! CMD ! !
* +--------+ !
* ! +--------+ !
* was -EPIPE -->-------------------------------->! CLEAR ! !
* ! +--------+ !
* ! ! !
* was error -->------------------------------------- ! --------->\
* ! ! !
* /--<-- cmd->dir == NONE ? ! !
* ! ! ! !
* ! V ! !
* ! +--------+ ! !
* ! ! DATA ! ! !
* ! +--------+ ! !
* ! ! +---------+ ! !
* ! was -EPIPE -->--------------->! CLR2STS ! ! !
* ! ! +---------+ ! !
* ! ! ! ! !
* ! ! was error -->---- ! --------->\
* ! was error -->--------------------- ! ------------- ! --------->\
* ! ! ! ! !
* ! V ! ! !
* \--->+--------+ ! ! !
* ! STAT !<--------------------------/ ! !
* /--->+--------+ ! !
* ! ! ! !
* [C] was -EPIPE -->-----------\ ! !
* ! ! ! ! !
* +<---- len == 0 ! ! !
* ! ! ! ! !
* ! was error -->--------------------------------------!---------->\
* ! ! ! ! !
* +<---- bad CSW ! ! !
* +<---- bad tag ! ! !
* ! ! V ! !
* ! ! +--------+ ! !
* ! ! ! CLRRS ! ! !
* ! ! +--------+ ! !
* ! ! ! ! !
* \------- ! --------------------[C]--------\ ! !
* ! ! ! !
* cmd->error---\ +--------+ ! !
* ! +--------------->! SENSE !<----------/ !
* STAT_FAIL----/ +--------+ !
* ! ! V
* ! V +--------+
* \--------------------------------\--------------------->! DONE !
* +--------+
*/
/*
* This many LUNs per USB device.
* Every one of them takes a host, see UB_MAX_HOSTS.
*/
#define UB_MAX_LUNS 9
/*
*/
#define UB_PARTS_PER_LUN 8
#define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */
#define UB_SENSE_SIZE 18
/*
*/
struct ub_dev;
#define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */
#define UB_MAX_SECTORS 64
/*
* A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
* even if a webcam hogs the bus, but some devices need time to spin up.
*/
#define UB_URB_TIMEOUT (HZ*2)
#define UB_DATA_TIMEOUT (HZ*5) /* ZIP does spin-ups in the data phase */
#define UB_STAT_TIMEOUT (HZ*5) /* Same spinups and eject for a dataless cmd. */
#define UB_CTRL_TIMEOUT (HZ/2) /* 500ms ought to be enough to clear a stall */
/*
* An instance of a SCSI command in transit.
*/
#define UB_DIR_NONE 0
#define UB_DIR_READ 1
#define UB_DIR_ILLEGAL2 2
#define UB_DIR_WRITE 3
#define UB_DIR_CHAR(c) (((c)==UB_DIR_WRITE)? 'w': \
(((c)==UB_DIR_READ)? 'r': 'n'))
enum ub_scsi_cmd_state {
UB_CMDST_INIT, /* Initial state */
UB_CMDST_CMD, /* Command submitted */
UB_CMDST_DATA, /* Data phase */
UB_CMDST_CLR2STS, /* Clearing before requesting status */
UB_CMDST_STAT, /* Status phase */
UB_CMDST_CLEAR, /* Clearing a stall (halt, actually) */
UB_CMDST_CLRRS, /* Clearing before retrying status */
UB_CMDST_SENSE, /* Sending Request Sense */
UB_CMDST_DONE /* Final state */
};
struct ub_scsi_cmd {
unsigned char cdb[UB_MAX_CDB_SIZE];
unsigned char cdb_len;
unsigned char dir; /* 0 - none, 1 - read, 3 - write. */
enum ub_scsi_cmd_state state;
unsigned int tag;
struct ub_scsi_cmd *next;
int error; /* Return code - valid upon done */
unsigned int act_len; /* Return size */
unsigned char key, asc, ascq; /* May be valid if error==-EIO */
int stat_count; /* Retries getting status. */
unsigned int timeo; /* jiffies until rq->timeout changes */
unsigned int len; /* Requested length */
unsigned int current_sg;
unsigned int nsg; /* sgv[nsg] */
struct scatterlist sgv[UB_MAX_REQ_SG];
struct ub_lun *lun;
void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
void *back;
};
struct ub_request {
struct request *rq;
unsigned int current_try;
unsigned int nsg; /* sgv[nsg] */
struct scatterlist sgv[UB_MAX_REQ_SG];
};
/*
*/
struct ub_capacity {
unsigned long nsec; /* Linux size - 512 byte sectors */
unsigned int bsize; /* Linux hardsect_size */
unsigned int bshift; /* Shift between 512 and hard sects */
};
/*
* This is a direct take-off from linux/include/completion.h
* The difference is that I do not wait on this thing, just poll.
* When I want to wait (ub_probe), I just use the stock completion.
*
* Note that INIT_COMPLETION takes no lock. It is correct. But why
* in the bloody hell that thing takes struct instead of pointer to struct
* is quite beyond me. I just copied it from the stock completion.
*/
struct ub_completion {
unsigned int done;
spinlock_t lock;
};
static DEFINE_MUTEX(ub_mutex);
static inline void ub_init_completion(struct ub_completion *x)
{
x->done = 0;
spin_lock_init(&x->lock);
}
#define UB_INIT_COMPLETION(x) ((x).done = 0)
static void ub_complete(struct ub_completion *x)
{
unsigned long flags;
spin_lock_irqsave(&x->lock, flags);
x->done++;
spin_unlock_irqrestore(&x->lock, flags);
}
static int ub_is_completed(struct ub_completion *x)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&x->lock, flags);
ret = x->done;
spin_unlock_irqrestore(&x->lock, flags);
return ret;
}
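/*
* A sketch of how this polled completion is used by the code below:
* the submit path arms it, the URB callback signals it, and the
* tasklet polls it without ever sleeping:
*
*   UB_INIT_COMPLETION(sc->work_done);     (before usb_submit_urb)
*   ub_complete(&sc->work_done);           (in ub_urb_complete)
*   if (!ub_is_completed(&sc->work_done))  (in ub_scsi_dispatch)
*           break;                          - URB still in flight
*/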
/*
*/
struct ub_scsi_cmd_queue {
int qlen, qmax;
struct ub_scsi_cmd *head, *tail;
};
/*
* The block device instance (one per LUN).
*/
struct ub_lun {
struct ub_dev *udev;
struct list_head link;
struct gendisk *disk;
int id; /* Host index */
int num; /* LUN number */
char name[16];
int changed; /* Media was changed */
int removable;
int readonly;
struct ub_request urq;
/* Use Ingo's mempool if or when we have more than one command. */
/*
* Currently we never need more than one command for the whole device.
* However, giving every LUN a command is a cheap and automatic way
* to enforce fairness between them.
*/
int cmda[1];
struct ub_scsi_cmd cmdv[1];
struct ub_capacity capacity;
};
/*
* The USB device instance.
*/
struct ub_dev {
spinlock_t *lock;
atomic_t poison; /* The USB device is disconnected */
int openc; /* protected by ub_lock! */
/* kref is too implicit for our taste */
int reset; /* Reset is running */
int bad_resid;
unsigned int tagcnt;
char name[12];
struct usb_device *dev;
struct usb_interface *intf;
struct list_head luns;
unsigned int send_bulk_pipe; /* cached pipe values */
unsigned int recv_bulk_pipe;
unsigned int send_ctrl_pipe;
unsigned int recv_ctrl_pipe;
struct tasklet_struct tasklet;
struct ub_scsi_cmd_queue cmd_queue;
struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
unsigned char top_sense[UB_SENSE_SIZE];
struct ub_completion work_done;
struct urb work_urb;
struct timer_list work_timer;
int last_pipe; /* What might need clearing */
__le32 signature; /* Learned signature */
struct bulk_cb_wrap work_bcb;
struct bulk_cs_wrap work_bcs;
struct usb_ctrlrequest work_cr;
struct work_struct reset_work;
wait_queue_head_t reset_wait;
};
/*
*/
static void ub_cleanup(struct ub_dev *sc);
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_end_rq(struct request *rq, unsigned int status);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_urb_complete(struct urb *urb);
static void ub_scsi_action(unsigned long _dev);
static void ub_scsi_dispatch(struct ub_dev *sc);
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
static void ub_reset_enter(struct ub_dev *sc, int try);
static void ub_reset_task(struct work_struct *work);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
struct ub_capacity *ret);
static int ub_sync_reset(struct ub_dev *sc);
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
static int ub_probe_lun(struct ub_dev *sc, int lnum);
/*
*/
#ifdef CONFIG_USB_LIBUSUAL
#define ub_usb_ids usb_storage_usb_ids
#else
static const struct usb_device_id ub_usb_ids[] = {
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) },
{ }
};
MODULE_DEVICE_TABLE(usb, ub_usb_ids);
#endif /* CONFIG_USB_LIBUSUAL */
/*
* Find me a way to identify "next free minor" for add_disk(),
* and the array disappears the next day. However, the number of
* hosts has something to do with the naming and /proc/partitions.
* This has to be thought out in detail before changing.
* If UB_MAX_HOSTS were 1000, we'd use a bitmap. Or a better data structure.
*/
#define UB_MAX_HOSTS 26
static char ub_hostv[UB_MAX_HOSTS];
#define UB_QLOCK_NUM 5
static spinlock_t ub_qlockv[UB_QLOCK_NUM];
static int ub_qlock_next = 0;
static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */
/*
* The id allocator.
*
* This also stores the host for indexing by minor, which is somewhat dirty.
*/
static int ub_id_get(void)
{
unsigned long flags;
int i;
spin_lock_irqsave(&ub_lock, flags);
for (i = 0; i < UB_MAX_HOSTS; i++) {
if (ub_hostv[i] == 0) {
ub_hostv[i] = 1;
spin_unlock_irqrestore(&ub_lock, flags);
return i;
}
}
spin_unlock_irqrestore(&ub_lock, flags);
return -1;
}
static void ub_id_put(int id)
{
unsigned long flags;
if (id < 0 || id >= UB_MAX_HOSTS) {
printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
return;
}
spin_lock_irqsave(&ub_lock, flags);
if (ub_hostv[id] == 0) {
spin_unlock_irqrestore(&ub_lock, flags);
printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
return;
}
ub_hostv[id] = 0;
spin_unlock_irqrestore(&ub_lock, flags);
}
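/*
* The returned index doubles as the disk letter: host id 0 becomes
* /dev/uba, id 1 becomes /dev/ubb, and so on (see ub_probe_lun below),
* which is why a simple linear scan over the 26 slots is sufficient.
*/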
/*
* This is necessitated by the fact that blk_cleanup_queue does not
* necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
* Since our blk_init_queue() passes a spinlock common with ub_dev,
* we have lifetime issues when ub_cleanup frees ub_dev.
*/
static spinlock_t *ub_next_lock(void)
{
unsigned long flags;
spinlock_t *ret;
spin_lock_irqsave(&ub_lock, flags);
ret = &ub_qlockv[ub_qlock_next];
ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
spin_unlock_irqrestore(&ub_lock, flags);
return ret;
}
/*
* Downcount for deallocation. This rides on two assumptions:
* - once something is poisoned, its refcount cannot grow
* - opens cannot happen at this time (del_gendisk was done)
* If the above is true, we can drop the lock, which we need for
* blk_cleanup_queue(): the silly thing may attempt to sleep.
* [Actually, it never needs to sleep for us, but it calls might_sleep()]
*/
static void ub_put(struct ub_dev *sc)
{
unsigned long flags;
spin_lock_irqsave(&ub_lock, flags);
--sc->openc;
if (sc->openc == 0 && atomic_read(&sc->poison)) {
spin_unlock_irqrestore(&ub_lock, flags);
ub_cleanup(sc);
} else {
spin_unlock_irqrestore(&ub_lock, flags);
}
}
/*
* Final cleanup and deallocation.
*/
static void ub_cleanup(struct ub_dev *sc)
{
struct list_head *p;
struct ub_lun *lun;
struct request_queue *q;
while (!list_empty(&sc->luns)) {
p = sc->luns.next;
lun = list_entry(p, struct ub_lun, link);
list_del(p);
/* I don't think queue can be NULL. But... Stolen from sx8.c */
if ((q = lun->disk->queue) != NULL)
blk_cleanup_queue(q);
/*
* If we zero disk->private_data BEFORE put_disk, we have
* to check for NULL all over the place in open, release,
* check_media and revalidate, because the block level
* semaphore is well inside the put_disk.
* But we cannot zero after the call, because *disk is gone.
* The sd.c is blatantly racy in this area.
*/
/* disk->private_data = NULL; */
put_disk(lun->disk);
lun->disk = NULL;
ub_id_put(lun->id);
kfree(lun);
}
usb_set_intfdata(sc->intf, NULL);
usb_put_intf(sc->intf);
usb_put_dev(sc->dev);
kfree(sc);
}
/*
* The "command allocator".
*/
static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
struct ub_scsi_cmd *ret;
if (lun->cmda[0])
return NULL;
ret = &lun->cmdv[0];
lun->cmda[0] = 1;
return ret;
}
static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
if (cmd != &lun->cmdv[0]) {
printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
lun->name, cmd);
return;
}
if (!lun->cmda[0]) {
printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
return;
}
lun->cmda[0] = 0;
}
/*
* The command queue.
*/
static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
if (t->qlen++ == 0) {
t->head = cmd;
t->tail = cmd;
} else {
t->tail->next = cmd;
t->tail = cmd;
}
if (t->qlen > t->qmax)
t->qmax = t->qlen;
}
static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
if (t->qlen++ == 0) {
t->head = cmd;
t->tail = cmd;
} else {
cmd->next = t->head;
t->head = cmd;
}
if (t->qlen > t->qmax)
t->qmax = t->qlen;
}
static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
struct ub_scsi_cmd *cmd;
if (t->qlen == 0)
return NULL;
if (--t->qlen == 0)
t->tail = NULL;
cmd = t->head;
t->head = cmd->next;
cmd->next = NULL;
return cmd;
}
#define ub_cmdq_peek(sc) ((sc)->cmd_queue.head)
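/*
* Note the asymmetry between the two enqueue helpers above:
* ub_cmdq_add() appends at the tail (normal FIFO order), while
* ub_cmdq_insert() pushes at the head, so the auto-sense command
* queued by ub_state_sense() runs ahead of anything already waiting.
*/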
/*
* The request function is our main entry point
*/
static void ub_request_fn(struct request_queue *q)
{
struct ub_lun *lun = q->queuedata;
struct request *rq;
while ((rq = blk_peek_request(q)) != NULL) {
if (ub_request_fn_1(lun, rq) != 0) {
blk_stop_queue(q);
break;
}
}
}
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
struct ub_dev *sc = lun->udev;
struct ub_scsi_cmd *cmd;
struct ub_request *urq;
int n_elem;
if (atomic_read(&sc->poison)) {
blk_start_request(rq);
ub_end_rq(rq, DID_NO_CONNECT << 16);
return 0;
}
if (lun->changed && rq->cmd_type != REQ_TYPE_BLOCK_PC) {
blk_start_request(rq);
ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
return 0;
}
if (lun->urq.rq != NULL)
return -1;
if ((cmd = ub_get_cmd(lun)) == NULL)
return -1;
memset(cmd, 0, sizeof(struct ub_scsi_cmd));
blk_start_request(rq);
urq = &lun->urq;
memset(urq, 0, sizeof(struct ub_request));
urq->rq = rq;
/*
* get scatterlist from block layer
*/
sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
if (n_elem < 0) {
/* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
printk(KERN_INFO "%s: failed request map (%d)\n",
lun->name, n_elem);
goto drop;
}
if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */
printk(KERN_WARNING "%s: request with %d segments\n",
lun->name, n_elem);
goto drop;
}
urq->nsg = n_elem;
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
ub_cmd_build_packet(sc, lun, cmd, urq);
} else {
ub_cmd_build_block(sc, lun, cmd, urq);
}
cmd->state = UB_CMDST_INIT;
cmd->lun = lun;
cmd->done = ub_rw_cmd_done;
cmd->back = urq;
cmd->tag = sc->tagcnt++;
if (ub_submit_scsi(sc, cmd) != 0)
goto drop;
return 0;
drop:
ub_put_cmd(lun, cmd);
ub_end_rq(rq, DID_ERROR << 16);
return 0;
}
static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
struct request *rq = urq->rq;
unsigned int block, nblks;
if (rq_data_dir(rq) == WRITE)
cmd->dir = UB_DIR_WRITE;
else
cmd->dir = UB_DIR_READ;
cmd->nsg = urq->nsg;
memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
/*
* build the command
*
* The call to blk_queue_logical_block_size() guarantees that request
* is aligned, but it is given in terms of 512 byte units, always.
*/
block = blk_rq_pos(rq) >> lun->capacity.bshift;
nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
cmd->cdb[2] = block >> 24;
cmd->cdb[3] = block >> 16;
cmd->cdb[4] = block >> 8;
cmd->cdb[5] = block;
cmd->cdb[7] = nblks >> 8;
cmd->cdb[8] = nblks;
cmd->cdb_len = 10;
cmd->len = blk_rq_bytes(rq);
}
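/*
* A worked example of the CDB built above (hypothetical values): a
* read of 4KB at 512-byte sector 0x12345678 with bshift == 0 yields
*   cdb[0]    = 0x28 (READ_10)
*   cdb[2..5] = 12 34 56 78   - big-endian 32-bit LBA
*   cdb[7..8] = 00 08         - big-endian 16-bit transfer length
* that is, 8 blocks of 512 bytes, matching blk_rq_bytes(rq) == 4096.
*/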
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
struct request *rq = urq->rq;
if (blk_rq_bytes(rq) == 0) {
cmd->dir = UB_DIR_NONE;
} else {
if (rq_data_dir(rq) == WRITE)
cmd->dir = UB_DIR_WRITE;
else
cmd->dir = UB_DIR_READ;
}
cmd->nsg = urq->nsg;
memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);
memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
cmd->cdb_len = rq->cmd_len;
cmd->len = blk_rq_bytes(rq);
/*
* To reapply this to every URB is not as incorrect as it looks.
* In return, we avoid any complicated tracking calculations.
*/
cmd->timeo = rq->timeout;
}
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct ub_lun *lun = cmd->lun;
struct ub_request *urq = cmd->back;
struct request *rq;
unsigned int scsi_status;
rq = urq->rq;
if (cmd->error == 0) {
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
if (cmd->act_len >= rq->resid_len)
rq->resid_len = 0;
else
rq->resid_len -= cmd->act_len;
scsi_status = 0;
} else {
if (cmd->act_len != cmd->len) {
scsi_status = SAM_STAT_CHECK_CONDITION;
} else {
scsi_status = 0;
}
}
} else {
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
rq->sense_len = UB_SENSE_SIZE;
if (sc->top_sense[0] != 0)
scsi_status = SAM_STAT_CHECK_CONDITION;
else
scsi_status = DID_ERROR << 16;
} else {
if (cmd->error == -EIO &&
(cmd->key == 0 ||
cmd->key == MEDIUM_ERROR ||
cmd->key == UNIT_ATTENTION)) {
if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
return;
}
scsi_status = SAM_STAT_CHECK_CONDITION;
}
}
urq->rq = NULL;
ub_put_cmd(lun, cmd);
ub_end_rq(rq, scsi_status);
blk_start_queue(lun->disk->queue);
}
static void ub_end_rq(struct request *rq, unsigned int scsi_status)
{
int error;
if (scsi_status == 0) {
error = 0;
} else {
error = -EIO;
rq->errors = scsi_status;
}
__blk_end_request_all(rq, error);
}
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
struct ub_request *urq, struct ub_scsi_cmd *cmd)
{
if (atomic_read(&sc->poison))
return -ENXIO;
ub_reset_enter(sc, urq->current_try);
if (urq->current_try >= 3)
return -EIO;
urq->current_try++;
/* Remove this if anyone complains of flooding. */
printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
"[sense %x %02x %02x] retry %d\n",
sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
cmd->key, cmd->asc, cmd->ascq, urq->current_try);
memset(cmd, 0, sizeof(struct ub_scsi_cmd));
ub_cmd_build_block(sc, lun, cmd, urq);
cmd->state = UB_CMDST_INIT;
cmd->lun = lun;
cmd->done = ub_rw_cmd_done;
cmd->back = urq;
cmd->tag = sc->tagcnt++;
#if 0 /* Wasteful */
return ub_submit_scsi(sc, cmd);
#else
ub_cmdq_add(sc, cmd);
return 0;
#endif
}
/*
* Submit a regular SCSI operation (not an auto-sense).
*
* The Iron Law of Good Submit Routine is:
* Zero return - callback is done, Nonzero return - callback is not done.
* No exceptions.
*
* Host is assumed locked.
*/
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
if (cmd->state != UB_CMDST_INIT ||
(cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
return -EINVAL;
}
ub_cmdq_add(sc, cmd);
/*
* We can call ub_scsi_dispatch(sc) right away here, but it's a little
* safer to jump to a tasklet, in case upper layers do something silly.
*/
tasklet_schedule(&sc->tasklet);
return 0;
}
/*
* Submit the first URB for the queued command.
* This function does not deal with queueing in any way.
*/
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct bulk_cb_wrap *bcb;
int rc;
bcb = &sc->work_bcb;
/*
* ``If the allocation length is eighteen or greater, and a device
* server returns less than eighteen bytes of data, the application
* client should assume that the bytes not transferred would have been
* zeroes had the device server returned those bytes.''
*
* We zero sense for all commands so that when a packet request
* fails it does not return a stale sense.
*/
memset(&sc->top_sense, 0, UB_SENSE_SIZE);
/* set up the command wrapper */
bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
bcb->Tag = cmd->tag; /* Endianness is not important */
bcb->DataTransferLength = cpu_to_le32(cmd->len);
bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
bcb->Length = cmd->cdb_len;
/* copy the command payload */
memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);
UB_INIT_COMPLETION(sc->work_done);
sc->last_pipe = sc->send_bulk_pipe;
usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
ub_complete(&sc->work_done);
return rc;
}
sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
add_timer(&sc->work_timer);
cmd->state = UB_CMDST_CMD;
return 0;
}
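/*
* For reference, the 31-byte wrapper assembled above is the standard
* Bulk-Only CBW: the 'USBC' signature, a tag that the device echoes
* back in the CSW, the expected transfer length, a direction flag
* (0x80 for device-to-host, 0 for host-to-device), the LUN, the CDB
* length, and up to 16 bytes of CDB.
*/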
/*
* Timeout handler.
*/
static void ub_urb_timeout(unsigned long arg)
{
struct ub_dev *sc = (struct ub_dev *) arg;
unsigned long flags;
spin_lock_irqsave(sc->lock, flags);
if (!ub_is_completed(&sc->work_done))
usb_unlink_urb(&sc->work_urb);
spin_unlock_irqrestore(sc->lock, flags);
}
/*
* Completion routine for the work URB.
*
* This can be called directly from usb_submit_urb (while we have
* the sc->lock taken) and from an interrupt (while we do NOT have
* the sc->lock taken). Therefore, bounce this off to a tasklet.
*/
static void ub_urb_complete(struct urb *urb)
{
struct ub_dev *sc = urb->context;
ub_complete(&sc->work_done);
tasklet_schedule(&sc->tasklet);
}
static void ub_scsi_action(unsigned long _dev)
{
struct ub_dev *sc = (struct ub_dev *) _dev;
unsigned long flags;
spin_lock_irqsave(sc->lock, flags);
ub_scsi_dispatch(sc);
spin_unlock_irqrestore(sc->lock, flags);
}
static void ub_scsi_dispatch(struct ub_dev *sc)
{
struct ub_scsi_cmd *cmd;
int rc;
while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
if (cmd->state == UB_CMDST_DONE) {
ub_cmdq_pop(sc);
(*cmd->done)(sc, cmd);
} else if (cmd->state == UB_CMDST_INIT) {
if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
break;
cmd->error = rc;
cmd->state = UB_CMDST_DONE;
} else {
if (!ub_is_completed(&sc->work_done))
break;
del_timer(&sc->work_timer);
ub_scsi_urb_compl(sc, cmd);
}
}
}
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct urb *urb = &sc->work_urb;
struct bulk_cs_wrap *bcs;
int endp;
int len;
int rc;
if (atomic_read(&sc->poison)) {
ub_state_done(sc, cmd, -ENODEV);
return;
}
endp = usb_pipeendpoint(sc->last_pipe);
if (usb_pipein(sc->last_pipe))
endp |= USB_DIR_IN;
if (cmd->state == UB_CMDST_CLEAR) {
if (urb->status == -EPIPE) {
/*
* STALL while clearing STALL.
* The control pipe clears itself - nothing to do.
*/
printk(KERN_NOTICE "%s: stall on control pipe\n",
sc->name);
goto Bad_End;
}
/*
* We ignore the result for the halt clear.
*/
usb_reset_endpoint(sc->dev, endp);
ub_state_sense(sc, cmd);
} else if (cmd->state == UB_CMDST_CLR2STS) {
if (urb->status == -EPIPE) {
printk(KERN_NOTICE "%s: stall on control pipe\n",
sc->name);
goto Bad_End;
}
/*
* We ignore the result for the halt clear.
*/
usb_reset_endpoint(sc->dev, endp);
ub_state_stat(sc, cmd);
} else if (cmd->state == UB_CMDST_CLRRS) {
if (urb->status == -EPIPE) {
printk(KERN_NOTICE "%s: stall on control pipe\n",
sc->name);
goto Bad_End;
}
/*
* We ignore the result for the halt clear.
*/
usb_reset_endpoint(sc->dev, endp);
ub_state_stat_counted(sc, cmd);
} else if (cmd->state == UB_CMDST_CMD) {
switch (urb->status) {
case 0:
break;
case -EOVERFLOW:
goto Bad_End;
case -EPIPE:
rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
if (rc != 0) {
printk(KERN_NOTICE "%s: "
"unable to submit clear (%d)\n",
sc->name, rc);
/*
* This is typically ENOMEM or some other such shit.
* Retrying is pointless. Just do Bad End on it...
*/
ub_state_done(sc, cmd, rc);
return;
}
cmd->state = UB_CMDST_CLEAR;
return;
case -ESHUTDOWN: /* unplug */
case -EILSEQ: /* unplug timeout on uhci */
ub_state_done(sc, cmd, -ENODEV);
return;
default:
goto Bad_End;
}
if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
goto Bad_End;
}
if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
ub_state_stat(sc, cmd);
return;
}
// udelay(125); // usb-storage has this
ub_data_start(sc, cmd);
} else if (cmd->state == UB_CMDST_DATA) {
if (urb->status == -EPIPE) {
rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
if (rc != 0) {
printk(KERN_NOTICE "%s: "
"unable to submit clear (%d)\n",
sc->name, rc);
ub_state_done(sc, cmd, rc);
return;
}
cmd->state = UB_CMDST_CLR2STS;
return;
}
if (urb->status == -EOVERFLOW) {
/*
* A babble? Failure, but we must transfer CSW now.
*/
cmd->error = -EOVERFLOW; /* A cheap trick... */
ub_state_stat(sc, cmd);
return;
}
if (cmd->dir == UB_DIR_WRITE) {
/*
* Do not continue writes in case of a failure.
* Doing so would cause sectors to be mixed up,
* which is worse than sectors lost.
*
* We must try to read the CSW, or many devices
* get confused.
*/
len = urb->actual_length;
if (urb->status != 0 ||
len != cmd->sgv[cmd->current_sg].length) {
cmd->act_len += len;
cmd->error = -EIO;
ub_state_stat(sc, cmd);
return;
}
} else {
/*
* If an error occurs on read, we record it, and
* continue to fetch data in order to avoid a bubble.
*
* As a small shortcut, we stop if we detect that
* a CSW has been mixed into the data.
*/
if (urb->status != 0)
cmd->error = -EIO;
len = urb->actual_length;
if (urb->status != 0 ||
len != cmd->sgv[cmd->current_sg].length) {
if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
goto Bad_End;
}
}
cmd->act_len += urb->actual_length;
if (++cmd->current_sg < cmd->nsg) {
ub_data_start(sc, cmd);
return;
}
ub_state_stat(sc, cmd);
} else if (cmd->state == UB_CMDST_STAT) {
if (urb->status == -EPIPE) {
rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
if (rc != 0) {
printk(KERN_NOTICE "%s: "
"unable to submit clear (%d)\n",
sc->name, rc);
ub_state_done(sc, cmd, rc);
return;
}
/*
* Having a stall when getting CSW is an error, so
* make sure upper levels are not oblivious to it.
*/
cmd->error = -EIO; /* A cheap trick... */
cmd->state = UB_CMDST_CLRRS;
return;
}
/* Catch everything, including -EOVERFLOW and other nasties. */
if (urb->status != 0)
goto Bad_End;
if (urb->actual_length == 0) {
ub_state_stat_counted(sc, cmd);
return;
}
/*
* Check the returned Bulk protocol status.
* The status block has to be validated first.
*/
bcs = &sc->work_bcs;
if (sc->signature == cpu_to_le32(0)) {
/*
* This is the first reply, so do not perform the check.
* Instead, remember the signature the device uses
* for future checks. But do not allow a nul.
*/
sc->signature = bcs->Signature;
if (sc->signature == cpu_to_le32(0)) {
ub_state_stat_counted(sc, cmd);
return;
}
} else {
if (bcs->Signature != sc->signature) {
ub_state_stat_counted(sc, cmd);
return;
}
}
if (bcs->Tag != cmd->tag) {
/*
* This usually happens when we disagree with the
* device's microcode about something. For instance,
* a few of them throw this after timeouts. They buffer
* commands and reply to commands that we timed out earlier.
* Without flushing these replies we loop forever.
*/
ub_state_stat_counted(sc, cmd);
return;
}
if (!sc->bad_resid) {
len = le32_to_cpu(bcs->Residue);
if (len != cmd->len - cmd->act_len) {
/*
* Only start ignoring if this cmd ended well.
*/
if (cmd->len == cmd->act_len) {
printk(KERN_NOTICE "%s: "
"bad residual %d of %d, ignoring\n",
sc->name, len, cmd->len);
sc->bad_resid = 1;
}
}
}
switch (bcs->Status) {
case US_BULK_STAT_OK:
break;
case US_BULK_STAT_FAIL:
ub_state_sense(sc, cmd);
return;
case US_BULK_STAT_PHASE:
goto Bad_End;
default:
printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
sc->name, bcs->Status);
ub_state_done(sc, cmd, -EINVAL);
return;
}
/* Not zeroing error to preserve a babble indicator */
if (cmd->error != 0) {
ub_state_sense(sc, cmd);
return;
}
cmd->state = UB_CMDST_DONE;
ub_cmdq_pop(sc);
(*cmd->done)(sc, cmd);
} else if (cmd->state == UB_CMDST_SENSE) {
ub_state_done(sc, cmd, -EIO);
} else {
printk(KERN_WARNING "%s: wrong command state %d\n",
sc->name, cmd->state);
ub_state_done(sc, cmd, -EINVAL);
return;
}
return;
Bad_End: /* Little Excel is dead */
ub_state_done(sc, cmd, -EIO);
}
/*
* Factorization helper for the command state machine:
* Initiate a data segment transfer.
*/
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
int pipe;
int rc;
UB_INIT_COMPLETION(sc->work_done);
if (cmd->dir == UB_DIR_READ)
pipe = sc->recv_bulk_pipe;
else
pipe = sc->send_bulk_pipe;
sc->last_pipe = pipe;
usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
sg->length, ub_urb_complete, sc);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
ub_complete(&sc->work_done);
ub_state_done(sc, cmd, rc);
return;
}
if (cmd->timeo)
sc->work_timer.expires = jiffies + cmd->timeo;
else
sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
add_timer(&sc->work_timer);
cmd->state = UB_CMDST_DATA;
}
/*
* Factorization helper for the command state machine:
* Finish the command.
*/
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
{
cmd->error = rc;
cmd->state = UB_CMDST_DONE;
ub_cmdq_pop(sc);
(*cmd->done)(sc, cmd);
}
/*
* Factorization helper for the command state machine:
* Submit a CSW read.
*/
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
int rc;
UB_INIT_COMPLETION(sc->work_done);
sc->last_pipe = sc->recv_bulk_pipe;
usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
&sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
/* XXX Clear stalls */
ub_complete(&sc->work_done);
ub_state_done(sc, cmd, rc);
return -1;
}
if (cmd->timeo)
sc->work_timer.expires = jiffies + cmd->timeo;
else
sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
add_timer(&sc->work_timer);
return 0;
}
/*
* Factorization helper for the command state machine:
* Submit a CSW read and go to STAT state.
*/
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
if (__ub_state_stat(sc, cmd) != 0)
return;
cmd->stat_count = 0;
cmd->state = UB_CMDST_STAT;
}
/*
* Factorization helper for the command state machine:
* Submit a CSW read and go to STAT state with counter (along [C] path).
*/
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
if (++cmd->stat_count >= 4) {
ub_state_sense(sc, cmd);
return;
}
if (__ub_state_stat(sc, cmd) != 0)
return;
cmd->state = UB_CMDST_STAT;
}
/*
* Factorization helper for the command state machine:
* Submit a REQUEST SENSE and go to SENSE state.
*/
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct ub_scsi_cmd *scmd;
struct scatterlist *sg;
int rc;
if (cmd->cdb[0] == REQUEST_SENSE) {
rc = -EPIPE;
goto error;
}
scmd = &sc->top_rqs_cmd;
memset(scmd, 0, sizeof(struct ub_scsi_cmd));
scmd->cdb[0] = REQUEST_SENSE;
scmd->cdb[4] = UB_SENSE_SIZE;
scmd->cdb_len = 6;
scmd->dir = UB_DIR_READ;
scmd->state = UB_CMDST_INIT;
scmd->nsg = 1;
sg = &scmd->sgv[0];
sg_init_table(sg, UB_MAX_REQ_SG);
sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE,
(unsigned long)sc->top_sense & (PAGE_SIZE-1));
scmd->len = UB_SENSE_SIZE;
scmd->lun = cmd->lun;
scmd->done = ub_top_sense_done;
scmd->back = cmd;
scmd->tag = sc->tagcnt++;
cmd->state = UB_CMDST_SENSE;
ub_cmdq_insert(sc, scmd);
return;
error:
ub_state_done(sc, cmd, rc);
}
/*
* A helper for the command's state machine:
* Submit a stall clear.
*/
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
int stalled_pipe)
{
int endp;
struct usb_ctrlrequest *cr;
int rc;
endp = usb_pipeendpoint(stalled_pipe);
if (usb_pipein(stalled_pipe))
endp |= USB_DIR_IN;
cr = &sc->work_cr;
cr->bRequestType = USB_RECIP_ENDPOINT;
cr->bRequest = USB_REQ_CLEAR_FEATURE;
cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
cr->wIndex = cpu_to_le16(endp);
cr->wLength = cpu_to_le16(0);
UB_INIT_COMPLETION(sc->work_done);
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
(unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
ub_complete(&sc->work_done);
return rc;
}
sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
add_timer(&sc->work_timer);
return 0;
}
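/*
* The control transfer built above is a standard CLEAR_FEATURE
* (ENDPOINT_HALT). For a hypothetical stalled bulk-in endpoint 1 the
* eight-byte setup packet works out to:
*   bmRequestType 0x02, bRequest 0x01, wValue 0x0000 (ENDPOINT_HALT),
*   wIndex 0x0081 (endpoint 1, IN), wLength 0
*/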
/*
*/
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
{
unsigned char *sense = sc->top_sense;
struct ub_scsi_cmd *cmd;
/*
* Find the command which triggered the unit attention or a check,
* save the sense into it, and advance its state machine.
*/
if ((cmd = ub_cmdq_peek(sc)) == NULL) {
printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
return;
}
if (cmd != scmd->back) {
printk(KERN_WARNING "%s: "
"sense done for wrong command 0x%x\n",
sc->name, cmd->tag);
return;
}
if (cmd->state != UB_CMDST_SENSE) {
printk(KERN_WARNING "%s: sense done with bad cmd state %d\n",
sc->name, cmd->state);
return;
}
/*
* Ignoring scmd->act_len, because the buffer was pre-zeroed.
*/
cmd->key = sense[2] & 0x0F;
cmd->asc = sense[12];
cmd->ascq = sense[13];
ub_scsi_urb_compl(sc, cmd);
}
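/*
* The offsets used above follow the fixed-format sense data layout:
* byte 2 (low nibble) holds the sense key and bytes 12/13 hold the
* ASC/ASCQ pair. A freshly inserted medium, for example, typically
* returns key 0x06 (UNIT ATTENTION) with asc/ascq 0x28/0x00 ("medium
* may have changed"), which the retry logic in ub_rw_cmd_done honors.
*/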
/*
* Reset management
*/
static void ub_reset_enter(struct ub_dev *sc, int try)
{
if (sc->reset) {
/* This happens often on multi-LUN devices. */
return;
}
sc->reset = try + 1;
#if 0 /* Not needed because the disconnect waits for us. */
unsigned long flags;
spin_lock_irqsave(&ub_lock, flags);
sc->openc++;
spin_unlock_irqrestore(&ub_lock, flags);
#endif
#if 0 /* We let them stop themselves. */
struct ub_lun *lun;
list_for_each_entry(lun, &sc->luns, link) {
blk_stop_queue(lun->disk->queue);
}
#endif
schedule_work(&sc->reset_work);
}
static void ub_reset_task(struct work_struct *work)
{
struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
unsigned long flags;
struct ub_lun *lun;
int rc;
if (!sc->reset) {
printk(KERN_WARNING "%s: Running reset unrequested\n",
sc->name);
return;
}
if (atomic_read(&sc->poison)) {
;
} else if ((sc->reset & 1) == 0) {
ub_sync_reset(sc);
msleep(700); /* usb-storage sleeps 6s (!) */
ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
ub_probe_clear_stall(sc, sc->send_bulk_pipe);
} else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
;
} else {
rc = usb_lock_device_for_reset(sc->dev, sc->intf);
if (rc < 0) {
printk(KERN_NOTICE
"%s: usb_lock_device_for_reset failed (%d)\n",
sc->name, rc);
} else {
rc = usb_reset_device(sc->dev);
if (rc < 0) {
printk(KERN_NOTICE "%s: "
"usb_lock_device_for_reset failed (%d)\n",
sc->name, rc);
}
usb_unlock_device(sc->dev);
}
}
/*
* In theory, no commands can be running while reset is active,
* so nobody can ask for another reset, and so we do not need any
* queues of resets or anything. We do need a spinlock though,
* to interact with block layer.
*/
spin_lock_irqsave(sc->lock, flags);
sc->reset = 0;
tasklet_schedule(&sc->tasklet);
list_for_each_entry(lun, &sc->luns, link) {
blk_start_queue(lun->disk->queue);
}
wake_up(&sc->reset_wait);
spin_unlock_irqrestore(sc->lock, flags);
}
/*
* XXX Reset brackets are too much hassle to implement, so just stub them
* in order to prevent forced unbinding (which deadlocks solid when our
* ->disconnect method waits for the reset to complete and this kills keventd).
*
* XXX Tell Alan to move usb_unlock_device inside of usb_reset_device,
* or else the post_reset is invoked, and restarts I/O on a locked device.
*/
static int ub_pre_reset(struct usb_interface *iface) {
return 0;
}
static int ub_post_reset(struct usb_interface *iface) {
return 0;
}
/*
* This is called from a process context.
*/
static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
{
lun->readonly = 0; /* XXX Query this from the device */
lun->capacity.nsec = 0;
lun->capacity.bsize = 512;
lun->capacity.bshift = 0;
if (ub_sync_tur(sc, lun) != 0)
return; /* Not ready */
lun->changed = 0;
if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
/*
* The retry here means something is wrong, either with the
* device, with the transport, or with our code.
* We keep this because sd.c has retries for capacity.
*/
if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
lun->capacity.nsec = 0;
lun->capacity.bsize = 512;
lun->capacity.bshift = 0;
}
}
}
/*
* The open function.
* This is mostly needed to keep refcounting, but also to support
* media checks on removable media drives.
*/
static int ub_bd_open(struct block_device *bdev, fmode_t mode)
{
struct ub_lun *lun = bdev->bd_disk->private_data;
struct ub_dev *sc = lun->udev;
unsigned long flags;
int rc;
spin_lock_irqsave(&ub_lock, flags);
if (atomic_read(&sc->poison)) {
spin_unlock_irqrestore(&ub_lock, flags);
return -ENXIO;
}
sc->openc++;
spin_unlock_irqrestore(&ub_lock, flags);
if (lun->removable || lun->readonly)
check_disk_change(bdev);
/*
* The sd.c considers ->media_present and ->changed not equivalent,
* under some pretty murky conditions (a failure of READ CAPACITY).
* We may need it one day.
*/
if (lun->removable && lun->changed && !(mode & FMODE_NDELAY)) {
rc = -ENOMEDIUM;
goto err_open;
}
if (lun->readonly && (mode & FMODE_WRITE)) {
rc = -EROFS;
goto err_open;
}
return 0;
err_open:
ub_put(sc);
return rc;
}
static int ub_bd_unlocked_open(struct block_device *bdev, fmode_t mode)
{
int ret;
mutex_lock(&ub_mutex);
ret = ub_bd_open(bdev, mode);
mutex_unlock(&ub_mutex);
return ret;
}
/*
*/
static int ub_bd_release(struct gendisk *disk, fmode_t mode)
{
struct ub_lun *lun = disk->private_data;
struct ub_dev *sc = lun->udev;
mutex_lock(&ub_mutex);
ub_put(sc);
mutex_unlock(&ub_mutex);
return 0;
}
/*
* The ioctl interface.
*/
static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
void __user *usermem = (void __user *) arg;
int ret;
mutex_lock(&ub_mutex);
ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
mutex_unlock(&ub_mutex);
return ret;
}
/*
* This is called by check_disk_change if we reported a media change.
* The main objective here is to discover the features of the media such as
* the capacity, read-only status, etc. USB storage generally does not
* need to be spun up, but if we needed it, this would be the place.
*
* This call can sleep.
*
* The return code is not used.
*/
static int ub_bd_revalidate(struct gendisk *disk)
{
struct ub_lun *lun = disk->private_data;
ub_revalidate(lun->udev, lun);
/* XXX Support sector size switching like in sr.c */
blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
set_capacity(disk, lun->capacity.nsec);
// set_disk_ro(sdkp->disk, lun->readonly);
return 0;
}
/*
* The check is called by the block layer to verify if the media
* is still available. It is supposed to be harmless, lightweight and
* non-intrusive in case the media was not changed.
*
* This call can sleep.
*
* The return code is bool!
*/
static unsigned int ub_bd_check_events(struct gendisk *disk,
unsigned int clearing)
{
struct ub_lun *lun = disk->private_data;
if (!lun->removable)
return 0;
/*
* We clean checks always after every command, so this is not
* as dangerous as it looks. If the TEST_UNIT_READY fails here,
* the device is genuinely not ready and operator or software
* intervention is required. One dangerous case is a drive that
* spins itself down: come the time to write dirty pages, the write
* fails, and then the block layer discards the data. Since we never
* spin drives up, such devices simply cannot be used with ub anyway.
*/
if (ub_sync_tur(lun->udev, lun) != 0) {
lun->changed = 1;
return DISK_EVENT_MEDIA_CHANGE;
}
return lun->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
}
static const struct block_device_operations ub_bd_fops = {
.owner = THIS_MODULE,
.open = ub_bd_unlocked_open,
.release = ub_bd_release,
.ioctl = ub_bd_ioctl,
.check_events = ub_bd_check_events,
.revalidate_disk = ub_bd_revalidate,
};
/*
* Common ->done routine for commands executed synchronously.
*/
static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
struct completion *cop = cmd->back;
complete(cop);
}
/*
* Test if the device has a check condition on it, synchronously.
*/
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
{
struct ub_scsi_cmd *cmd;
enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
unsigned long flags;
struct completion compl;
int rc;
init_completion(&compl);
rc = -ENOMEM;
if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
goto err_alloc;
cmd->cdb[0] = TEST_UNIT_READY;
cmd->cdb_len = 6;
cmd->dir = UB_DIR_NONE;
cmd->state = UB_CMDST_INIT;
cmd->lun = lun; /* This may be NULL, but that's ok */
cmd->done = ub_probe_done;
cmd->back = &compl;
spin_lock_irqsave(sc->lock, flags);
cmd->tag = sc->tagcnt++;
rc = ub_submit_scsi(sc, cmd);
spin_unlock_irqrestore(sc->lock, flags);
if (rc != 0)
goto err_submit;
wait_for_completion(&compl);
rc = cmd->error;
if (rc == -EIO && cmd->key != 0) /* Retries for benh's key */
rc = cmd->key;
err_submit:
kfree(cmd);
err_alloc:
return rc;
}
/*
* Read the SCSI capacity synchronously (for probing).
*/
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
struct ub_capacity *ret)
{
struct ub_scsi_cmd *cmd;
struct scatterlist *sg;
char *p;
enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
unsigned long flags;
unsigned int bsize, shift;
unsigned long nsec;
struct completion compl;
int rc;
init_completion(&compl);
rc = -ENOMEM;
if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
goto err_alloc;
p = (char *)cmd + sizeof(struct ub_scsi_cmd);
cmd->cdb[0] = 0x25;
cmd->cdb_len = 10;
cmd->dir = UB_DIR_READ;
cmd->state = UB_CMDST_INIT;
cmd->nsg = 1;
sg = &cmd->sgv[0];
sg_init_table(sg, UB_MAX_REQ_SG);
sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
cmd->len = 8;
cmd->lun = lun;
cmd->done = ub_probe_done;
cmd->back = &compl;
spin_lock_irqsave(sc->lock, flags);
cmd->tag = sc->tagcnt++;
rc = ub_submit_scsi(sc, cmd);
spin_unlock_irqrestore(sc->lock, flags);
if (rc != 0)
goto err_submit;
wait_for_completion(&compl);
if (cmd->error != 0) {
rc = -EIO;
goto err_read;
}
if (cmd->act_len != 8) {
rc = -EIO;
goto err_read;
}
/* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
nsec = be32_to_cpu(*(__be32 *)p) + 1;
bsize = be32_to_cpu(*(__be32 *)(p + 4));
switch (bsize) {
case 512: shift = 0; break;
case 1024: shift = 1; break;
case 2048: shift = 2; break;
case 4096: shift = 3; break;
default:
rc = -EDOM;
goto err_inv_bsize;
}
ret->bsize = bsize;
ret->bshift = shift;
ret->nsec = nsec << shift;
rc = 0;
err_inv_bsize:
err_read:
err_submit:
kfree(cmd);
err_alloc:
return rc;
}
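/*
* A worked example of the decoding above (hypothetical device): the
* 8-byte READ CAPACITY(10) reply carries the last LBA and the block
* size, both big-endian. For last LBA 0x0003ffff and bsize 2048:
* nsec = 0x40000 blocks, shift = 2, so ret->nsec = 0x40000 << 2 =
* 0x100000 sectors of 512 bytes - a 512 MiB medium.
*/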
/*
*/
static void ub_probe_urb_complete(struct urb *urb)
{
struct completion *cop = urb->context;
complete(cop);
}
static void ub_probe_timeout(unsigned long arg)
{
struct completion *cop = (struct completion *) arg;
complete(cop);
}
/*
* Reset with a Bulk reset.
*/
static int ub_sync_reset(struct ub_dev *sc)
{
int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
struct usb_ctrlrequest *cr;
struct completion compl;
struct timer_list timer;
int rc;
init_completion(&compl);
cr = &sc->work_cr;
cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
cr->bRequest = US_BULK_RESET_REQUEST;
cr->wValue = cpu_to_le16(0);
cr->wIndex = cpu_to_le16(ifnum);
cr->wLength = cpu_to_le16(0);
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
(unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
printk(KERN_WARNING
"%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
return rc;
}
init_timer(&timer);
timer.function = ub_probe_timeout;
timer.data = (unsigned long) &compl;
timer.expires = jiffies + UB_CTRL_TIMEOUT;
add_timer(&timer);
wait_for_completion(&compl);
del_timer_sync(&timer);
usb_kill_urb(&sc->work_urb);
return sc->work_urb.status;
}
/*
* Get number of LUNs by the way of Bulk GetMaxLUN command.
*/
static int ub_sync_getmaxlun(struct ub_dev *sc)
{
int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
unsigned char *p;
enum { ALLOC_SIZE = 1 };
struct usb_ctrlrequest *cr;
struct completion compl;
struct timer_list timer;
int nluns;
int rc;
init_completion(&compl);
rc = -ENOMEM;
if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
goto err_alloc;
*p = 55;
cr = &sc->work_cr;
cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
cr->bRequest = US_BULK_GET_MAX_LUN;
cr->wValue = cpu_to_le16(0);
cr->wIndex = cpu_to_le16(ifnum);
cr->wLength = cpu_to_le16(1);
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
(unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
goto err_submit;
init_timer(&timer);
timer.function = ub_probe_timeout;
timer.data = (unsigned long) &compl;
timer.expires = jiffies + UB_CTRL_TIMEOUT;
add_timer(&timer);
wait_for_completion(&compl);
del_timer_sync(&timer);
usb_kill_urb(&sc->work_urb);
if ((rc = sc->work_urb.status) < 0)
goto err_io;
if (sc->work_urb.actual_length != 1) {
nluns = 0;
} else {
if ((nluns = *p) == 55) {
nluns = 0;
} else {
/* GetMaxLUN returns the maximum LUN number */
nluns += 1;
if (nluns > UB_MAX_LUNS)
nluns = UB_MAX_LUNS;
}
}
kfree(p);
return nluns;
err_io:
err_submit:
kfree(p);
err_alloc:
return rc;
}
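/*
* GetMaxLUN reports the highest LUN index, so a device answering 3
* has four LUNs (0..3) - hence the "nluns += 1" above. The 55 written
* into the buffer beforehand is a sentinel: if the device leaves the
* byte untouched we read 55 back, report zero LUNs, and the probe
* code falls back to a single LUN.
*/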
/*
* Clear initial stalls.
*/
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
{
int endp;
struct usb_ctrlrequest *cr;
struct completion compl;
struct timer_list timer;
int rc;
init_completion(&compl);
endp = usb_pipeendpoint(stalled_pipe);
if (usb_pipein(stalled_pipe))
endp |= USB_DIR_IN;
cr = &sc->work_cr;
cr->bRequestType = USB_RECIP_ENDPOINT;
cr->bRequest = USB_REQ_CLEAR_FEATURE;
cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
cr->wIndex = cpu_to_le16(endp);
cr->wLength = cpu_to_le16(0);
usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
(unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
printk(KERN_WARNING
"%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
return rc;
}
init_timer(&timer);
timer.function = ub_probe_timeout;
timer.data = (unsigned long) &compl;
timer.expires = jiffies + UB_CTRL_TIMEOUT;
add_timer(&timer);
wait_for_completion(&compl);
del_timer_sync(&timer);
usb_kill_urb(&sc->work_urb);
usb_reset_endpoint(sc->dev, endp);
return 0;
}
/*
* Get the pipe settings.
*/
static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
struct usb_interface *intf)
{
struct usb_host_interface *altsetting = intf->cur_altsetting;
struct usb_endpoint_descriptor *ep_in = NULL;
struct usb_endpoint_descriptor *ep_out = NULL;
struct usb_endpoint_descriptor *ep;
int i;
/*
* Find the endpoints we need.
* We are expecting a minimum of 2 endpoints - in and out (bulk).
* We will ignore any others.
*/
for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
ep = &altsetting->endpoint[i].desc;
/* Is it a BULK endpoint? */
if (usb_endpoint_xfer_bulk(ep)) {
/* BULK in or out? */
if (usb_endpoint_dir_in(ep)) {
if (ep_in == NULL)
ep_in = ep;
} else {
if (ep_out == NULL)
ep_out = ep;
}
}
}
if (ep_in == NULL || ep_out == NULL) {
printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
return -ENODEV;
}
/* Calculate and store the pipe values */
sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
sc->send_bulk_pipe = usb_sndbulkpipe(dev,
usb_endpoint_num(ep_out));
sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
usb_endpoint_num(ep_in));
return 0;
}
/*
* Probing is done in the process context, which allows us to cheat
* and not to build a state machine for the discovery.
*/
static int ub_probe(struct usb_interface *intf,
const struct usb_device_id *dev_id)
{
struct ub_dev *sc;
int nluns;
int rc;
int i;
if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
return -ENXIO;
rc = -ENOMEM;
if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
goto err_core;
sc->lock = ub_next_lock();
INIT_LIST_HEAD(&sc->luns);
usb_init_urb(&sc->work_urb);
tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
atomic_set(&sc->poison, 0);
INIT_WORK(&sc->reset_work, ub_reset_task);
init_waitqueue_head(&sc->reset_wait);
init_timer(&sc->work_timer);
sc->work_timer.data = (unsigned long) sc;
sc->work_timer.function = ub_urb_timeout;
ub_init_completion(&sc->work_done);
sc->work_done.done = 1; /* A little yuk, but oh well... */
sc->dev = interface_to_usbdev(intf);
sc->intf = intf;
// sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
usb_set_intfdata(intf, sc);
usb_get_dev(sc->dev);
/*
* Since we give the interface struct to the block level through
* disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
* oopses on close after a disconnect (kernels 2.6.16 and up).
*/
usb_get_intf(sc->intf);
snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
sc->dev->bus->busnum, sc->dev->devnum);
/* XXX Verify that we can handle the device (from descriptors) */
if (ub_get_pipes(sc, sc->dev, intf) != 0)
goto err_dev_desc;
/*
* At this point, all USB initialization is done, do upper layer.
* We really hate halfway initialized structures, so from the
* invariants perspective, this ub_dev is fully constructed at
* this point.
*/
/*
* This is needed to clear toggles. It is a problem only if we do
* `rmmod ub && modprobe ub` without disconnects, but we like that.
*/
#if 0 /* iPod Mini fails if we do this (big white iPod works) */
ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
ub_probe_clear_stall(sc, sc->send_bulk_pipe);
#endif
/*
* The way this is used by the startup code is a little specific.
* A SCSI check causes a USB stall. Our common case code sees it
* and clears the check, after which the device is ready for use.
* But if a check was not present, any command other than
* TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
*
* If we neglect to clear the SCSI check, the first real command fails
* (which is the capacity readout). We clear that and retry, but why
* cause spurious retries for no reason?
*
* Revalidation may start with its own TEST_UNIT_READY, but that one
* has to succeed, so we clear checks with an additional one here.
* In any case it's not our business how revalidation is implemented.
*/
for (i = 0; i < 3; i++) { /* Retries for the schwag key from KS'04 */
if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
if (rc != 0x6) break;
msleep(10);
}
nluns = 1;
for (i = 0; i < 3; i++) {
if ((rc = ub_sync_getmaxlun(sc)) < 0)
break;
if (rc != 0) {
nluns = rc;
break;
}
msleep(100);
}
for (i = 0; i < nluns; i++) {
ub_probe_lun(sc, i);
}
return 0;
err_dev_desc:
usb_set_intfdata(intf, NULL);
usb_put_intf(sc->intf);
usb_put_dev(sc->dev);
kfree(sc);
err_core:
return rc;
}
static int ub_probe_lun(struct ub_dev *sc, int lnum)
{
struct ub_lun *lun;
struct request_queue *q;
struct gendisk *disk;
int rc;
rc = -ENOMEM;
if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
goto err_alloc;
lun->num = lnum;
rc = -ENOSR;
if ((lun->id = ub_id_get()) == -1)
goto err_id;
lun->udev = sc;
snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
lun->removable = 1; /* XXX Query this from the device */
lun->changed = 1; /* ub_revalidate clears only */
ub_revalidate(sc, lun);
rc = -ENOMEM;
if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
goto err_diskalloc;
sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
disk->major = UB_MAJOR;
disk->first_minor = lun->id * UB_PARTS_PER_LUN;
disk->fops = &ub_bd_fops;
disk->private_data = lun;
disk->driverfs_dev = &sc->intf->dev;
rc = -ENOMEM;
if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
goto err_blkqinit;
disk->queue = q;
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
blk_queue_max_segments(q, UB_MAX_REQ_SG);
blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
blk_queue_logical_block_size(q, lun->capacity.bsize);
lun->disk = disk;
q->queuedata = lun;
list_add(&lun->link, &sc->luns);
set_capacity(disk, lun->capacity.nsec);
if (lun->removable)
disk->flags |= GENHD_FL_REMOVABLE;
add_disk(disk);
return 0;
err_blkqinit:
put_disk(disk);
err_diskalloc:
ub_id_put(lun->id);
err_id:
kfree(lun);
err_alloc:
return rc;
}
static void ub_disconnect(struct usb_interface *intf)
{
struct ub_dev *sc = usb_get_intfdata(intf);
struct ub_lun *lun;
unsigned long flags;
/*
* Prevent ub_bd_release from pulling the rug from under us.
* XXX This is starting to look like a kref.
* XXX Why not take this ref at probe time?
*/
spin_lock_irqsave(&ub_lock, flags);
sc->openc++;
spin_unlock_irqrestore(&ub_lock, flags);
/*
* Fence stall clearings, operations triggered by unlinkings and so on.
* We do not attempt to unlink any URBs, because we do not trust the
* unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
*/
atomic_set(&sc->poison, 1);
/*
* Wait for reset to end, if any.
*/
wait_event(sc->reset_wait, !sc->reset);
/*
* Blow away queued commands.
*
* Actually, this never works, because before we get here
* the HCD terminates outstanding URB(s). It causes our
* SCSI command queue to advance, commands fail to submit,
* and the whole queue drains. So, we just use this code to
* print warnings.
*/
spin_lock_irqsave(sc->lock, flags);
{
struct ub_scsi_cmd *cmd;
int cnt = 0;
while ((cmd = ub_cmdq_peek(sc)) != NULL) {
cmd->error = -ENOTCONN;
cmd->state = UB_CMDST_DONE;
ub_cmdq_pop(sc);
(*cmd->done)(sc, cmd);
cnt++;
}
if (cnt != 0) {
printk(KERN_WARNING "%s: "
"%d was queued after shutdown\n", sc->name, cnt);
}
}
spin_unlock_irqrestore(sc->lock, flags);
/*
* Unregister the upper layer.
*/
list_for_each_entry(lun, &sc->luns, link) {
del_gendisk(lun->disk);
/*
* I wish I could do:
* queue_flag_set(QUEUE_FLAG_DEAD, q);
* As it is, we rely on our internal poisoning and let the
* upper levels spin furiously, failing all the I/O.
*/
}
/*
* Testing for -EINPROGRESS is always a bug, so we are bending
* the rules a little.
*/
spin_lock_irqsave(sc->lock, flags);
if (sc->work_urb.status == -EINPROGRESS) { /* janitors: ignore */
printk(KERN_WARNING "%s: "
"URB is active after disconnect\n", sc->name);
}
spin_unlock_irqrestore(sc->lock, flags);
/*
* There is virtually no chance that another CPU is running a timeout
* this long after ub_urb_complete should have called del_timer, though
* that holds only if the HCD didn't forget its callback on unlink.
*/
del_timer_sync(&sc->work_timer);
/*
* At this point there must be no commands coming from anyone
* and no URBs left in transit.
*/
ub_put(sc);
}
static struct usb_driver ub_driver = {
.name = "ub",
.probe = ub_probe,
.disconnect = ub_disconnect,
.id_table = ub_usb_ids,
.pre_reset = ub_pre_reset,
.post_reset = ub_post_reset,
};
static int __init ub_init(void)
{
int rc;
int i;
pr_info("'Low Performance USB Block' driver is deprecated. "
"Please switch to usb-storage\n");
for (i = 0; i < UB_QLOCK_NUM; i++)
spin_lock_init(&ub_qlockv[i]);
if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
goto err_regblkdev;
if ((rc = usb_register(&ub_driver)) != 0)
goto err_register;
usb_usual_set_present(USB_US_TYPE_UB);
return 0;
err_register:
unregister_blkdev(UB_MAJOR, DRV_NAME);
err_regblkdev:
return rc;
}
static void __exit ub_exit(void)
{
usb_deregister(&ub_driver);
unregister_blkdev(UB_MAJOR, DRV_NAME);
usb_usual_clear_present(USB_US_TYPE_UB);
}
module_init(ub_init);
module_exit(ub_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
CyanogenMod/android_kernel_sony_apq8064 | drivers/misc/hmc6352.c | 7239 | 4099 | /*
* hmc6352.c - Honeywell Compass Driver
*
* Copyright (C) 2009 Intel Corp
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
static DEFINE_MUTEX(compass_mutex);
static int compass_command(struct i2c_client *c, u8 cmd)
{
int ret = i2c_master_send(c, &cmd, 1);
if (ret < 0)
dev_warn(&c->dev, "command '%c' failed.\n", cmd);
return ret;
}
static int compass_store(struct device *dev, const char *buf, size_t count,
const char *map)
{
struct i2c_client *c = to_i2c_client(dev);
int ret;
unsigned long val;
if (strict_strtoul(buf, 10, &val))
return -EINVAL;
if (val >= strlen(map))
return -EINVAL;
mutex_lock(&compass_mutex);
ret = compass_command(c, map[val]);
mutex_unlock(&compass_mutex);
if (ret < 0)
return ret;
return count;
}
static ssize_t compass_calibration_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return compass_store(dev, buf, count, "EC");
}
static ssize_t compass_power_mode_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return compass_store(dev, buf, count, "SW");
}
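/*
* Per the HMC6352 command set, the store helpers above map the written
* index into a command character: writing 1 to "calibration" sends 'C'
* (enter user calibration) and 0 sends 'E' (exit it), while writing 0/1
* to "power_state" sends 'S' (sleep) / 'W' (wakeup).
*/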
static ssize_t compass_heading_data_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned char i2c_data[2];
int ret;
mutex_lock(&compass_mutex);
ret = compass_command(client, 'A');
if (ret != 1) {
mutex_unlock(&compass_mutex);
return ret;
}
msleep(10); /* after sending the 'A' cmd we must wait 7-10 ms */
ret = i2c_master_recv(client, i2c_data, 2);
mutex_unlock(&compass_mutex);
if (ret < 0) {
dev_warn(dev, "i2c read data cmd failed\n");
return ret;
}
ret = (i2c_data[0] << 8) | i2c_data[1];
return sprintf(buf, "%d.%d\n", ret/10, ret%10);
}
static DEVICE_ATTR(heading0_input, S_IRUGO, compass_heading_data_show, NULL);
static DEVICE_ATTR(calibration, S_IWUSR, NULL, compass_calibration_store);
static DEVICE_ATTR(power_state, S_IWUSR, NULL, compass_power_mode_store);
static struct attribute *mid_att_compass[] = {
&dev_attr_heading0_input.attr,
&dev_attr_calibration.attr,
&dev_attr_power_state.attr,
NULL
};
static const struct attribute_group m_compass_gr = {
.name = "hmc6352",
.attrs = mid_att_compass
};
static int hmc6352_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int res;
res = sysfs_create_group(&client->dev.kobj, &m_compass_gr);
if (res) {
dev_err(&client->dev, "sysfs_create_group failed\n");
return res;
}
dev_info(&client->dev, "%s HMC6352 compass chip found\n",
client->name);
return 0;
}
static int hmc6352_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &m_compass_gr);
return 0;
}
static struct i2c_device_id hmc6352_id[] = {
{ "hmc6352", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, hmc6352_id);
static struct i2c_driver hmc6352_driver = {
.driver = {
.name = "hmc6352",
},
.probe = hmc6352_probe,
.remove = hmc6352_remove,
.id_table = hmc6352_id,
};
module_i2c_driver(hmc6352_driver);
MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>");
MODULE_DESCRIPTION("hmc6352 Compass Driver");
MODULE_LICENSE("GPL v2");
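/*
* Illustrative user-space sketch (not part of the driver) showing how
* the heading attribute created above can be read; the i2c bus/address
* in the path ("5-0021") is a hypothetical example.
*/
#if 0
#include <stdio.h>
int main(void)
{
char buf[16];
FILE *f = fopen("/sys/bus/i2c/devices/5-0021/hmc6352/heading0_input", "r");
if (!f)
return 1;
if (fgets(buf, sizeof(buf), f))
printf("heading: %s", buf); /* e.g. "123.4" degrees */
fclose(f);
return 0;
}
#endif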
| gpl-2.0 |
Lukiqq/GT-I9100-Galaxian-ICS-Kernel | fs/dlm/main.c | 8007 | 1998 | /******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"
#include "user.h"
#include "memory.h"
#include "config.h"
static int __init init_dlm(void)
{
int error;
error = dlm_memory_init();
if (error)
goto out;
error = dlm_lockspace_init();
if (error)
goto out_mem;
error = dlm_config_init();
if (error)
goto out_lockspace;
error = dlm_register_debugfs();
if (error)
goto out_config;
error = dlm_user_init();
if (error)
goto out_debug;
error = dlm_netlink_init();
if (error)
goto out_user;
error = dlm_plock_init();
if (error)
goto out_netlink;
printk("DLM installed\n");
return 0;
out_netlink:
dlm_netlink_exit();
out_user:
dlm_user_exit();
out_debug:
dlm_unregister_debugfs();
out_config:
dlm_config_exit();
out_lockspace:
dlm_lockspace_exit();
out_mem:
dlm_memory_exit();
out:
return error;
}
static void __exit exit_dlm(void)
{
dlm_plock_exit();
dlm_netlink_exit();
dlm_user_exit();
dlm_config_exit();
dlm_memory_exit();
dlm_lockspace_exit();
dlm_unregister_debugfs();
}
module_init(init_dlm);
module_exit(exit_dlm);
MODULE_DESCRIPTION("Distributed Lock Manager");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL_GPL(dlm_new_lockspace);
EXPORT_SYMBOL_GPL(dlm_release_lockspace);
EXPORT_SYMBOL_GPL(dlm_lock);
EXPORT_SYMBOL_GPL(dlm_unlock);
| gpl-2.0 |
benschhold/android_kernel_oneplus_msm8994_custom | drivers/media/rc/keymaps/rc-trekstor.c | 9543 | 2633 | /*
* TrekStor remote controller keytable
*
* Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* TrekStor DVB-T USB Stick remote controller. */
/* Imported from af9015.h.
Initial keytable was from Marc Schneider <macke@macke.org> */
static struct rc_map_table trekstor[] = {
{ 0x0084, KEY_0 },
{ 0x0085, KEY_MUTE }, /* Mute */
{ 0x0086, KEY_HOMEPAGE }, /* Home */
{ 0x0087, KEY_UP }, /* Up */
{ 0x0088, KEY_OK }, /* OK */
{ 0x0089, KEY_RIGHT }, /* Right */
{ 0x008a, KEY_FASTFORWARD }, /* Fast forward */
{ 0x008b, KEY_VOLUMEUP }, /* Volume + */
{ 0x008c, KEY_DOWN }, /* Down */
{ 0x008d, KEY_PLAY }, /* Play/Pause */
{ 0x008e, KEY_STOP }, /* Stop */
{ 0x008f, KEY_EPG }, /* Info/EPG */
{ 0x0090, KEY_7 },
{ 0x0091, KEY_4 },
{ 0x0092, KEY_1 },
{ 0x0093, KEY_CHANNELDOWN }, /* Channel - */
{ 0x0094, KEY_8 },
{ 0x0095, KEY_5 },
{ 0x0096, KEY_2 },
{ 0x0097, KEY_CHANNELUP }, /* Channel + */
{ 0x0098, KEY_9 },
{ 0x0099, KEY_6 },
{ 0x009a, KEY_3 },
{ 0x009b, KEY_VOLUMEDOWN }, /* Volume - */
{ 0x009c, KEY_TV }, /* TV */
{ 0x009d, KEY_RECORD }, /* Record */
{ 0x009e, KEY_REWIND }, /* Rewind */
{ 0x009f, KEY_LEFT }, /* Left */
};
static struct rc_map_list trekstor_map = {
.map = {
.scan = trekstor,
.size = ARRAY_SIZE(trekstor),
.rc_type = RC_TYPE_NEC,
.name = RC_MAP_TREKSTOR,
}
};
static int __init init_rc_map_trekstor(void)
{
return rc_map_register(&trekstor_map);
}
static void __exit exit_rc_map_trekstor(void)
{
rc_map_unregister(&trekstor_map);
}
module_init(init_rc_map_trekstor)
module_exit(exit_rc_map_trekstor)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
| gpl-2.0 |
vincentmli/linux-stable | drivers/media/rc/keymaps/rc-trekstor.c | 9543 | 2633 | /*
* TrekStor remote controller keytable
*
* Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <media/rc-map.h>
#include <linux/module.h>
/* TrekStor DVB-T USB Stick remote controller. */
/* Imported from af9015.h.
Initial keytable was from Marc Schneider <macke@macke.org> */
static struct rc_map_table trekstor[] = {
{ 0x0084, KEY_0 },
{ 0x0085, KEY_MUTE }, /* Mute */
{ 0x0086, KEY_HOMEPAGE }, /* Home */
{ 0x0087, KEY_UP }, /* Up */
{ 0x0088, KEY_OK }, /* OK */
{ 0x0089, KEY_RIGHT }, /* Right */
{ 0x008a, KEY_FASTFORWARD }, /* Fast forward */
{ 0x008b, KEY_VOLUMEUP }, /* Volume + */
{ 0x008c, KEY_DOWN }, /* Down */
{ 0x008d, KEY_PLAY }, /* Play/Pause */
{ 0x008e, KEY_STOP }, /* Stop */
{ 0x008f, KEY_EPG }, /* Info/EPG */
{ 0x0090, KEY_7 },
{ 0x0091, KEY_4 },
{ 0x0092, KEY_1 },
{ 0x0093, KEY_CHANNELDOWN }, /* Channel - */
{ 0x0094, KEY_8 },
{ 0x0095, KEY_5 },
{ 0x0096, KEY_2 },
{ 0x0097, KEY_CHANNELUP }, /* Channel + */
{ 0x0098, KEY_9 },
{ 0x0099, KEY_6 },
{ 0x009a, KEY_3 },
{ 0x009b, KEY_VOLUMEDOWN }, /* Volume - */
{ 0x009c, KEY_TV }, /* TV */
{ 0x009d, KEY_RECORD }, /* Record */
{ 0x009e, KEY_REWIND }, /* Rewind */
{ 0x009f, KEY_LEFT }, /* Left */
};
static struct rc_map_list trekstor_map = {
.map = {
.scan = trekstor,
.size = ARRAY_SIZE(trekstor),
.rc_type = RC_TYPE_NEC,
.name = RC_MAP_TREKSTOR,
}
};
static int __init init_rc_map_trekstor(void)
{
return rc_map_register(&trekstor_map);
}
static void __exit exit_rc_map_trekstor(void)
{
rc_map_unregister(&trekstor_map);
}
module_init(init_rc_map_trekstor)
module_exit(exit_rc_map_trekstor)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
| gpl-2.0 |
THCue/android_kernel_lge_v410 | fs/lockd/grace.c | 9799 | 1594 | /*
* Common code for control of lockd and nfsv4 grace periods.
*/
#include <linux/module.h>
#include <linux/lockd/bind.h>
static LIST_HEAD(grace_list);
static DEFINE_SPINLOCK(grace_lock);
/**
* locks_start_grace
* @lm: who this grace period is for
*
* A grace period is a period during which locks should not be given
* out. Currently grace periods are only enforced by the two lock
* managers (lockd and nfsd), using the locks_in_grace() function to
* check when they are in a grace period.
*
* This function is called to start a grace period.
*/
void locks_start_grace(struct lock_manager *lm)
{
spin_lock(&grace_lock);
list_add(&lm->list, &grace_list);
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_start_grace);
/**
* locks_end_grace
* @lm: who this grace period is for
*
* Call this function to state that the given lock manager is ready to
* resume regular locking. The grace period will not end until all lock
* managers that called locks_start_grace() also call locks_end_grace().
* Note that callers count on it being safe to call this more than once,
* and the second call should be a no-op.
*/
void locks_end_grace(struct lock_manager *lm)
{
spin_lock(&grace_lock);
list_del_init(&lm->list);
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_end_grace);
/**
* locks_in_grace
*
* Lock managers call this function to determine when it is OK for them
* to answer ordinary lock requests, and when they should accept only
* lock reclaims.
*/
int locks_in_grace(void)
{
return !list_empty(&grace_list);
}
EXPORT_SYMBOL_GPL(locks_in_grace);
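/*
* Sketch of the intended calling pattern (hypothetical lock manager,
* shown for illustration only): bracket recovery with a grace period
* and refuse ordinary requests while any manager is still in grace.
*/
#if 0
static struct lock_manager example_lm;
static void example_recovery(void)
{
locks_start_grace(&example_lm); /* only reclaims from here on */
/* ... re-establish reclaimed locks ... */
locks_end_grace(&example_lm); /* normal service resumes once all
managers have ended their grace */
}
static int example_lock_request(int is_reclaim)
{
if (locks_in_grace() && !is_reclaim)
return -EAGAIN; /* ask the client to retry later */
/* ... grant the lock ... */
return 0;
}
#endif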
| gpl-2.0 |
psyke83/codeaurora-kernel_samsung_europa | fs/lockd/grace.c | 9799 | 1594 | /*
* Common code for control of lockd and nfsv4 grace periods.
*/
#include <linux/module.h>
#include <linux/lockd/bind.h>
static LIST_HEAD(grace_list);
static DEFINE_SPINLOCK(grace_lock);
/**
* locks_start_grace
* @lm: who this grace period is for
*
* A grace period is a period during which locks should not be given
* out. Currently grace periods are only enforced by the two lock
* managers (lockd and nfsd), using the locks_in_grace() function to
* check when they are in a grace period.
*
* This function is called to start a grace period.
*/
void locks_start_grace(struct lock_manager *lm)
{
spin_lock(&grace_lock);
list_add(&lm->list, &grace_list);
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_start_grace);
/**
* locks_end_grace
* @lm: who this grace period is for
*
* Call this function to state that the given lock manager is ready to
* resume regular locking. The grace period will not end until all lock
* managers that called locks_start_grace() also call locks_end_grace().
* Note that callers count on it being safe to call this more than once,
* and the second call should be a no-op.
*/
void locks_end_grace(struct lock_manager *lm)
{
spin_lock(&grace_lock);
list_del_init(&lm->list);
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_end_grace);
/**
* locks_in_grace
*
* Lock managers call this function to determine when it is OK for them
* to answer ordinary lock requests, and when they should accept only
* lock reclaims.
*/
int locks_in_grace(void)
{
return !list_empty(&grace_list);
}
EXPORT_SYMBOL_GPL(locks_in_grace);
| gpl-2.0 |
dimon2242/Neuro_kernel | fs/lockd/grace.c | 9799 | 1594 | /*
* Common code for control of lockd and nfsv4 grace periods.
*/
#include <linux/module.h>
#include <linux/lockd/bind.h>
static LIST_HEAD(grace_list);
static DEFINE_SPINLOCK(grace_lock);
/**
* locks_start_grace
* @lm: who this grace period is for
*
* A grace period is a period during which locks should not be given
* out. Currently grace periods are only enforced by the two lock
* managers (lockd and nfsd), using the locks_in_grace() function to
* check when they are in a grace period.
*
* This function is called to start a grace period.
*/
void locks_start_grace(struct lock_manager *lm)
{
spin_lock(&grace_lock);
list_add(&lm->list, &grace_list);
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_start_grace);
/**
* locks_end_grace
* @lm: who this grace period is for
*
* Call this function to state that the given lock manager is ready to
* resume regular locking. The grace period will not end until all lock
* managers that called locks_start_grace() also call locks_end_grace().
* Note that callers count on it being safe to call this more than once,
* and the second call should be a no-op.
*/
void locks_end_grace(struct lock_manager *lm)
{
spin_lock(&grace_lock);
list_del_init(&lm->list);
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_end_grace);
/**
* locks_in_grace
*
* Lock managers call this function to determine when it is OK for them
* to answer ordinary lock requests, and when they should accept only
* lock reclaims.
*/
int locks_in_grace(void)
{
return !list_empty(&grace_list);
}
EXPORT_SYMBOL_GPL(locks_in_grace);
| gpl-2.0 |
changyihsin/powermemo_kernel | arch/m68k/tools/amiga/dmesg.c | 14151 | 1657 | /*
* linux/arch/m68k/tools/amiga/dmesg.c -- Retrieve the kernel messages stored
* in Chip RAM with the kernel command
* line option `debug=mem'.
*
* © Copyright 1996 by Geert Uytterhoeven <geert@linux-m68k.org>
*
*
* Usage:
*
* dmesg
* dmesg <CHIPMEM_END>
*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of the Linux
* distribution for more details.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#define CHIPMEM_START 0x00000000
#define CHIPMEM_END 0x00200000 /* overridden by argv[1] */
#define SAVEKMSG_MAGIC1 0x53415645 /* 'SAVE' */
#define SAVEKMSG_MAGIC2 0x4B4D5347 /* 'KMSG' */
struct savekmsg {
u_long magic1; /* SAVEKMSG_MAGIC1 */
u_long magic2; /* SAVEKMSG_MAGIC2 */
u_long magicptr; /* address of magic1 */
u_long size;
char data[0];
};
int main(int argc, char *argv[])
{
u_long start = CHIPMEM_START, end = CHIPMEM_END, p;
int found = 0;
struct savekmsg *m = NULL;
if (argc >= 2)
end = strtoul(argv[1], NULL, 0);
printf("Searching for SAVEKMSG magic...\n");
for (p = start; p <= end-sizeof(struct savekmsg); p += 4) {
m = (struct savekmsg *)p;
if ((m->magic1 == SAVEKMSG_MAGIC1) && (m->magic2 == SAVEKMSG_MAGIC2) &&
(m->magicptr == p)) {
found = 1;
break;
}
}
if (!found)
printf("Not found\n");
else {
printf("Found %ld bytes at 0x%08lx\n", m->size, (u_long)&m->data);
puts(">>>>>>>>>>>>>>>>>>>>");
fflush(stdout);
write(1, &m->data, m->size);
fflush(stdout);
puts("<<<<<<<<<<<<<<<<<<<<");
}
return(0);
}
| gpl-2.0 |
LorDClockaN/LorDmodSaga | drivers/scsi/scsi_module.c | 14919 | 1688 | /*
* Copyright (C) 2003 Christoph Hellwig.
* Released under GPL v2.
*
* Support for old-style host templates.
*
* NOTE: Do not use this for new drivers ever.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <scsi/scsi_host.h>
static int __init init_this_scsi_driver(void)
{
struct scsi_host_template *sht = &driver_template;
struct Scsi_Host *shost;
struct list_head *l;
int error;
if (!sht->release) {
printk(KERN_ERR
"scsi HBA driver %s didn't set a release method.\n",
sht->name);
return -EINVAL;
}
sht->module = THIS_MODULE;
INIT_LIST_HEAD(&sht->legacy_hosts);
sht->detect(sht);
if (list_empty(&sht->legacy_hosts))
return -ENODEV;
list_for_each_entry(shost, &sht->legacy_hosts, sht_legacy_list) {
error = scsi_add_host(shost, NULL);
if (error)
goto fail;
scsi_scan_host(shost);
}
return 0;
fail:
l = &shost->sht_legacy_list;
while ((l = l->prev) != &sht->legacy_hosts)
scsi_remove_host(list_entry(l, struct Scsi_Host, sht_legacy_list));
return error;
}
static void __exit exit_this_scsi_driver(void)
{
struct scsi_host_template *sht = &driver_template;
struct Scsi_Host *shost, *s;
list_for_each_entry(shost, &sht->legacy_hosts, sht_legacy_list)
scsi_remove_host(shost);
list_for_each_entry_safe(shost, s, &sht->legacy_hosts, sht_legacy_list)
sht->release(shost);
if (list_empty(&sht->legacy_hosts))
return;
printk(KERN_WARNING "%s did not call scsi_unregister\n", sht->name);
dump_stack();
list_for_each_entry_safe(shost, s, &sht->legacy_hosts, sht_legacy_list)
scsi_unregister(shost);
}
module_init(init_this_scsi_driver);
module_exit(exit_this_scsi_driver);
| gpl-2.0 |
zarboz/android_kernel_dlxpul | arch/arm/mach-msm/board-monarudo-wifi.c | 72 | 8123 | /* linux/arch/arm/mach-msm/board-monarudo-wifi.c
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <asm/mach-types.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <linux/skbuff.h>
#include <linux/wifi_tiwlan.h>
#include <mach/msm_bus_board.h>
#include "board-monarudo.h"
#include "board-monarudo-wifi.h"
int monarudo_wifi_power(int on);
int monarudo_wifi_reset(int on);
int monarudo_wifi_set_carddetect(int on);
int monarudo_wifi_get_mac_addr(unsigned char *buf);
#define PREALLOC_WLAN_NUMBER_OF_SECTIONS 4
#define PREALLOC_WLAN_NUMBER_OF_BUFFERS 160
#define PREALLOC_WLAN_SECTION_HEADER 24
#define WLAN_SECTION_SIZE_0 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 128)
#define WLAN_SECTION_SIZE_1 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 128)
#define WLAN_SECTION_SIZE_2 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 512)
#define WLAN_SECTION_SIZE_3 (PREALLOC_WLAN_NUMBER_OF_BUFFERS * 1024)
#define WLAN_SKB_BUF_NUM 16
#define HW_OOB 1
static struct sk_buff *wlan_static_skb[WLAN_SKB_BUF_NUM];
typedef struct wifi_mem_prealloc_struct {
void *mem_ptr;
unsigned long size;
} wifi_mem_prealloc_t;
static wifi_mem_prealloc_t wifi_mem_array[PREALLOC_WLAN_NUMBER_OF_SECTIONS] = {
{ NULL, (WLAN_SECTION_SIZE_0 + PREALLOC_WLAN_SECTION_HEADER) },
{ NULL, (WLAN_SECTION_SIZE_1 + PREALLOC_WLAN_SECTION_HEADER) },
{ NULL, (WLAN_SECTION_SIZE_2 + PREALLOC_WLAN_SECTION_HEADER) },
{ NULL, (WLAN_SECTION_SIZE_3 + PREALLOC_WLAN_SECTION_HEADER) }
};
static void *monarudo_wifi_mem_prealloc(int section, unsigned long size)
{
if (section == PREALLOC_WLAN_NUMBER_OF_SECTIONS)
return wlan_static_skb;
if ((section < 0) || (section > PREALLOC_WLAN_NUMBER_OF_SECTIONS))
return NULL;
if (wifi_mem_array[section].size < size)
return NULL;
return wifi_mem_array[section].mem_ptr;
}
int __init monarudo_init_wifi_mem(void)
{
int i;
for (i = 0; (i < WLAN_SKB_BUF_NUM); i++) {
if (i < (WLAN_SKB_BUF_NUM/2))
wlan_static_skb[i] = dev_alloc_skb(PAGE_SIZE*2);
else
wlan_static_skb[i] = dev_alloc_skb(PAGE_SIZE*4);
}
for (i = 0; (i < PREALLOC_WLAN_NUMBER_OF_SECTIONS); i++) {
wifi_mem_array[i].mem_ptr = kmalloc(wifi_mem_array[i].size,
GFP_KERNEL);
if (wifi_mem_array[i].mem_ptr == NULL)
return -ENOMEM;
}
return 0;
}
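/*
* Sketch of how a WLAN driver is expected to consume the prealloc hook
* above (hypothetical caller, for illustration): request an entire
* section and fall back to a regular allocation when the hook returns
* NULL (section too small or index out of range).
*/
#if 0
static void *wlan_get_mem(int section, unsigned long size)
{
void *p = monarudo_wifi_mem_prealloc(section, size);
if (!p)
p = kmalloc(size, GFP_KERNEL); /* fallback path */
return p;
}
#endif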
static struct resource monarudo_wifi_resources[] = {
[0] = {
.name = "bcmdhd_wlan_irq",
.start = PM8921_GPIO_IRQ(PM8921_IRQ_BASE, WL_HOST_WAKE_XC),
.end = PM8921_GPIO_IRQ(PM8921_IRQ_BASE, WL_HOST_WAKE_XC),
#ifdef HW_OOB
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL | IORESOURCE_IRQ_SHAREABLE,
#else
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
#endif
},
};
/* Bandwidth requests (zero) if no vote placed */
static struct msm_bus_vectors wlan_init_vectors[] = {
{
.src = MSM_BUS_MASTER_SPS,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 0,
.ib = 0,
},
};
/* Bus bandwidth requests in Bytes/sec */
static struct msm_bus_vectors wlan_max_vectors[] = {
{
.src = MSM_BUS_MASTER_SPS,
.dst = MSM_BUS_SLAVE_EBI_CH0,
.ab = 60000000, /* At least 480Mbps on bus. */
.ib = 960000000, /* MAX bursts rate */
},
};
static struct msm_bus_paths wlan_bus_scale_usecases[] = {
{
ARRAY_SIZE(wlan_init_vectors),
wlan_init_vectors,
},
{
ARRAY_SIZE(wlan_max_vectors),
wlan_max_vectors,
},
};
static struct msm_bus_scale_pdata wlan_bus_scale_pdata = {
wlan_bus_scale_usecases,
ARRAY_SIZE(wlan_bus_scale_usecases),
.name = "wlan",
};
static struct wifi_platform_data monarudo_wifi_control = {
.set_power = monarudo_wifi_power,
.set_reset = monarudo_wifi_reset,
.set_carddetect = monarudo_wifi_set_carddetect,
.mem_prealloc = monarudo_wifi_mem_prealloc,
.get_mac_addr = monarudo_wifi_get_mac_addr,
.bus_scale_table = &wlan_bus_scale_pdata,
};
static struct platform_device monarudo_wifi_device = {
.name = "bcmdhd_wlan",
.id = 1,
.num_resources = ARRAY_SIZE(monarudo_wifi_resources),
.resource = monarudo_wifi_resources,
.dev = {
.platform_data = &monarudo_wifi_control,
},
};
static unsigned monarudo_wifi_update_nvs(char *str)
{
#define NVS_LEN_OFFSET 0x0C
#define NVS_DATA_OFFSET 0x40
unsigned char *ptr;
unsigned len;
if (!str)
return -EINVAL;
ptr = get_wifi_nvs_ram();
/* Size in format LE assumed */
memcpy(&len, ptr + NVS_LEN_OFFSET, sizeof(len));
/* the last byte in NVRAM is 0, trim it */
if (ptr[NVS_DATA_OFFSET + len - 1] == 0)
len -= 1;
if (ptr[NVS_DATA_OFFSET + len - 1] != '\n') {
len += 1;
ptr[NVS_DATA_OFFSET + len - 1] = '\n';
}
strcpy(ptr + NVS_DATA_OFFSET + len, str);
len += strlen(str);
memcpy(ptr + NVS_LEN_OFFSET, &len, sizeof(len));
return 0;
}
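/*
* For illustration, the NVS area these helpers manipulate is assumed to
* contain a little-endian length word at NVS_LEN_OFFSET followed by
* newline-terminated "key=value" text at NVS_DATA_OFFSET, e.g.
* (hypothetical contents):
*
* macaddr=00:11:22:33:44:55
* sd_oobonly=1
* btc_params80=0
*/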
#ifdef HW_OOB
static unsigned strip_nvs_param(char *param)
{
unsigned char *nvs_data;
unsigned param_len;
int start_idx, end_idx;
unsigned char *ptr;
unsigned len;
if (!param)
return -EINVAL;
ptr = get_wifi_nvs_ram();
/* Size in format LE assumed */
memcpy(&len, ptr + NVS_LEN_OFFSET, sizeof(len));
/* the last byte in NVRAM is 0, trim it */
if (ptr[NVS_DATA_OFFSET + len - 1] == 0)
len -= 1;
nvs_data = ptr + NVS_DATA_OFFSET;
param_len = strlen(param);
/* search param */
for (start_idx = 0; start_idx < len - param_len; start_idx++) {
if (memcmp(&nvs_data[start_idx], param, param_len) == 0)
break;
}
end_idx = 0;
if (start_idx < len - param_len) {
/* search end-of-line */
for (end_idx = start_idx + param_len; end_idx < len; end_idx++) {
if (nvs_data[end_idx] == '\n' || nvs_data[end_idx] == 0)
break;
}
}
if (start_idx < end_idx) {
/* move the remain data forward */
for (; end_idx + 1 < len; start_idx++, end_idx++)
nvs_data[start_idx] = nvs_data[end_idx+1];
len = len - (end_idx - start_idx + 1);
memcpy(ptr + NVS_LEN_OFFSET, &len, sizeof(len));
}
return 0;
}
#endif
#define WIFI_MAC_PARAM_STR "macaddr="
#define WIFI_MAX_MAC_LEN 17 /* XX:XX:XX:XX:XX:XX */
static uint
get_mac_from_wifi_nvs_ram(char *buf, unsigned int buf_len)
{
unsigned char *nvs_ptr;
unsigned char *mac_ptr;
uint len = 0;
if (!buf || !buf_len)
return 0;
nvs_ptr = get_wifi_nvs_ram();
if (nvs_ptr)
nvs_ptr += NVS_DATA_OFFSET;
mac_ptr = strstr(nvs_ptr, WIFI_MAC_PARAM_STR);
if (mac_ptr) {
mac_ptr += strlen(WIFI_MAC_PARAM_STR);
/* skip leading space */
while (mac_ptr[0] == ' ')
mac_ptr++;
/* locate end-of-line */
len = 0;
while (mac_ptr[len] != '\r' && mac_ptr[len] != '\n' &&
mac_ptr[len] != '\0') {
len++;
}
if (len > buf_len)
len = buf_len;
memcpy(buf, mac_ptr, len);
}
return len;
}
#define ETHER_ADDR_LEN 6
int monarudo_wifi_get_mac_addr(unsigned char *buf)
{
static u8 ether_mac_addr[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0xFF};
char mac[WIFI_MAX_MAC_LEN];
unsigned mac_len;
unsigned int macpattern[ETHER_ADDR_LEN];
int i;
mac_len = get_mac_from_wifi_nvs_ram(mac, WIFI_MAX_MAC_LEN);
if (mac_len > 0) {
/* Mac address to pattern */
sscanf(mac, "%02x:%02x:%02x:%02x:%02x:%02x",
&macpattern[0], &macpattern[1], &macpattern[2],
&macpattern[3], &macpattern[4], &macpattern[5]
);
for (i = 0; i < ETHER_ADDR_LEN; i++)
ether_mac_addr[i] = (u8)macpattern[i];
}
memcpy(buf, ether_mac_addr, sizeof(ether_mac_addr));
printk(KERN_INFO"monarudo_wifi_get_mac_addr = %02x %02x %02x %02x %02x %02x \n",
ether_mac_addr[0], ether_mac_addr[1], ether_mac_addr[2], ether_mac_addr[3], ether_mac_addr[4], ether_mac_addr[5]);
return 0;
}
int __init monarudo_wifi_init(void)
{
int ret;
printk(KERN_INFO "%s: start\n", __func__);
#ifdef HW_OOB
strip_nvs_param("sd_oobonly");
#else
monarudo_wifi_update_nvs("sd_oobonly=1\n");
#endif
monarudo_wifi_update_nvs("btc_params80=0\n");
monarudo_wifi_update_nvs("btc_params6=30\n");
monarudo_init_wifi_mem();
ret = platform_device_register(&monarudo_wifi_device);
return ret;
}
| gpl-2.0 |
jameskdev/lge-kernel-d1l_kr | drivers/usb/gadget/f_rmnet_sdio.c | 328 | 40979 | /*
* f_rmnet_sdio.c -- RmNet SDIO function driver
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 Nokia Corporation
* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/usb/cdc.h>
#include <linux/usb/composite.h>
#include <linux/usb/ch9.h>
#include <linux/termios.h>
#include <linux/debugfs.h>
#include <mach/sdio_cmux.h>
#include <mach/sdio_dmux.h>
#ifdef CONFIG_RMNET_SDIO_CTL_CHANNEL
static uint32_t rmnet_sdio_ctl_ch = CONFIG_RMNET_SDIO_CTL_CHANNEL;
#else
static uint32_t rmnet_sdio_ctl_ch;
#endif
module_param(rmnet_sdio_ctl_ch, uint, S_IRUGO);
MODULE_PARM_DESC(rmnet_sdio_ctl_ch, "RmNet control SDIO channel ID");
#ifdef CONFIG_RMNET_SDIO_DATA_CHANNEL
static uint32_t rmnet_sdio_data_ch = CONFIG_RMNET_SDIO_DATA_CHANNEL;
#else
static uint32_t rmnet_sdio_data_ch;
#endif
module_param(rmnet_sdio_data_ch, uint, S_IRUGO);
MODULE_PARM_DESC(rmnet_sdio_data_ch, "RmNet data SDIO channel ID");
#define ACM_CTRL_DTR (1 << 0)
#define SDIO_MUX_HDR 8
#define RMNET_SDIO_NOTIFY_INTERVAL 5
#define RMNET_SDIO_MAX_NFY_SZE sizeof(struct usb_cdc_notification)
#define RMNET_SDIO_RX_REQ_MAX 16
#define RMNET_SDIO_RX_REQ_SIZE 2048
#define RMNET_SDIO_TX_REQ_MAX 200
#define TX_PKT_DROP_THRESHOLD 1000
#define RX_PKT_FLOW_CTRL_EN_THRESHOLD 1000
#define RX_PKT_FLOW_CTRL_DISABLE 500
unsigned int sdio_tx_pkt_drop_thld = TX_PKT_DROP_THRESHOLD;
module_param(sdio_tx_pkt_drop_thld, uint, S_IRUGO | S_IWUSR);
unsigned int sdio_rx_fctrl_en_thld = RX_PKT_FLOW_CTRL_EN_THRESHOLD;
module_param(sdio_rx_fctrl_en_thld, uint, S_IRUGO | S_IWUSR);
unsigned int sdio_rx_fctrl_dis_thld = RX_PKT_FLOW_CTRL_DISABLE;
module_param(sdio_rx_fctrl_dis_thld, uint, S_IRUGO | S_IWUSR);
/* QMI requests & responses buffer*/
struct rmnet_sdio_qmi_buf {
void *buf;
int len;
struct list_head list;
};
struct rmnet_sdio_dev {
struct usb_function function;
struct usb_composite_dev *cdev;
struct usb_ep *epout;
struct usb_ep *epin;
struct usb_ep *epnotify;
struct usb_request *notify_req;
u8 ifc_id;
/* QMI lists */
struct list_head qmi_req_q;
unsigned int qreq_q_len;
struct list_head qmi_resp_q;
unsigned int qresp_q_len;
/* Tx/Rx lists */
struct list_head tx_idle;
unsigned int tx_idle_len;
struct sk_buff_head tx_skb_queue;
struct list_head rx_idle;
unsigned int rx_idle_len;
struct sk_buff_head rx_skb_queue;
spinlock_t lock;
atomic_t online;
atomic_t notify_count;
struct workqueue_struct *wq;
struct work_struct disconnect_work;
struct work_struct ctl_rx_work;
struct work_struct data_rx_work;
struct delayed_work sdio_open_work;
struct work_struct sdio_close_work;
#define RMNET_SDIO_CH_OPEN 1
unsigned long data_ch_status;
unsigned long ctrl_ch_status;
unsigned int dpkts_pending_atdmux;
int cbits_to_modem;
struct work_struct set_modem_ctl_bits_work;
/* pkt logging dpkt - data pkt; cpkt - control pkt*/
struct dentry *dent;
unsigned long dpkt_tolaptop;
unsigned long dpkt_tomodem;
unsigned long tx_drp_cnt;
unsigned long cpkt_tolaptop;
unsigned long cpkt_tomodem;
};
static struct usb_interface_descriptor rmnet_sdio_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
.bNumEndpoints = 3,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceProtocol = USB_CLASS_VENDOR_SPEC,
/* .iInterface = DYNAMIC */
};
/* Full speed support */
static struct usb_endpoint_descriptor rmnet_sdio_fs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = __constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
.bInterval = 1 << RMNET_SDIO_NOTIFY_INTERVAL,
};
static struct usb_endpoint_descriptor rmnet_sdio_fs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = __constant_cpu_to_le16(64),
};
static struct usb_endpoint_descriptor rmnet_sdio_fs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = __constant_cpu_to_le16(64),
};
static struct usb_descriptor_header *rmnet_sdio_fs_function[] = {
(struct usb_descriptor_header *) &rmnet_sdio_interface_desc,
(struct usb_descriptor_header *) &rmnet_sdio_fs_notify_desc,
(struct usb_descriptor_header *) &rmnet_sdio_fs_in_desc,
(struct usb_descriptor_header *) &rmnet_sdio_fs_out_desc,
NULL,
};
/* High speed support */
static struct usb_endpoint_descriptor rmnet_sdio_hs_notify_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_INT,
.wMaxPacketSize = __constant_cpu_to_le16(RMNET_SDIO_MAX_NFY_SZE),
.bInterval = RMNET_SDIO_NOTIFY_INTERVAL + 4,
};
static struct usb_endpoint_descriptor rmnet_sdio_hs_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = __constant_cpu_to_le16(512),
};
static struct usb_endpoint_descriptor rmnet_sdio_hs_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = __constant_cpu_to_le16(512),
};
static struct usb_descriptor_header *rmnet_sdio_hs_function[] = {
(struct usb_descriptor_header *) &rmnet_sdio_interface_desc,
(struct usb_descriptor_header *) &rmnet_sdio_hs_notify_desc,
(struct usb_descriptor_header *) &rmnet_sdio_hs_in_desc,
(struct usb_descriptor_header *) &rmnet_sdio_hs_out_desc,
NULL,
};
/* String descriptors */
static struct usb_string rmnet_sdio_string_defs[] = {
[0].s = "QMI RmNet",
{ } /* end of list */
};
static struct usb_gadget_strings rmnet_sdio_string_table = {
.language = 0x0409, /* en-us */
.strings = rmnet_sdio_string_defs,
};
static struct usb_gadget_strings *rmnet_sdio_strings[] = {
&rmnet_sdio_string_table,
NULL,
};
static struct rmnet_sdio_qmi_buf *
rmnet_sdio_alloc_qmi(unsigned len, gfp_t kmalloc_flags)
{
struct rmnet_sdio_qmi_buf *qmi;
qmi = kmalloc(sizeof(struct rmnet_sdio_qmi_buf), kmalloc_flags);
if (qmi != NULL) {
qmi->buf = kmalloc(len, kmalloc_flags);
if (qmi->buf == NULL) {
kfree(qmi);
qmi = NULL;
}
}
return qmi ? qmi : ERR_PTR(-ENOMEM);
}
static void rmnet_sdio_free_qmi(struct rmnet_sdio_qmi_buf *qmi)
{
kfree(qmi->buf);
kfree(qmi);
}
/*
* Allocate a usb_request and its buffer. Returns a pointer to the
* usb_request or a pointer with an error code if there is an error.
*/
static struct usb_request *
rmnet_sdio_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
{
struct usb_request *req;
req = usb_ep_alloc_request(ep, kmalloc_flags);
if (len && req != NULL) {
req->length = len;
req->buf = kmalloc(len, kmalloc_flags);
if (req->buf == NULL) {
usb_ep_free_request(ep, req);
req = NULL;
}
}
return req ? req : ERR_PTR(-ENOMEM);
}
/*
* Free a usb_request and its buffer.
*/
static void rmnet_sdio_free_req(struct usb_ep *ep, struct usb_request *req)
{
kfree(req->buf);
usb_ep_free_request(ep, req);
}
static void rmnet_sdio_notify_complete(struct usb_ep *ep,
struct usb_request *req)
{
struct rmnet_sdio_dev *dev = req->context;
struct usb_composite_dev *cdev = dev->cdev;
int status = req->status;
switch (status) {
case -ECONNRESET:
case -ESHUTDOWN:
/* connection gone */
atomic_set(&dev->notify_count, 0);
break;
default:
ERROR(cdev, "rmnet notifyep error %d\n", status);
/* FALLTHROUGH */
case 0:
if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status))
return;
/* handle multiple pending QMI_RESPONSE_AVAILABLE
* notifications by resending until we're done
*/
if (atomic_dec_and_test(&dev->notify_count))
break;
status = usb_ep_queue(dev->epnotify, req, GFP_ATOMIC);
if (status) {
atomic_dec(&dev->notify_count);
ERROR(cdev, "rmnet notify ep enq error %d\n", status);
}
break;
}
}
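/*
* notify_count protocol: each queued QMI response bumps the count, only
* the first bump actually queues the interrupt-endpoint request, and the
* completion handler above keeps re-queueing it until the count drains
* to zero, so a single notification request services many responses.
*/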
static void rmnet_sdio_qmi_resp_available(struct rmnet_sdio_dev *dev)
{
struct usb_composite_dev *cdev = dev->cdev;
struct usb_cdc_notification *event;
int status;
unsigned long flags;
/* Response will be sent later */
if (atomic_inc_return(&dev->notify_count) != 1)
return;
spin_lock_irqsave(&dev->lock, flags);
if (!atomic_read(&dev->online)) {
spin_unlock_irqrestore(&dev->lock, flags);
return;
}
event = dev->notify_req->buf;
event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
| USB_RECIP_INTERFACE;
event->bNotificationType = USB_CDC_NOTIFY_RESPONSE_AVAILABLE;
event->wValue = cpu_to_le16(0);
event->wIndex = cpu_to_le16(dev->ifc_id);
event->wLength = cpu_to_le16(0);
spin_unlock_irqrestore(&dev->lock, flags);
status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_ATOMIC);
if (status < 0) {
if (atomic_read(&dev->online))
atomic_dec(&dev->notify_count);
ERROR(cdev, "rmnet notify ep enqueue error %d\n", status);
}
}
#define SDIO_MAX_CTRL_PKT_SIZE 4096
static void rmnet_sdio_ctl_receive_cb(void *data, int size, void *priv)
{
struct rmnet_sdio_dev *dev = priv;
struct usb_composite_dev *cdev = dev->cdev;
struct rmnet_sdio_qmi_buf *qmi_resp;
unsigned long flags;
if (!data) {
pr_info("%s: cmux_ch close event\n", __func__);
if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status) &&
test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) {
clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status);
clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status);
queue_work(dev->wq, &dev->sdio_close_work);
}
return;
}
if (!size || !test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status))
return;
if (size > SDIO_MAX_CTRL_PKT_SIZE) {
ERROR(cdev, "ctrl pkt size:%d exceeds max pkt size:%d\n",
size, SDIO_MAX_CTRL_PKT_SIZE);
return;
}
if (!atomic_read(&dev->online)) {
DBG(cdev, "USB disconnected\n");
return;
}
qmi_resp = rmnet_sdio_alloc_qmi(size, GFP_KERNEL);
if (IS_ERR(qmi_resp)) {
DBG(cdev, "unable to allocate memory for QMI resp\n");
return;
}
memcpy(qmi_resp->buf, data, size);
qmi_resp->len = size;
spin_lock_irqsave(&dev->lock, flags);
list_add_tail(&qmi_resp->list, &dev->qmi_resp_q);
dev->qresp_q_len++;
spin_unlock_irqrestore(&dev->lock, flags);
rmnet_sdio_qmi_resp_available(dev);
}
static void rmnet_sdio_ctl_write_done(void *data, int size, void *priv)
{
struct rmnet_sdio_dev *dev = priv;
struct usb_composite_dev *cdev = dev->cdev;
VDBG(cdev, "rmnet control write done = %d bytes\n", size);
}
static void rmnet_sdio_sts_callback(int id, void *priv)
{
struct rmnet_sdio_dev *dev = priv;
struct usb_composite_dev *cdev = dev->cdev;
DBG(cdev, "rmnet_sdio_sts_callback: id: %d\n", id);
}
static void rmnet_sdio_control_rx_work(struct work_struct *w)
{
struct rmnet_sdio_dev *dev = container_of(w, struct rmnet_sdio_dev,
ctl_rx_work);
struct usb_composite_dev *cdev = dev->cdev;
struct rmnet_sdio_qmi_buf *qmi_req;
unsigned long flags;
int ret;
while (1) {
spin_lock_irqsave(&dev->lock, flags);
if (list_empty(&dev->qmi_req_q))
goto unlock;
qmi_req = list_first_entry(&dev->qmi_req_q,
struct rmnet_sdio_qmi_buf, list);
list_del(&qmi_req->list);
dev->qreq_q_len--;
spin_unlock_irqrestore(&dev->lock, flags);
ret = sdio_cmux_write(rmnet_sdio_ctl_ch, qmi_req->buf,
qmi_req->len);
if (ret != qmi_req->len) {
ERROR(cdev, "rmnet control SDIO write failed\n");
return;
}
dev->cpkt_tomodem++;
/*
* cmux_write API copies the buffer and gives it to sdio_al.
* Hence freeing the memory before write is completed.
*/
rmnet_sdio_free_qmi(qmi_req);
}
unlock:
spin_unlock_irqrestore(&dev->lock, flags);
}
static void rmnet_sdio_response_complete(struct usb_ep *ep,
struct usb_request *req)
{
struct rmnet_sdio_dev *dev = req->context;
struct usb_composite_dev *cdev = dev->cdev;
switch (req->status) {
case -ECONNRESET:
case -ESHUTDOWN:
case 0:
return;
default:
INFO(cdev, "rmnet %s response error %d, %d/%d\n",
ep->name, req->status,
req->actual, req->length);
}
}
static void rmnet_sdio_command_complete(struct usb_ep *ep,
struct usb_request *req)
{
struct rmnet_sdio_dev *dev = req->context;
struct usb_composite_dev *cdev = dev->cdev;
struct rmnet_sdio_qmi_buf *qmi_req;
int len = req->actual;
if (req->status < 0) {
ERROR(cdev, "rmnet command error %d\n", req->status);
return;
}
/* discard the packet if sdio is not available */
if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status))
return;
qmi_req = rmnet_sdio_alloc_qmi(len, GFP_ATOMIC);
if (IS_ERR(qmi_req)) {
ERROR(cdev, "unable to allocate memory for QMI req\n");
return;
}
memcpy(qmi_req->buf, req->buf, len);
qmi_req->len = len;
spin_lock(&dev->lock);
list_add_tail(&qmi_req->list, &dev->qmi_req_q);
dev->qreq_q_len++;
spin_unlock(&dev->lock);
queue_work(dev->wq, &dev->ctl_rx_work);
}
static int
rmnet_sdio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev,
function);
struct usb_composite_dev *cdev = f->config->cdev;
struct usb_request *req = cdev->req;
int ret = -EOPNOTSUPP;
u16 w_index = le16_to_cpu(ctrl->wIndex);
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
struct rmnet_sdio_qmi_buf *resp;
if (!atomic_read(&dev->online))
return -ENOTCONN;
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_SEND_ENCAPSULATED_COMMAND:
ret = w_length;
req->complete = rmnet_sdio_command_complete;
req->context = dev;
break;
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_GET_ENCAPSULATED_RESPONSE:
if (w_value)
goto invalid;
else {
unsigned len;
spin_lock(&dev->lock);
if (list_empty(&dev->qmi_resp_q)) {
INFO(cdev, "qmi resp empty "
" req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
spin_unlock(&dev->lock);
goto invalid;
}
resp = list_first_entry(&dev->qmi_resp_q,
struct rmnet_sdio_qmi_buf, list);
list_del(&resp->list);
dev->qresp_q_len--;
spin_unlock(&dev->lock);
len = min_t(unsigned, w_length, resp->len);
memcpy(req->buf, resp->buf, len);
ret = len;
req->context = dev;
req->complete = rmnet_sdio_response_complete;
rmnet_sdio_free_qmi(resp);
/* check if it's the right place to add */
dev->cpkt_tolaptop++;
}
break;
case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
/* This is a workaround for RmNet and is borrowed from the
* CDC/ACM standard. The host driver will issue the above ACM
* standard request to the RmNet interface in the following
* scenario: Once the network adapter is disabled from device
* manager, the above request will be sent from the qcusbnet
* host driver, with DTR being '0'. Once network adapter is
* enabled from device manager (or during enumeration), the
* request will be sent with DTR being '1'.
*/
if (w_value & ACM_CTRL_DTR)
dev->cbits_to_modem |= TIOCM_DTR;
else
dev->cbits_to_modem &= ~TIOCM_DTR;
queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
ret = 0;
break;
default:
invalid:
DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
}
/* respond with data transfer or status phase? */
if (ret >= 0) {
VDBG(cdev, "rmnet req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
req->zero = (ret < w_length);
req->length = ret;
ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
if (ret < 0)
ERROR(cdev, "rmnet ep0 enqueue err %d\n", ret);
}
return ret;
}
static int
rmnet_sdio_rx_submit(struct rmnet_sdio_dev *dev, struct usb_request *req,
gfp_t gfp_flags)
{
struct sk_buff *skb;
int retval;
skb = alloc_skb(RMNET_SDIO_RX_REQ_SIZE + SDIO_MUX_HDR, gfp_flags);
if (skb == NULL)
return -ENOMEM;
skb_reserve(skb, SDIO_MUX_HDR);
req->buf = skb->data;
req->length = RMNET_SDIO_RX_REQ_SIZE;
req->context = skb;
retval = usb_ep_queue(dev->epout, req, gfp_flags);
if (retval)
dev_kfree_skb_any(skb);
return retval;
}
static void rmnet_sdio_start_rx(struct rmnet_sdio_dev *dev)
{
struct usb_composite_dev *cdev = dev->cdev;
int status;
struct usb_request *req;
unsigned long flags;
if (!atomic_read(&dev->online)) {
pr_err("%s: USB not connected\n", __func__);
return;
}
spin_lock_irqsave(&dev->lock, flags);
while (!list_empty(&dev->rx_idle)) {
req = list_first_entry(&dev->rx_idle, struct usb_request, list);
list_del(&req->list);
dev->rx_idle_len--;
spin_unlock_irqrestore(&dev->lock, flags);
status = rmnet_sdio_rx_submit(dev, req, GFP_ATOMIC);
spin_lock_irqsave(&dev->lock, flags);
if (status) {
ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
list_add_tail(&req->list, &dev->rx_idle);
dev->rx_idle_len++;
break;
}
}
spin_unlock_irqrestore(&dev->lock, flags);
}
static void rmnet_sdio_start_tx(struct rmnet_sdio_dev *dev)
{
unsigned long flags;
int status;
struct sk_buff *skb;
struct usb_request *req;
struct usb_composite_dev *cdev = dev->cdev;
if (!atomic_read(&dev->online))
return;
if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status))
return;
spin_lock_irqsave(&dev->lock, flags);
while (!list_empty(&dev->tx_idle)) {
skb = __skb_dequeue(&dev->tx_skb_queue);
if (!skb) {
spin_unlock_irqrestore(&dev->lock, flags);
return;
}
req = list_first_entry(&dev->tx_idle, struct usb_request, list);
req->context = skb;
req->buf = skb->data;
req->length = skb->len;
list_del(&req->list);
dev->tx_idle_len--;
spin_unlock(&dev->lock);
status = usb_ep_queue(dev->epin, req, GFP_ATOMIC);
spin_lock(&dev->lock);
if (status) {
/* USB still online, queue requests back */
if (atomic_read(&dev->online)) {
ERROR(cdev, "rmnet tx data enqueue err %d\n",
status);
list_add_tail(&req->list, &dev->tx_idle);
dev->tx_idle_len++;
__skb_queue_head(&dev->tx_skb_queue, skb);
} else {
req->buf = 0;
rmnet_sdio_free_req(dev->epin, req);
dev_kfree_skb_any(skb);
}
break;
}
dev->dpkt_tolaptop++;
}
spin_unlock_irqrestore(&dev->lock, flags);
}
static void rmnet_sdio_data_receive_cb(void *priv, struct sk_buff *skb)
{
struct rmnet_sdio_dev *dev = priv;
unsigned long flags;
/* SDIO mux sends NULL SKB when link state changes */
if (!skb) {
pr_info("%s: dmux_ch close event\n", __func__);
if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status) &&
test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) {
clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status);
clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status);
queue_work(dev->wq, &dev->sdio_close_work);
}
return;
}
if (!atomic_read(&dev->online)) {
dev_kfree_skb_any(skb);
return;
}
spin_lock_irqsave(&dev->lock, flags);
if (dev->tx_skb_queue.qlen > sdio_tx_pkt_drop_thld) {
if (printk_ratelimit())
pr_err("%s: tx pkt dropped: tx_drop_cnt:%lu\n",
__func__, dev->tx_drp_cnt);
dev->tx_drp_cnt++;
spin_unlock_irqrestore(&dev->lock, flags);
dev_kfree_skb_any(skb);
return;
}
__skb_queue_tail(&dev->tx_skb_queue, skb);
spin_unlock_irqrestore(&dev->lock, flags);
rmnet_sdio_start_tx(dev);
}
static void rmnet_sdio_data_write_done(void *priv, struct sk_buff *skb)
{
struct rmnet_sdio_dev *dev = priv;
/* SDIO mux sends NULL SKB when link state changes */
if (!skb) {
pr_info("%s: dmux_ch open event\n", __func__);
queue_delayed_work(dev->wq, &dev->sdio_open_work, 0);
return;
}
dev_kfree_skb_any(skb);
/* this function is called by the sdio mux
* from within spin_lock_irqsave context
*/
spin_lock(&dev->lock);
dev->dpkts_pending_atdmux--;
if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) ||
dev->dpkts_pending_atdmux >= sdio_rx_fctrl_dis_thld) {
spin_unlock(&dev->lock);
return;
}
spin_unlock(&dev->lock);
rmnet_sdio_start_rx(dev);
}
static void rmnet_sdio_data_rx_work(struct work_struct *w)
{
struct rmnet_sdio_dev *dev = container_of(w, struct rmnet_sdio_dev,
data_rx_work);
struct usb_composite_dev *cdev = dev->cdev;
struct sk_buff *skb;
int ret;
unsigned long flags;
if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) {
pr_info("%s: sdio data ch not open\n", __func__);
return;
}
spin_lock_irqsave(&dev->lock, flags);
while ((skb = __skb_dequeue(&dev->rx_skb_queue))) {
spin_unlock_irqrestore(&dev->lock, flags);
ret = msm_sdio_dmux_write(rmnet_sdio_data_ch, skb);
spin_lock_irqsave(&dev->lock, flags);
if (ret < 0) {
ERROR(cdev, "rmnet SDIO data write failed\n");
dev_kfree_skb_any(skb);
break;
} else {
dev->dpkt_tomodem++;
dev->dpkts_pending_atdmux++;
}
}
spin_unlock_irqrestore(&dev->lock, flags);
}
static void rmnet_sdio_complete_epout(struct usb_ep *ep,
struct usb_request *req)
{
struct rmnet_sdio_dev *dev = ep->driver_data;
struct usb_composite_dev *cdev = dev->cdev;
struct sk_buff *skb = req->context;
int status = req->status;
int queue = 0;
switch (status) {
case 0:
/* successful completion */
skb_put(skb, req->actual);
queue = 1;
break;
case -ECONNRESET:
case -ESHUTDOWN:
/* connection gone */
dev_kfree_skb_any(skb);
req->buf = 0;
rmnet_sdio_free_req(ep, req);
return;
default:
/* unexpected failure */
ERROR(cdev, "RMNET %s response error %d, %d/%d\n",
ep->name, status,
req->actual, req->length);
dev_kfree_skb_any(skb);
break;
}
if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) {
pr_info("%s: sdio data ch not open\n", __func__);
dev_kfree_skb_any(skb);
req->buf = 0;
rmnet_sdio_free_req(ep, req);
return;
}
spin_lock(&dev->lock);
if (queue) {
__skb_queue_tail(&dev->rx_skb_queue, skb);
queue_work(dev->wq, &dev->data_rx_work);
}
if (dev->dpkts_pending_atdmux >= sdio_rx_fctrl_en_thld) {
list_add_tail(&req->list, &dev->rx_idle);
dev->rx_idle_len++;
spin_unlock(&dev->lock);
return;
}
spin_unlock(&dev->lock);
status = rmnet_sdio_rx_submit(dev, req, GFP_ATOMIC);
if (status) {
ERROR(cdev, "rmnet data rx enqueue err %d\n", status);
list_add_tail(&req->list, &dev->rx_idle);
dev->rx_idle_len++;
}
}
static void rmnet_sdio_complete_epin(struct usb_ep *ep, struct usb_request *req)
{
struct rmnet_sdio_dev *dev = ep->driver_data;
struct sk_buff *skb = req->context;
struct usb_composite_dev *cdev = dev->cdev;
int status = req->status;
switch (status) {
case 0:
/* successful completion */
case -ECONNRESET:
case -ESHUTDOWN:
/* connection gone */
break;
default:
ERROR(cdev, "rmnet data tx ep error %d\n", status);
break;
}
spin_lock(&dev->lock);
list_add_tail(&req->list, &dev->tx_idle);
dev->tx_idle_len++;
spin_unlock(&dev->lock);
dev_kfree_skb_any(skb);
rmnet_sdio_start_tx(dev);
}
static void rmnet_sdio_free_buf(struct rmnet_sdio_dev *dev)
{
struct rmnet_sdio_qmi_buf *qmi;
struct usb_request *req;
struct list_head *act, *tmp;
struct sk_buff *skb;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
dev->dpkt_tolaptop = 0;
dev->dpkt_tomodem = 0;
dev->cpkt_tolaptop = 0;
dev->cpkt_tomodem = 0;
dev->dpkts_pending_atdmux = 0;
dev->tx_drp_cnt = 0;
/* free all usb requests in tx pool */
list_for_each_safe(act, tmp, &dev->tx_idle) {
req = list_entry(act, struct usb_request, list);
list_del(&req->list);
dev->tx_idle_len--;
req->buf = NULL;
rmnet_sdio_free_req(dev->epout, req);
}
/* free all usb requests in rx pool */
list_for_each_safe(act, tmp, &dev->rx_idle) {
req = list_entry(act, struct usb_request, list);
list_del(&req->list);
dev->rx_idle_len--;
req->buf = NULL;
rmnet_sdio_free_req(dev->epin, req);
}
/* free all buffers in qmi request pool */
list_for_each_safe(act, tmp, &dev->qmi_req_q) {
qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list);
list_del(&qmi->list);
dev->qreq_q_len--;
rmnet_sdio_free_qmi(qmi);
}
/* free all buffers in qmi response pool */
list_for_each_safe(act, tmp, &dev->qmi_resp_q) {
qmi = list_entry(act, struct rmnet_sdio_qmi_buf, list);
list_del(&qmi->list);
dev->qresp_q_len--;
rmnet_sdio_free_qmi(qmi);
}
while ((skb = __skb_dequeue(&dev->tx_skb_queue)))
dev_kfree_skb_any(skb);
while ((skb = __skb_dequeue(&dev->rx_skb_queue)))
dev_kfree_skb_any(skb);
rmnet_sdio_free_req(dev->epnotify, dev->notify_req);
spin_unlock_irqrestore(&dev->lock, flags);
}
static void rmnet_sdio_set_modem_cbits_w(struct work_struct *w)
{
struct rmnet_sdio_dev *dev;
dev = container_of(w, struct rmnet_sdio_dev, set_modem_ctl_bits_work);
if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status))
return;
pr_debug("%s: cbits_to_modem:%d\n",
__func__, dev->cbits_to_modem);
sdio_cmux_tiocmset(rmnet_sdio_ctl_ch,
dev->cbits_to_modem,
~dev->cbits_to_modem);
}
static void rmnet_sdio_disconnect_work(struct work_struct *w)
{
/* REVISIT: Push all the data to sdio if anything is pending */
}
static void rmnet_sdio_suspend(struct usb_function *f)
{
struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev,
function);
if (!atomic_read(&dev->online))
return;
/* This is a workaround for a Windows host bug during suspend.
* Windows 7/XP hosts are supposed to drop DTR when the host is
* suspended. Since that is not being done, we explicitly drop DTR
* in the function driver's suspend.
*/
dev->cbits_to_modem &= ~TIOCM_DTR;
queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
}
static void rmnet_sdio_disable(struct usb_function *f)
{
struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev,
function);
if (!atomic_read(&dev->online))
return;
usb_ep_disable(dev->epnotify);
usb_ep_disable(dev->epout);
usb_ep_disable(dev->epin);
atomic_set(&dev->online, 0);
atomic_set(&dev->notify_count, 0);
rmnet_sdio_free_buf(dev);
/* cleanup work */
queue_work(dev->wq, &dev->disconnect_work);
dev->cbits_to_modem = 0;
queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
}
static void rmnet_close_sdio_work(struct work_struct *w)
{
struct rmnet_sdio_dev *dev;
unsigned long flags;
struct usb_cdc_notification *event;
int status;
struct rmnet_sdio_qmi_buf *qmi;
struct usb_request *req;
struct sk_buff *skb;
pr_debug("%s:\n", __func__);
dev = container_of(w, struct rmnet_sdio_dev, sdio_close_work);
if (!atomic_read(&dev->online))
return;
usb_ep_fifo_flush(dev->epnotify);
spin_lock_irqsave(&dev->lock, flags);
event = dev->notify_req->buf;
event->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
| USB_RECIP_INTERFACE;
event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
event->wValue = cpu_to_le16(0);
event->wIndex = cpu_to_le16(dev->ifc_id);
event->wLength = cpu_to_le16(0);
spin_unlock_irqrestore(&dev->lock, flags);
status = usb_ep_queue(dev->epnotify, dev->notify_req, GFP_KERNEL);
if (status < 0) {
if (!atomic_read(&dev->online))
return;
pr_err("%s: rmnet notify ep enqueue error %d\n",
__func__, status);
}
usb_ep_fifo_flush(dev->epout);
usb_ep_fifo_flush(dev->epin);
cancel_work_sync(&dev->data_rx_work);
spin_lock_irqsave(&dev->lock, flags);
if (!atomic_read(&dev->online)) {
spin_unlock_irqrestore(&dev->lock, flags);
return;
}
/* free all usb requests in tx pool */
while (!list_empty(&dev->tx_idle)) {
req = list_first_entry(&dev->tx_idle, struct usb_request, list);
list_del(&req->list);
dev->tx_idle_len--;
req->buf = NULL;
rmnet_sdio_free_req(dev->epout, req);
}
/* free all usb requests in rx pool */
while (!list_empty(&dev->rx_idle)) {
req = list_first_entry(&dev->rx_idle, struct usb_request, list);
list_del(&req->list);
dev->rx_idle_len--;
req->buf = NULL;
rmnet_sdio_free_req(dev->epin, req);
}
/* free all buffers in qmi request pool */
while (!list_empty(&dev->qmi_req_q)) {
qmi = list_first_entry(&dev->qmi_req_q,
struct rmnet_sdio_qmi_buf, list);
list_del(&qmi->list);
dev->qreq_q_len--;
rmnet_sdio_free_qmi(qmi);
}
/* free all buffers in qmi response pool */
while (!list_empty(&dev->qmi_resp_q)) {
qmi = list_first_entry(&dev->qmi_resp_q,
struct rmnet_sdio_qmi_buf, list);
list_del(&qmi->list);
dev->qresp_q_len--;
rmnet_sdio_free_qmi(qmi);
}
atomic_set(&dev->notify_count, 0);
pr_info("%s: setting notify count to zero\n", __func__);
while ((skb = __skb_dequeue(&dev->tx_skb_queue)))
dev_kfree_skb_any(skb);
while ((skb = __skb_dequeue(&dev->rx_skb_queue)))
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&dev->lock, flags);
}
static int rmnet_sdio_start_io(struct rmnet_sdio_dev *dev)
{
struct usb_request *req;
int ret, i;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
if (!atomic_read(&dev->online)) {
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) ||
!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) {
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
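/* Pre-allocate the RX and TX request pools. GFP_ATOMIC is used
 * because dev->lock is held across the allocations.
 */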
for (i = 0; i < RMNET_SDIO_RX_REQ_MAX; i++) {
req = rmnet_sdio_alloc_req(dev->epout, 0, GFP_ATOMIC);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
spin_unlock_irqrestore(&dev->lock, flags);
goto free_buf;
}
req->complete = rmnet_sdio_complete_epout;
list_add_tail(&req->list, &dev->rx_idle);
dev->rx_idle_len++;
}
for (i = 0; i < RMNET_SDIO_TX_REQ_MAX; i++) {
req = rmnet_sdio_alloc_req(dev->epin, 0, GFP_ATOMIC);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
spin_unlock_irqrestore(&dev->lock, flags);
goto free_buf;
}
req->complete = rmnet_sdio_complete_epin;
list_add_tail(&req->list, &dev->tx_idle);
dev->tx_idle_len++;
}
spin_unlock_irqrestore(&dev->lock, flags);
/* Queue Rx data requests */
rmnet_sdio_start_rx(dev);
return 0;
free_buf:
rmnet_sdio_free_buf(dev);
dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
return ret;
}
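/* Poll the SDIO channels for up to 90 attempts, 2 seconds apart
 * (roughly 3 minutes), before giving up.
 */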
#define RMNET_SDIO_OPEN_RETRY_DELAY msecs_to_jiffies(2000)
#define RMNET_SDIO_OPEN_MAX_RETRY 90
static void rmnet_open_sdio_work(struct work_struct *w)
{
struct rmnet_sdio_dev *dev =
container_of(w, struct rmnet_sdio_dev,
sdio_open_work.work);
struct usb_composite_dev *cdev = dev->cdev;
int ret;
static int retry_cnt;
if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) {
/* Control channel for QMI messages */
ret = sdio_cmux_open(rmnet_sdio_ctl_ch,
rmnet_sdio_ctl_receive_cb,
rmnet_sdio_ctl_write_done,
rmnet_sdio_sts_callback, dev);
if (!ret)
set_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status);
}
if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) {
/* Data channel for network packets */
ret = msm_sdio_dmux_open(rmnet_sdio_data_ch, dev,
rmnet_sdio_data_receive_cb,
rmnet_sdio_data_write_done);
if (!ret)
set_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status);
}
if (test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status) &&
test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) {
rmnet_sdio_start_io(dev);
/* if usb cable is connected, update DTR status to modem */
if (atomic_read(&dev->online))
queue_work(dev->wq, &dev->set_modem_ctl_bits_work);
pr_info("%s: usb rmnet sdio channels are open retry_cnt:%d\n",
__func__, retry_cnt);
retry_cnt = 0;
return;
}
retry_cnt++;
pr_debug("%s: usb rmnet sdio open retry_cnt:%d\n",
__func__, retry_cnt);
if (retry_cnt > RMNET_SDIO_OPEN_MAX_RETRY) {
if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status))
ERROR(cdev, "Unable to open control SDIO channel\n");
if (!test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status))
ERROR(cdev, "Unable to open DATA SDIO channel\n");
} else {
queue_delayed_work(dev->wq, &dev->sdio_open_work,
RMNET_SDIO_OPEN_RETRY_DELAY);
}
}
static int rmnet_sdio_set_alt(struct usb_function *f,
unsigned intf, unsigned alt)
{
struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev,
function);
struct usb_composite_dev *cdev = dev->cdev;
int ret = 0;
dev->epin->driver_data = dev;
usb_ep_enable(dev->epin, ep_choose(cdev->gadget,
&rmnet_sdio_hs_in_desc,
&rmnet_sdio_fs_in_desc));
dev->epout->driver_data = dev;
usb_ep_enable(dev->epout, ep_choose(cdev->gadget,
&rmnet_sdio_hs_out_desc,
&rmnet_sdio_fs_out_desc));
usb_ep_enable(dev->epnotify, ep_choose(cdev->gadget,
&rmnet_sdio_hs_notify_desc,
&rmnet_sdio_fs_notify_desc));
/* allocate notification */
dev->notify_req = rmnet_sdio_alloc_req(dev->epnotify,
RMNET_SDIO_MAX_NFY_SZE, GFP_ATOMIC);
if (IS_ERR(dev->notify_req)) {
ret = PTR_ERR(dev->notify_req);
pr_err("%s: unable to allocate memory for notify ep\n",
__func__);
return ret;
}
dev->notify_req->complete = rmnet_sdio_notify_complete;
dev->notify_req->context = dev;
dev->notify_req->length = RMNET_SDIO_MAX_NFY_SZE;
atomic_set(&dev->online, 1);
ret = rmnet_sdio_start_io(dev);
return ret;
}
static int rmnet_sdio_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev,
function);
int id;
struct usb_ep *ep;
dev->cdev = cdev;
/* allocate interface ID */
id = usb_interface_id(c, f);
if (id < 0)
return id;
dev->ifc_id = id;
rmnet_sdio_interface_desc.bInterfaceNumber = id;
ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_in_desc);
if (!ep)
goto out;
ep->driver_data = cdev; /* claim endpoint */
dev->epin = ep;
ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_out_desc);
if (!ep)
goto out;
ep->driver_data = cdev; /* claim endpoint */
dev->epout = ep;
ep = usb_ep_autoconfig(cdev->gadget, &rmnet_sdio_fs_notify_desc);
if (!ep)
goto out;
ep->driver_data = cdev; /* claim endpoint */
dev->epnotify = ep;
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
*/
if (gadget_is_dualspeed(c->cdev->gadget)) {
rmnet_sdio_hs_in_desc.bEndpointAddress =
rmnet_sdio_fs_in_desc.bEndpointAddress;
rmnet_sdio_hs_out_desc.bEndpointAddress =
rmnet_sdio_fs_out_desc.bEndpointAddress;
rmnet_sdio_hs_notify_desc.bEndpointAddress =
rmnet_sdio_fs_notify_desc.bEndpointAddress;
}
queue_delayed_work(dev->wq, &dev->sdio_open_work, 0);
return 0;
out:
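/* Endpoint autoconfig failed part-way; release whatever we claimed. */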
if (dev->epnotify)
dev->epnotify->driver_data = NULL;
if (dev->epout)
dev->epout->driver_data = NULL;
if (dev->epin)
dev->epin->driver_data = NULL;
return -ENODEV;
}
static void
rmnet_sdio_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct rmnet_sdio_dev *dev = container_of(f, struct rmnet_sdio_dev,
function);
cancel_delayed_work_sync(&dev->sdio_open_work);
destroy_workqueue(dev->wq);
dev->epout = dev->epin = dev->epnotify = NULL; /* release endpoints */
if (test_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status)) {
msm_sdio_dmux_close(rmnet_sdio_data_ch);
clear_bit(RMNET_SDIO_CH_OPEN, &dev->data_ch_status);
}
if (test_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status)) {
sdio_cmux_close(rmnet_sdio_ctl_ch);
clear_bit(RMNET_SDIO_CH_OPEN, &dev->ctrl_ch_status);
}
debugfs_remove_recursive(dev->dent);
kfree(dev);
}
#if defined(CONFIG_DEBUG_FS)
static ssize_t rmnet_sdio_read_stats(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
struct rmnet_sdio_dev *dev = file->private_data;
char *buf;
unsigned long flags;
int ret;
buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
spin_lock_irqsave(&dev->lock, flags);
ret = scnprintf(buf, PAGE_SIZE,
"-*-DATA-*-\n"
"dpkts_tohost:%lu epInPool:%u tx_size:%u drp_cnt:%lu\n"
"dpkts_tomodem:%lu epOutPool:%u rx_size:%u pending:%u\n"
"-*-QMI-*-\n"
"cpkts_tomodem:%lu qmi_req_q:%u cbits:%d\n"
"cpkts_tolaptop:%lu qmi_resp_q:%u notify_cnt:%d\n"
"-*-MISC-*-\n"
"data_ch_status: %lu ctrl_ch_status: %lu\n",
/* data */
dev->dpkt_tolaptop, dev->tx_idle_len,
dev->tx_skb_queue.qlen, dev->tx_drp_cnt,
dev->dpkt_tomodem, dev->rx_idle_len,
dev->rx_skb_queue.qlen, dev->dpkts_pending_atdmux,
/* qmi */
dev->cpkt_tomodem, dev->qreq_q_len,
dev->cbits_to_modem,
dev->cpkt_tolaptop, dev->qresp_q_len,
atomic_read(&dev->notify_count),
/* misc */
dev->data_ch_status, dev->ctrl_ch_status);
spin_unlock_irqrestore(&dev->lock, flags);
ret = simple_read_from_buffer(ubuf, count, ppos, buf, ret);
kfree(buf);
return ret;
}
static ssize_t rmnet_sdio_reset_stats(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct rmnet_sdio_dev *dev = file->private_data;
dev->dpkt_tolaptop = 0;
dev->dpkt_tomodem = 0;
dev->cpkt_tolaptop = 0;
dev->cpkt_tomodem = 0;
dev->dpkts_pending_atdmux = 0;
dev->tx_drp_cnt = 0;
/* TBD: How do we reset skb qlen
* it might have side effects
*/
return count;
}
static int debug_rmnet_sdio_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return 0;
}
const struct file_operations debug_rmnet_sdio_stats_ops = {
.open = debug_rmnet_sdio_open,
.read = rmnet_sdio_read_stats,
.write = rmnet_sdio_reset_stats,
};
static void rmnet_sdio_debugfs_init(struct rmnet_sdio_dev *dev)
{
dev->dent = debugfs_create_dir("usb_rmnet_sdio", NULL);
if (IS_ERR(dev->dent))
return;
debugfs_create_file("status", 0444, dev->dent, dev,
&debug_rmnet_sdio_stats_ops);
}
#else
static void rmnet_sdio_debugfs_init(struct rmnet_sdio_dev *dev)
{
return;
}
#endif
int rmnet_sdio_function_add(struct usb_configuration *c)
{
struct rmnet_sdio_dev *dev;
int ret;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->wq = create_singlethread_workqueue("k_rmnet_work");
if (!dev->wq) {
ret = -ENOMEM;
goto free_dev;
}
spin_lock_init(&dev->lock);
atomic_set(&dev->notify_count, 0);
atomic_set(&dev->online, 0);
INIT_WORK(&dev->disconnect_work, rmnet_sdio_disconnect_work);
INIT_WORK(&dev->set_modem_ctl_bits_work, rmnet_sdio_set_modem_cbits_w);
INIT_WORK(&dev->ctl_rx_work, rmnet_sdio_control_rx_work);
INIT_WORK(&dev->data_rx_work, rmnet_sdio_data_rx_work);
INIT_DELAYED_WORK(&dev->sdio_open_work, rmnet_open_sdio_work);
INIT_WORK(&dev->sdio_close_work, rmnet_close_sdio_work);
INIT_LIST_HEAD(&dev->qmi_req_q);
INIT_LIST_HEAD(&dev->qmi_resp_q);
INIT_LIST_HEAD(&dev->rx_idle);
INIT_LIST_HEAD(&dev->tx_idle);
skb_queue_head_init(&dev->tx_skb_queue);
skb_queue_head_init(&dev->rx_skb_queue);
dev->function.name = "rmnet_sdio";
dev->function.strings = rmnet_sdio_strings;
dev->function.descriptors = rmnet_sdio_fs_function;
dev->function.hs_descriptors = rmnet_sdio_hs_function;
dev->function.bind = rmnet_sdio_bind;
dev->function.unbind = rmnet_sdio_unbind;
dev->function.setup = rmnet_sdio_setup;
dev->function.set_alt = rmnet_sdio_set_alt;
dev->function.disable = rmnet_sdio_disable;
dev->function.suspend = rmnet_sdio_suspend;
ret = usb_add_function(c, &dev->function);
if (ret)
goto free_wq;
rmnet_sdio_debugfs_init(dev);
return 0;
free_wq:
destroy_workqueue(dev->wq);
free_dev:
kfree(dev);
return ret;
}
| gpl-2.0 |
HyochanPyo/kernel_3.18.9 | arch/powerpc/platforms/44x/ppc476.c | 328 | 7507 | /*
* PowerPC 476FPE board specific routines
*
* Copyright © 2013 Tony Breeds IBM Corporation
* Copyright © 2013 Alistair Popple IBM Corporation
*
* Based on earlier code:
* Matt Porter <mporter@kernel.crashing.org>
* Copyright 2002-2005 MontaVista Software Inc.
*
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
* Copyright (c) 2003-2005 Zultys Technologies
*
* Rewritten and ported to the merged powerpc tree:
* Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
* Copyright © 2011 David Kliekamp IBM Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/rtc.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
#include <asm/ppc4xx.h>
#include <asm/mpic.h>
#include <asm/mmu.h>
#include <linux/pci.h>
#include <linux/i2c.h>
static const struct of_device_id ppc47x_of_bus[] __initconst = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,plb6", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
{},
};
/* The EEPROM is missing and the default values are bogus. This forces USB
 * into EHCI mode */
static void quirk_ppc_currituck_usb_fixup(struct pci_dev *dev)
{
if (of_machine_is_compatible("ibm,currituck")) {
pci_write_config_dword(dev, 0xe0, 0x0114231f);
pci_write_config_dword(dev, 0xe4, 0x00006c40);
}
}
DECLARE_PCI_FIXUP_HEADER(0x1033, 0x0035, quirk_ppc_currituck_usb_fixup);
/* Akebono has an AVR microcontroller attached to the I2C bus
* which is used to power off/reset the system. */
/* AVR I2C Commands */
#define AVR_PWRCTL_CMD (0x26)
/* Flags for the power control I2C commands */
#define AVR_PWRCTL_PWROFF (0x01)
#define AVR_PWRCTL_RESET (0x02)
static struct i2c_client *avr_i2c_client;
static void avr_halt_system(int pwrctl_flags)
{
/* Request the AVR to reset the system */
i2c_smbus_write_byte_data(avr_i2c_client,
AVR_PWRCTL_CMD, pwrctl_flags);
/* Wait for system to be reset */
while (1)
;
}
static void avr_power_off_system(void)
{
avr_halt_system(AVR_PWRCTL_PWROFF);
}
static void avr_reset_system(char *cmd)
{
avr_halt_system(AVR_PWRCTL_RESET);
}
static int avr_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
avr_i2c_client = client;
ppc_md.restart = avr_reset_system;
ppc_md.power_off = avr_power_off_system;
return 0;
}
static const struct i2c_device_id avr_id[] = {
{ "akebono-avr", 0 },
{ }
};
static struct i2c_driver avr_driver = {
.driver = {
.name = "akebono-avr",
},
.probe = avr_probe,
.id_table = avr_id,
};
static int __init ppc47x_device_probe(void)
{
i2c_add_driver(&avr_driver);
of_platform_bus_probe(NULL, ppc47x_of_bus, NULL);
return 0;
}
machine_device_initcall(ppc47x, ppc47x_device_probe);
static void __init ppc47x_init_irq(void)
{
struct device_node *np;
/* Find top level interrupt controller */
for_each_node_with_property(np, "interrupt-controller") {
if (of_get_property(np, "interrupts", NULL) == NULL)
break;
}
if (np == NULL)
panic("Can't find top level interrupt controller");
/* Check type and do appropriate initialization */
if (of_device_is_compatible(np, "chrp,open-pic")) {
/* The MPIC driver will get everything it needs from the
* device-tree, just pass 0 to all arguments
*/
struct mpic *mpic =
mpic_alloc(np, 0, MPIC_NO_RESET, 0, 0, " MPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
ppc_md.get_irq = mpic_get_irq;
} else
panic("Unrecognized top level interrupt controller");
}
#ifdef CONFIG_SMP
static void smp_ppc47x_setup_cpu(int cpu)
{
mpic_setup_this_cpu();
}
static int smp_ppc47x_kick_cpu(int cpu)
{
struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
const u64 *spin_table_addr_prop;
u32 *spin_table;
extern void start_secondary_47x(void);
BUG_ON(cpunode == NULL);
/* Assume spin table. We could test for the enable-method in
* the device-tree but currently there's little point as it's
* our only supported method
*/
spin_table_addr_prop =
of_get_property(cpunode, "cpu-release-addr", NULL);
if (spin_table_addr_prop == NULL) {
pr_err("CPU%d: Can't start, missing cpu-release-addr !\n",
cpu);
return 1;
}
/* Assume it's mapped as part of the linear mapping. This is a bit
* fishy but will work fine for now
*
* XXX: Is there any reason to assume differently?
*/
spin_table = (u32 *)__va(*spin_table_addr_prop);
pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
spin_table[3] = cpu;
smp_wmb();
spin_table[1] = __pa(start_secondary_47x);
mb();
return 0;
}
static struct smp_ops_t ppc47x_smp_ops = {
.probe = smp_mpic_probe,
.message_pass = smp_mpic_message_pass,
.setup_cpu = smp_ppc47x_setup_cpu,
.kick_cpu = smp_ppc47x_kick_cpu,
.give_timebase = smp_generic_give_timebase,
.take_timebase = smp_generic_take_timebase,
};
static void __init ppc47x_smp_init(void)
{
if (mmu_has_feature(MMU_FTR_TYPE_47x))
smp_ops = &ppc47x_smp_ops;
}
#else /* CONFIG_SMP */
static void __init ppc47x_smp_init(void) { }
#endif /* CONFIG_SMP */
static void __init ppc47x_setup_arch(void)
{
/* No need to check the DMA config as we /know/ our windows are all of
 * RAM. Let's hope that doesn't change */
swiotlb_detect_4g();
ppc47x_smp_init();
}
static int board_rev = -1;
static int __init ppc47x_get_board_rev(void)
{
int reg;
u8 *fpga;
struct device_node *np = NULL;
if (of_machine_is_compatible("ibm,currituck")) {
np = of_find_compatible_node(NULL, NULL, "ibm,currituck-fpga");
reg = 0;
} else if (of_machine_is_compatible("ibm,akebono")) {
np = of_find_compatible_node(NULL, NULL, "ibm,akebono-fpga");
reg = 2;
}
if (!np)
goto fail;
fpga = (u8 *) of_iomap(np, 0);
of_node_put(np);
if (!fpga)
goto fail;
board_rev = ioread8(fpga + reg) & 0x03;
pr_info("%s: Found board revision %d\n", __func__, board_rev);
iounmap(fpga);
return 0;
fail:
pr_info("%s: Unable to find board revision\n", __func__);
return 0;
}
machine_arch_initcall(ppc47x, ppc47x_get_board_rev);
/* The USB controller IRQ should have been hardware swizzled but it wasn't :( */
static void ppc47x_pci_irq_fixup(struct pci_dev *dev)
{
if (dev->vendor == 0x1033 && (dev->device == 0x0035 ||
dev->device == 0x00e0)) {
if (board_rev == 0) {
dev->irq = irq_create_mapping(NULL, 47);
pr_info("%s: Mapping irq %d\n", __func__, dev->irq);
} else if (board_rev == 2) {
dev->irq = irq_create_mapping(NULL, 49);
pr_info("%s: Mapping irq %d\n", __func__, dev->irq);
} else {
pr_alert("%s: Unknown board revision\n", __func__);
}
}
}
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
static int __init ppc47x_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (of_flat_dt_is_compatible(root, "ibm,akebono"))
return 1;
if (of_flat_dt_is_compatible(root, "ibm,currituck")) {
ppc_md.pci_irq_fixup = ppc47x_pci_irq_fixup;
return 1;
}
return 0;
}
define_machine(ppc47x) {
.name = "PowerPC 47x",
.probe = ppc47x_probe,
.progress = udbg_progress,
.init_IRQ = ppc47x_init_irq,
.setup_arch = ppc47x_setup_arch,
.restart = ppc4xx_reset_system,
.calibrate_decr = generic_calibrate_decr,
};
| gpl-2.0 |
bradfa/linux | arch/sh/mm/kmap.c | 584 | 1703 | /*
* arch/sh/mm/kmap.c
*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
* Copyright (C) 2002 - 2009 Paul Mundt
*
* Released under the terms of the GNU GPL v2.0.
*/
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#define kmap_get_fixmap_pte(vaddr) \
pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
static pte_t *kmap_coherent_pte;
void __init kmap_coherent_init(void)
{
unsigned long vaddr;
/* cache the first coherent kmap pte */
vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
void *kmap_coherent(struct page *page, unsigned long addr)
{
enum fixed_addresses idx;
unsigned long vaddr;
BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
preempt_disable();
pagefault_disable();
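/*
 * Pick the per-CPU fixmap slot whose colour matches the given address,
 * so the temporary mapping aliases into the same cache set as 'addr'.
 */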
idx = FIX_CMAP_END -
(((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
(FIX_N_COLOURS * smp_processor_id()));
vaddr = __fix_to_virt(idx);
BUG_ON(!pte_none(*(kmap_coherent_pte - idx)));
set_pte(kmap_coherent_pte - idx, mk_pte(page, PAGE_KERNEL));
return (void *)vaddr;
}
void kunmap_coherent(void *kvaddr)
{
if (kvaddr >= (void *)FIXADDR_START) {
unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
enum fixed_addresses idx = __virt_to_fix(vaddr);
/* XXX.. Kill this later, here for sanity at the moment.. */
__flush_purge_region((void *)vaddr, PAGE_SIZE);
pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);
local_flush_tlb_one(get_asid(), vaddr);
}
pagefault_enable();
preempt_enable();
}
| gpl-2.0 |
djmax81/Suemax-kernel_Exynos5433_new | drivers/pci/quirks.c | 584 | 115696 | /*
* This file contains work-arounds for many known PCI hardware
* bugs. Devices present only on certain architectures (host
* bridges et cetera) should be handled in arch-specific code.
*
* Note: any quirks for hotpluggable devices must _NOT_ be declared __init.
*
* Copyright (c) 1999 Martin Mares <mj@ucw.cz>
*
* Init/reset quirks for USB host controllers should be in the
* USB quirks file, where their drivers can access and reuse it.
*
* The bridge optimization stuff has been removed. If you really
* have a silly BIOS which is unable to set your host bridge right,
* use the PowerTweak utility (see http://powertweak.sourceforge.net).
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/dmi.h>
#include <linux/pci-aspm.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/ktime.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
/*
* Decoding should be disabled for a PCI device during BAR sizing to avoid
* conflict. But doing so may cause problems on host bridge and perhaps other
* key system devices. For devices that need to have mmio decoding always-on,
* we need to set the dev->mmio_always_on bit.
*/
static void quirk_mmio_always_on(struct pci_dev *dev)
{
dev->mmio_always_on = 1;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
/* The Mellanox Tavor device gives false positive parity errors
* Mark this device with a broken_parity_status, to allow
* PCI scanning code to "skip" this now blacklisted device.
*/
static void quirk_mellanox_tavor(struct pci_dev *dev)
{
dev->broken_parity_status = 1; /* This device gives false positives */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor);
/* Deal with broken BIOSes that neglect to enable passive release,
which can cause problems in combination with the 82441FX/PPro MTRRs */
static void quirk_passive_release(struct pci_dev *dev)
{
struct pci_dev *d = NULL;
unsigned char dlc;
/* We have to make sure a particular bit is set in the PIIX3
ISA bridge, so we have to go out and find it. */
while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
pci_read_config_byte(d, 0x82, &dlc);
if (!(dlc & 1<<1)) {
dev_info(&d->dev, "PIIX3: Enabling Passive Release\n");
dlc |= 1<<1;
pci_write_config_byte(d, 0x82, dlc);
}
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
/* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround
but VIA don't answer queries. If you happen to have good contacts at VIA
ask them for me please -- Alan
This appears to be BIOS- rather than version-dependent, so presumably there is a
chipset-level fix */
static void quirk_isa_dma_hangs(struct pci_dev *dev)
{
if (!isa_dma_bridge_buggy) {
isa_dma_bridge_buggy=1;
dev_info(&dev->dev, "Activating ISA DMA hang workarounds\n");
}
}
/*
* Its not totally clear which chipsets are the problematic ones
* We know 82C586 and 82C596 variants are affected.
*/
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
/*
* Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear
* for some HT machines to use C4 w/o hanging.
*/
static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
{
u32 pmbase;
u16 pm1a;
pci_read_config_dword(dev, 0x40, &pmbase);
pmbase = pmbase & 0xff80;
pm1a = inw(pmbase);
if (pm1a & 0x10) {
dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
outw(0x10, pmbase);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
/*
* Chipsets where PCI->PCI transfers vanish or hang
*/
static void quirk_nopcipci(struct pci_dev *dev)
{
if ((pci_pci_problems & PCIPCI_FAIL)==0) {
dev_info(&dev->dev, "Disabling direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_FAIL;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci);
static void quirk_nopciamd(struct pci_dev *dev)
{
u8 rev;
pci_read_config_byte(dev, 0x08, &rev);
if (rev == 0x13) {
/* Erratum 24 */
dev_info(&dev->dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
pci_pci_problems |= PCIAGP_FAIL;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd);
/*
* Triton requires workarounds to be used by the drivers
*/
static void quirk_triton(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_TRITON)==0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_TRITON;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
/*
* VIA Apollo KT133 needs PCI latency patch
* Made according to a windows driver based patch by George E. Breese
* see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
* and http://www.georgebreese.com/net/software/#PCI
* Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
* the info on which Mr Breese based his work.
*
* Updated based on further information from the site and also on
* information provided by VIA
*/
static void quirk_vialatency(struct pci_dev *dev)
{
struct pci_dev *p;
u8 busarb;
/* Ok we have a potential problem chipset here. Now see if we have
a buggy southbridge */
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
if (p!=NULL) {
/* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */
/* Check for buggy part revisions */
if (p->revision < 0x40 || p->revision > 0x42)
goto exit;
} else {
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
if (p==NULL) /* No problem parts */
goto exit;
/* Check for buggy part revisions */
if (p->revision < 0x10 || p->revision > 0x12)
goto exit;
}
/*
* Ok we have the problem. Now set the PCI master grant to
* occur every master grant. The apparent bug is that under high
* PCI load (quite common in Linux of course) you can get data
* loss when the CPU is held off the bus for 3 bus master requests
* This happens to include the IDE controllers....
*
* VIA only apply this fix when an SB Live! is present but under
* both Linux and Windows this isn't enough, and we have seen
* corruption without SB Live! but with things like 3 UDMA IDE
* controllers. So we ignore that bit of the VIA recommendation..
*/
pci_read_config_byte(dev, 0x76, &busarb);
/* Set bit 4 and bit 5 of byte 76 to 0x01
   "Master priority rotation on every PCI master grant" */
busarb &= ~(1<<5);
busarb |= (1<<4);
pci_write_config_byte(dev, 0x76, busarb);
dev_info(&dev->dev, "Applying VIA southbridge workaround\n");
exit:
pci_dev_put(p);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
/* Must restore this on a resume from RAM */
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
/*
* VIA Apollo VP3 needs ETBF on BT848/878
*/
static void quirk_viaetbf(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_VIAETBF)==0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_VIAETBF;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf);
static void quirk_vsfx(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_VSFX)==0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_VSFX;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx);
/*
* Ali Magik requires workarounds to be used by the drivers
* that DMA to AGP space. Latency must be set to 0xA and triton
* workaround applied too
* [Info kindly provided by ALi]
*/
static void quirk_alimagik(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
/*
* Natoma has some interesting boundary conditions with Zoran stuff
* at least
*/
static void quirk_natoma(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_NATOMA)==0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
pci_pci_problems |= PCIPCI_NATOMA;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
/*
* This chip can cause PCI parity errors if config register 0xA0 is read
* while DMAs are occurring.
*/
static void quirk_citrine(struct pci_dev *dev)
{
dev->cfg_size = 0xA0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
/*
* S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
* If it's needed, re-allocate the region.
*/
static void quirk_s3_64M(struct pci_dev *dev)
{
struct resource *r = &dev->resource[0];
if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
r->start = 0;
r->end = 0x3ffffff;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
/*
* Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS
* ver. 1.33 20070103) don't set the correct ISA PCI region header info.
* BAR0 should be 8 bytes; instead, it may be set to something like 8k
* (which conflicts w/ BAR1's memory range).
*/
static void quirk_cs5536_vsa(struct pci_dev *dev)
{
if (pci_resource_len(dev, 0) != 8) {
struct resource *res = &dev->resource[0];
res->end = res->start + 8 - 1;
dev_info(&dev->dev, "CS5536 ISA bridge bug detected "
"(incorrect header); workaround applied.\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
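/*
 * Read a 16-bit I/O base from config space at 'port', align it down to
 * 'size', and claim it as device resource 'nr' so that the region shows
 * up in the resource tree.
 */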
static void quirk_io_region(struct pci_dev *dev, int port,
unsigned size, int nr, const char *name)
{
u16 region;
struct pci_bus_region bus_region;
struct resource *res = dev->resource + nr;
pci_read_config_word(dev, port, &region);
region &= ~(size - 1);
if (!region)
return;
res->name = pci_name(dev);
res->flags = IORESOURCE_IO;
/* Convert from PCI bus to resource space */
bus_region.start = region;
bus_region.end = region + size - 1;
pcibios_bus_to_resource(dev, res, &bus_region);
if (!pci_claim_resource(dev, nr))
dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name);
}
/*
 * The ATI Northbridge raises an MCE on the processor if you even
 * read anywhere between 0x3b0 and 0x3bb, or read 0x3d3
*/
static void quirk_ati_exploding_mce(struct pci_dev *dev)
{
dev_info(&dev->dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
/* We must not look at these I/O locations */
request_region(0x3b0, 0x0C, "RadeonIGP");
request_region(0x3d3, 0x01, "RadeonIGP");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce);
/*
* Let's make the southbridge information explicit instead
* of having to worry about people probing the ACPI areas,
* for example.. (Yes, it happens, and if you read the wrong
* ACPI register it will put the machine to sleep with no
* way of waking it up again. Bummer).
*
* ALI M7101: Two IO regions pointed to by words at
* 0xE0 (64 bytes of ACPI registers)
* 0xE2 (32 bytes of SMB registers)
*/
static void quirk_ali7101_acpi(struct pci_dev *dev)
{
quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi);
static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
u32 devres;
u32 mask, size, base;
pci_read_config_dword(dev, port, &devres);
if ((devres & enable) != enable)
return;
mask = (devres >> 16) & 15;
base = devres & 0xffff;
size = 16;
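/* The 4-bit mask encodes the decoded window size: halve 'size' until
 * the next bit down is set in the mask, i.e. size ends up at twice the
 * highest set mask bit (or 1 if the mask is empty).
 */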
for (;;) {
unsigned bit = size >> 1;
if ((bit & mask) == bit)
break;
size = bit;
}
/*
* For now we only print it out. Eventually we'll want to
* reserve it (at least if it's in the 0x1000+ range), but
* let's get enough confirmation reports first.
*/
base &= -size;
dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
}
static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
u32 devres;
u32 mask, size, base;
pci_read_config_dword(dev, port, &devres);
if ((devres & enable) != enable)
return;
base = devres & 0xffff0000;
mask = (devres & 0x3f) << 16;
size = 128 << 16;
for (;;) {
unsigned bit = size >> 1;
if ((bit & mask) == bit)
break;
size = bit;
}
/*
* For now we only print it out. Eventually we'll want to
* reserve it, but let's get enough confirmation reports first.
*/
base &= -size;
dev_info(&dev->dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
}
/*
* PIIX4 ACPI: Two IO regions pointed to by longwords at
* 0x40 (64 bytes of ACPI registers)
* 0x90 (16 bytes of SMB registers)
* and a few strange programmable PIIX4 device resources.
*/
static void quirk_piix4_acpi(struct pci_dev *dev)
{
u32 res_a;
quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
/* Device resource A has enables for some of the other ones */
pci_read_config_dword(dev, 0x5c, &res_a);
piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);
/* Device resource D is just bitfields for static resources */
/* Device 12 enabled? */
if (res_a & (1 << 29)) {
piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
}
/* Device 13 enabled? */
if (res_a & (1 << 30)) {
piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
}
piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi);
#define ICH_PMBASE 0x40
#define ICH_ACPI_CNTL 0x44
#define ICH4_ACPI_EN 0x10
#define ICH6_ACPI_EN 0x80
#define ICH4_GPIOBASE 0x58
#define ICH4_GPIO_CNTL 0x5c
#define ICH4_GPIO_EN 0x10
#define ICH6_GPIOBASE 0x48
#define ICH6_GPIO_CNTL 0x4c
#define ICH6_GPIO_EN 0x10
/*
* ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at
* 0x40 (128 bytes of ACPI, GPIO & TCO registers)
* 0x58 (64 bytes of GPIO I/O space)
*/
static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
{
u8 enable;
/*
* The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict
* with low legacy (and fixed) ports. We don't know the decoding
* priority and can't tell whether the legacy device or the one created
* here is really at that address. This happens on boards with broken
* BIOSes.
*/
pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
if (enable & ICH4_ACPI_EN)
quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
"ICH4 ACPI/GPIO/TCO");
pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
if (enable & ICH4_GPIO_EN)
quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
"ICH4 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi);
static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
{
u8 enable;
pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
if (enable & ICH6_ACPI_EN)
quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
"ICH6 ACPI/GPIO/TCO");
pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
if (enable & ICH6_GPIO_EN)
quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
"ICH6 GPIO");
}
static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name, int dynsize)
{
u32 val;
u32 size, base;
pci_read_config_dword(dev, reg, &val);
/* Enabled? */
if (!(val & 1))
return;
base = val & 0xfffc;
if (dynsize) {
/*
* This is not correct. It is 16, 32 or 64 bytes depending on
* register D31:F0:ADh bits 5:4.
*
* But this gets us at least _part_ of it.
*/
size = 16;
} else {
size = 128;
}
base &= ~(size-1);
/* Just print it out for now. We should reserve it after more debugging */
dev_info(&dev->dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
}
static void quirk_ich6_lpc(struct pci_dev *dev)
{
/* Shared ACPI/GPIO decode with all ICH6+ */
ich6_lpc_acpi_gpio(dev);
/* ICH6-specific generic IO decode */
ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg, const char *name)
{
u32 val;
u32 mask, base;
pci_read_config_dword(dev, reg, &val);
/* Enabled? */
if (!(val & 1))
return;
/*
* IO base in bits 15:2, mask in bits 23:18, both
* are dword-based
*/
base = val & 0xfffc;
mask = (val >> 16) & 0xfc;
mask |= 3;
/* Just print it out for now. We should reserve it after more debugging */
dev_info(&dev->dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
}
/* ICH7-10 has the same common LPC generic IO decode registers */
static void quirk_ich7_lpc(struct pci_dev *dev)
{
/* We share the common ACPI/GPIO decode with ICH6 */
ich6_lpc_acpi_gpio(dev);
/* And have 4 ICH7+ generic decodes */
ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc);
/*
* VIA ACPI: One IO region pointed to by longword at
* 0x48 or 0x20 (256 bytes of ACPI registers)
*/
static void quirk_vt82c586_acpi(struct pci_dev *dev)
{
if (dev->revision & 0x10)
quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
"vt82c586 ACPI");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi);
/*
* VIA VT82C686 ACPI: Three IO region pointed to by (long)words at
* 0x48 (256 bytes of ACPI registers)
* 0x70 (128 bytes of hardware monitoring register)
* 0x90 (16 bytes of SMB registers)
*/
static void quirk_vt82c686_acpi(struct pci_dev *dev)
{
quirk_vt82c586_acpi(dev);
quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
"vt82c686 HW-mon");
quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi);
/*
* VIA VT8235 ISA Bridge: Two IO regions pointed to by words at
* 0x88 (128 bytes of power management registers)
* 0xd0 (16 bytes of SMB registers)
*/
static void quirk_vt8235_acpi(struct pci_dev *dev)
{
quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
/*
 * The TI XIO2000a PCIe-PCI bridge erroneously reports that it supports fast
 * back-to-back; disable fast back-to-back on the secondary bus segment.
*/
static void quirk_xio2000a(struct pci_dev *dev)
{
struct pci_dev *pdev;
u16 command;
dev_warn(&dev->dev, "TI XIO2000a quirk detected; "
"secondary bus fast back-to-back transfers disabled\n");
list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
pci_read_config_word(pdev, PCI_COMMAND, &command);
if (command & PCI_COMMAND_FAST_BACK)
pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
quirk_xio2000a);
#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
/*
* VIA 686A/B: If an IO-APIC is active, we need to route all on-chip
* devices to the external APIC.
*
* TODO: When we have device-specific interrupt routers,
* this code will go away from quirks.
*/
static void quirk_via_ioapic(struct pci_dev *dev)
{
u8 tmp;
if (nr_ioapics < 1)
tmp = 0; /* nothing routed to external APIC */
else
tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
dev_info(&dev->dev, "%sbling VIA external APIC routing\n",
tmp == 0 ? "Disa" : "Ena");
/* Offset 0x58: External APIC IRQ output control */
pci_write_config_byte (dev, 0x58, tmp);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
/*
* VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit.
* This leads to doubled level interrupt rates.
* Set this bit to get rid of cycle wastage.
* Otherwise uncritical.
*/
static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
{
u8 misc_control2;
#define BYPASS_APIC_DEASSERT 8
pci_read_config_byte(dev, 0x5B, &misc_control2);
if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
dev_info(&dev->dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
/*
 * The AMD IO-APIC can hang the box when an APIC IRQ is masked.
 * We check all revs >= B0 (but not pre-production parts!) as the bug
 * is currently marked NoFix
*
* We have multiple reports of hangs with this chipset that went away with
* noapic specified. For the moment we assume it's the erratum. We may be wrong
* of course. However the advice is demonstrably good even if so..
*/
static void quirk_amd_ioapic(struct pci_dev *dev)
{
if (dev->revision >= 0x02) {
dev_warn(&dev->dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
dev_warn(&dev->dev, " : booting with the \"noapic\" option\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
static void quirk_ioapic_rmw(struct pci_dev *dev)
{
if (dev->devfn == 0 && dev->bus->number == 0)
sis_apic_bug = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw);
#endif /* CONFIG_X86_IO_APIC */
/*
* Some settings of MMRBC can lead to data corruption so block changes.
* See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide
*/
static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
{
if (dev->subordinate && dev->revision <= 0x12) {
dev_info(&dev->dev, "AMD8131 rev %x detected; "
"disabling PCI-X MMRBC\n", dev->revision);
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
/*
* FIXME: it is questionable that quirk_via_acpi
* is needed. It shows up as an ISA bridge, and does not
* support the PCI_INTERRUPT_LINE register at all. Therefore
* it seems like setting the pci_dev's 'irq' to the
* value of the ACPI SCI interrupt is only done for convenience.
* -jgarzik
*/
static void quirk_via_acpi(struct pci_dev *d)
{
/*
* VIA ACPI device: SCI IRQ line in PCI config byte 0x42
*/
u8 irq;
pci_read_config_byte(d, 0x42, &irq);
irq &= 0xf;
if (irq && (irq != 2))
d->irq = irq;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi);
/*
* VIA bridges which have VLink
*/
static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;
static void quirk_via_bridge(struct pci_dev *dev)
{
/* See what bridge we have and find the device ranges */
switch (dev->device) {
case PCI_DEVICE_ID_VIA_82C686:
/* The VT82C686 is special, it attaches to PCI and can have
any device number. All its subdevices are functions of
that single device. */
via_vlink_dev_lo = PCI_SLOT(dev->devfn);
via_vlink_dev_hi = PCI_SLOT(dev->devfn);
break;
case PCI_DEVICE_ID_VIA_8237:
case PCI_DEVICE_ID_VIA_8237A:
via_vlink_dev_lo = 15;
break;
case PCI_DEVICE_ID_VIA_8235:
via_vlink_dev_lo = 16;
break;
case PCI_DEVICE_ID_VIA_8231:
case PCI_DEVICE_ID_VIA_8233_0:
case PCI_DEVICE_ID_VIA_8233A:
case PCI_DEVICE_ID_VIA_8233C_0:
via_vlink_dev_lo = 17;
break;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233C_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge);
/**
* quirk_via_vlink - VIA VLink IRQ number update
* @dev: PCI device
*
* If the device we are dealing with is on a PIC IRQ we need to
* ensure that the IRQ line register which usually is not relevant
* for PCI cards, is actually written so that interrupts get sent
* to the right place.
* We only do this on systems where a VIA south bridge was detected,
* and only for VIA devices on the motherboard (see quirk_via_bridge
* above).
*/
static void quirk_via_vlink(struct pci_dev *dev)
{
u8 irq, new_irq;
/* Check if we have VLink at all */
if (via_vlink_dev_lo == -1)
return;
new_irq = dev->irq;
/* Don't quirk interrupts outside the legacy IRQ range */
if (!new_irq || new_irq > 15)
return;
/* Internal device ? */
if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi ||
PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
return;
/* This is an internal VLink device on a PIC interrupt. The BIOS
ought to have set this but may not have, so we redo it */
pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
if (new_irq != irq) {
dev_info(&dev->dev, "VIA VLink IRQ fixup, from %d to %d\n",
irq, new_irq);
udelay(15); /* unknown if delay really needed */
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
/*
* VIA VT82C598 has its device ID settable and many BIOSes
* set it to the ID of VT82C597 for backward compatibility.
* We need to switch it off to be able to recognize the real
* type of the chip.
*/
static void quirk_vt82c598_id(struct pci_dev *dev)
{
pci_write_config_byte(dev, 0xfc, 0);
pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id);
/*
* CardBus controllers have a legacy base address that enables them
* to respond as i82365 pcmcia controllers. We don't want them to
* do this even if the Linux CardBus driver is not loaded, because
* the Linux i82365 driver does not (and should not) handle CardBus.
*/
static void quirk_cardbus_legacy(struct pci_dev *dev)
{
pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
/*
* Following the PCI ordering rules is optional on the AMD762. I'm not
* sure what the designers were smoking but let's not inhale...
*
 * To be fair to AMD, it follows the spec by default; it's BIOS people
* who turn it off!
*/
static void quirk_amd_ordering(struct pci_dev *dev)
{
u32 pcic;
pci_read_config_dword(dev, 0x4C, &pcic);
if ((pcic&6)!=6) {
pcic |= 6;
dev_warn(&dev->dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
pci_write_config_dword(dev, 0x4C, pcic);
pci_read_config_dword(dev, 0x84, &pcic);
pcic |= (1<<23); /* Required in this mode */
pci_write_config_dword(dev, 0x84, pcic);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
/*
* DreamWorks provided workaround for Dunord I-3000 problem
*
* This card decodes and responds to addresses not apparently
* assigned to it. We force a larger allocation to ensure that
* nothing gets put too close to it.
*/
static void quirk_dunord(struct pci_dev *dev)
{
struct resource *r = &dev->resource[1];
r->start = 0;
r->end = 0xffffff;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
/*
* i82380FB mobile docking controller: its PCI-to-PCI bridge
* is subtractive decoding (transparent), and does indicate this
* in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80
* instead of 0x01.
*/
static void quirk_transparent_bridge(struct pci_dev *dev)
{
dev->transparent = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge);
/*
* Common misconfiguration of the MediaGX/Geode PCI master that will
* reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1
* datasheets found at http://www.national.com/analog for info on what
* these bits do. <christer@weinigel.se>
*/
static void quirk_mediagx_master(struct pci_dev *dev)
{
u8 reg;
pci_read_config_byte(dev, 0x41, &reg);
if (reg & 2) {
reg &= ~2;
dev_info(&dev->dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n", reg);
pci_write_config_byte(dev, 0x41, reg);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
/*
* Ensure C0 rev restreaming is off. This is normally done by
* the BIOS but in the odd case it is not the results are corruption
* hence the presence of a Linux check
*/
static void quirk_disable_pxb(struct pci_dev *pdev)
{
u16 config;
if (pdev->revision != 0x04) /* Only C0 requires this */
return;
pci_read_config_word(pdev, 0x40, &config);
if (config & (1<<6)) {
config &= ~(1<<6);
pci_write_config_word(pdev, 0x40, config);
dev_info(&pdev->dev, "C0 revision 450NX. Disabling PCI restreaming\n");
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
static void quirk_amd_ide_mode(struct pci_dev *pdev)
{
/* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */
u8 tmp;
pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
if (tmp == 0x01) {
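/* Unlock the class registers via bit 0 of config offset 0x40,
 * rewrite prog-if/subclass to AHCI (0x01/0x06), then restore
 * the original lock byte.
 */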
pci_read_config_byte(pdev, 0x40, &tmp);
pci_write_config_byte(pdev, 0x40, tmp|1);
pci_write_config_byte(pdev, 0x9, 1);
pci_write_config_byte(pdev, 0xa, 6);
pci_write_config_byte(pdev, 0x40, tmp);
pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
dev_info(&pdev->dev, "set SATA to AHCI mode\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
/*
* Serverworks CSB5 IDE does not fully support native mode
*/
static void quirk_svwks_csb5ide(struct pci_dev *pdev)
{
u8 prog;
pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
if (prog & 5) {
prog &= ~5;
pdev->class &= ~5;
pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
/* PCI layer will sort out resources */
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
/*
* Intel 82801CAM ICH3-M datasheet says IDE modes must be the same
*/
static void quirk_ide_samemode(struct pci_dev *pdev)
{
u8 prog;
pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
dev_info(&pdev->dev, "IDE mode mismatch; forcing legacy mode\n");
prog &= ~5;
pdev->class &= ~5;
pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
/*
* Some ATA devices break if put into D3
*/
static void quirk_no_ata_d3(struct pci_dev *pdev)
{
pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
}
/* Quirk the legacy ATA devices only. The AHCI ones are ok */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
/* ALi loses some register settings that we cannot then restore */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
/* VIA comes back fine, but we need to keep it alive or ACPI GTM failures
occur during mode detection */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
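/* A note on the class-based declarations above: the (class, class_shift)
 * pair makes the fixup match on (dev->class >> class_shift), so
 * PCI_CLASS_STORAGE_IDE with a shift of 8 matches every IDE-class
 * function from the given vendor, regardless of device ID. */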
/* This was originally an Alpha-specific thing, but it really fits here.
* The i82375 PCI/EISA bridge appears as non-classified. Fix that.
*/
static void quirk_eisa_bridge(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_EISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge);
/*
* On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
* is not activated. The myth is that Asus said that they do not want the
* users to be irritated by just another PCI Device in the Win98 device
* manager. (see the file prog/hotplug/README.p4b in the lm_sensors
* package 2.7.0 for details)
*
* The SMBus PCI Device can be activated by setting a bit in the ICH LPC
* bridge. Unfortunately, this device has no subvendor/subdevice ID. So it
* becomes necessary to do this tweak in two steps -- the chosen trigger
* is either the Host bridge (preferred) or on-board VGA controller.
*
* Note that we used to unhide the SMBus that way on Toshiba laptops
* (Satellite A40 and Tecra M2) but then found that the thermal management
* was done by SMM code, which could cause unsynchronized concurrent
* accesses to the SMBus registers, with potentially bad effects. Thus you
* should be very careful when adding new entries: if SMM is accessing the
* Intel SMBus, this is a very good reason to leave it hidden.
*
* Likewise, many recent laptops use ACPI for thermal management. If the
* ACPI DSDT code accesses the SMBus, then Linux should not access it
* natively, and keeping the SMBus hidden is the right thing to do. If you
* are about to add an entry in the table below, please first disassemble
* the DSDT and double-check that there is no code accessing the SMBus.
*/
static int asus_hides_smbus;
static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
{
if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
switch(dev->subsystem_device) {
case 0x8025: /* P4B-LX */
case 0x8070: /* P4B */
case 0x8088: /* P4B533 */
case 0x1626: /* L3C notebook */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
switch(dev->subsystem_device) {
case 0x80b1: /* P4GE-V */
case 0x80b2: /* P4PE */
case 0x8093: /* P4B533-V */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
switch(dev->subsystem_device) {
case 0x8030: /* P4T533 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
switch (dev->subsystem_device) {
case 0x8070: /* P4G8X Deluxe */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
switch (dev->subsystem_device) {
case 0x80c9: /* PU-DLS */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
switch (dev->subsystem_device) {
case 0x1751: /* M2N notebook */
case 0x1821: /* M5N notebook */
case 0x1897: /* A6L notebook */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch (dev->subsystem_device) {
case 0x184b: /* W1N notebook */
case 0x186a: /* M6Ne notebook */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
switch (dev->subsystem_device) {
case 0x80f2: /* P4P800-X */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
switch (dev->subsystem_device) {
case 0x1882: /* M6V notebook */
case 0x1977: /* A6VA notebook */
asus_hides_smbus = 1;
}
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch(dev->subsystem_device) {
case 0x088C: /* HP Compaq nc8000 */
case 0x0890: /* HP Compaq nc6000 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
switch (dev->subsystem_device) {
case 0x12bc: /* HP D330L */
case 0x12bd: /* HP D530 */
case 0x006a: /* HP Compaq nx9500 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
switch (dev->subsystem_device) {
case 0x12bf: /* HP xw4100 */
asus_hides_smbus = 1;
}
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch(dev->subsystem_device) {
case 0xC00C: /* Samsung P35 notebook */
asus_hides_smbus = 1;
}
} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
switch(dev->subsystem_device) {
case 0x0058: /* Compaq Evo N620c */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
switch(dev->subsystem_device) {
case 0xB16C: /* Compaq Deskpro EP 401963-001 (PCA# 010174) */
/* Motherboard doesn't have Host bridge
* subvendor/subdevice IDs, therefore checking
* its on-board VGA controller */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
switch(dev->subsystem_device) {
case 0x00b8: /* Compaq Evo D510 CMT */
case 0x00b9: /* Compaq Evo D510 SFF */
case 0x00ba: /* Compaq Evo D510 USDT */
/* Motherboard doesn't have Host bridge
* subvendor/subdevice IDs and on-board VGA
* controller is disabled if an AGP card is
* inserted, therefore checking USB UHCI
* Controller #1 */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
switch (dev->subsystem_device) {
case 0x001A: /* Compaq Deskpro EN SSF P667 815E */
/* Motherboard doesn't have host bridge
* subvendor/subdevice IDs, therefore checking
* its on-board VGA controller */
asus_hides_smbus = 1;
}
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
static void asus_hides_smbus_lpc(struct pci_dev *dev)
{
u16 val;
if (likely(!asus_hides_smbus))
return;
pci_read_config_word(dev, 0xF2, &val);
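/* Bit 3 of the LPC bridge's F2 register -- what appears to be a
 * function-disable register on these ICH parts -- hides the SMBus
 * function; clear it, then read back to verify the write stuck. */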
if (val & 0x8) {
pci_write_config_word(dev, 0xF2, val & (~0x8));
pci_read_config_word(dev, 0xF2, &val);
if (val & 0x8)
dev_info(&dev->dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n", val);
else
dev_info(&dev->dev, "Enabled i801 SMBus device\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
/* It appears there is just one such device; if not, the WARN_ON below will fire */
static void __iomem *asus_rcba_base;
static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
{
u32 rcba;
if (likely(!asus_hides_smbus))
return;
WARN_ON(asus_rcba_base);
pci_read_config_dword(dev, 0xF0, &rcba);
/* use bits 31:14, 16 kB aligned */
asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
if (asus_rcba_base == NULL)
return;
}
static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
{
u32 val;
if (likely(!asus_hides_smbus || !asus_rcba_base))
return;
/* read the Function Disable register, dword mode only */
val = readl(asus_rcba_base + 0x3418);
writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); /* enable the SMBus device */
}
static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
{
if (likely(!asus_hides_smbus || !asus_rcba_base))
return;
iounmap(asus_rcba_base);
asus_rcba_base = NULL;
dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n");
}
static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
{
asus_hides_smbus_lpc_ich6_suspend(dev);
asus_hides_smbus_lpc_ich6_resume_early(dev);
asus_hides_smbus_lpc_ich6_resume(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
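/*
 * The boot-time fixup above simply runs the three suspend/resume phases
 * back to back: _suspend maps the 16 kB RCBA window named by LPC config
 * register 0xF0, _resume_early clears bit 3 of the Function Disable
 * register at RCBA + 0x3418 to unhide the SMBus device, and _resume
 * unmaps the window again. Splitting the work this way lets the same
 * code serve the real suspend/resume path, where the MMIO write must
 * happen in the RESUME_EARLY phase.
 */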
/*
* SiS 96x south bridge: BIOS typically hides SMBus device...
*/
static void quirk_sis_96x_smbus(struct pci_dev *dev)
{
u8 val = 0;
pci_read_config_byte(dev, 0x77, &val);
if (val & 0x10) {
dev_info(&dev->dev, "Enabling SiS 96x SMBus\n");
pci_write_config_byte(dev, 0x77, val & ~0x10);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
/*
* ... This is further complicated by the fact that some SiS96x south
* bridges pretend to be 85C503/5513 instead. In that case see if we
* spotted a compatible north bridge to make sure.
* (pci_find_device doesn't work yet)
*
* We can also enable the sis96x bit in the discovery register...
*/
#define SIS_DETECT_REGISTER 0x40
static void quirk_sis_503(struct pci_dev *dev)
{
u8 reg;
u16 devid;
pci_read_config_byte(dev, SIS_DETECT_REGISTER, &reg);
pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) {
pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
return;
}
/*
* Ok, it now shows up as a 96x... run the 96x quirk by
* hand in case it has already been processed.
* (depends on link order, which is apparently not guaranteed)
*/
dev->device = devid;
quirk_sis_96x_smbus(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
/*
* On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller
* and MC97 modem controller are disabled when a second PCI soundcard is
* present. This patch, tweaking the VT8237 ISA bridge, enables them.
* -- bjd
*/
static void asus_hides_ac97_lpc(struct pci_dev *dev)
{
u8 val;
int asus_hides_ac97 = 0;
if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
if (dev->device == PCI_DEVICE_ID_VIA_8237)
asus_hides_ac97 = 1;
}
if (!asus_hides_ac97)
return;
pci_read_config_byte(dev, 0x50, &val);
if (val & 0xc0) {
pci_write_config_byte(dev, 0x50, val & (~0xc0));
pci_read_config_byte(dev, 0x50, &val);
if (val & 0xc0)
dev_info(&dev->dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n", val);
else
dev_info(&dev->dev, "Enabled onboard AC97/MC97 devices\n");
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
/*
* If we are using libata we can drive this chip properly but must
* do this early on to make the additional device appear during
* the PCI scanning.
*/
static void quirk_jmicron_ata(struct pci_dev *pdev)
{
u32 conf1, conf5, class;
u8 hdr;
/* Only poke fn 0 */
if (PCI_FUNC(pdev->devfn))
return;
pci_read_config_dword(pdev, 0x40, &conf1);
pci_read_config_dword(pdev, 0x80, &conf5);
conf1 &= ~0x00CFF302; /* Clear bit 1, 8, 9, 12-19, 22, 23 */
conf5 &= ~(1 << 24); /* Clear bit 24 */
switch (pdev->device) {
case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */
case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */
case PCI_DEVICE_ID_JMICRON_JMB364: /* SATA dual ports */
/* The controller should be in single function ahci mode */
conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */
break;
case PCI_DEVICE_ID_JMICRON_JMB365:
case PCI_DEVICE_ID_JMICRON_JMB366:
/* Redirect IDE second PATA port to the right spot */
conf5 |= (1 << 24);
/* Fall through */
case PCI_DEVICE_ID_JMICRON_JMB361:
case PCI_DEVICE_ID_JMICRON_JMB363:
case PCI_DEVICE_ID_JMICRON_JMB369:
/* Enable dual function mode, AHCI on fn 0, IDE fn1 */
/* Set the class codes correctly and then direct IDE 0 */
conf1 |= 0x00C2A1B3; /* Set 0, 1, 4, 5, 7, 8, 13, 15, 17, 22, 23 */
break;
case PCI_DEVICE_ID_JMICRON_JMB368:
/* The controller should be in single function IDE mode */
conf1 |= 0x00C00000; /* Set 22, 23 */
break;
}
pci_write_config_dword(pdev, 0x40, conf1);
pci_write_config_dword(pdev, 0x80, conf5);
/* Update pdev accordingly */
pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
pdev->hdr_type = hdr & 0x7f;
pdev->multifunction = !!(hdr & 0x80);
pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
pdev->class = class >> 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
#endif
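/*
 * Note the pairing pattern used above and throughout this file: quirks
 * that rewrite config registers are declared both for boot (EARLY/
 * HEADER/FINAL) and for resume (RESUME/RESUME_EARLY), since a
 * suspend/resume cycle can bring the device back with its config space
 * reset to firmware defaults, undoing the boot-time fixup.
 */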
#ifdef CONFIG_X86_IO_APIC
static void quirk_alder_ioapic(struct pci_dev *pdev)
{
int i;
if ((pdev->class >> 8) != 0xff00)
return;
/* the first BAR is the location of the IO APIC...we must
* not touch this (and it's already covered by the fixmap), so
* forcibly insert it into the resource tree */
if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
insert_resource(&iomem_resource, &pdev->resource[0]);
/* The next five BARs all seem to be rubbish, so just clean
* them out */
for (i=1; i < 6; i++) {
memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
#endif
static void quirk_pcie_mch(struct pci_dev *pdev)
{
pci_msi_off(pdev);
pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);
/*
* It's possible for the MSI to get corrupted if SHPC and ACPI
* are used together on certain PXH-based systems.
*/
static void quirk_pcie_pxh(struct pci_dev *dev)
{
pci_msi_off(dev);
dev->no_msi = 1;
dev_warn(&dev->dev, "PXH quirk detected; SHPC device MSI disabled\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
/*
* Some Intel PCI Express chipsets have trouble with downstream
* device power management.
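* (pci_pm_d3_delay is the global delay, in milliseconds, that the PCI
* core waits after a D3hot->D0 transition before touching a device;
* raising it to 120 ms gives these chipsets extra settle time, while
* no_d1d2 keeps the core from using the intermediate D1/D2 states.)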
*/
static void quirk_intel_pcie_pm(struct pci_dev * dev)
{
pci_pm_d3_delay = 120;
dev->no_d1d2 = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
#ifdef CONFIG_X86_IO_APIC
/*
* Boot interrupts on some chipsets cannot be turned off. For these chipsets,
* remap the original interrupt in the Linux kernel to the boot interrupt, so
* that a PCI device's interrupt handler is installed on the boot interrupt
* line instead.
*/
static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
{
if (noioapicquirk || noioapicreroute)
return;
dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
dev_info(&dev->dev, "rerouting interrupts for [%04x:%04x]\n",
dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
/*
* On some chipsets we can disable the generation of legacy INTx boot
* interrupts.
*/
/*
* IO-APIC1 on the 6300ESB generates boot interrupts; see Intel order no.
* 300641-004US, section 5.7.3.
*/
#define INTEL_6300_IOAPIC_ABAR 0x40
#define INTEL_6300_DISABLE_BOOT_IRQ (1<<14)
static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
{
u16 pci_config_word;
if (noioapicquirk)
return;
pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word);
pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word);
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt);
/*
* disable boot interrupts on HT-1000
*/
#define BC_HT1000_FEATURE_REG 0x64
#define BC_HT1000_PIC_REGS_ENABLE (1<<0)
#define BC_HT1000_MAP_IDX 0xC00
#define BC_HT1000_MAP_DATA 0xC01
static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
{
u32 pci_config_dword;
u8 irq;
if (noioapicquirk)
return;
pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
BC_HT1000_PIC_REGS_ENABLE);
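/* With the PIC registers exposed, walk all 32 mapping entries through
 * the index/data port pair at 0xC00/0xC01 and clear each one; the
 * write below then restores the original feature register, hiding the
 * PIC registers again. */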
for (irq = 0x10; irq < 0x10 + 32; irq++) {
outb(irq, BC_HT1000_MAP_IDX);
outb(0x00, BC_HT1000_MAP_DATA);
}
pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
/*
* disable boot interrupts on AMD and ATI chipsets
*/
/*
* NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131
* rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode
* (due to an erratum).
*/
#define AMD_813X_MISC 0x40
#define AMD_813X_NOIOAMODE (1<<0)
#define AMD_813X_REV_B1 0x12
#define AMD_813X_REV_B2 0x13
static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
{
u32 pci_config_dword;
if (noioapicquirk)
return;
if ((dev->revision == AMD_813X_REV_B1) ||
(dev->revision == AMD_813X_REV_B2))
return;
pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
pci_config_dword &= ~AMD_813X_NOIOAMODE;
pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
#define AMD_8111_PCI_IRQ_ROUTING 0x56
static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
{
u16 pci_config_word;
if (noioapicquirk)
return;
pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
if (!pci_config_word) {
dev_info(&dev->dev, "boot interrupts on device [%04x:%04x] "
"already disabled\n", dev->vendor, dev->device);
return;
}
pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
#endif /* CONFIG_X86_IO_APIC */
/*
* The Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0
* size, but PIO transfers won't work if BAR0 falls on an odd 8-byte
* boundary. Re-allocate the region if needed...
*/
static void quirk_tc86c001_ide(struct pci_dev *dev)
{
struct resource *r = &dev->resource[0];
if (r->start & 0x8) {
r->start = 0;
r->end = 0xf;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
quirk_tc86c001_ide);
/*
* The PLX PCI 9050 PCI target bridge controller has an erratum that
* prevents the local configuration registers, accessible via BAR0 (memory)
* or BAR1 (I/O), from being read correctly if bit 7 of the base address
* is set. The BAR0 or BAR1 region may be disabled (size 0) or enabled
* (size 128). Re-allocate the regions to a 256-byte boundary if necessary.
*/
static void quirk_plx_pci9050(struct pci_dev *dev)
{
unsigned int bar;
/* Fixed in revision 2 (PCI 9052). */
if (dev->revision >= 2)
return;
for (bar = 0; bar <= 1; bar++)
if (pci_resource_len(dev, bar) == 0x80 &&
(pci_resource_start(dev, bar) & 0x80)) {
struct resource *r = &dev->resource[bar];
dev_info(&dev->dev,
"Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
bar);
r->start = 0;
r->end = 0xff;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
quirk_plx_pci9050);
/*
* The following Meilhaus (vendor ID 0x1402) device IDs (amongst others)
* may be using the PLX PCI 9050: 0x0630, 0x0940, 0x0950, 0x0960, 0x100b,
* 0x1400, 0x140a, 0x140b, 0x14e0, 0x14ea, 0x14eb, 0x1604, 0x1608, 0x160c,
* 0x168f, 0x2000, 0x2600, 0x3000, 0x810a, 0x810b.
*
* Currently, device IDs 0x2000 and 0x2600 are used by the Comedi "me_daq"
* driver.
*/
DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050);
DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050);
static void quirk_netmos(struct pci_dev *dev)
{
unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
unsigned int num_serial = dev->subsystem_device & 0xf;
/*
* These Netmos parts are multiport serial devices with optional
* parallel ports. Even when parallel ports are present, they
* are identified as class SERIAL, which means the serial driver
* will claim them. To prevent this, mark them as class OTHER.
* These combo devices should be claimed by parport_serial.
*
* The subdevice ID is of the form 0x00PS, where <P> is the number
* of parallel ports and <S> is the number of serial ports.
*/
switch (dev->device) {
case PCI_DEVICE_ID_NETMOS_9835:
/* Well, this rule doesn't hold for the following 9835 device */
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
return;
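/* else fall through to the common handling below */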
case PCI_DEVICE_ID_NETMOS_9735:
case PCI_DEVICE_ID_NETMOS_9745:
case PCI_DEVICE_ID_NETMOS_9845:
case PCI_DEVICE_ID_NETMOS_9855:
if (num_parallel) {
dev_info(&dev->dev, "Netmos %04x (%u parallel, "
"%u serial); changing class SERIAL to OTHER "
"(use parport_serial)\n",
dev->device, num_parallel, num_serial);
dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
(dev->class & 0xff);
}
}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
static void quirk_e100_interrupt(struct pci_dev *dev)
{
u16 command, pmcsr;
u8 __iomem *csr;
u8 cmd_hi;
int pm;
switch (dev->device) {
/* PCI IDs taken from drivers/net/e100.c */
case 0x1029:
case 0x1030 ... 0x1034:
case 0x1038 ... 0x103E:
case 0x1050 ... 0x1057:
case 0x1059:
case 0x1064 ... 0x106B:
case 0x1091 ... 0x1095:
case 0x1209:
case 0x1229:
case 0x2449:
case 0x2459:
case 0x245D:
case 0x27DC:
break;
default:
return;
}
/*
* Some firmware hands off the e100 with interrupts enabled,
* which can cause a flood of interrupts if packets are
* received before the driver attaches to the device. So
* disable all e100 interrupts here. The driver will
* re-enable them when it's ready.
*/
pci_read_config_word(dev, PCI_COMMAND, &command);
if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
return;
/*
* Check that the device is in the D0 power state. If it's not,
* there is no point in looking any further.
*/
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
if (pm) {
pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
return;
}
/* Convert from PCI bus to resource space. */
csr = ioremap(pci_resource_start(dev, 0), 8);
if (!csr) {
dev_warn(&dev->dev, "Can't map e100 registers\n");
return;
}
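/* Byte 3 of the SCB command word is the interrupt control byte on
 * these parts; judging by the e100 driver, writing 1 here sets the
 * "mask all interrupts" bit. */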
cmd_hi = readb(csr + 3);
if (cmd_hi == 0) {
dev_warn(&dev->dev, "Firmware left e100 interrupts enabled; "
"disabling\n");
writeb(1, csr + 3);
}
iounmap(csr);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt);
/*
* The 82575 and 82598 may experience data corruption issues when
* transitioning out of L0s. To prevent this, we need to disable L0s on
* the PCIe link.
*/
static void quirk_disable_aspm_l0s(struct pci_dev *dev)
{
dev_info(&dev->dev, "Disabling L0s\n");
pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
static void fixup_rev1_53c810(struct pci_dev *dev)
{
/* Rev 1 NCR 53c810 chips don't set the class code at all, which means
* they don't get their resources remapped. Fix that here.
*/
if (dev->class == PCI_CLASS_NOT_DEFINED) {
dev_info(&dev->dev, "NCR 53c810 rev 1 detected; setting PCI class\n");
dev->class = PCI_CLASS_STORAGE_SCSI;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
/* Enable 1k I/O space granularity on the Intel P64H2 */
static void quirk_p64h2_1k_io(struct pci_dev *dev)
{
u16 en1k;
pci_read_config_word(dev, 0x40, &en1k);
if (en1k & 0x200) {
dev_info(&dev->dev, "Enable I/O Space to 1KB granularity\n");
dev->io_window_1k = 1;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
/* Under some circumstances, AER is not linked with extended capabilities.
* Force it to be linked by setting the corresponding control bit in the
* config space.
*/
static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
{
uint8_t b;
if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
if (!(b & 0x20)) {
pci_write_config_byte(dev, 0xf41, b | 0x20);
dev_info(&dev->dev,
"Linking AER extended capability\n");
}
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_pcie_aer_ext_cap);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_pcie_aer_ext_cap);
static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
{
/*
* Disable PCI Bus Parking and PCI Master read caching on CX700
* which causes unspecified timing errors with a VT6212L on the PCI
* bus leading to USB2.0 packet loss.
*
* This quirk is only enabled if a second (on the external PCI bus)
* VT6212L is found -- the CX700 core itself also contains a USB
* host controller with the same PCI ID as the VT6212L.
*/
/* Count VT6212L instances */
struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,
PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
uint8_t b;
/* p should contain the first (internal) VT6212L -- see if we have
an external one by searching again */
p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
if (!p)
return;
pci_dev_put(p);
if (pci_read_config_byte(dev, 0x76, &b) == 0) {
if (b & 0x40) {
/* Turn off PCI Bus Parking */
pci_write_config_byte(dev, 0x76, b ^ 0x40);
dev_info(&dev->dev,
"Disabling VIA CX700 PCI parking\n");
}
}
if (pci_read_config_byte(dev, 0x72, &b) == 0) {
if (b != 0) {
/* Turn off PCI Master read caching */
pci_write_config_byte(dev, 0x72, 0x0);
/* Set PCI Master Bus time-out to "1x16 PCLK" */
pci_write_config_byte(dev, 0x75, 0x1);
/* Disable "Read FIFO Timer" */
pci_write_config_byte(dev, 0x77, 0x0);
dev_info(&dev->dev,
"Disabling VIA CX700 PCI caching\n");
}
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
/*
* For Broadcom 5706, 5708, 5709 rev. A NICs, any read beyond the
* VPD end tag will hang the device. This problem was initially
* observed when a vpd entry was created in sysfs
* ('/sys/bus/pci/devices/<id>/vpd'). A read to this sysfs entry
* will dump 32k of data. Reading a full 32k will cause an access
* beyond the VPD end tag causing the device to hang. Once the device
* is hung, the bnx2 driver will not be able to reset the device.
* We believe that it is legal to read beyond the end tag and
* therefore the solution is to limit the read/write length.
*/
static void quirk_brcm_570x_limit_vpd(struct pci_dev *dev)
{
/*
* Only limit the VPD read/write length for 5706, 5706S, 5708,
* 5708S and 5709 rev. A
*/
if ((dev->device == PCI_DEVICE_ID_NX2_5706) ||
(dev->device == PCI_DEVICE_ID_NX2_5706S) ||
(dev->device == PCI_DEVICE_ID_NX2_5708) ||
(dev->device == PCI_DEVICE_ID_NX2_5708S) ||
((dev->device == PCI_DEVICE_ID_NX2_5709) &&
(dev->revision & 0xf0) == 0x0)) {
if (dev->vpd)
dev->vpd->len = 0x80;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5706,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5706S,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5708,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5708S,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5709,
quirk_brcm_570x_limit_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5709S,
quirk_brcm_570x_limit_vpd);
static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
{
u32 rev;
pci_read_config_dword(dev, 0xf4, &rev);
/* Only CAP the MRRS if the device is a 5719 A0 */
if (rev == 0x05719000) {
int readrq = pcie_get_readrq(dev);
if (readrq > 2048)
pcie_set_readrq(dev, 2048);
}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5719,
quirk_brcm_5719_limit_mrrs);
/* Originally in the EDAC sources for the i82875P:
* Intel tells BIOS developers to hide device 6, which configures
* access to the overflow device containing the DRBs. This is where
* we unhide device 6.
* http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm
*/
static void quirk_unhide_mch_dev6(struct pci_dev *dev)
{
u8 reg;
if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) {
dev_info(&dev->dev, "Enabling MCH 'Overflow' Device\n");
pci_write_config_byte(dev, 0xF4, reg | 0x02);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
quirk_unhide_mch_dev6);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
quirk_unhide_mch_dev6);
#ifdef CONFIG_TILEPRO
/*
* The Tilera TILEmpower tilepro platform needs to set the link speed
* to 2.5GT(Giga-Transfers)/s (Gen 1). The default link speed
* setting is 5GT/s (Gen 2). 0x98 is the offset of the Link Control 2
* register in the PCIe capability of the PEX8624 PCIe switch. The
* switch supports link speed auto-negotiation, but falsely sets
* the link speed to 5GT/s.
*/
static void quirk_tile_plx_gen1(struct pci_dev *dev)
{
if (tile_plx_gen1) {
pci_write_config_dword(dev, 0x98, 0x1);
mdelay(50);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
#endif /* CONFIG_TILEPRO */
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
* PCI_BUS_FLAGS_NO_MSI in the chipset's bus flags because there may be
* other busses controlled by the chipset that Linux is not aware of.
* Instead of setting the flag on all busses in the machine, simply
* disable MSI globally.
*/
static void quirk_disable_all_msi(struct pci_dev *dev)
{
pci_no_msi();
dev_warn(&dev->dev, "MSI quirk detected; MSI disabled\n");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
/* Disable MSI on chipsets that are known to not support it */
static void quirk_disable_msi(struct pci_dev *dev)
{
if (dev->subordinate) {
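/* dev->subordinate is the bus on the far side of this bridge;
 * flagging it with PCI_BUS_FLAGS_NO_MSI disables MSI for every
 * device behind the bridge rather than for the bridge itself. */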
dev_warn(&dev->dev, "MSI quirk detected; "
"subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
/*
* The APC bridge device in AMD 780 family northbridges has some random
* OEM subsystem ID in its vendor ID register (erratum 18), so instead
* we use the possible vendor/device IDs of the host bridge for the
* declared quirk, and search for the APC bridge by slot number.
*/
static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
{
struct pci_dev *apc_bridge;
apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0));
if (apc_bridge) {
if (apc_bridge->device == 0x9602)
quirk_disable_msi(apc_bridge);
pci_dev_put(apc_bridge);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
/* Go through the list of Hypertransport capabilities and
* return 1 if an HT MSI capability is found and enabled */
static int msi_ht_cap_enabled(struct pci_dev *dev)
{
int pos, ttl = 48;
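/* ttl bounds the walk so that a malformed capability list which loops
 * back on itself cannot hang us; 48 is the conventional capability
 * walk limit used by the PCI core. */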
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
u8 flags;
if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
&flags) == 0)
{
dev_info(&dev->dev, "Found %s HT MSI Mapping\n",
flags & HT_MSI_FLAGS_ENABLE ?
"enabled" : "disabled");
return (flags & HT_MSI_FLAGS_ENABLE) != 0;
}
pos = pci_find_next_ht_capability(dev, pos,
HT_CAPTYPE_MSI_MAPPING);
}
return 0;
}
/* Check the HyperTransport MSI mapping to know whether MSI is enabled or not */
static void quirk_msi_ht_cap(struct pci_dev *dev)
{
if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
dev_warn(&dev->dev, "MSI quirk detected; "
"subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
quirk_msi_ht_cap);
/* The nVidia CK804 chipset may have 2 HT MSI mappings.
* MSI is supported if the MSI capability is set in any of these mappings.
*/
static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
{
struct pci_dev *pdev;
if (!dev->subordinate)
return;
/* Check the HT MSI cap on this chipset and the root one;
* a single one having MSI is enough to be sure that MSI is supported.
*/
pdev = pci_get_slot(dev->bus, 0);
if (!pdev)
return;
if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
dev_warn(&dev->dev, "MSI quirk detected; "
"subordinate MSI disabled\n");
dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
}
pci_dev_put(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_msi_ht_cap);
/* Force enable MSI mapping capability on HT bridges */
static void ht_enable_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = 48;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
u8 flags;
if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
&flags) == 0) {
dev_info(&dev->dev, "Enabling HT MSI Mapping\n");
pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
flags | HT_MSI_FLAGS_ENABLE);
}
pos = pci_find_next_ht_capability(dev, pos,
HT_CAPTYPE_MSI_MAPPING);
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
ht_enable_msi_mapping);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
ht_enable_msi_mapping);
/* The P5N32-SLI motherboards from Asus have a problem with MSI
* for the MCP55 NIC. It is not yet determined whether the MSI problem
* also affects other devices. For now, turn off MSI for this device.
*/
static void nvenet_msi_disable(struct pci_dev *dev)
{
const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
if (board_name &&
(strstr(board_name, "P5N32-SLI PREMIUM") ||
strstr(board_name, "P5N32-E SLI"))) {
dev_info(&dev->dev,
"Disabling msi for MCP55 NIC on P5N32-SLI\n");
dev->no_msi = 1;
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_NVENET_15,
nvenet_msi_disable);
/*
* Some versions of the MCP55 bridge from NVIDIA have a legacy IRQ routing
* config register. This register controls the routing of legacy interrupts
* from devices that route through the MCP55. If this register is
* misprogrammed, interrupts are only sent to the BSP, unlike conventional
* systems where the IRQ is broadcast to all online CPUs. Not having this
* register set properly prevents kdump from booting up properly, so let's
* make sure that we have it set correctly.
* Note this is an undocumented register.
*/
static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
{
u32 cfg;
if (!pci_find_capability(dev, PCI_CAP_ID_HT))
return;
pci_read_config_dword(dev, 0x74, &cfg);
if (cfg & ((1 << 2) | (1 << 15))) {
printk(KERN_INFO "Rewriting irq routing register on MCP55\n");
cfg &= ~((1 << 2) | (1 << 15));
pci_write_config_dword(dev, 0x74, cfg);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
nvbridge_check_legacy_irq_routing);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
nvbridge_check_legacy_irq_routing);
static int ht_check_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = 48;
int found = 0;
/* check whether there is an HT MSI capability on this device, and whether it is enabled */
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
u8 flags;
if (found < 1)
found = 1;
if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
&flags) == 0) {
if (flags & HT_MSI_FLAGS_ENABLE) {
if (found < 2) {
found = 2;
break;
}
}
}
pos = pci_find_next_ht_capability(dev, pos,
HT_CAPTYPE_MSI_MAPPING);
}
return found;
}
static int host_bridge_with_leaf(struct pci_dev *host_bridge)
{
struct pci_dev *dev;
int pos;
int i, dev_no;
int found = 0;
dev_no = host_bridge->devfn >> 3;
for (i = dev_no + 1; i < 0x20; i++) {
dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
if (!dev)
continue;
/* found the next host bridge? */
pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
if (pos != 0) {
pci_dev_put(dev);
break;
}
if (ht_check_msi_mapping(dev)) {
found = 1;
pci_dev_put(dev);
break;
}
pci_dev_put(dev);
}
return found;
}
#define PCI_HT_CAP_SLAVE_CTRL0 4 /* link control 0 */
#define PCI_HT_CAP_SLAVE_CTRL1 8 /* link control 1 */
static int is_end_of_ht_chain(struct pci_dev *dev)
{
int pos, ctrl_off;
int end = 0;
u16 flags, ctrl;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
if (!pos)
goto out;
pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);
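/* Bit 10 of the slave capability flags appears to record which of the
 * two links faces the host; pick the corresponding Link Control
 * register, where bit 6 is the End of Chain bit. */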
ctrl_off = ((flags >> 10) & 1) ?
PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
pci_read_config_word(dev, pos + ctrl_off, &ctrl);
if (ctrl & (1 << 6))
end = 1;
out:
return end;
}
static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
{
struct pci_dev *host_bridge;
int pos;
int i, dev_no;
int found = 0;
dev_no = dev->devfn >> 3;
for (i = dev_no; i >= 0; i--) {
host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
if (!host_bridge)
continue;
pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
if (pos != 0) {
found = 1;
break;
}
pci_dev_put(host_bridge);
}
if (!found)
return;
/* don't enable end_device/host_bridge with leaf directly here */
if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
host_bridge_with_leaf(host_bridge))
goto out;
/* the root bridge already did that */
if (msi_ht_cap_enabled(host_bridge))
goto out;
ht_enable_msi_mapping(dev);
out:
pci_dev_put(host_bridge);
}
static void ht_disable_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = 48;
pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
while (pos && ttl--) {
u8 flags;
if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
&flags) == 0) {
dev_info(&dev->dev, "Disabling HT MSI Mapping\n");
pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
flags & ~HT_MSI_FLAGS_ENABLE);
}
pos = pci_find_next_ht_capability(dev, pos,
HT_CAPTYPE_MSI_MAPPING);
}
}
static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
{
struct pci_dev *host_bridge;
int pos;
int found;
if (!pci_msi_enabled())
return;
/* check whether there is an HT MSI capability on this device, and whether it is enabled */
found = ht_check_msi_mapping(dev);
/* no HT MSI CAP */
if (found == 0)
return;
/*
* HT MSI mapping should be disabled on devices that are below
* a non-Hypertransport host bridge. Locate the host bridge...
*/
host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
if (host_bridge == NULL) {
dev_warn(&dev->dev,
"nv_msi_ht_cap_quirk didn't locate host bridge\n");
return;
}
pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
if (pos != 0) {
/* The host bridge is an HT bridge */
if (found == 1) {
/* it is not enabled, try to enable it */
if (all)
ht_enable_msi_mapping(dev);
else
nv_ht_enable_msi_mapping(dev);
}
goto out;
}
/* HT MSI is not enabled */
if (found == 1)
goto out;
/* The host bridge is not HT; disable HT MSI mapping on this device */
ht_disable_msi_mapping(dev);
out:
pci_dev_put(host_bridge);
}
static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
{
return __nv_msi_ht_cap_quirk(dev, 1);
}
static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
{
return __nv_msi_ht_cap_quirk(dev, 0);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
}
static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
{
struct pci_dev *p;
/* The SB700 MSI issue is fixed at the hardware level from revision A21
* onwards; we need to check the PCI revision ID of the SMBus controller
* to determine the SB700 revision.
*/
p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
NULL);
if (!p)
return;
if ((p->revision < 0x3B) && (p->revision >= 0x30))
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
pci_dev_put(p);
}
static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
{
/* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */
if (dev->revision < 0x18) {
dev_info(&dev->dev, "set MSI_INTX_DISABLE_BUG flag\n");
dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5780,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5780S,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5714,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5714S,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5715,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_TIGON3_5715S,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394,
quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090,
quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091,
quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0,
quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
quirk_msi_intx_disable_qca_bug);
#endif /* CONFIG_PCI_MSI */
/* Allow manual resource allocation for PCI hotplug bridges
* via pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For
* some PCI-PCI hotplug bridges, like PLX 6254 (former HINT HB6),
* the kernel fails to allocate resources when a hotplug device is
* inserted and the PCI bus is rescanned.
*/
static void quirk_hotplug_bridge(struct pci_dev *dev)
{
dev->is_hotplug_bridge = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
/*
* This is a quirk for the Ricoh MMC controller found as a part of
* some multifunction chips.
* This is very similar and based on the ricoh_mmc driver written by
* Philip Langdale. Thank you for these magic sequences.
*
* These chips implement the four main memory card controllers (SD, MMC, MS, xD)
* and one or both of cardbus or firewire.
*
* It happens that they implement SD and MMC
* support as separate controllers (and PCI functions). The Linux SDHCI
* driver supports MMC cards but the chip detects MMC cards in hardware
* and directs them to the MMC controller - so the SDHCI driver never sees
* them.
*
* To get around this, we must disable the useless MMC controller. At
* that point, the SDHCI controller will start seeing the cards. It
* seems that the relevant PCI registers to deactivate the MMC
* controller live on PCI function 0, which might be the cardbus
* controller or the firewire controller, depending on the particular
* chip in question.
*
* This has to be done early, because as soon as we disable the MMC
* controller, other PCI functions shift up one level, e.g. function #2
* becomes function #1, and this will confuse the PCI core.
*/
#ifdef CONFIG_MMC_RICOH_MMC
static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
{
/* disable via cardbus interface */
u8 write_enable;
u8 write_target;
u8 disable;
/* disable must be done via function #0 */
if (PCI_FUNC(dev->devfn))
return;
pci_read_config_byte(dev, 0xB7, &disable);
if (disable & 0x02)
return;
pci_read_config_byte(dev, 0x8E, &write_enable);
pci_write_config_byte(dev, 0x8E, 0xAA);
pci_read_config_byte(dev, 0x8D, &write_target);
pci_write_config_byte(dev, 0x8D, 0xB7);
pci_write_config_byte(dev, 0xB7, disable | 0x02);
pci_write_config_byte(dev, 0x8E, write_enable);
pci_write_config_byte(dev, 0x8D, write_target);
dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via cardbus function)\n");
dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
{
/* disable via firewire interface */
u8 write_enable;
u8 disable;
/* disable must be done via function #0 */
if (PCI_FUNC(dev->devfn))
return;
/*
* RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize
* certain types of SD/MMC cards. Lowering the SD base
* clock frequency from 200 MHz to 50 MHz fixes this issue.
*
* 0x150 - SD2.0 mode enable for changing base clock
* frequency to 50 MHz
* 0xe1 - Base clock frequency
* 0x32 - 50 MHz new clock frequency
* 0xf9 - Key register for 0x150
* 0xfc - Key register for 0xe1
*/
if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
pci_write_config_byte(dev, 0xf9, 0xfc);
pci_write_config_byte(dev, 0x150, 0x10);
pci_write_config_byte(dev, 0xf9, 0x00);
pci_write_config_byte(dev, 0xfc, 0x01);
pci_write_config_byte(dev, 0xe1, 0x32);
pci_write_config_byte(dev, 0xfc, 0x00);
dev_notice(&dev->dev, "MMC controller base frequency changed to 50Mhz.\n");
}
pci_read_config_byte(dev, 0xCB, &disable);
if (disable & 0x02)
return;
pci_read_config_byte(dev, 0xCA, &write_enable);
pci_write_config_byte(dev, 0xCA, 0x57);
pci_write_config_byte(dev, 0xCB, disable | 0x02);
pci_write_config_byte(dev, 0xCA, write_enable);
dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
#endif /*CONFIG_MMC_RICOH_MMC*/
#ifdef CONFIG_DMAR_TABLE
#define VTUNCERRMSK_REG 0x1ac
#define VTD_MSK_SPEC_ERRORS (1 << 31)
/*
* This is a quirk for masking VT-d spec-defined errors to platform error
* handling logic. Without this, platforms using the Intel 7500 and 5500
* chipsets (and derivative chipsets like the X58) seem to generate an
* NMI/SMI (based on the RAS config settings of the platform) when a VT-d
* fault happens. The resulting SMI can cause the system to hang.
*
* VT-d spec-related errors are already handled by the VT-d OS code, so there
* is no need to report the same error through other channels.
*/
static void vtd_mask_spec_errors(struct pci_dev *dev)
{
u32 word;
pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
#endif
static void fixup_ti816x_class(struct pci_dev *dev)
{
/* TI 816x devices do not have class code set when in PCIe boot mode */
dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
/* Some PCIe devices do not work reliably with the maximum payload
* size they claim to support.
*/
static void fixup_mpss_256(struct pci_dev *dev)
{
dev->pcie_mpss = 1; /* 256 bytes */
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
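/*
* A minimal sketch (illustration, not from the original source): the
* pcie_mpss field stores the encoded maximum payload size, where the
* size in bytes = 128 << pcie_mpss, so the value 1 set above means
* 256 bytes.
*/
#if 0 /* illustrative example */
static unsigned int mpss_bytes(struct pci_dev *dev)
{
return 128 << dev->pcie_mpss; /* e.g. 1 -> 256 bytes */
}
#endif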
/* Intel 5000 and 5100 memory controllers have an erratum with read completion
* coalescing (which is enabled by default on some BIOSes) and an MPS of 256B.
* Since there is no way of knowing what the PCIe MPS on each fabric will be
* until all of the devices are discovered and the buses walked, read completion
* coalescing must be disabled. Unfortunately, it cannot be re-enabled because
* it is possible to hotplug a device with an MPS of 256B.
*/
static void quirk_intel_mc_errata(struct pci_dev *dev)
{
int err;
u16 rcc;
if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
return;
/* The Intel erratum specifies bits to change but does not say what they are.
* Keeping them magical until such time as the registers and values can
* be explained.
*/
err = pci_read_config_word(dev, 0x48, &rcc);
if (err) {
dev_err(&dev->dev, "Error attempting to read the read "
"completion coalescing register.\n");
return;
}
if (!(rcc & (1 << 10)))
return;
rcc &= ~(1 << 10);
err = pci_write_config_word(dev, 0x48, rcc);
if (err) {
dev_err(&dev->dev, "Error attempting to write the read "
"completion coalescing register.\n");
return;
}
pr_info_once("Read completion coalescing disabled due to hardware "
"errata relating to 256B MPS.\n");
}
/* Intel 5000 series memory controllers and ports 2-7 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);
/* Intel 5100 series memory controllers and ports 2-7 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
static ktime_t fixup_debug_start(struct pci_dev *dev,
void (*fn)(struct pci_dev *dev))
{
ktime_t calltime = ktime_set(0, 0);
dev_dbg(&dev->dev, "calling %pF\n", fn);
if (initcall_debug) {
pr_debug("calling %pF @ %i for %s\n",
fn, task_pid_nr(current), dev_name(&dev->dev));
calltime = ktime_get();
}
return calltime;
}
static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
void (*fn)(struct pci_dev *dev))
{
ktime_t delta, rettime;
unsigned long long duration;
if (initcall_debug) {
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
pr_debug("pci fixup %pF returned after %lld usecs for %s\n",
fn, duration, dev_name(&dev->dev));
}
}
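/*
* Usage note (illustrative, hypothetical output): booting with the
* standard "initcall_debug" kernel parameter activates the two helpers
* above, producing one line per fixup in the format printed by
* fixup_debug_report(), e.g.:
*
* pci fixup quirk_example+0x0/0x10 returned after 12 usecs for 0000:00:1f.0
*/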
/*
* Some BIOS implementations leave the Intel GPU interrupts enabled,
* even though no one is handling them (e.g. the i915 driver is never loaded).
* Additionally, the interrupt destination is not set up properly
* and the interrupt ends up -somewhere-.
*
* These spurious interrupts are "sticky" and the kernel disables
* the (shared) interrupt line after 100,000+ generated interrupts.
*
* Fix it by disabling the still enabled interrupts.
* This resolves crashes often seen on monitor unplug.
*/
#define I915_DEIER_REG 0x4400c
static void disable_igfx_irq(struct pci_dev *dev)
{
void __iomem *regs = pci_iomap(dev, 0, 0);
if (regs == NULL) {
dev_warn(&dev->dev, "igfx quirk: Can't iomap PCI device\n");
return;
}
/* Check if any interrupt line is still enabled */
if (readl(regs + I915_DEIER_REG) != 0) {
dev_warn(&dev->dev, "BIOS left Intel GPU interrupts enabled; "
"disabling\n");
writel(0, regs + I915_DEIER_REG);
}
pci_iounmap(dev, regs);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
/*
* Some devices may pass our check in pci_intx_mask_supported() because
* PCI_COMMAND_INTX_DISABLE appears to work, even though they do not
* actually support the feature properly.
*/
static void quirk_broken_intx_masking(struct pci_dev *dev)
{
dev->broken_intx_masking = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030,
quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
quirk_broken_intx_masking);
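/*
* Illustrative sketch (hypothetical caller, not from the original
* source): drivers that rely on INTx masking consult
* pci_intx_mask_supported(), which reports false for devices flagged
* by the quirk above.
*/
#if 0 /* illustrative example */
static int my_driver_setup_intx(struct pci_dev *pdev)
{
if (!pci_intx_mask_supported(pdev))
return -ENODEV; /* fall back to another interrupt strategy */
return 0;
}
#endif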
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
{
ktime_t calltime;
for (; f < end; f++)
if ((f->class == (u32) (dev->class >> f->class_shift) ||
f->class == (u32) PCI_ANY_ID) &&
(f->vendor == dev->vendor ||
f->vendor == (u16) PCI_ANY_ID) &&
(f->device == dev->device ||
f->device == (u16) PCI_ANY_ID)) {
calltime = fixup_debug_start(dev, f->hook);
f->hook(dev);
fixup_debug_report(dev, calltime, f->hook);
}
}
extern struct pci_fixup __start_pci_fixups_early[];
extern struct pci_fixup __end_pci_fixups_early[];
extern struct pci_fixup __start_pci_fixups_header[];
extern struct pci_fixup __end_pci_fixups_header[];
extern struct pci_fixup __start_pci_fixups_final[];
extern struct pci_fixup __end_pci_fixups_final[];
extern struct pci_fixup __start_pci_fixups_enable[];
extern struct pci_fixup __end_pci_fixups_enable[];
extern struct pci_fixup __start_pci_fixups_resume[];
extern struct pci_fixup __end_pci_fixups_resume[];
extern struct pci_fixup __start_pci_fixups_resume_early[];
extern struct pci_fixup __end_pci_fixups_resume_early[];
extern struct pci_fixup __start_pci_fixups_suspend[];
extern struct pci_fixup __end_pci_fixups_suspend[];
static bool pci_apply_fixup_final_quirks;
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
{
struct pci_fixup *start, *end;
switch(pass) {
case pci_fixup_early:
start = __start_pci_fixups_early;
end = __end_pci_fixups_early;
break;
case pci_fixup_header:
start = __start_pci_fixups_header;
end = __end_pci_fixups_header;
break;
case pci_fixup_final:
if (!pci_apply_fixup_final_quirks)
return;
start = __start_pci_fixups_final;
end = __end_pci_fixups_final;
break;
case pci_fixup_enable:
start = __start_pci_fixups_enable;
end = __end_pci_fixups_enable;
break;
case pci_fixup_resume:
start = __start_pci_fixups_resume;
end = __end_pci_fixups_resume;
break;
case pci_fixup_resume_early:
start = __start_pci_fixups_resume_early;
end = __end_pci_fixups_resume_early;
break;
case pci_fixup_suspend:
start = __start_pci_fixups_suspend;
end = __end_pci_fixups_suspend;
break;
default:
/* stupid compiler warning, you would think with an enum... */
return;
}
pci_do_fixups(dev, start, end);
}
EXPORT_SYMBOL(pci_fixup_device);
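/*
* A minimal sketch of how a new quirk plugs into this machinery (the
* vendor/device IDs and hook below are hypothetical, example only).
* The DECLARE_PCI_FIXUP_* variant chosen selects which of the passes
* handled by pci_fixup_device() above will run the hook.
*/
#if 0 /* illustrative example, not a real quirk */
static void quirk_example(struct pci_dev *dev)
{
dev_info(&dev->dev, "example quirk ran at the final pass\n");
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_example);
#endif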
static int __init pci_apply_final_quirks(void)
{
struct pci_dev *dev = NULL;
u8 cls = 0;
u8 tmp;
if (pci_cache_line_size)
printk(KERN_DEBUG "PCI: CLS %u bytes\n",
pci_cache_line_size << 2);
pci_apply_fixup_final_quirks = true;
for_each_pci_dev(dev) {
pci_fixup_device(pci_fixup_final, dev);
/*
* If arch hasn't set it explicitly yet, use the CLS
* value shared by all PCI devices. If there's a
* mismatch, fall back to the default value.
*/
if (!pci_cache_line_size) {
pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
if (!cls)
cls = tmp;
if (!tmp || cls == tmp)
continue;
printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), "
"using %u bytes\n", cls << 2, tmp << 2,
pci_dfl_cache_line_size << 2);
pci_cache_line_size = pci_dfl_cache_line_size;
}
}
if (!pci_cache_line_size) {
printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
cls << 2, pci_dfl_cache_line_size << 2);
pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
}
return 0;
}
fs_initcall_sync(pci_apply_final_quirks);
/*
* The following are device-specific reset methods, which can be used to
* reset a single function if other methods (e.g. FLR, PM D0->D3) are
* not available.
*/
static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
{
int pos;
/* only implement PCI_CLASS_SERIAL_USB at present */
if (dev->class == PCI_CLASS_SERIAL_USB) {
pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
if (!pos)
return -ENOTTY;
if (probe)
return 0;
pci_write_config_byte(dev, pos + 0x4, 1);
msleep(100);
return 0;
} else {
return -ENOTTY;
}
}
static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
{
int i;
u16 status;
/*
* http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf
*
* The 82599 supports FLR on VFs, but FLR support is reported only
* in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5).
* Therefore, we can't use pcie_flr(), which checks the VF DEVCAP.
*/
if (probe)
return 0;
/* Wait for the Transaction Pending bit to clear */
for (i = 0; i < 4; i++) {
if (i)
msleep((1 << (i - 1)) * 100);
pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
if (!(status & PCI_EXP_DEVSTA_TRPND))
goto clear;
}
dev_err(&dev->dev, "transaction is not cleared; "
"proceeding with reset anyway\n");
clear:
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
msleep(100);
return 0;
}
#include "../gpu/drm/i915/i915_reg.h"
#define MSG_CTL 0x45010
#define NSDE_PWR_STATE 0xd0100
#define IGD_OPERATION_TIMEOUT 10000 /* set timeout 10 seconds */
static int reset_ivb_igd(struct pci_dev *dev, int probe)
{
void __iomem *mmio_base;
unsigned long timeout;
u32 val;
if (probe)
return 0;
mmio_base = pci_iomap(dev, 0, 0);
if (!mmio_base)
return -ENOMEM;
iowrite32(0x00000002, mmio_base + MSG_CTL);
/*
* Clobbering the SOUTH_CHICKEN2 register is fine only if the next
* driver loaded sets the right bits. However, this is a reset, and
* the bits have been set by i915 previously, so we clobber the
* SOUTH_CHICKEN2 register directly here.
*/
iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2);
val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe;
iowrite32(val, mmio_base + PCH_PP_CONTROL);
timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT);
do {
val = ioread32(mmio_base + PCH_PP_STATUS);
if ((val & 0xb0000000) == 0)
goto reset_complete;
msleep(10);
} while (time_before(jiffies, timeout));
dev_warn(&dev->dev, "timeout during reset\n");
reset_complete:
iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);
pci_iounmap(dev, mmio_base);
return 0;
}
#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
reset_intel_82599_sfp_virtfn },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
reset_ivb_igd },
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
reset_ivb_igd },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
reset_intel_generic_dev },
{ 0 }
};
/*
* These device-specific reset methods are here rather than in a driver
* because when a host assigns a device to a guest VM, the host may need
* to reset the device but probably doesn't have a driver for it.
*/
int pci_dev_specific_reset(struct pci_dev *dev, int probe)
{
const struct pci_dev_reset_methods *i;
for (i = pci_dev_reset_methods; i->reset; i++) {
if ((i->vendor == dev->vendor ||
i->vendor == (u16)PCI_ANY_ID) &&
(i->device == dev->device ||
i->device == (u16)PCI_ANY_ID))
return i->reset(dev, probe);
}
return -ENOTTY;
}
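/*
* Caller sketch (hypothetical helper, illustration only): probe with a
* non-zero argument first to learn whether a method exists, then call
* again with probe == 0 to actually perform the reset.
*/
#if 0 /* illustrative example */
static int try_dev_specific_reset(struct pci_dev *dev)
{
int rc = pci_dev_specific_reset(dev, 1); /* probe only */
if (rc)
return rc; /* no method known for this device */
return pci_dev_specific_reset(dev, 0); /* perform the reset */
}
#endif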
static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev)
{
if (!PCI_FUNC(dev->devfn))
return pci_dev_get(dev);
return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
}
static const struct pci_dev_dma_source {
u16 vendor;
u16 device;
struct pci_dev *(*dma_source)(struct pci_dev *dev);
} pci_dev_dma_source[] = {
/*
* https://bugzilla.redhat.com/show_bug.cgi?id=605888
*
* Some Ricoh devices use the function 0 source ID for DMA on
* other functions of a multifunction device. The DMA device
* is therefore function 0, which has implications for the
* IOMMU grouping of these devices.
*/
{ PCI_VENDOR_ID_RICOH, 0xe822, pci_func_0_dma_source },
{ PCI_VENDOR_ID_RICOH, 0xe230, pci_func_0_dma_source },
{ PCI_VENDOR_ID_RICOH, 0xe832, pci_func_0_dma_source },
{ PCI_VENDOR_ID_RICOH, 0xe476, pci_func_0_dma_source },
{ 0 }
};
/*
* IOMMUs with isolation capabilities need to be programmed with the
* correct source ID of a device. In most cases, the source ID matches
* the device doing the DMA, but sometimes hardware is broken and will
* tag the DMA as being sourced from a different device. This function
* allows that translation. Note that the reference count of the
* returned device is incremented on all paths.
*/
struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
{
const struct pci_dev_dma_source *i;
for (i = pci_dev_dma_source; i->dma_source; i++) {
if ((i->vendor == dev->vendor ||
i->vendor == (u16)PCI_ANY_ID) &&
(i->device == dev->device ||
i->device == (u16)PCI_ANY_ID))
return i->dma_source(dev);
}
return pci_dev_get(dev);
}
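/*
* Caller sketch (hypothetical helper, illustration only): because the
* reference count of the returned device is incremented on all paths,
* every caller owes a matching pci_dev_put().
*/
#if 0 /* illustrative example */
static void program_iommu_for(struct pci_dev *requester)
{
struct pci_dev *source = pci_get_dma_source(requester);
/* ... program the IOMMU with source's requester ID ... */
pci_dev_put(source);
}
#endif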
static const struct pci_dev_acs_enabled {
u16 vendor;
u16 device;
int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
} pci_dev_acs_enabled[] = {
{ 0 }
};
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
{
const struct pci_dev_acs_enabled *i;
int ret;
/*
* Allow devices that do not expose standard PCIe ACS capabilities
* or control to indicate their support here. Multifunction express
* devices that do not allow internal peer-to-peer between functions,
* but also do not implement PCIe ACS, may wish to return true here.
*/
for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
if ((i->vendor == dev->vendor ||
i->vendor == (u16)PCI_ANY_ID) &&
(i->device == dev->device ||
i->device == (u16)PCI_ANY_ID)) {
ret = i->acs_enabled(dev, acs_flags);
if (ret >= 0)
return ret;
}
}
return -ENOTTY;
}
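/*
* Sketch of what an entry in pci_dev_acs_enabled[] could look like
* (vendor/device IDs and callback are hypothetical, example only).
* A callback returns 1 if the requested acs_flags are effectively
* provided, 0 if they are not, and a negative value to keep searching.
*/
#if 0 /* illustrative example */
static int example_acs_enabled(struct pci_dev *dev, u16 acs_flags)
{
/* this part isolates its functions, so claim all requested flags */
return 1;
}
/* entry would read: { 0x1234, 0x5678, example_acs_enabled }, before { 0 } */
#endif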
| gpl-2.0 |
burstlam/kernel-35 | arch/alpha/kernel/osf_sys.c | 584 | 30828 | /*
* linux/arch/alpha/kernel/osf_sys.c
*
* Copyright (C) 1995 Linus Torvalds
*/
/*
* This file handles some of the stranger OSF/1 system call interfaces.
* Some of the system calls expect a non-C calling standard, others have
* special parameter blocks..
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/utsname.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/major.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/types.h>
#include <linux/ipc.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/vfs.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <asm/fpu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/sysinfo.h>
#include <asm/hwrpb.h>
#include <asm/processor.h>
/*
* Brk needs to return an error. Still support Linux's brk(0) query idiom,
* which OSF programs just shouldn't be doing. We're still not quite
* identical to OSF as we don't return 0 on success, but doing otherwise
* would require changes to libc. Hopefully this is good enough.
*/
SYSCALL_DEFINE1(osf_brk, unsigned long, brk)
{
unsigned long retval = sys_brk(brk);
if (brk && brk != retval)
retval = -ENOMEM;
return retval;
}
/*
* This is pure guesswork..
*/
SYSCALL_DEFINE4(osf_set_program_attributes, unsigned long, text_start,
unsigned long, text_len, unsigned long, bss_start,
unsigned long, bss_len)
{
struct mm_struct *mm;
lock_kernel();
mm = current->mm;
mm->end_code = bss_start + bss_len;
mm->start_brk = bss_start + bss_len;
mm->brk = bss_start + bss_len;
#if 0
printk("set_program_attributes(%lx %lx %lx %lx)\n",
text_start, text_len, bss_start, bss_len);
#endif
unlock_kernel();
return 0;
}
/*
* OSF/1 directory handling functions...
*
* The "getdents()" interface is much more sane: the "basep" stuff is
* braindamage (it can't really handle filesystems where the directory
* offset differences aren't the same as "d_reclen").
*/
#define NAME_OFFSET offsetof (struct osf_dirent, d_name)
struct osf_dirent {
unsigned int d_ino;
unsigned short d_reclen;
unsigned short d_namlen;
char d_name[1];
};
struct osf_dirent_callback {
struct osf_dirent __user *dirent;
long __user *basep;
unsigned int count;
int error;
};
static int
osf_filldir(void *__buf, const char *name, int namlen, loff_t offset,
u64 ino, unsigned int d_type)
{
struct osf_dirent __user *dirent;
struct osf_dirent_callback *buf = (struct osf_dirent_callback *) __buf;
unsigned int reclen = ALIGN(NAME_OFFSET + namlen + 1, sizeof(u32));
unsigned int d_ino;
buf->error = -EINVAL; /* only used if we fail */
if (reclen > buf->count)
return -EINVAL;
d_ino = ino;
if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
buf->error = -EOVERFLOW;
return -EOVERFLOW;
}
if (buf->basep) {
if (put_user(offset, buf->basep))
goto Efault;
buf->basep = NULL;
}
dirent = buf->dirent;
if (put_user(d_ino, &dirent->d_ino) ||
put_user(namlen, &dirent->d_namlen) ||
put_user(reclen, &dirent->d_reclen) ||
copy_to_user(dirent->d_name, name, namlen) ||
put_user(0, dirent->d_name + namlen))
goto Efault;
dirent = (void __user *)dirent + reclen;
buf->dirent = dirent;
buf->count -= reclen;
return 0;
Efault:
buf->error = -EFAULT;
return -EFAULT;
}
SYSCALL_DEFINE4(osf_getdirentries, unsigned int, fd,
struct osf_dirent __user *, dirent, unsigned int, count,
long __user *, basep)
{
int error;
struct file *file;
struct osf_dirent_callback buf;
error = -EBADF;
file = fget(fd);
if (!file)
goto out;
buf.dirent = dirent;
buf.basep = basep;
buf.count = count;
buf.error = 0;
error = vfs_readdir(file, osf_filldir, &buf);
if (error >= 0)
error = buf.error;
if (count != buf.count)
error = count - buf.count;
fput(file);
out:
return error;
}
#undef NAME_OFFSET
SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags, unsigned long, fd,
unsigned long, off)
{
unsigned long ret = -EINVAL;
#if 0
if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
printk("%s: unimplemented OSF mmap flags %04lx\n",
current->comm, flags);
#endif
if ((off + PAGE_ALIGN(len)) < off)
goto out;
if (off & ~PAGE_MASK)
goto out;
ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
return ret;
}
/*
* The OSF/1 statfs structure is much larger, but this should
* match the beginning, at least.
*/
struct osf_statfs {
short f_type;
short f_flags;
int f_fsize;
int f_bsize;
int f_blocks;
int f_bfree;
int f_bavail;
int f_files;
int f_ffree;
__kernel_fsid_t f_fsid;
};
static int
linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat,
unsigned long bufsiz)
{
struct osf_statfs tmp_stat;
tmp_stat.f_type = linux_stat->f_type;
tmp_stat.f_flags = 0; /* mount flags */
tmp_stat.f_fsize = linux_stat->f_frsize;
tmp_stat.f_bsize = linux_stat->f_bsize;
tmp_stat.f_blocks = linux_stat->f_blocks;
tmp_stat.f_bfree = linux_stat->f_bfree;
tmp_stat.f_bavail = linux_stat->f_bavail;
tmp_stat.f_files = linux_stat->f_files;
tmp_stat.f_ffree = linux_stat->f_ffree;
tmp_stat.f_fsid = linux_stat->f_fsid;
if (bufsiz > sizeof(tmp_stat))
bufsiz = sizeof(tmp_stat);
return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0;
}
static int
do_osf_statfs(struct dentry * dentry, struct osf_statfs __user *buffer,
unsigned long bufsiz)
{
struct kstatfs linux_stat;
int error = vfs_statfs(dentry, &linux_stat);
if (!error)
error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
return error;
}
SYSCALL_DEFINE3(osf_statfs, char __user *, pathname,
struct osf_statfs __user *, buffer, unsigned long, bufsiz)
{
struct path path;
int retval;
retval = user_path(pathname, &path);
if (!retval) {
retval = do_osf_statfs(path.dentry, buffer, bufsiz);
path_put(&path);
}
return retval;
}
SYSCALL_DEFINE3(osf_fstatfs, unsigned long, fd,
struct osf_statfs __user *, buffer, unsigned long, bufsiz)
{
struct file *file;
int retval;
retval = -EBADF;
file = fget(fd);
if (file) {
retval = do_osf_statfs(file->f_path.dentry, buffer, bufsiz);
fput(file);
}
return retval;
}
/*
* Uhh.. OSF/1 mount parameters aren't exactly obvious..
*
* Although to be frank, neither are the native Linux/i386 ones..
*/
struct ufs_args {
char __user *devname;
int flags;
uid_t exroot;
};
struct cdfs_args {
char __user *devname;
int flags;
uid_t exroot;
/* This has lots more here, which Linux handles with the option block
but I'm too lazy to do the translation into ASCII. */
};
struct procfs_args {
char __user *devname;
int flags;
uid_t exroot;
};
/*
* We can't actually handle ufs yet, so we translate UFS mounts to
* ext2fs mounts. I wouldn't mind a UFS filesystem, but the UFS
* layout is so braindead it's a major headache doing it.
*
* Just how long ago was it written? OTOH, our UFS driver may still be
* unhappy with OSF UFS. [CHECKME]
*/
static int
osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags)
{
int retval;
struct ufs_args tmp;
char *devname;
retval = -EFAULT;
if (copy_from_user(&tmp, args, sizeof(tmp)))
goto out;
devname = getname(tmp.devname);
retval = PTR_ERR(devname);
if (IS_ERR(devname))
goto out;
retval = do_mount(devname, dirname, "ext2", flags, NULL);
putname(devname);
out:
return retval;
}
static int
osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags)
{
int retval;
struct cdfs_args tmp;
char *devname;
retval = -EFAULT;
if (copy_from_user(&tmp, args, sizeof(tmp)))
goto out;
devname = getname(tmp.devname);
retval = PTR_ERR(devname);
if (IS_ERR(devname))
goto out;
retval = do_mount(devname, dirname, "iso9660", flags, NULL);
putname(devname);
out:
return retval;
}
static int
osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags)
{
struct procfs_args tmp;
if (copy_from_user(&tmp, args, sizeof(tmp)))
return -EFAULT;
return do_mount("", dirname, "proc", flags, NULL);
}
SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, char __user *, path,
int, flag, void __user *, data)
{
int retval;
char *name;
name = getname(path);
retval = PTR_ERR(name);
if (IS_ERR(name))
goto out;
switch (typenr) {
case 1:
retval = osf_ufs_mount(name, data, flag);
break;
case 6:
retval = osf_cdfs_mount(name, data, flag);
break;
case 9:
retval = osf_procfs_mount(name, data, flag);
break;
default:
retval = -EINVAL;
printk("osf_mount(%ld, %x)\n", typenr, flag);
}
putname(name);
out:
return retval;
}
SYSCALL_DEFINE1(osf_utsname, char __user *, name)
{
int error;
down_read(&uts_sem);
error = -EFAULT;
if (copy_to_user(name + 0, utsname()->sysname, 32))
goto out;
if (copy_to_user(name + 32, utsname()->nodename, 32))
goto out;
if (copy_to_user(name + 64, utsname()->release, 32))
goto out;
if (copy_to_user(name + 96, utsname()->version, 32))
goto out;
if (copy_to_user(name + 128, utsname()->machine, 32))
goto out;
error = 0;
out:
up_read(&uts_sem);
return error;
}
SYSCALL_DEFINE0(getpagesize)
{
return PAGE_SIZE;
}
SYSCALL_DEFINE0(getdtablesize)
{
return sysctl_nr_open;
}
/*
* For compatibility with OSF/1 only. Use utsname(2) instead.
*/
SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
{
unsigned len;
int i;
if (!access_ok(VERIFY_WRITE, name, namelen))
return -EFAULT;
len = namelen;
if (namelen > 32)
len = 32;
down_read(&uts_sem);
for (i = 0; i < len; ++i) {
__put_user(utsname()->domainname[i], name + i);
if (utsname()->domainname[i] == '\0')
break;
}
up_read(&uts_sem);
return 0;
}
/*
* The following stuff should move into a header file should it ever
* be labeled "officially supported." Right now, there is just enough
* support to avoid applications (such as tar) printing error
* messages. The attributes are not really implemented.
*/
/*
* Values for Property list entry flag
*/
#define PLE_PROPAGATE_ON_COPY 0x1 /* cp(1) will copy entry
by default */
#define PLE_FLAG_MASK 0x1 /* Valid flag values */
#define PLE_FLAG_ALL -1 /* All flag value */
struct proplistname_args {
unsigned int pl_mask;
unsigned int pl_numnames;
char **pl_names;
};
union pl_args {
struct setargs {
char __user *path;
long follow;
long nbytes;
char __user *buf;
} set;
struct fsetargs {
long fd;
long nbytes;
char __user *buf;
} fset;
struct getargs {
char __user *path;
long follow;
struct proplistname_args __user *name_args;
long nbytes;
char __user *buf;
int __user *min_buf_size;
} get;
struct fgetargs {
long fd;
struct proplistname_args __user *name_args;
long nbytes;
char __user *buf;
int __user *min_buf_size;
} fget;
struct delargs {
char __user *path;
long follow;
struct proplistname_args __user *name_args;
} del;
struct fdelargs {
long fd;
struct proplistname_args __user *name_args;
} fdel;
};
enum pl_code {
PL_SET = 1, PL_FSET = 2,
PL_GET = 3, PL_FGET = 4,
PL_DEL = 5, PL_FDEL = 6
};
SYSCALL_DEFINE2(osf_proplist_syscall, enum pl_code, code,
union pl_args __user *, args)
{
long error;
int __user *min_buf_size_ptr;
lock_kernel();
switch (code) {
case PL_SET:
if (get_user(error, &args->set.nbytes))
error = -EFAULT;
break;
case PL_FSET:
if (get_user(error, &args->fset.nbytes))
error = -EFAULT;
break;
case PL_GET:
error = get_user(min_buf_size_ptr, &args->get.min_buf_size);
if (error)
break;
error = put_user(0, min_buf_size_ptr);
break;
case PL_FGET:
error = get_user(min_buf_size_ptr, &args->fget.min_buf_size);
if (error)
break;
error = put_user(0, min_buf_size_ptr);
break;
case PL_DEL:
case PL_FDEL:
error = 0;
break;
default:
error = -EOPNOTSUPP;
break;
}
unlock_kernel();
return error;
}
SYSCALL_DEFINE2(osf_sigstack, struct sigstack __user *, uss,
struct sigstack __user *, uoss)
{
unsigned long usp = rdusp();
unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size;
unsigned long oss_os = on_sig_stack(usp);
int error;
if (uss) {
void __user *ss_sp;
error = -EFAULT;
if (get_user(ss_sp, &uss->ss_sp))
goto out;
/* If the current stack was set with sigaltstack, don't
swap stacks while we are on it. */
error = -EPERM;
if (current->sas_ss_sp && on_sig_stack(usp))
goto out;
/* Since we don't know the extent of the stack, and we don't
track onstack-ness, but rather calculate it, we must
presume a size. Ho hum this interface is lossy. */
current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
current->sas_ss_size = SIGSTKSZ;
}
if (uoss) {
error = -EFAULT;
if (! access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))
|| __put_user(oss_sp, &uoss->ss_sp)
|| __put_user(oss_os, &uoss->ss_onstack))
goto out;
}
error = 0;
out:
return error;
}
SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
{
char *sysinfo_table[] = {
utsname()->sysname,
utsname()->nodename,
utsname()->release,
utsname()->version,
utsname()->machine,
"alpha", /* instruction set architecture */
"dummy", /* hardware serial number */
"dummy", /* hardware manufacturer */
"dummy", /* secure RPC domain */
};
unsigned long offset;
char *res;
long len, err = -EINVAL;
offset = command-1;
if (offset >= ARRAY_SIZE(sysinfo_table)) {
/* Digital UNIX has a few unpublished interfaces here */
printk("sysinfo(%d)", command);
goto out;
}
down_read(&uts_sem);
res = sysinfo_table[offset];
len = strlen(res)+1;
if (len > count)
len = count;
if (copy_to_user(buf, res, len))
err = -EFAULT;
else
err = 0;
up_read(&uts_sem);
out:
return err;
}
SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
unsigned long, nbytes, int __user *, start, void __user *, arg)
{
unsigned long w;
struct percpu_struct *cpu;
switch (op) {
case GSI_IEEE_FP_CONTROL:
/* Return current software fp control & status bits. */
/* Note that DU doesn't verify available space here. */
w = current_thread_info()->ieee_state & IEEE_SW_MASK;
w = swcr_update_status(w, rdfpcr());
if (put_user(w, (unsigned long __user *) buffer))
return -EFAULT;
return 0;
case GSI_IEEE_STATE_AT_SIGNAL:
/*
* Not sure anybody will ever use this weird stuff. These
* ops can be used (under OSF/1) to set the fpcr that should
* be used when a signal handler starts executing.
*/
break;
case GSI_UACPROC:
if (nbytes < sizeof(unsigned int))
return -EINVAL;
w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK;
if (put_user(w, (unsigned int __user *)buffer))
return -EFAULT;
return 1;
case GSI_PROC_TYPE:
if (nbytes < sizeof(unsigned long))
return -EINVAL;
cpu = (struct percpu_struct*)
((char*)hwrpb + hwrpb->processor_offset);
w = cpu->type;
if (put_user(w, (unsigned long __user*)buffer))
return -EFAULT;
return 1;
case GSI_GET_HWRPB:
if (nbytes < sizeof(*hwrpb))
return -EINVAL;
if (copy_to_user(buffer, hwrpb, nbytes) != 0)
return -EFAULT;
return 1;
default:
break;
}
return -EOPNOTSUPP;
}
SYSCALL_DEFINE5(osf_setsysinfo, unsigned long, op, void __user *, buffer,
unsigned long, nbytes, int __user *, start, void __user *, arg)
{
switch (op) {
case SSI_IEEE_FP_CONTROL: {
unsigned long swcr, fpcr;
unsigned int *state;
/*
* Alpha Architecture Handbook 4.7.7.3:
* To be fully IEEE compliant, we must track the current IEEE
* exception state in software, because spurious bits can be
* set in the trap shadow of a software-complete insn.
*/
if (get_user(swcr, (unsigned long __user *)buffer))
return -EFAULT;
state = &current_thread_info()->ieee_state;
/* Update software trap enable bits. */
*state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);
/* Update the real fpcr. */
fpcr = rdfpcr() & FPCR_DYN_MASK;
fpcr |= ieee_swcr_to_fpcr(swcr);
wrfpcr(fpcr);
return 0;
}
case SSI_IEEE_RAISE_EXCEPTION: {
unsigned long exc, swcr, fpcr, fex;
unsigned int *state;
if (get_user(exc, (unsigned long __user *)buffer))
return -EFAULT;
state = &current_thread_info()->ieee_state;
exc &= IEEE_STATUS_MASK;
/* Update software trap enable bits. */
swcr = (*state & IEEE_SW_MASK) | exc;
*state |= exc;
/* Update the real fpcr. */
fpcr = rdfpcr();
fpcr |= ieee_swcr_to_fpcr(swcr);
wrfpcr(fpcr);
/* If any exceptions set by this call, and are unmasked,
send a signal. Old exceptions are not signaled. */
fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr;
if (fex) {
siginfo_t info;
int si_code = 0;
if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND;
if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES;
if (fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND;
if (fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF;
if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV;
if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = NULL; /* FIXME */
send_sig_info(SIGFPE, &info, current);
}
return 0;
}
case SSI_IEEE_STATE_AT_SIGNAL:
case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
/*
* Not sure anybody will ever use this weird stuff. These
* ops can be used (under OSF/1) to set the fpcr that should
* be used when a signal handler starts executing.
*/
break;
case SSI_NVPAIRS: {
unsigned long v, w, i;
unsigned int old, new;
for (i = 0; i < nbytes; ++i) {
if (get_user(v, 2*i + (unsigned int __user *)buffer))
return -EFAULT;
if (get_user(w, 2*i + 1 + (unsigned int __user *)buffer))
return -EFAULT;
switch (v) {
case SSIN_UACPROC:
again:
old = current_thread_info()->flags;
new = old & ~(UAC_BITMASK << UAC_SHIFT);
new = new | (w & UAC_BITMASK) << UAC_SHIFT;
if (cmpxchg(&current_thread_info()->flags,
old, new) != old)
goto again;
break;
default:
return -EOPNOTSUPP;
}
}
return 0;
}
default:
break;
}
return -EOPNOTSUPP;
}
/* Translations needed because OSF's time_t is an int, which
affects all sorts of things, like timeval and itimerval. */
extern struct timezone sys_tz;
struct timeval32
{
int tv_sec, tv_usec;
};
struct itimerval32
{
struct timeval32 it_interval;
struct timeval32 it_value;
};
static inline long
get_tv32(struct timeval *o, struct timeval32 __user *i)
{
return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
(__get_user(o->tv_sec, &i->tv_sec) |
__get_user(o->tv_usec, &i->tv_usec)));
}
static inline long
put_tv32(struct timeval32 __user *o, struct timeval *i)
{
return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
(__put_user(i->tv_sec, &o->tv_sec) |
__put_user(i->tv_usec, &o->tv_usec)));
}
static inline long
get_it32(struct itimerval *o, struct itimerval32 __user *i)
{
return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
(__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
__get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
__get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
__get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
}
static inline long
put_it32(struct itimerval32 __user *o, struct itimerval *i)
{
return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
(__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
__put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
__put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
__put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
}
static inline void
jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value)
{
value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
value->tv_sec = jiffies / HZ;
}
SYSCALL_DEFINE2(osf_gettimeofday, struct timeval32 __user *, tv,
struct timezone __user *, tz)
{
if (tv) {
struct timeval ktv;
do_gettimeofday(&ktv);
if (put_tv32(tv, &ktv))
return -EFAULT;
}
if (tz) {
if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
return -EFAULT;
}
return 0;
}
SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv,
struct timezone __user *, tz)
{
struct timespec kts;
struct timezone ktz;
if (tv) {
if (get_tv32((struct timeval *)&kts, tv))
return -EFAULT;
kts.tv_nsec *= 1000; /* scale usecs only when kts was filled in */
}
if (tz) {
if (copy_from_user(&ktz, tz, sizeof(*tz)))
return -EFAULT;
}
return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}
SYSCALL_DEFINE2(osf_getitimer, int, which, struct itimerval32 __user *, it)
{
struct itimerval kit;
int error;
error = do_getitimer(which, &kit);
if (!error && put_it32(it, &kit))
error = -EFAULT;
return error;
}
SYSCALL_DEFINE3(osf_setitimer, int, which, struct itimerval32 __user *, in,
struct itimerval32 __user *, out)
{
struct itimerval kin, kout;
int error;
if (in) {
if (get_it32(&kin, in))
return -EFAULT;
} else
memset(&kin, 0, sizeof(kin));
error = do_setitimer(which, &kin, out ? &kout : NULL);
if (error || !out)
return error;
if (put_it32(out, &kout))
return -EFAULT;
return 0;
}
SYSCALL_DEFINE2(osf_utimes, char __user *, filename,
struct timeval32 __user *, tvs)
{
struct timespec tv[2];
if (tvs) {
struct timeval ktvs[2];
if (get_tv32(&ktvs[0], &tvs[0]) ||
get_tv32(&ktvs[1], &tvs[1]))
return -EFAULT;
if (ktvs[0].tv_usec < 0 || ktvs[0].tv_usec >= 1000000 ||
ktvs[1].tv_usec < 0 || ktvs[1].tv_usec >= 1000000)
return -EINVAL;
tv[0].tv_sec = ktvs[0].tv_sec;
tv[0].tv_nsec = 1000 * ktvs[0].tv_usec;
tv[1].tv_sec = ktvs[1].tv_sec;
tv[1].tv_nsec = 1000 * ktvs[1].tv_usec;
}
return do_utimes(AT_FDCWD, filename, tvs ? tv : NULL, 0);
}
#define MAX_SELECT_SECONDS \
((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
SYSCALL_DEFINE5(osf_select, int, n, fd_set __user *, inp, fd_set __user *, outp,
fd_set __user *, exp, struct timeval32 __user *, tvp)
{
struct timespec end_time, *to = NULL;
if (tvp) {
time_t sec, usec;
to = &end_time;
if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp))
|| __get_user(sec, &tvp->tv_sec)
|| __get_user(usec, &tvp->tv_usec)) {
return -EFAULT;
}
if (sec < 0 || usec < 0)
return -EINVAL;
if (poll_select_set_timeout(to, sec, usec * NSEC_PER_USEC))
return -EINVAL;
}
/* OSF does not copy back the remaining time. */
return core_sys_select(n, inp, outp, exp, to);
}
struct rusage32 {
struct timeval32 ru_utime; /* user time used */
struct timeval32 ru_stime; /* system time used */
long ru_maxrss; /* maximum resident set size */
long ru_ixrss; /* integral shared memory size */
long ru_idrss; /* integral unshared data size */
long ru_isrss; /* integral unshared stack size */
long ru_minflt; /* page reclaims */
long ru_majflt; /* page faults */
long ru_nswap; /* swaps */
long ru_inblock; /* block input operations */
long ru_oublock; /* block output operations */
long ru_msgsnd; /* messages sent */
long ru_msgrcv; /* messages received */
long ru_nsignals; /* signals received */
long ru_nvcsw; /* voluntary context switches */
long ru_nivcsw; /* involuntary " */
};
SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
{
struct rusage32 r;
if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
return -EINVAL;
memset(&r, 0, sizeof(r));
switch (who) {
case RUSAGE_SELF:
jiffies_to_timeval32(current->utime, &r.ru_utime);
jiffies_to_timeval32(current->stime, &r.ru_stime);
r.ru_minflt = current->min_flt;
r.ru_majflt = current->maj_flt;
break;
case RUSAGE_CHILDREN:
jiffies_to_timeval32(current->signal->cutime, &r.ru_utime);
jiffies_to_timeval32(current->signal->cstime, &r.ru_stime);
r.ru_minflt = current->signal->cmin_flt;
r.ru_majflt = current->signal->cmaj_flt;
break;
}
return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
struct rusage32 __user *, ur)
{
struct rusage r;
long ret, err;
mm_segment_t old_fs;
if (!ur)
return sys_wait4(pid, ustatus, options, NULL);
old_fs = get_fs();
set_fs (KERNEL_DS);
ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
set_fs (old_fs);
if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
return -EFAULT;
err = 0;
err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
err |= __put_user(r.ru_stime.tv_usec, &ur->ru_stime.tv_usec);
err |= __put_user(r.ru_maxrss, &ur->ru_maxrss);
err |= __put_user(r.ru_ixrss, &ur->ru_ixrss);
err |= __put_user(r.ru_idrss, &ur->ru_idrss);
err |= __put_user(r.ru_isrss, &ur->ru_isrss);
err |= __put_user(r.ru_minflt, &ur->ru_minflt);
err |= __put_user(r.ru_majflt, &ur->ru_majflt);
err |= __put_user(r.ru_nswap, &ur->ru_nswap);
err |= __put_user(r.ru_inblock, &ur->ru_inblock);
err |= __put_user(r.ru_oublock, &ur->ru_oublock);
err |= __put_user(r.ru_msgsnd, &ur->ru_msgsnd);
err |= __put_user(r.ru_msgrcv, &ur->ru_msgrcv);
err |= __put_user(r.ru_nsignals, &ur->ru_nsignals);
err |= __put_user(r.ru_nvcsw, &ur->ru_nvcsw);
err |= __put_user(r.ru_nivcsw, &ur->ru_nivcsw);
return err ? err : ret;
}
/*
* I don't know what the parameters are: the first one
* seems to be a timeval pointer, and I suspect the second
* one is the time remaining.. Ho humm.. No documentation.
*/
SYSCALL_DEFINE2(osf_usleep_thread, struct timeval32 __user *, sleep,
struct timeval32 __user *, remain)
{
struct timeval tmp;
unsigned long ticks;
if (get_tv32(&tmp, sleep))
goto fault;
ticks = timeval_to_jiffies(&tmp);
ticks = schedule_timeout_interruptible(ticks);
if (remain) {
jiffies_to_timeval(ticks, &tmp);
if (put_tv32(remain, &tmp))
goto fault;
}
return 0;
fault:
return -EFAULT;
}
struct timex32 {
unsigned int modes; /* mode selector */
long offset; /* time offset (usec) */
long freq; /* frequency offset (scaled ppm) */
long maxerror; /* maximum error (usec) */
long esterror; /* estimated error (usec) */
int status; /* clock command/status */
long constant; /* pll time constant */
long precision; /* clock precision (usec) (read only) */
long tolerance; /* clock frequency tolerance (ppm)
* (read only)
*/
struct timeval32 time; /* (read only) */
long tick; /* (modified) usecs between clock ticks */
long ppsfreq; /* pps frequency (scaled ppm) (ro) */
long jitter; /* pps jitter (us) (ro) */
int shift; /* interval duration (s) (shift) (ro) */
long stabil; /* pps stability (scaled ppm) (ro) */
long jitcnt; /* jitter limit exceeded (ro) */
long calcnt; /* calibration intervals (ro) */
long errcnt; /* calibration errors (ro) */
long stbcnt; /* stability limit exceeded (ro) */
int :32; int :32; int :32; int :32;
int :32; int :32; int :32; int :32;
int :32; int :32; int :32; int :32;
};
SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
{
struct timex txc;
int ret;
/* copy relevant bits of struct timex. */
if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
offsetof(struct timex32, tick)))
return -EFAULT;
ret = do_adjtimex(&txc);
if (ret < 0)
return ret;
/* copy back to timex32 */
if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) ||
(copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) -
offsetof(struct timex32, tick))) ||
(put_tv32(&txc_p->time, &txc.time)))
return -EFAULT;
return ret;
}
/* Get an address range which is currently unmapped. Similar to the
generic version except that we know how to honor ADDR_LIMIT_32BIT. */
static unsigned long
arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
unsigned long limit)
{
struct vm_area_struct *vma = find_vma(current->mm, addr);
while (1) {
/* At this point: (!vma || addr < vma->vm_end). */
if (limit - len < addr)
return -ENOMEM;
if (!vma || addr + len <= vma->vm_start)
return addr;
addr = vma->vm_end;
vma = vma->vm_next;
}
}
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
unsigned long limit;
/* "32 bit" actually means 31 bit, since pointers sign extend. */
if (current->personality & ADDR_LIMIT_32BIT)
limit = 0x80000000;
else
limit = TASK_SIZE;
if (len > limit)
return -ENOMEM;
if (flags & MAP_FIXED)
return addr;
/* First, see if the given suggestion fits.
The OSF/1 loader (/sbin/loader) relies on us returning an
address larger than the requested if one exists, which is
a terribly broken way to program.
That said, I can see the use in being able to suggest not
merely specific addresses, but regions of memory -- perhaps
this feature should be incorporated into all ports? */
if (addr) {
addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
if (addr != (unsigned long) -ENOMEM)
return addr;
}
/* Next, try allocating at TASK_UNMAPPED_BASE. */
addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
len, limit);
if (addr != (unsigned long) -ENOMEM)
return addr;
/* Finally, try allocating in low memory. */
addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
return addr;
}
#ifdef CONFIG_OSF4_COMPAT
/* Clear top 32 bits of iov_len in the user's buffer for
compatibility with old versions of OSF/1 where iov_len
was defined as int. */
static int
osf_fix_iov_len(const struct iovec __user *iov, unsigned long count)
{
unsigned long i;
for (i = 0 ; i < count ; i++) {
int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1;
if (put_user(0, iov_len_high))
return -EFAULT;
}
return 0;
}
SYSCALL_DEFINE3(osf_readv, unsigned long, fd,
const struct iovec __user *, vector, unsigned long, count)
{
if (unlikely(personality(current->personality) == PER_OSF4))
if (osf_fix_iov_len(vector, count))
return -EFAULT;
return sys_readv(fd, vector, count);
}
SYSCALL_DEFINE3(osf_writev, unsigned long, fd,
const struct iovec __user *, vector, unsigned long, count)
{
if (unlikely(personality(current->personality) == PER_OSF4))
if (osf_fix_iov_len(vector, count))
return -EFAULT;
return sys_writev(fd, vector, count);
}
#endif
| gpl-2.0 |
aseering/linux | drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlyg84.c | 840 | 2238 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "dmacnv50.h"
#include "rootnv50.h"
#include <nvif/class.h>
static const struct nv50_disp_mthd_list
g84_disp_ovly_mthd_base = {
.mthd = 0x0000,
.addr = 0x000000,
.data = {
{ 0x0080, 0x000000 },
{ 0x0084, 0x6109a0 },
{ 0x0088, 0x6109c0 },
{ 0x008c, 0x6109c8 },
{ 0x0090, 0x6109b4 },
{ 0x0094, 0x610970 },
{ 0x00a0, 0x610998 },
{ 0x00a4, 0x610964 },
{ 0x00c0, 0x610958 },
{ 0x00e0, 0x6109a8 },
{ 0x00e4, 0x6109d0 },
{ 0x00e8, 0x6109d8 },
{ 0x0100, 0x61094c },
{ 0x0104, 0x610984 },
{ 0x0108, 0x61098c },
{ 0x0800, 0x6109f8 },
{ 0x0808, 0x610a08 },
{ 0x080c, 0x610a10 },
{ 0x0810, 0x610a00 },
{}
}
};
const struct nv50_disp_chan_mthd
g84_disp_ovly_chan_mthd = {
.name = "Overlay",
.addr = 0x000540,
.prev = 0x000004,
.data = {
{ "Global", 1, &g84_disp_ovly_mthd_base },
{}
}
};
const struct nv50_disp_dmac_oclass
g84_disp_ovly_oclass = {
.base.oclass = G82_DISP_OVERLAY_CHANNEL_DMA,
.base.minver = 0,
.base.maxver = 0,
.ctor = nv50_disp_ovly_new,
.func = &nv50_disp_dmac_func,
.mthd = &g84_disp_ovly_chan_mthd,
.chid = 3,
};
| gpl-2.0 |
jwyterlin/linux | drivers/gpu/host1x/hw/channel_hw.c | 840 | 5074 | /*
* Tegra host1x Channel
*
* Copyright (c) 2010-2013, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/host1x.h>
#include <linux/slab.h>
#include <trace/events/host1x.h>
#include "../channel.h"
#include "../dev.h"
#include "../intr.h"
#include "../job.h"
#define HOST1X_CHANNEL_SIZE 16384
#define TRACE_MAX_LENGTH 128U
static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
u32 offset, u32 words)
{
void *mem = NULL;
if (host1x_debug_trace_cmdbuf)
mem = host1x_bo_mmap(bo);
if (mem) {
u32 i;
/*
* Write in batches of 128 words, as there seems to be a limit
* on how much you can output to ftrace at once.
*/
for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
trace_host1x_cdma_push_gather(
dev_name(cdma_to_channel(cdma)->dev),
(u32)bo, min(words - i, TRACE_MAX_LENGTH),
offset + i * sizeof(u32), mem);
}
host1x_bo_munmap(bo, mem);
}
}
static void submit_gathers(struct host1x_job *job)
{
struct host1x_cdma *cdma = &job->channel->cdma;
unsigned int i;
for (i = 0; i < job->num_gathers; i++) {
struct host1x_job_gather *g = &job->gathers[i];
u32 op1 = host1x_opcode_gather(g->words);
u32 op2 = g->base + g->offset;
trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff);
host1x_cdma_push(cdma, op1, op2);
}
}
static inline void synchronize_syncpt_base(struct host1x_job *job)
{
struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
u32 id, value;
value = host1x_syncpt_read_max(sp);
id = sp->base->id;
host1x_cdma_push(&job->channel->cdma,
host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
HOST1X_UCLASS_LOAD_SYNCPT_BASE, 1),
HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(id) |
HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(value));
}
static int channel_submit(struct host1x_job *job)
{
struct host1x_channel *ch = job->channel;
struct host1x_syncpt *sp;
u32 user_syncpt_incrs = job->syncpt_incrs;
u32 prev_max = 0;
u32 syncval;
int err;
struct host1x_waitlist *completed_waiter = NULL;
struct host1x *host = dev_get_drvdata(ch->dev->parent);
sp = host->syncpt + job->syncpt_id;
trace_host1x_channel_submit(dev_name(ch->dev),
job->num_gathers, job->num_relocs,
job->num_waitchk, job->syncpt_id,
job->syncpt_incrs);
/* before error checks, return current max */
prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);
/* get submit lock */
err = mutex_lock_interruptible(&ch->submitlock);
if (err)
goto error;
completed_waiter = kzalloc(sizeof(*completed_waiter), GFP_KERNEL);
if (!completed_waiter) {
mutex_unlock(&ch->submitlock);
err = -ENOMEM;
goto error;
}
/* begin a CDMA submit */
err = host1x_cdma_begin(&ch->cdma, job);
if (err) {
mutex_unlock(&ch->submitlock);
goto error;
}
if (job->serialize) {
/*
* Force serialization by inserting a host wait for the
* previous job to finish before this one can commence.
*/
host1x_cdma_push(&ch->cdma,
host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
host1x_uclass_wait_syncpt_r(), 1),
host1x_class_host_wait_syncpt(job->syncpt_id,
host1x_syncpt_read_max(sp)));
}
/* Synchronize base register to allow using it for relative waiting */
if (sp->base)
synchronize_syncpt_base(job);
syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);
job->syncpt_end = syncval;
/* add a setclass for modules that require it */
if (job->class)
host1x_cdma_push(&ch->cdma,
host1x_opcode_setclass(job->class, 0, 0),
HOST1X_OPCODE_NOP);
submit_gathers(job);
/* end CDMA submit & stash pinned hMems into sync queue */
host1x_cdma_end(&ch->cdma, job);
trace_host1x_channel_submitted(dev_name(ch->dev), prev_max, syncval);
/* schedule a submit complete interrupt */
err = host1x_intr_add_action(host, job->syncpt_id, syncval,
HOST1X_INTR_ACTION_SUBMIT_COMPLETE, ch,
completed_waiter, NULL);
completed_waiter = NULL;
WARN(err, "Failed to set submit complete interrupt");
mutex_unlock(&ch->submitlock);
return 0;
error:
kfree(completed_waiter);
return err;
}
static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
unsigned int index)
{
ch->id = index;
mutex_init(&ch->reflock);
mutex_init(&ch->submitlock);
ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE;
return 0;
}
static const struct host1x_channel_ops host1x_channel_ops = {
.init = host1x_channel_init,
.submit = channel_submit,
};
| gpl-2.0 |
akalongman/linux | fs/nilfs2/dat.c | 1352 | 13804 | /*
* dat.c - NILFS disk address translation.
*
* Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Written by Koji Sato <koji@osrg.net>.
*/
#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "nilfs.h"
#include "mdt.h"
#include "alloc.h"
#include "dat.h"
#define NILFS_CNO_MIN ((__u64)1)
#define NILFS_CNO_MAX (~(__u64)0)
/**
* struct nilfs_dat_info - on-memory private data of DAT file
* @mi: on-memory private data of metadata file
* @palloc_cache: persistent object allocator cache of DAT file
* @shadow: shadow map of DAT file
*/
struct nilfs_dat_info {
struct nilfs_mdt_info mi;
struct nilfs_palloc_cache palloc_cache;
struct nilfs_shadow_map shadow;
};
static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
{
return (struct nilfs_dat_info *)NILFS_MDT(dat);
}
static int nilfs_dat_prepare_entry(struct inode *dat,
struct nilfs_palloc_req *req, int create)
{
return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr,
create, &req->pr_entry_bh);
}
static void nilfs_dat_commit_entry(struct inode *dat,
struct nilfs_palloc_req *req)
{
mark_buffer_dirty(req->pr_entry_bh);
nilfs_mdt_mark_dirty(dat);
brelse(req->pr_entry_bh);
}
static void nilfs_dat_abort_entry(struct inode *dat,
struct nilfs_palloc_req *req)
{
brelse(req->pr_entry_bh);
}
int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
int ret;
ret = nilfs_palloc_prepare_alloc_entry(dat, req);
if (ret < 0)
return ret;
ret = nilfs_dat_prepare_entry(dat, req, 1);
if (ret < 0)
nilfs_palloc_abort_alloc_entry(dat, req);
return ret;
}
void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
struct nilfs_dat_entry *entry;
void *kaddr;
kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr);
entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
entry->de_blocknr = cpu_to_le64(0);
kunmap_atomic(kaddr);
nilfs_palloc_commit_alloc_entry(dat, req);
nilfs_dat_commit_entry(dat, req);
}
void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req)
{
nilfs_dat_abort_entry(dat, req);
nilfs_palloc_abort_alloc_entry(dat, req);
}
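/*
* The alloc helpers above follow NILFS's two-phase transaction pattern:
* a fallible prepare step followed by exactly one of commit (which
* cannot fail) or abort. A minimal caller sketch (illustrative only;
* the failure condition is hypothetical):
*
*	struct nilfs_palloc_req req;
*	int err;
*
*	err = nilfs_dat_prepare_alloc(dat, &req);
*	if (err)
*		return err;
*	if (later_step_failed)
*		nilfs_dat_abort_alloc(dat, &req);
*	else
*		nilfs_dat_commit_alloc(dat, &req);
*/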
static void nilfs_dat_commit_free(struct inode *dat,
struct nilfs_palloc_req *req)
{
struct nilfs_dat_entry *entry;
void *kaddr;
kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr);
entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
entry->de_blocknr = cpu_to_le64(0);
kunmap_atomic(kaddr);
nilfs_dat_commit_entry(dat, req);
nilfs_palloc_commit_free_entry(dat, req);
}
int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
{
int ret;
ret = nilfs_dat_prepare_entry(dat, req, 0);
WARN_ON(ret == -ENOENT);
return ret;
}
void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
sector_t blocknr)
{
struct nilfs_dat_entry *entry;
void *kaddr;
kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr);
entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
entry->de_blocknr = cpu_to_le64(blocknr);
kunmap_atomic(kaddr);
nilfs_dat_commit_entry(dat, req);
}
int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
{
struct nilfs_dat_entry *entry;
sector_t blocknr;
void *kaddr;
int ret;
ret = nilfs_dat_prepare_entry(dat, req, 0);
if (ret < 0) {
WARN_ON(ret == -ENOENT);
return ret;
}
kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr);
blocknr = le64_to_cpu(entry->de_blocknr);
kunmap_atomic(kaddr);
if (blocknr == 0) {
ret = nilfs_palloc_prepare_free_entry(dat, req);
if (ret < 0) {
nilfs_dat_abort_entry(dat, req);
return ret;
}
}
return 0;
}
void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
int dead)
{
struct nilfs_dat_entry *entry;
__u64 start, end;
sector_t blocknr;
void *kaddr;
kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr);
end = start = le64_to_cpu(entry->de_start);
if (!dead) {
end = nilfs_mdt_cno(dat);
WARN_ON(start > end);
}
entry->de_end = cpu_to_le64(end);
blocknr = le64_to_cpu(entry->de_blocknr);
kunmap_atomic(kaddr);
if (blocknr == 0)
nilfs_dat_commit_free(dat, req);
else
nilfs_dat_commit_entry(dat, req);
}
void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
{
struct nilfs_dat_entry *entry;
__u64 start;
sector_t blocknr;
void *kaddr;
kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr);
start = le64_to_cpu(entry->de_start);
blocknr = le64_to_cpu(entry->de_blocknr);
kunmap_atomic(kaddr);
if (start == nilfs_mdt_cno(dat) && blocknr == 0)
nilfs_palloc_abort_free_entry(dat, req);
nilfs_dat_abort_entry(dat, req);
}
int nilfs_dat_prepare_update(struct inode *dat,
struct nilfs_palloc_req *oldreq,
struct nilfs_palloc_req *newreq)
{
int ret;
ret = nilfs_dat_prepare_end(dat, oldreq);
if (!ret) {
ret = nilfs_dat_prepare_alloc(dat, newreq);
if (ret < 0)
nilfs_dat_abort_end(dat, oldreq);
}
return ret;
}
void nilfs_dat_commit_update(struct inode *dat,
struct nilfs_palloc_req *oldreq,
struct nilfs_palloc_req *newreq, int dead)
{
nilfs_dat_commit_end(dat, oldreq, dead);
nilfs_dat_commit_alloc(dat, newreq);
}
void nilfs_dat_abort_update(struct inode *dat,
struct nilfs_palloc_req *oldreq,
struct nilfs_palloc_req *newreq)
{
nilfs_dat_abort_end(dat, oldreq);
nilfs_dat_abort_alloc(dat, newreq);
}
/**
* nilfs_dat_mark_dirty - mark the DAT entry of a virtual block number dirty
* @dat: DAT file inode
* @vblocknr: virtual block number
*
* Description: nilfs_dat_mark_dirty() loads the DAT entry block containing
* @vblocknr and marks that block and the DAT inode dirty.
*
* Return Value: On success, 0 is returned. On error, one of the following
* negative error codes is returned.
*
* %-EIO - I/O error.
*
* %-ENOMEM - Insufficient amount of memory available.
*/
int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
{
struct nilfs_palloc_req req;
int ret;
req.pr_entry_nr = vblocknr;
ret = nilfs_dat_prepare_entry(dat, &req, 0);
if (ret == 0)
nilfs_dat_commit_entry(dat, &req);
return ret;
}
/**
* nilfs_dat_freev - free virtual block numbers
* @dat: DAT file inode
* @vblocknrs: array of virtual block numbers
* @nitems: number of virtual block numbers
*
* Description: nilfs_dat_freev() frees the virtual block numbers specified by
* @vblocknrs and @nitems.
*
* Return Value: On success, 0 is returned. On error, one of the following
* negative error codes is returned.
*
* %-EIO - I/O error.
*
* %-ENOMEM - Insufficient amount of memory available.
*
* %-ENOENT - One of the virtual block numbers has not been allocated.
*/
int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
{
return nilfs_palloc_freev(dat, vblocknrs, nitems);
}
/**
* nilfs_dat_move - change a block number
* @dat: DAT file inode
* @vblocknr: virtual block number
* @blocknr: block number
*
* Description: nilfs_dat_move() changes the block number associated with
* @vblocknr to @blocknr.
*
* Return Value: On success, 0 is returned. On error, one of the following
* negative error codes is returned.
*
* %-EIO - I/O error.
*
* %-ENOMEM - Insufficient amount of memory available.
*/
int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
{
struct buffer_head *entry_bh;
struct nilfs_dat_entry *entry;
void *kaddr;
int ret;
ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
if (ret < 0)
return ret;
/*
* The given disk block number (blocknr) is not yet written to
* the device at this point.
*
* To prevent nilfs_dat_translate() from returning the
* uncommitted block number, this makes a copy of the entry
* buffer and redirects nilfs_dat_translate() to the copy.
*/
if (!buffer_nilfs_redirected(entry_bh)) {
ret = nilfs_mdt_freeze_buffer(dat, entry_bh);
if (ret) {
brelse(entry_bh);
return ret;
}
}
kaddr = kmap_atomic(entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
(unsigned long long)vblocknr,
(unsigned long long)le64_to_cpu(entry->de_start),
(unsigned long long)le64_to_cpu(entry->de_end));
kunmap_atomic(kaddr);
brelse(entry_bh);
return -EINVAL;
}
WARN_ON(blocknr == 0);
entry->de_blocknr = cpu_to_le64(blocknr);
kunmap_atomic(kaddr);
mark_buffer_dirty(entry_bh);
nilfs_mdt_mark_dirty(dat);
brelse(entry_bh);
return 0;
}
/**
* nilfs_dat_translate - translate a virtual block number to a block number
* @dat: DAT file inode
* @vblocknr: virtual block number
* @blocknrp: pointer to a block number
*
* Description: nilfs_dat_translate() maps the virtual block number @vblocknr
* to the corresponding block number.
*
* Return Value: On success, 0 is returned and the block number associated
* with @vblocknr is stored in the place pointed by @blocknrp. On error, one
* of the following negative error codes is returned.
*
* %-EIO - I/O error.
*
* %-ENOMEM - Insufficient amount of memory available.
*
* %-ENOENT - A block number associated with @vblocknr does not exist.
*/
int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
{
struct buffer_head *entry_bh, *bh;
struct nilfs_dat_entry *entry;
sector_t blocknr;
void *kaddr;
int ret;
ret = nilfs_palloc_get_entry_block(dat, vblocknr, 0, &entry_bh);
if (ret < 0)
return ret;
if (!nilfs_doing_gc() && buffer_nilfs_redirected(entry_bh)) {
bh = nilfs_mdt_get_frozen_buffer(dat, entry_bh);
if (bh) {
WARN_ON(!buffer_uptodate(bh));
brelse(entry_bh);
entry_bh = bh;
}
}
kaddr = kmap_atomic(entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
blocknr = le64_to_cpu(entry->de_blocknr);
if (blocknr == 0) {
ret = -ENOENT;
goto out;
}
*blocknrp = blocknr;
out:
kunmap_atomic(kaddr);
brelse(entry_bh);
return ret;
}
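/*
* Typical lookup through nilfs_dat_translate() (illustrative sketch;
* the variable names are hypothetical):
*
*	sector_t pbn;
*
*	ret = nilfs_dat_translate(dat, vbn, &pbn);
*	if (!ret)
*		... pbn holds the on-disk block backing vbn ...
*	else if (ret == -ENOENT)
*		... vbn currently has no block assigned ...
*/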
ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
size_t nvi)
{
struct buffer_head *entry_bh;
struct nilfs_dat_entry *entry;
struct nilfs_vinfo *vinfo = buf;
__u64 first, last;
void *kaddr;
unsigned long entries_per_block = NILFS_MDT(dat)->mi_entries_per_block;
int i, j, n, ret;
for (i = 0; i < nvi; i += n) {
ret = nilfs_palloc_get_entry_block(dat, vinfo->vi_vblocknr,
0, &entry_bh);
if (ret < 0)
return ret;
kaddr = kmap_atomic(entry_bh->b_page);
/* first and last virtual block numbers in this entry block */
first = vinfo->vi_vblocknr;
do_div(first, entries_per_block);
first *= entries_per_block;
last = first + entries_per_block - 1;
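/*
* Example of the rounding above (illustrative): with 64 entries
* per block, vi_vblocknr == 130 yields first == 128 and
* last == 191, the range of virtual block numbers whose entries
* live in the block just read.
*/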
for (j = i, n = 0;
j < nvi && vinfo->vi_vblocknr >= first &&
vinfo->vi_vblocknr <= last;
j++, n++, vinfo = (void *)vinfo + visz) {
entry = nilfs_palloc_block_get_entry(
dat, vinfo->vi_vblocknr, entry_bh, kaddr);
vinfo->vi_start = le64_to_cpu(entry->de_start);
vinfo->vi_end = le64_to_cpu(entry->de_end);
vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
}
kunmap_atomic(kaddr);
brelse(entry_bh);
}
return nvi;
}
/**
* nilfs_dat_read - read or get dat inode
* @sb: super block instance
* @entry_size: size of a dat entry
* @raw_inode: on-disk dat inode
* @inodep: buffer to store the inode
*/
int nilfs_dat_read(struct super_block *sb, size_t entry_size,
struct nilfs_inode *raw_inode, struct inode **inodep)
{
static struct lock_class_key dat_lock_key;
struct inode *dat;
struct nilfs_dat_info *di;
int err;
if (entry_size > sb->s_blocksize) {
printk(KERN_ERR
"NILFS: too large DAT entry size: %zu bytes.\n",
entry_size);
return -EINVAL;
} else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
printk(KERN_ERR
"NILFS: too small DAT entry size: %zu bytes.\n",
entry_size);
return -EINVAL;
}
dat = nilfs_iget_locked(sb, NULL, NILFS_DAT_INO);
if (unlikely(!dat))
return -ENOMEM;
if (!(dat->i_state & I_NEW))
goto out;
err = nilfs_mdt_init(dat, NILFS_MDT_GFP, sizeof(*di));
if (err)
goto failed;
err = nilfs_palloc_init_blockgroup(dat, entry_size);
if (err)
goto failed;
di = NILFS_DAT_I(dat);
lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
nilfs_palloc_setup_cache(dat, &di->palloc_cache);
nilfs_mdt_setup_shadow_map(dat, &di->shadow);
err = nilfs_read_inode_common(dat, raw_inode);
if (err)
goto failed;
unlock_new_inode(dat);
out:
*inodep = dat;
return 0;
failed:
iget_failed(dat);
return err;
}
| gpl-2.0 |
futranbg/ef35l-ICS-kernel | drivers/net/e1000e/82571.c | 1352 | 56106 | /*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
/*
* 82571EB Gigabit Ethernet Controller
* 82571EB Gigabit Ethernet Controller (Copper)
* 82571EB Gigabit Ethernet Controller (Fiber)
* 82571EB Dual Port Gigabit Mezzanine Adapter
* 82571EB Quad Port Gigabit Mezzanine Adapter
* 82571PT Gigabit PT Quad Port Server ExpressModule
* 82572EI Gigabit Ethernet Controller (Copper)
* 82572EI Gigabit Ethernet Controller (Fiber)
* 82572EI Gigabit Ethernet Controller
* 82573V Gigabit Ethernet Controller (Copper)
* 82573E Gigabit Ethernet Controller (Copper)
* 82573L Gigabit Ethernet Controller
* 82574L Gigabit Network Connection
* 82583V Gigabit Network Connection
*/
#include "e1000.h"
#define ID_LED_RESERVED_F746 0xF746
#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
(ID_LED_OFF1_ON2 << 8) | \
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_DEF1_DEF2))
#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
#define E1000_BASE1000T_STATUS 10
#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
#define E1000_RECEIVE_ERROR_COUNTER 21
#define E1000_RECEIVE_ERROR_MAX 0xFFFF
#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw);
static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
static s32 e1000_setup_link_82571(struct e1000_hw *hw);
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
static void e1000_clear_vfta_82571(struct e1000_hw *hw);
static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
static s32 e1000_led_on_82574(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
/**
* e1000_init_phy_params_82571 - Init PHY func ptrs.
* @hw: pointer to the HW structure
**/
static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
if (hw->phy.media_type != e1000_media_type_copper) {
phy->type = e1000_phy_none;
return 0;
}
phy->addr = 1;
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
phy->reset_delay_us = 100;
phy->ops.power_up = e1000_power_up_phy_copper;
phy->ops.power_down = e1000_power_down_phy_copper_82571;
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
phy->type = e1000_phy_igp_2;
break;
case e1000_82573:
phy->type = e1000_phy_m88;
break;
case e1000_82574:
case e1000_82583:
phy->type = e1000_phy_bm;
phy->ops.acquire = e1000_get_hw_semaphore_82574;
phy->ops.release = e1000_put_hw_semaphore_82574;
phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
break;
default:
return -E1000_ERR_PHY;
}
/* This can only be done after all function pointers are setup. */
ret_val = e1000_get_phy_id_82571(hw);
if (ret_val) {
e_dbg("Error getting PHY ID\n");
return ret_val;
}
/* Verify phy id */
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
if (phy->id != IGP01E1000_I_PHY_ID)
ret_val = -E1000_ERR_PHY;
break;
case e1000_82573:
if (phy->id != M88E1111_I_PHY_ID)
ret_val = -E1000_ERR_PHY;
break;
case e1000_82574:
case e1000_82583:
if (phy->id != BME1000_E_PHY_ID_R2)
ret_val = -E1000_ERR_PHY;
break;
default:
ret_val = -E1000_ERR_PHY;
break;
}
if (ret_val)
e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
return ret_val;
}
/**
* e1000_init_nvm_params_82571 - Init NVM func ptrs.
* @hw: pointer to the HW structure
**/
static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 eecd = er32(EECD);
u16 size;
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
switch (nvm->override) {
case e1000_nvm_override_spi_large:
nvm->page_size = 32;
nvm->address_bits = 16;
break;
case e1000_nvm_override_spi_small:
nvm->page_size = 8;
nvm->address_bits = 8;
break;
default:
nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
break;
}
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
if (((eecd >> 15) & 0x3) == 0x3) {
nvm->type = e1000_nvm_flash_hw;
nvm->word_size = 2048;
/*
* Autonomous Flash update bit must be cleared due
* to Flash update issue.
*/
eecd &= ~E1000_EECD_AUPDEN;
ew32(EECD, eecd);
break;
}
/* Fall Through */
default:
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
/*
* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
nvm->word_size = 1 << size;
break;
}
/* Function Pointers */
switch (hw->mac.type) {
case e1000_82574:
case e1000_82583:
nvm->ops.acquire = e1000_get_hw_semaphore_82574;
nvm->ops.release = e1000_put_hw_semaphore_82574;
break;
default:
break;
}
return 0;
}
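/*
* Worked example of the SPI sizing math above (illustrative, assuming
* the usual NVM_WORD_SIZE_BASE_SHIFT value of 6): an EECD size field
* of 2 gives size = 2 + 6 = 8, so word_size = 1 << 8 = 256 16-bit
* words, i.e. a 512-byte EEPROM part.
*/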
/**
* e1000_init_mac_params_82571 - Init MAC func ptrs.
* @adapter: pointer to the board private structure
**/
static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac;
struct e1000_mac_operations *func = &mac->ops;
u32 swsm = 0;
u32 swsm2 = 0;
bool force_clear_smbi = false;
/* Set media type */
switch (adapter->pdev->device) {
case E1000_DEV_ID_82571EB_FIBER:
case E1000_DEV_ID_82572EI_FIBER:
case E1000_DEV_ID_82571EB_QUAD_FIBER:
hw->phy.media_type = e1000_media_type_fiber;
break;
case E1000_DEV_ID_82571EB_SERDES:
case E1000_DEV_ID_82572EI_SERDES:
case E1000_DEV_ID_82571EB_SERDES_DUAL:
case E1000_DEV_ID_82571EB_SERDES_QUAD:
hw->phy.media_type = e1000_media_type_internal_serdes;
break;
default:
hw->phy.media_type = e1000_media_type_copper;
break;
}
/* Set mta register count */
mac->mta_reg_count = 128;
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES;
/* Adaptive IFS supported */
mac->adaptive_ifs = true;
/* check for link */
switch (hw->phy.media_type) {
case e1000_media_type_copper:
func->setup_physical_interface = e1000_setup_copper_link_82571;
func->check_for_link = e1000e_check_for_copper_link;
func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
break;
case e1000_media_type_fiber:
func->setup_physical_interface =
e1000_setup_fiber_serdes_link_82571;
func->check_for_link = e1000e_check_for_fiber_link;
func->get_link_up_info =
e1000e_get_speed_and_duplex_fiber_serdes;
break;
case e1000_media_type_internal_serdes:
func->setup_physical_interface =
e1000_setup_fiber_serdes_link_82571;
func->check_for_link = e1000_check_for_serdes_link_82571;
func->get_link_up_info =
e1000e_get_speed_and_duplex_fiber_serdes;
break;
default:
return -E1000_ERR_CONFIG;
}
switch (hw->mac.type) {
case e1000_82573:
func->set_lan_id = e1000_set_lan_id_single_port;
func->check_mng_mode = e1000e_check_mng_mode_generic;
func->led_on = e1000e_led_on_generic;
func->blink_led = e1000e_blink_led_generic;
/* FWSM register */
mac->has_fwsm = true;
/*
* ARC supported; valid only if manageability features are
* enabled.
*/
mac->arc_subsystem_valid =
(er32(FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
break;
case e1000_82574:
case e1000_82583:
func->set_lan_id = e1000_set_lan_id_single_port;
func->check_mng_mode = e1000_check_mng_mode_82574;
func->led_on = e1000_led_on_82574;
break;
default:
func->check_mng_mode = e1000e_check_mng_mode_generic;
func->led_on = e1000e_led_on_generic;
func->blink_led = e1000e_blink_led_generic;
/* FWSM register */
mac->has_fwsm = true;
break;
}
/*
* Ensure that the inter-port SWSM.SMBI lock bit is clear before
* first NVM or PHY access. This should be done for single-port
* devices, and for one port only on dual-port devices so that
* for those devices we can still use the SMBI lock to synchronize
* inter-port accesses to the PHY & NVM.
*/
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
swsm2 = er32(SWSM2);
if (!(swsm2 & E1000_SWSM2_LOCK)) {
/* Only do this for the first interface on this card */
ew32(SWSM2,
swsm2 | E1000_SWSM2_LOCK);
force_clear_smbi = true;
} else
force_clear_smbi = false;
break;
default:
force_clear_smbi = true;
break;
}
if (force_clear_smbi) {
/* Make sure SWSM.SMBI is clear */
swsm = er32(SWSM);
if (swsm & E1000_SWSM_SMBI) {
/* This bit should not be set on a first interface, and
* indicates that the bootagent or EFI code has
* improperly left this bit enabled
*/
e_dbg("Please update your 82571 Bootagent\n");
}
ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
}
/*
* Initialize device specific counter of SMBI acquisition
* timeouts.
*/
hw->dev_spec.e82571.smb_counter = 0;
return 0;
}
static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
static int global_quad_port_a; /* global port a indication */
struct pci_dev *pdev = adapter->pdev;
int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
s32 rc;
rc = e1000_init_mac_params_82571(adapter);
if (rc)
return rc;
rc = e1000_init_nvm_params_82571(hw);
if (rc)
return rc;
rc = e1000_init_phy_params_82571(hw);
if (rc)
return rc;
/* tag quad port adapters first, it's used below */
switch (pdev->device) {
case E1000_DEV_ID_82571EB_QUAD_COPPER:
case E1000_DEV_ID_82571EB_QUAD_FIBER:
case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
case E1000_DEV_ID_82571PT_QUAD_COPPER:
adapter->flags |= FLAG_IS_QUAD_PORT;
/* mark the first port */
if (global_quad_port_a == 0)
adapter->flags |= FLAG_IS_QUAD_PORT_A;
/* Reset for multiple quad port adapters */
global_quad_port_a++;
if (global_quad_port_a == 4)
global_quad_port_a = 0;
break;
default:
break;
}
switch (adapter->hw.mac.type) {
case e1000_82571:
/* these dual ports don't have WoL on port B at all */
if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) ||
(pdev->device == E1000_DEV_ID_82571EB_SERDES) ||
(pdev->device == E1000_DEV_ID_82571EB_COPPER)) &&
(is_port_b))
adapter->flags &= ~FLAG_HAS_WOL;
/* quad ports only support WoL on port A */
if (adapter->flags & FLAG_IS_QUAD_PORT &&
(!(adapter->flags & FLAG_IS_QUAD_PORT_A)))
adapter->flags &= ~FLAG_HAS_WOL;
/* Does not support WoL on any port */
if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
adapter->flags &= ~FLAG_HAS_WOL;
break;
case e1000_82573:
case e1000_82574:
case e1000_82583:
if (pdev->device == E1000_DEV_ID_82573L) {
adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
adapter->max_hw_frame_size = DEFAULT_JUMBO;
}
break;
default:
break;
}
return 0;
}
/**
* e1000_get_phy_id_82571 - Retrieve the PHY ID and revision
* @hw: pointer to the HW structure
*
* Reads the PHY registers and stores the PHY ID and possibly the PHY
* revision in the hardware structure.
**/
static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 phy_id = 0;
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
/*
* The 82571 firmware may still be configuring the PHY.
* In this case, we cannot access the PHY until the
* configuration is done. So we explicitly set the
* PHY ID.
*/
phy->id = IGP01E1000_I_PHY_ID;
break;
case e1000_82573:
return e1000e_get_phy_id(hw);
case e1000_82574:
case e1000_82583:
ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
if (ret_val)
return ret_val;
phy->id = (u32)(phy_id << 16);
udelay(20);
ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
if (ret_val)
return ret_val;
phy->id |= (u32)(phy_id);
phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
break;
default:
return -E1000_ERR_PHY;
}
return 0;
}
/**
* e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM
**/
static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
{
u32 swsm;
s32 sw_timeout = hw->nvm.word_size + 1;
s32 fw_timeout = hw->nvm.word_size + 1;
s32 i = 0;
/*
* If we have timed out 3 times on trying to acquire
* the inter-port SMBI semaphore, there is old code
* operating on the other port, and it is not
* releasing SMBI. Modify the number of times that
* we try for the semaphore to interwork with this
* older code.
*/
if (hw->dev_spec.e82571.smb_counter > 2)
sw_timeout = 1;
/* Get the SW semaphore */
while (i < sw_timeout) {
swsm = er32(SWSM);
if (!(swsm & E1000_SWSM_SMBI))
break;
udelay(50);
i++;
}
if (i == sw_timeout) {
e_dbg("Driver can't access device - SMBI bit is set.\n");
hw->dev_spec.e82571.smb_counter++;
}
/* Get the FW semaphore. */
for (i = 0; i < fw_timeout; i++) {
swsm = er32(SWSM);
ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
/* Semaphore acquired if bit latched */
if (er32(SWSM) & E1000_SWSM_SWESMBI)
break;
udelay(50);
}
if (i == fw_timeout) {
/* Release semaphores */
e1000_put_hw_semaphore_82571(hw);
e_dbg("Driver can't access the NVM\n");
return -E1000_ERR_NVM;
}
return 0;
}
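/*
* The two-stage acquisition above (SW SMBI bit, then FW SWESMBI bit)
* must always be paired with a release. Caller sketch (illustrative
* only):
*
*	if (!e1000_get_hw_semaphore_82571(hw)) {
*		... access NVM or PHY registers ...
*		e1000_put_hw_semaphore_82571(hw);
*	}
*/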
/**
* e1000_put_hw_semaphore_82571 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
**/
static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
{
u32 swsm;
swsm = er32(SWSM);
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
ew32(SWSM, swsm);
}
/**
* e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore during reset.
*
**/
static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
{
u32 extcnf_ctrl;
s32 ret_val = 0;
s32 i = 0;
extcnf_ctrl = er32(EXTCNF_CTRL);
extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
do {
ew32(EXTCNF_CTRL, extcnf_ctrl);
extcnf_ctrl = er32(EXTCNF_CTRL);
if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
break;
extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
usleep_range(2000, 4000);
i++;
} while (i < MDIO_OWNERSHIP_TIMEOUT);
if (i == MDIO_OWNERSHIP_TIMEOUT) {
/* Release semaphores */
e1000_put_hw_semaphore_82573(hw);
e_dbg("Driver can't access the PHY\n");
ret_val = -E1000_ERR_PHY;
}
return ret_val;
}
/**
* e1000_put_hw_semaphore_82573 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used during reset.
*
**/
static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
{
u32 extcnf_ctrl;
extcnf_ctrl = er32(EXTCNF_CTRL);
extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
ew32(EXTCNF_CTRL, extcnf_ctrl);
}
static DEFINE_MUTEX(swflag_mutex);
/**
* e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
* @hw: pointer to the HW structure
*
* Acquire the HW semaphore to access the PHY or NVM.
*
**/
static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
{
s32 ret_val;
mutex_lock(&swflag_mutex);
ret_val = e1000_get_hw_semaphore_82573(hw);
if (ret_val)
mutex_unlock(&swflag_mutex);
return ret_val;
}
/**
* e1000_put_hw_semaphore_82574 - Release hardware semaphore
* @hw: pointer to the HW structure
*
* Release hardware semaphore used to access the PHY or NVM
*
**/
static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
{
e1000_put_hw_semaphore_82573(hw);
mutex_unlock(&swflag_mutex);
}
/**
* e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
* @hw: pointer to the HW structure
* @active: true to enable LPLU, false to disable
*
* Sets the LPLU D0 state according to the active flag.
* LPLU will not be activated unless the
* device autonegotiation advertisement meets standards of
* either 10 or 10/100 or 10/100/1000 at all duplexes.
* This is a function pointer entry point only called by
* PHY setup routines.
**/
static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
{
u32 data = er32(POEMB);
if (active)
data |= E1000_PHY_CTRL_D0A_LPLU;
else
data &= ~E1000_PHY_CTRL_D0A_LPLU;
ew32(POEMB, data);
return 0;
}
/**
* e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
* @hw: pointer to the HW structure
* @active: boolean used to enable/disable lplu
*
* The low power link up (lplu) state is set to the power management level D3
* when active is true, else clear lplu for D3. LPLU
* is used during Dx states where the power conservation is most important.
* During driver activity, SmartSpeed should be enabled so performance is
* maintained.
**/
static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
{
u32 data = er32(POEMB);
if (!active) {
data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
} else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
(hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
(hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
data |= E1000_PHY_CTRL_NOND0A_LPLU;
}
ew32(POEMB, data);
return 0;
}
/**
* e1000_acquire_nvm_82571 - Request for access to the EEPROM
* @hw: pointer to the HW structure
*
* To gain access to the EEPROM, first we must obtain a hardware semaphore.
* Then for non-82573 hardware, set the EEPROM access request bit and wait
* for EEPROM access grant bit. If the access grant bit is not set, release
* hardware semaphore.
**/
static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
{
s32 ret_val;
ret_val = e1000_get_hw_semaphore_82571(hw);
if (ret_val)
return ret_val;
switch (hw->mac.type) {
case e1000_82573:
break;
default:
ret_val = e1000e_acquire_nvm(hw);
break;
}
if (ret_val)
e1000_put_hw_semaphore_82571(hw);
return ret_val;
}
/**
* e1000_release_nvm_82571 - Release exclusive access to EEPROM
* @hw: pointer to the HW structure
*
* Stop any current commands to the EEPROM and clear the EEPROM request bit.
**/
static void e1000_release_nvm_82571(struct e1000_hw *hw)
{
e1000e_release_nvm(hw);
e1000_put_hw_semaphore_82571(hw);
}
/**
* e1000_write_nvm_82571 - Write to EEPROM using appropriate interface
* @hw: pointer to the HW structure
* @offset: offset within the EEPROM to be written to
* @words: number of words to write
* @data: 16 bit word(s) to be written to the EEPROM
*
* For non-82573 silicon, write data to EEPROM at offset using SPI interface.
*
* If e1000e_update_nvm_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
s32 ret_val;
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
break;
case e1000_82571:
case e1000_82572:
ret_val = e1000e_write_nvm_spi(hw, offset, words, data);
break;
default:
ret_val = -E1000_ERR_NVM;
break;
}
return ret_val;
}
/**
* e1000_update_nvm_checksum_82571 - Update EEPROM checksum
* @hw: pointer to the HW structure
*
* Updates the EEPROM checksum by reading/adding each word of the EEPROM
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM.
**/
static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
{
u32 eecd;
s32 ret_val;
u16 i;
ret_val = e1000e_update_nvm_checksum_generic(hw);
if (ret_val)
return ret_val;
/*
* If our nvm is an EEPROM, then we're done;
* otherwise, commit the checksum to the flash NVM.
*/
if (hw->nvm.type != e1000_nvm_flash_hw)
return ret_val;
/* Check for pending operations. */
for (i = 0; i < E1000_FLASH_UPDATES; i++) {
usleep_range(1000, 2000);
if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
break;
}
if (i == E1000_FLASH_UPDATES)
return -E1000_ERR_NVM;
/* Reset the firmware if using STM opcode. */
if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
/*
* The enabling of and the actual reset must be done
* in two write cycles.
*/
ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
e1e_flush();
ew32(HICR, E1000_HICR_FW_RESET);
}
/* Commit the write to flash */
eecd = er32(EECD) | E1000_EECD_FLUPD;
ew32(EECD, eecd);
for (i = 0; i < E1000_FLASH_UPDATES; i++) {
usleep_range(1000, 2000);
if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
break;
}
if (i == E1000_FLASH_UPDATES)
return -E1000_ERR_NVM;
return 0;
}
/**
* e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
**/
static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
{
if (hw->nvm.type == e1000_nvm_flash_hw)
e1000_fix_nvm_checksum_82571(hw);
return e1000e_validate_nvm_checksum_generic(hw);
}
/**
* e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon
* @hw: pointer to the HW structure
* @offset: offset within the EEPROM to be written to
* @words: number of words to write
* @data: 16 bit word(s) to be written to the EEPROM
*
* After checking for invalid values, poll the EEPROM to ensure the previous
* command has completed before trying to write the next word. After write
* poll for completion.
*
* If e1000e_update_nvm_checksum is not called after this function, the
* EEPROM will most likely contain an invalid checksum.
**/
static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 i, eewr = 0;
s32 ret_val = 0;
/*
* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
e_dbg("nvm parameter(s) out of bounds\n");
return -E1000_ERR_NVM;
}
for (i = 0; i < words; i++) {
eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
E1000_NVM_RW_REG_START;
ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
if (ret_val)
break;
ew32(EEWR, eewr);
ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
if (ret_val)
break;
}
return ret_val;
}
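/*
* EEWR packing sketch for the loop above (illustrative; the shift
* values are assumed to be the usual e1000e ones, 16 for data and 2
* for the address): writing 0xBEEF to word offset 3 produces
* (0xBEEF << 16) | (3 << 2) | E1000_NVM_RW_REG_START, with each write
* bracketed by polls for the controller's done indication.
*/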
/**
* e1000_get_cfg_done_82571 - Poll for configuration done
* @hw: pointer to the HW structure
*
* Polls the management control register until the config done bit is set.
**/
static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
{
s32 timeout = PHY_CFG_TIMEOUT;
while (timeout) {
if (er32(EEMNGCTL) &
E1000_NVM_CFG_DONE_PORT_0)
break;
usleep_range(1000, 2000);
timeout--;
}
if (!timeout) {
e_dbg("MNG configuration cycle has not completed.\n");
return -E1000_ERR_RESET;
}
return 0;
}
/**
* e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
* @hw: pointer to the HW structure
* @active: true to enable LPLU, false to disable
*
* Sets the LPLU D0 state according to the active flag. When activating LPLU
* this function also disables smart speed and vice versa. LPLU will not be
* activated unless the device autonegotiation advertisement meets standards
* of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function
* pointer entry point only called by PHY setup routines.
**/
static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
u16 data;
ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
if (ret_val)
return ret_val;
if (active) {
data |= IGP02E1000_PM_D0_LPLU;
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
if (ret_val)
return ret_val;
/* When LPLU is enabled, we should disable SmartSpeed */
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
if (ret_val)
return ret_val;
} else {
data &= ~IGP02E1000_PM_D0_LPLU;
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
/*
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
*/
if (phy->smart_speed == e1000_smart_speed_on) {
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
if (ret_val)
return ret_val;
data |= IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
if (ret_val)
return ret_val;
} else if (phy->smart_speed == e1000_smart_speed_off) {
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
if (ret_val)
return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
if (ret_val)
return ret_val;
}
}
return 0;
}
/**
* e1000_reset_hw_82571 - Reset hardware
* @hw: pointer to the HW structure
*
* This resets the hardware into a known state.
**/
static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
{
u32 ctrl, ctrl_ext;
s32 ret_val;
/*
* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
if (ret_val)
e_dbg("PCI-E Master disable polling has failed.\n");
e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
ew32(RCTL, 0);
ew32(TCTL, E1000_TCTL_PSP);
e1e_flush();
usleep_range(10000, 20000);
/*
* Must acquire the MDIO ownership before MAC reset.
* Ownership defaults to firmware after a reset.
*/
switch (hw->mac.type) {
case e1000_82573:
ret_val = e1000_get_hw_semaphore_82573(hw);
break;
case e1000_82574:
case e1000_82583:
ret_val = e1000_get_hw_semaphore_82574(hw);
break;
default:
break;
}
if (ret_val)
e_dbg("Cannot acquire MDIO ownership\n");
ctrl = er32(CTRL);
e_dbg("Issuing a global reset to MAC\n");
ew32(CTRL, ctrl | E1000_CTRL_RST);
/* Must release MDIO ownership and mutex after MAC reset. */
switch (hw->mac.type) {
case e1000_82574:
case e1000_82583:
e1000_put_hw_semaphore_82574(hw);
break;
default:
break;
}
if (hw->nvm.type == e1000_nvm_flash_hw) {
udelay(10);
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_EE_RST;
ew32(CTRL_EXT, ctrl_ext);
e1e_flush();
}
ret_val = e1000e_get_auto_rd_done(hw);
if (ret_val)
/* We don't want to continue accessing MAC registers. */
return ret_val;
/*
* Phy configuration from NVM just starts after EECD_AUTO_RD is set.
* Need to wait for Phy configuration completion before accessing
* NVM and Phy.
*/
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
msleep(25);
break;
default:
break;
}
/* Clear any pending interrupt events. */
ew32(IMC, 0xffffffff);
er32(ICR);
if (hw->mac.type == e1000_82571) {
/* Install any alternate MAC address into RAR0 */
ret_val = e1000_check_alt_mac_addr_generic(hw);
if (ret_val)
return ret_val;
e1000e_set_laa_state_82571(hw, true);
}
/* Reinitialize the 82571 serdes link state machine */
if (hw->phy.media_type == e1000_media_type_internal_serdes)
hw->mac.serdes_link_state = e1000_serdes_link_down;
return 0;
}
/**
* e1000_init_hw_82571 - Initialize hardware
* @hw: pointer to the HW structure
*
* This inits the hardware readying it for operation.
**/
static s32 e1000_init_hw_82571(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
u32 reg_data;
s32 ret_val;
u16 i, rar_count = mac->rar_entry_count;
e1000_initialize_hw_bits_82571(hw);
/* Initialize identification LED */
ret_val = e1000e_id_led_init(hw);
if (ret_val)
e_dbg("Error initializing identification LED\n");
/* This is not fatal and we should not stop init due to this */
/* Disabling VLAN filtering */
e_dbg("Initializing the IEEE VLAN\n");
mac->ops.clear_vfta(hw);
/* Setup the receive address. */
/*
* If, however, a locally administered address was assigned to the
* 82571, we must reserve a RAR for it to work around an issue where
* resetting one port will reload the MAC on the other port.
*/
if (e1000e_get_laa_state_82571(hw))
rar_count--;
e1000e_init_rx_addrs(hw, rar_count);
/* Zero out the Multicast HASH table */
e_dbg("Zeroing the MTA\n");
for (i = 0; i < mac->mta_reg_count; i++)
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
/* Setup link and flow control */
ret_val = e1000_setup_link_82571(hw);
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL(0));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB |
E1000_TXDCTL_COUNT_DESC;
ew32(TXDCTL(0), reg_data);
/* ...for both queues. */
switch (mac->type) {
case e1000_82573:
e1000e_enable_tx_pkt_filtering(hw);
/* fall through */
case e1000_82574:
case e1000_82583:
reg_data = er32(GCR);
reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
ew32(GCR, reg_data);
break;
default:
reg_data = er32(TXDCTL(1));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB |
E1000_TXDCTL_COUNT_DESC;
ew32(TXDCTL(1), reg_data);
break;
}
/*
* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
*/
e1000_clear_hw_cntrs_82571(hw);
return ret_val;
}
/**
* e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits
* @hw: pointer to the HW structure
*
* Initializes required hardware-dependent bits needed for normal operation.
**/
static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
{
u32 reg;
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
reg |= (1 << 22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
reg |= (1 << 22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
reg = er32(TARC(0));
reg &= ~(0xF << 27); /* 30:27 */
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
break;
default:
break;
}
ew32(TARC(0), reg);
/* Transmit Arbitration Control 1 */
reg = er32(TARC(1));
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
reg &= ~((1 << 29) | (1 << 30));
reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
if (er32(TCTL) & E1000_TCTL_MULR)
reg &= ~(1 << 28);
else
reg |= (1 << 28);
ew32(TARC(1), reg);
break;
default:
break;
}
/* Device Control */
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
reg = er32(CTRL);
reg &= ~(1 << 29);
ew32(CTRL, reg);
break;
default:
break;
}
/* Extended Device Control */
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
reg = er32(CTRL_EXT);
reg &= ~(1 << 23);
reg |= (1 << 22);
ew32(CTRL_EXT, reg);
break;
default:
break;
}
if (hw->mac.type == e1000_82571) {
reg = er32(PBA_ECC);
reg |= E1000_PBA_ECC_CORR_EN;
ew32(PBA_ECC, reg);
}
/*
* Workaround for hardware errata.
* Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
*/
if ((hw->mac.type == e1000_82571) ||
(hw->mac.type == e1000_82572)) {
reg = er32(CTRL_EXT);
reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
ew32(CTRL_EXT, reg);
}
/* PCI-Ex Control Registers */
switch (hw->mac.type) {
case e1000_82574:
case e1000_82583:
reg = er32(GCR);
reg |= (1 << 22);
ew32(GCR, reg);
/*
* Workaround for hardware errata:
* apply the workaround documented in the errata sheet. It
* fixes an issue where some error-prone or unreliable PCIe
* completions occur, particularly with ASPM enabled. Without
* the fix, the issue can cause Tx timeouts.
*/
reg = er32(GCR2);
reg |= 1;
ew32(GCR2, reg);
break;
default:
break;
}
}
/**
* e1000_clear_vfta_82571 - Clear VLAN filter table
* @hw: pointer to the HW structure
*
* Clears the register array which contains the VLAN filter table by
* setting all the values to 0.
**/
static void e1000_clear_vfta_82571(struct e1000_hw *hw)
{
u32 offset;
u32 vfta_value = 0;
u32 vfta_offset = 0;
u32 vfta_bit_in_reg = 0;
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
if (hw->mng_cookie.vlan_id != 0) {
/*
* The VFTA is a 4096-bit bit-field, each bit identifying
* a single VLAN ID. The following operations
* determine which 32-bit entry (i.e. offset) into the
* array holds the bit for the VLAN ID of
* the manageability unit.
*/
vfta_offset = (hw->mng_cookie.vlan_id >>
E1000_VFTA_ENTRY_SHIFT) &
E1000_VFTA_ENTRY_MASK;
vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
}
break;
default:
break;
}
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
/*
* If the offset we want to clear is the same offset of the
* manageability VLAN ID, then clear all bits except that of
* the manageability unit.
*/
vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
e1e_flush();
}
}
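/*
* Worked example of the offset/bit math above (illustrative, assuming
* the usual e1000 values ENTRY_SHIFT = 5, ENTRY_MASK = 0x7F and
* ENTRY_BIT_SHIFT_MASK = 0x1F): a manageability VLAN ID of 291 (0x123)
* gives vfta_offset = (291 >> 5) & 0x7F = 9 and vfta_bit_in_reg =
* 1 << (291 & 0x1F) = 1 << 3, so the loop clears every VFTA register
* except bit 3 of register 9.
*/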
/**
* e1000_check_mng_mode_82574 - Check manageability is enabled
* @hw: pointer to the HW structure
*
* Reads the NVM Initialization Control Word 2 and returns true
* (>0) if any manageability is enabled, else false (0).
**/
static bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
{
u16 data;
e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
}
/**
* e1000_led_on_82574 - Turn LED on
* @hw: pointer to the HW structure
*
* Turn LED on.
**/
static s32 e1000_led_on_82574(struct e1000_hw *hw)
{
u32 ctrl;
u32 i;
ctrl = hw->mac.ledctl_mode2;
if (!(E1000_STATUS_LU & er32(STATUS))) {
/*
* If no link, then turn LED on by setting the invert bit
* for each LED that's "on" (0x0E) in ledctl_mode2.
*/
for (i = 0; i < 4; i++)
if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
E1000_LEDCTL_MODE_LED_ON)
ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
}
ew32(LEDCTL, ctrl);
return 0;
}
/**
* e1000_check_phy_82574 - check 82574 phy hung state
* @hw: pointer to the HW structure
*
* Returns whether phy is hung or not
**/
bool e1000_check_phy_82574(struct e1000_hw *hw)
{
u16 status_1kbt = 0;
u16 receive_errors = 0;
bool phy_hung = false;
s32 ret_val = 0;
/*
* Read the PHY Receive Error counter first; if it is at its maximum
* (all F's), read the Base1000T status register. If both are at their
* maximums, the PHY is hung.
*/
ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
if (ret_val)
goto out;
if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
if (ret_val)
goto out;
if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
E1000_IDLE_ERROR_COUNT_MASK)
phy_hung = true;
}
out:
return phy_hung;
}
/**
* e1000_setup_link_82571 - Setup flow control and link settings
* @hw: pointer to the HW structure
*
* Determines which flow control settings to use, then configures flow
* control. Calls the appropriate media-specific link configuration
* function. Assuming the adapter has a valid link partner, a valid link
* should be established. Assumes the hardware has previously been reset
* and the transmitter and receiver are not enabled.
**/
static s32 e1000_setup_link_82571(struct e1000_hw *hw)
{
/*
* 82573 does not have a word in the NVM to determine
* the default flow control setting, so we explicitly
* set it to full.
*/
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
if (hw->fc.requested_mode == e1000_fc_default)
hw->fc.requested_mode = e1000_fc_full;
break;
default:
break;
}
return e1000e_setup_link(hw);
}
/**
* e1000_setup_copper_link_82571 - Configure copper link settings
* @hw: pointer to the HW structure
*
* Configures the link for auto-neg or forced speed and duplex. Then we check
* for link, once link is established calls to configure collision distance
* and flow control are called.
**/
static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
{
u32 ctrl;
s32 ret_val;
ctrl = er32(CTRL);
ctrl |= E1000_CTRL_SLU;
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ew32(CTRL, ctrl);
switch (hw->phy.type) {
case e1000_phy_m88:
case e1000_phy_bm:
ret_val = e1000e_copper_link_setup_m88(hw);
break;
case e1000_phy_igp_2:
ret_val = e1000e_copper_link_setup_igp(hw);
break;
default:
return -E1000_ERR_PHY;
}
if (ret_val)
return ret_val;
ret_val = e1000e_setup_copper_link(hw);
return ret_val;
}
/**
* e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes
* @hw: pointer to the HW structure
*
* Configures collision distance and flow control for fiber and serdes links.
* Upon successful setup, poll for link.
**/
static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
{
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
/*
* If SerDes loopback mode is entered, there is no form
* of reset to take the adapter out of that mode. So we
* have to explicitly take the adapter out of loopback
* mode. This prevents drivers from twiddling their thumbs
* if another tool failed to take it out of loopback mode.
*/
ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
break;
default:
break;
}
return e1000e_setup_fiber_serdes_link(hw);
}
/**
* e1000_check_for_serdes_link_82571 - Check for link (Serdes)
* @hw: pointer to the HW structure
*
* Reports the link state as up or down.
*
* If autonegotiation is supported by the link partner, the link state is
* determined by the result of autonegotiation. This is the most likely case.
* If autonegotiation is not supported by the link partner, and the link
* has a valid signal, force the link up.
*
* The link state is represented internally here by 4 states:
*
* 1) down
* 2) autoneg_progress
* 3) autoneg_complete (the link successfully autonegotiated)
* 4) forced_up (the link has been forced up, it did not autonegotiate)
*
**/
static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
u32 rxcw;
u32 ctrl;
u32 status;
u32 txcw;
u32 i;
s32 ret_val = 0;
ctrl = er32(CTRL);
status = er32(STATUS);
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
/* Receiver is synchronized with no invalid bits. */
switch (mac->serdes_link_state) {
case e1000_serdes_link_autoneg_complete:
if (!(status & E1000_STATUS_LU)) {
/*
* We have lost link, retry autoneg before
* reporting link failure
*/
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = false;
e_dbg("AN_UP -> AN_PROG\n");
} else {
mac->serdes_has_link = true;
}
break;
case e1000_serdes_link_forced_up:
/*
* If we are receiving /C/ ordered sets, re-enable
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
* If the partner code word is null, stop forcing
* and restart auto negotiation.
*/
if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
/* Enable autoneg, and unforce link up */
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = false;
e_dbg("FORCED_UP -> AN_PROG\n");
} else {
mac->serdes_has_link = true;
}
break;
case e1000_serdes_link_autoneg_progress:
if (rxcw & E1000_RXCW_C) {
/*
* We received /C/ ordered sets, meaning the
* link partner has autonegotiated, and we can
* trust the Link Up (LU) status bit.
*/
if (status & E1000_STATUS_LU) {
mac->serdes_link_state =
e1000_serdes_link_autoneg_complete;
e_dbg("AN_PROG -> AN_UP\n");
mac->serdes_has_link = true;
} else {
/* Autoneg completed, but failed. */
mac->serdes_link_state =
e1000_serdes_link_down;
e_dbg("AN_PROG -> DOWN\n");
}
} else {
/*
* The link partner did not autoneg.
* Force link up and full duplex, and change
* state to forced.
*/
ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
ew32(CTRL, ctrl);
/* Configure Flow Control after link up. */
ret_val = e1000e_config_fc_after_link_up(hw);
if (ret_val) {
e_dbg("Error config flow control\n");
break;
}
mac->serdes_link_state =
e1000_serdes_link_forced_up;
mac->serdes_has_link = true;
e_dbg("AN_PROG -> FORCED_UP\n");
}
break;
case e1000_serdes_link_down:
default:
/*
* The link was down but the receiver has now gained
* valid sync, so let's see if we can bring the link
* up.
*/
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = false;
e_dbg("DOWN -> AN_PROG\n");
break;
}
} else {
if (!(rxcw & E1000_RXCW_SYNCH)) {
mac->serdes_has_link = false;
mac->serdes_link_state = e1000_serdes_link_down;
e_dbg("ANYSTATE -> DOWN\n");
} else {
/*
* Check several times, if Sync and Config
* both are consistently 1 then simply ignore
* the Invalid bit and restart Autoneg
*/
for (i = 0; i < AN_RETRY_COUNT; i++) {
udelay(10);
rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_IV) &&
!((rxcw & E1000_RXCW_SYNCH) &&
(rxcw & E1000_RXCW_C))) {
mac->serdes_has_link = false;
mac->serdes_link_state =
e1000_serdes_link_down;
e_dbg("ANYSTATE -> DOWN\n");
break;
}
}
if (i == AN_RETRY_COUNT) {
txcw = er32(TXCW);
txcw |= E1000_TXCW_ANE;
ew32(TXCW, txcw);
mac->serdes_link_state =
e1000_serdes_link_autoneg_progress;
mac->serdes_has_link = false;
e_dbg("ANYSTATE -> AN_PROG\n");
}
}
}
return ret_val;
}
/**
* e1000_valid_led_default_82571 - Verify a valid default LED config
* @hw: pointer to the HW structure
* @data: pointer to the NVM (EEPROM)
*
* Read the EEPROM for the current default LED configuration. If the
* LED configuration is not valid, set to a valid LED configuration.
**/
static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
{
s32 ret_val;
ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
if (ret_val) {
e_dbg("NVM Read Error\n");
return ret_val;
}
switch (hw->mac.type) {
case e1000_82573:
case e1000_82574:
case e1000_82583:
if (*data == ID_LED_RESERVED_F746)
*data = ID_LED_DEFAULT_82573;
break;
default:
if (*data == ID_LED_RESERVED_0000 ||
*data == ID_LED_RESERVED_FFFF)
*data = ID_LED_DEFAULT;
break;
}
return 0;
}
/**
* e1000e_get_laa_state_82571 - Get locally administered address state
* @hw: pointer to the HW structure
*
* Retrieve and return the current locally administered address state.
**/
bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
{
if (hw->mac.type != e1000_82571)
return false;
return hw->dev_spec.e82571.laa_is_present;
}
/**
* e1000e_set_laa_state_82571 - Set locally administered address state
* @hw: pointer to the HW structure
* @state: enable/disable locally administered address
*
* Enable/Disable the current locally administered address state.
**/
void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
{
if (hw->mac.type != e1000_82571)
return;
hw->dev_spec.e82571.laa_is_present = state;
/* If workaround is activated... */
if (state)
/*
* Hold a copy of the LAA in RAR[14] This is done so that
* between the time RAR[0] gets clobbered and the time it
* gets fixed, the actual LAA is in one of the RARs and no
* incoming packets directed to this port are dropped.
* Eventually the LAA will be in RAR[0] and RAR[14].
*/
e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1);
}
/**
* e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
* @hw: pointer to the HW structure
*
* Verifies that the EEPROM has completed the update. After updating the
* EEPROM, we need to check bit 15 in word 0x23 for the checksum fix. If
* the checksum fix is not implemented, we need to set the bit and update
* the checksum. Otherwise, if bit 15 is set and the checksum is incorrect,
* we need to return bad checksum.
**/
static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
{
struct e1000_nvm_info *nvm = &hw->nvm;
s32 ret_val;
u16 data;
if (nvm->type != e1000_nvm_flash_hw)
return 0;
/*
* Check bit 4 of word 10h. If it is 0, firmware is done updating
* 10h-12h. Checksum may need to be fixed.
*/
ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
if (ret_val)
return ret_val;
if (!(data & 0x10)) {
/*
* Read 0x23 and check bit 15. This bit is a 1
* when the checksum has already been fixed. If
* the checksum is still wrong and this bit is a
* 1, we need to return bad checksum. Otherwise,
* we need to set this bit to a 1 and update the
* checksum.
*/
ret_val = e1000_read_nvm(hw, 0x23, 1, &data);
if (ret_val)
return ret_val;
if (!(data & 0x8000)) {
data |= 0x8000;
ret_val = e1000_write_nvm(hw, 0x23, 1, &data);
if (ret_val)
return ret_val;
ret_val = e1000e_update_nvm_checksum(hw);
}
}
return 0;
}
/**
* e1000_read_mac_addr_82571 - Read device MAC address
* @hw: pointer to the HW structure
**/
static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
{
s32 ret_val = 0;
if (hw->mac.type == e1000_82571) {
/*
* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
* address.
*/
ret_val = e1000_check_alt_mac_addr_generic(hw);
if (ret_val)
goto out;
}
ret_val = e1000_read_mac_addr_generic(hw);
out:
return ret_val;
}
/**
* e1000_power_down_phy_copper_82571 - Remove link during PHY power down
* @hw: pointer to the HW structure
*
* In the case of a PHY power down to save power, to turn off link during a
* driver unload, or when wake on LAN is not enabled, remove the link.
**/
static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
{
struct e1000_phy_info *phy = &hw->phy;
struct e1000_mac_info *mac = &hw->mac;
if (!(phy->ops.check_reset_block))
return;
/* If the management interface is not enabled, then power down */
if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
}
/**
* e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
* @hw: pointer to the HW structure
*
* Clears the hardware counters by reading the counter registers.
**/
static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
{
e1000e_clear_hw_cntrs_base(hw);
er32(PRC64);
er32(PRC127);
er32(PRC255);
er32(PRC511);
er32(PRC1023);
er32(PRC1522);
er32(PTC64);
er32(PTC127);
er32(PTC255);
er32(PTC511);
er32(PTC1023);
er32(PTC1522);
er32(ALGNERRC);
er32(RXERRC);
er32(TNCRS);
er32(CEXTERR);
er32(TSCTC);
er32(TSCTFC);
er32(MGTPRC);
er32(MGTPDC);
er32(MGTPTC);
er32(IAC);
er32(ICRXOC);
er32(ICRXPTC);
er32(ICRXATC);
er32(ICTXPTC);
er32(ICTXATC);
er32(ICTXQEC);
er32(ICTXQMTC);
er32(ICRXDMTC);
}
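/*
 * Note (editor's gloss): the bare er32() reads above are not dead code.
 * These statistics registers are clear-on-read, so reading them and
 * discarding the result is precisely what resets them.
 */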
static struct e1000_mac_operations e82571_mac_ops = {
/* .check_mng_mode: mac type dependent */
/* .check_for_link: media type dependent */
.id_led_init = e1000e_id_led_init,
.cleanup_led = e1000e_cleanup_led_generic,
.clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
.get_bus_info = e1000e_get_bus_info_pcie,
.set_lan_id = e1000_set_lan_id_multi_port_pcie,
/* .get_link_up_info: media type dependent */
/* .led_on: mac type dependent */
.led_off = e1000e_led_off_generic,
.update_mc_addr_list = e1000e_update_mc_addr_list_generic,
.write_vfta = e1000_write_vfta_generic,
.clear_vfta = e1000_clear_vfta_82571,
.reset_hw = e1000_reset_hw_82571,
.init_hw = e1000_init_hw_82571,
.setup_link = e1000_setup_link_82571,
/* .setup_physical_interface: media type dependent */
.setup_led = e1000e_setup_led_generic,
.read_mac_addr = e1000_read_mac_addr_82571,
};
static struct e1000_phy_operations e82_phy_ops_igp = {
.acquire = e1000_get_hw_semaphore_82571,
.check_polarity = e1000_check_polarity_igp,
.check_reset_block = e1000e_check_reset_block_generic,
.commit = NULL,
.force_speed_duplex = e1000e_phy_force_speed_duplex_igp,
.get_cfg_done = e1000_get_cfg_done_82571,
.get_cable_length = e1000e_get_cable_length_igp_2,
.get_info = e1000e_get_phy_info_igp,
.read_reg = e1000e_read_phy_reg_igp,
.release = e1000_put_hw_semaphore_82571,
.reset = e1000e_phy_hw_reset_generic,
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_igp,
.cfg_on_link_up = NULL,
};
static struct e1000_phy_operations e82_phy_ops_m88 = {
.acquire = e1000_get_hw_semaphore_82571,
.check_polarity = e1000_check_polarity_m88,
.check_reset_block = e1000e_check_reset_block_generic,
.commit = e1000e_phy_sw_reset,
.force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
.get_cfg_done = e1000e_get_cfg_done,
.get_cable_length = e1000e_get_cable_length_m88,
.get_info = e1000e_get_phy_info_m88,
.read_reg = e1000e_read_phy_reg_m88,
.release = e1000_put_hw_semaphore_82571,
.reset = e1000e_phy_hw_reset_generic,
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_m88,
.cfg_on_link_up = NULL,
};
static struct e1000_phy_operations e82_phy_ops_bm = {
.acquire = e1000_get_hw_semaphore_82571,
.check_polarity = e1000_check_polarity_m88,
.check_reset_block = e1000e_check_reset_block_generic,
.commit = e1000e_phy_sw_reset,
.force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
.get_cfg_done = e1000e_get_cfg_done,
.get_cable_length = e1000e_get_cable_length_m88,
.get_info = e1000e_get_phy_info_m88,
.read_reg = e1000e_read_phy_reg_bm2,
.release = e1000_put_hw_semaphore_82571,
.reset = e1000e_phy_hw_reset_generic,
.set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
.set_d3_lplu_state = e1000e_set_d3_lplu_state,
.write_reg = e1000e_write_phy_reg_bm2,
.cfg_on_link_up = NULL,
};
static struct e1000_nvm_operations e82571_nvm_ops = {
.acquire = e1000_acquire_nvm_82571,
.read = e1000e_read_nvm_eerd,
.release = e1000_release_nvm_82571,
.update = e1000_update_nvm_checksum_82571,
.valid_led_default = e1000_valid_led_default_82571,
.validate = e1000_validate_nvm_checksum_82571,
.write = e1000_write_nvm_82571,
};
struct e1000_info e1000_82571_info = {
.mac = e1000_82571,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_RESET_OVERWRITES_LAA /* errata */
| FLAG_TARC_SPEED_MODE_BIT /* errata */
| FLAG_APME_CHECK_PORT_B,
.flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */
| FLAG2_DMA_BURST,
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_igp,
.nvm_ops = &e82571_nvm_ops,
};
struct e1000_info e1000_82572_info = {
.mac = e1000_82572,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_CTRLEXT_ON_LOAD
| FLAG_TARC_SPEED_MODE_BIT, /* errata */
.flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */
| FLAG2_DMA_BURST,
.pba = 38,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_igp,
.nvm_ops = &e82571_nvm_ops,
};
struct e1000_info e1000_82573_info = {
.mac = e1000_82573,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_SWSM_ON_LOAD,
.flags2 = FLAG2_DISABLE_ASPM_L1
| FLAG2_DISABLE_ASPM_L0S,
.pba = 20,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_m88,
.nvm_ops = &e82571_nvm_ops,
};
struct e1000_info e1000_82574_info = {
.mac = e1000_82574,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_MSIX
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_CHECK_PHY_HANG
| FLAG2_DISABLE_ASPM_L0S,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_bm,
.nvm_ops = &e82571_nvm_ops,
};
struct e1000_info e1000_82583_info = {
.mac = e1000_82583,
.flags = FLAG_HAS_HW_VLAN_FILTER
| FLAG_HAS_WOL
| FLAG_APME_IN_CTRL3
| FLAG_RX_CSUM_ENABLED
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_DISABLE_ASPM_L0S,
.pba = 32,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_bm,
.nvm_ops = &e82571_nvm_ops,
};
| gpl-2.0 |
huz123/bricked.tenderloin | net/ipv4/netfilter/nf_nat_proto_udp.c | 1608 | 2402 | /* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_nat_protocol.h>
static u_int16_t udp_port_rover;
static bool
udp_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype,
const struct nf_conn *ct)
{
return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
&udp_port_rover);
}
static bool
udp_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
struct udphdr *hdr;
unsigned int hdroff = iphdroff + iph->ihl*4;
__be32 oldip, newip;
__be16 *portptr, newport;
if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return false;
iph = (struct iphdr *)(skb->data + iphdroff);
hdr = (struct udphdr *)(skb->data + hdroff);
if (maniptype == IP_NAT_MANIP_SRC) {
/* Rewrite the source IP and source port */
oldip = iph->saddr;
newip = tuple->src.u3.ip;
newport = tuple->src.u.udp.port;
portptr = &hdr->source;
} else {
/* Rewrite the destination IP and destination port */
oldip = iph->daddr;
newip = tuple->dst.u3.ip;
newport = tuple->dst.u.udp.port;
portptr = &hdr->dest;
}
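	/*
	 * Only fix up the checksum if one is in use: a zero UDP checksum
	 * on IPv4 means "no checksum computed" and must stay zero on the
	 * wire.  When the updated checksum folds to zero it is rewritten
	 * as CSUM_MANGLED_0 (0xffff) below so it cannot be mistaken for
	 * "no checksum".
	 */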
if (hdr->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
0);
if (!hdr->check)
hdr->check = CSUM_MANGLED_0;
}
*portptr = newport;
return true;
}
const struct nf_nat_protocol nf_nat_protocol_udp = {
.protonum = IPPROTO_UDP,
.me = THIS_MODULE,
.manip_pkt = udp_manip_pkt,
.in_range = nf_nat_proto_in_range,
.unique_tuple = udp_unique_tuple,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
.range_to_nlattr = nf_nat_proto_range_to_nlattr,
.nlattr_to_range = nf_nat_proto_nlattr_to_range,
#endif
};
| gpl-2.0 |
auras76/MM_6.0_32.1.A.1.185 | drivers/net/wireless/mwifiex/cmdevt.c | 1864 | 48660 | /*
* Marvell Wireless LAN device driver: commands and events
*
* Copyright (C) 2011, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*/
#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"
#include "11ac.h"
/*
* This function initializes a command node.
*
* The actual allocation of the node is not done by this function. It only
* initializes a node by filling it with default parameters. Similarly,
* allocation of the different buffers used (IOCTL buffer, data buffer) is
* not done by this function either.
*/
static void
mwifiex_init_cmd_node(struct mwifiex_private *priv,
struct cmd_ctrl_node *cmd_node,
u32 cmd_oid, void *data_buf)
{
cmd_node->priv = priv;
cmd_node->cmd_oid = cmd_oid;
if (priv->adapter->cmd_wait_q_required) {
cmd_node->wait_q_enabled = priv->adapter->cmd_wait_q_required;
priv->adapter->cmd_wait_q_required = false;
cmd_node->cmd_wait_q_woken = false;
cmd_node->condition = &cmd_node->cmd_wait_q_woken;
}
cmd_node->data_buf = data_buf;
cmd_node->cmd_skb = cmd_node->skb;
}
/*
* This function returns a command node from the free queue depending upon
* availability.
*/
static struct cmd_ctrl_node *
mwifiex_get_cmd_node(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node;
unsigned long flags;
spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
if (list_empty(&adapter->cmd_free_q)) {
dev_err(adapter->dev, "GET_CMD_NODE: cmd node not available\n");
spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
return NULL;
}
cmd_node = list_first_entry(&adapter->cmd_free_q,
struct cmd_ctrl_node, list);
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
return cmd_node;
}
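/*
 * Editor's sketch (illustrative only, not driver code): the basic
 * lifecycle of a command node using the helpers in this file.
 * example_cmd_node_cycle() is a hypothetical name.
 */
static void example_cmd_node_cycle(struct mwifiex_private *priv)
{
	struct cmd_ctrl_node *node = mwifiex_get_cmd_node(priv->adapter);

	if (!node)
		return;		/* free queue exhausted */
	mwifiex_init_cmd_node(priv, node, 0, NULL);
	/* ... fill node->cmd_skb and move it to the pending queue ... */
	mwifiex_insert_cmd_to_free_q(priv->adapter, node);
}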
/*
* This function cleans up a command node.
*
* The function resets the fields including the buffer pointers.
* This function does not try to free the buffers. They must be
* freed before calling this function.
*
* This function will however call the receive completion callback
* in case a response buffer is still available before resetting
* the pointer.
*/
static void
mwifiex_clean_cmd_node(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node)
{
cmd_node->cmd_oid = 0;
cmd_node->cmd_flag = 0;
cmd_node->data_buf = NULL;
cmd_node->wait_q_enabled = false;
if (cmd_node->cmd_skb)
skb_trim(cmd_node->cmd_skb, 0);
if (cmd_node->resp_skb) {
adapter->if_ops.cmdrsp_complete(adapter, cmd_node->resp_skb);
cmd_node->resp_skb = NULL;
}
}
/*
* This function sends a host command to the firmware.
*
* The function copies the host command into the driver command
* buffer, which will be transferred to the firmware later by the
* main thread.
*/
static int mwifiex_cmd_host_cmd(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
struct mwifiex_ds_misc_cmd *pcmd_ptr)
{
/* Copy the HOST command to command buffer */
memcpy(cmd, pcmd_ptr->cmd, pcmd_ptr->len);
dev_dbg(priv->adapter->dev, "cmd: host cmd size = %d\n", pcmd_ptr->len);
return 0;
}
/*
* This function downloads a command to the firmware.
*
* The function performs sanity tests, sets the command sequence
* number and size, converts the header fields to CPU format before
* sending. Afterwards, it logs the command ID and action for debugging
* and sets up the command timeout timer.
*/
static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
struct cmd_ctrl_node *cmd_node)
{
struct mwifiex_adapter *adapter = priv->adapter;
int ret;
struct host_cmd_ds_command *host_cmd;
uint16_t cmd_code;
uint16_t cmd_size;
struct timeval tstamp;
unsigned long flags;
__le32 tmp;
if (!adapter || !cmd_node)
return -1;
host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
/* Sanity test */
if (host_cmd == NULL || host_cmd->size == 0) {
dev_err(adapter->dev, "DNLD_CMD: host_cmd is null"
" or cmd size is 0, not sending\n");
if (cmd_node->wait_q_enabled)
adapter->cmd_wait_q.status = -1;
mwifiex_recycle_cmd_node(adapter, cmd_node);
return -1;
}
cmd_code = le16_to_cpu(host_cmd->command);
cmd_size = le16_to_cpu(host_cmd->size);
if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET &&
cmd_code != HostCmd_CMD_FUNC_SHUTDOWN &&
cmd_code != HostCmd_CMD_FUNC_INIT) {
dev_err(adapter->dev,
"DNLD_CMD: FW in reset state, ignore cmd %#x\n",
cmd_code);
mwifiex_complete_cmd(adapter, cmd_node);
mwifiex_recycle_cmd_node(adapter, cmd_node);
return -1;
}
/* Set command sequence number */
adapter->seq_num++;
host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO
(adapter->seq_num,
cmd_node->priv->bss_num,
cmd_node->priv->bss_type));
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->curr_cmd = cmd_node;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
/* Adjust skb length */
if (cmd_node->cmd_skb->len > cmd_size)
/*
* cmd_size is less than sizeof(struct host_cmd_ds_command).
* Trim off the unused portion.
*/
skb_trim(cmd_node->cmd_skb, cmd_size);
else if (cmd_node->cmd_skb->len < cmd_size)
/*
* cmd_size is larger than sizeof(struct host_cmd_ds_command)
* because we have appended custom IE TLV. Increase skb length
* accordingly.
*/
skb_put(cmd_node->cmd_skb, cmd_size - cmd_node->cmd_skb->len);
do_gettimeofday(&tstamp);
dev_dbg(adapter->dev, "cmd: DNLD_CMD: (%lu.%lu): %#x, act %#x, len %d,"
" seqno %#x\n",
tstamp.tv_sec, tstamp.tv_usec, cmd_code,
le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size,
le16_to_cpu(host_cmd->seq_num));
if (adapter->iface_type == MWIFIEX_USB) {
tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD);
skb_push(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN);
memcpy(cmd_node->cmd_skb->data, &tmp, MWIFIEX_TYPE_LEN);
adapter->cmd_sent = true;
ret = adapter->if_ops.host_to_card(adapter,
MWIFIEX_USB_EP_CMD_EVENT,
cmd_node->cmd_skb, NULL);
skb_pull(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN);
if (ret == -EBUSY)
cmd_node->cmd_skb = NULL;
} else {
skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN);
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
cmd_node->cmd_skb, NULL);
skb_pull(cmd_node->cmd_skb, INTF_HEADER_LEN);
}
if (ret == -1) {
dev_err(adapter->dev, "DNLD_CMD: host to card failed\n");
if (adapter->iface_type == MWIFIEX_USB)
adapter->cmd_sent = false;
if (cmd_node->wait_q_enabled)
adapter->cmd_wait_q.status = -1;
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->curr_cmd = NULL;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
adapter->dbg.num_cmd_host_to_card_failure++;
return -1;
}
/* Save the last command id and action to debug log */
adapter->dbg.last_cmd_index =
(adapter->dbg.last_cmd_index + 1) % DBG_CMD_NUM;
adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index] = cmd_code;
adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] =
le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN));
/* Clear BSS_NO_BITS from HostCmd */
cmd_code &= HostCmd_CMD_ID_MASK;
/* Setup the timer after transmit command */
mod_timer(&adapter->cmd_timer,
jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
return 0;
}
/*
* This function downloads a sleep confirm command to the firmware.
*
* The function performs sanity tests, sets the command sequence
* number and size, converts the header fields to CPU format before
* sending.
*
* No response is needed for the sleep confirm command.
*/
static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
{
int ret;
struct mwifiex_private *priv;
struct mwifiex_opt_sleep_confirm *sleep_cfm_buf =
(struct mwifiex_opt_sleep_confirm *)
adapter->sleep_cfm->data;
struct sk_buff *sleep_cfm_tmp;
__le32 tmp;
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
sleep_cfm_buf->seq_num =
cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO
(adapter->seq_num, priv->bss_num,
priv->bss_type)));
adapter->seq_num++;
if (adapter->iface_type == MWIFIEX_USB) {
sleep_cfm_tmp =
dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm)
+ MWIFIEX_TYPE_LEN);
skb_put(sleep_cfm_tmp, sizeof(struct mwifiex_opt_sleep_confirm)
+ MWIFIEX_TYPE_LEN);
tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD);
memcpy(sleep_cfm_tmp->data, &tmp, MWIFIEX_TYPE_LEN);
memcpy(sleep_cfm_tmp->data + MWIFIEX_TYPE_LEN,
adapter->sleep_cfm->data,
sizeof(struct mwifiex_opt_sleep_confirm));
ret = adapter->if_ops.host_to_card(adapter,
MWIFIEX_USB_EP_CMD_EVENT,
sleep_cfm_tmp, NULL);
if (ret != -EBUSY)
dev_kfree_skb_any(sleep_cfm_tmp);
} else {
skb_push(adapter->sleep_cfm, INTF_HEADER_LEN);
ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
adapter->sleep_cfm, NULL);
skb_pull(adapter->sleep_cfm, INTF_HEADER_LEN);
}
if (ret == -1) {
dev_err(adapter->dev, "SLEEP_CFM: failed\n");
adapter->dbg.num_cmd_sleep_cfm_host_to_card_failure++;
return -1;
}
if (GET_BSS_ROLE(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY))
== MWIFIEX_BSS_ROLE_STA) {
if (!sleep_cfm_buf->resp_ctrl)
/* Response is not needed for sleep
confirm command */
adapter->ps_state = PS_STATE_SLEEP;
else
adapter->ps_state = PS_STATE_SLEEP_CFM;
if (!sleep_cfm_buf->resp_ctrl &&
(adapter->is_hs_configured &&
!adapter->sleep_period.period)) {
adapter->pm_wakeup_card_req = true;
mwifiex_hs_activated_event(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_STA), true);
}
}
return ret;
}
/*
* This function allocates the command buffers and links them to
* the command free queue.
*
* The driver uses a pre-allocated pool of command buffers, which
* are created at driver initialization and freed at driver cleanup.
* Every command needs to obtain a command buffer from this pool before
* it can be issued. The command free queue lists the command buffers
* currently free to use, while the command pending queue lists the
* command buffers already in use and awaiting handling. Command buffers
* are returned to the free queue after use.
*/
int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_array;
u32 i;
/* Allocate and initialize struct cmd_ctrl_node */
cmd_array = kcalloc(MWIFIEX_NUM_OF_CMD_BUFFER,
sizeof(struct cmd_ctrl_node), GFP_KERNEL);
if (!cmd_array)
return -ENOMEM;
adapter->cmd_pool = cmd_array;
/* Allocate and initialize command buffers */
for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
cmd_array[i].skb = dev_alloc_skb(MWIFIEX_SIZE_OF_CMD_BUFFER);
if (!cmd_array[i].skb) {
dev_err(adapter->dev, "ALLOC_CMD_BUF: out of memory\n");
return -1;
}
}
for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++)
mwifiex_insert_cmd_to_free_q(adapter, &cmd_array[i]);
return 0;
}
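/*
 * Editor's note: if an skb allocation fails part-way through the loop
 * above, the early return leaves the earlier buffers allocated; the
 * caller is expected to unwind via mwifiex_free_cmd_buffer(), which
 * tolerates a partially populated pool.
 */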
/*
* This function frees the command buffers.
*
* The function calls the completion callback for all the command
* buffers that still have response buffers associated with them.
*/
int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_array;
u32 i;
/* Need to check if cmd pool is allocated or not */
if (!adapter->cmd_pool) {
dev_dbg(adapter->dev, "info: FREE_CMD_BUF: cmd_pool is null\n");
return 0;
}
cmd_array = adapter->cmd_pool;
/* Release shared memory buffers */
for (i = 0; i < MWIFIEX_NUM_OF_CMD_BUFFER; i++) {
if (cmd_array[i].skb) {
dev_dbg(adapter->dev, "cmd: free cmd buffer %d\n", i);
dev_kfree_skb_any(cmd_array[i].skb);
}
if (!cmd_array[i].resp_skb)
continue;
if (adapter->iface_type == MWIFIEX_USB)
adapter->if_ops.cmdrsp_complete(adapter,
cmd_array[i].resp_skb);
else
dev_kfree_skb_any(cmd_array[i].resp_skb);
}
/* Release struct cmd_ctrl_node */
if (adapter->cmd_pool) {
dev_dbg(adapter->dev, "cmd: free cmd pool\n");
kfree(adapter->cmd_pool);
adapter->cmd_pool = NULL;
}
return 0;
}
/*
* This function handles events generated by firmware.
*
* The event body of events received from firmware is not used (though it is
* saved); only the event ID is used. Some events are re-invoked by
* the driver, with a new event body.
*
* After processing, the function calls the completion callback
* for cleanup.
*/
int mwifiex_process_event(struct mwifiex_adapter *adapter)
{
int ret;
struct mwifiex_private *priv =
mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
struct sk_buff *skb = adapter->event_skb;
u32 eventcause = adapter->event_cause;
struct timeval tstamp;
struct mwifiex_rxinfo *rx_info;
/* Save the last event to debug log */
adapter->dbg.last_event_index =
(adapter->dbg.last_event_index + 1) % DBG_CMD_NUM;
adapter->dbg.last_event[adapter->dbg.last_event_index] =
(u16) eventcause;
/* Get BSS number and corresponding priv */
priv = mwifiex_get_priv_by_id(adapter, EVENT_GET_BSS_NUM(eventcause),
EVENT_GET_BSS_TYPE(eventcause));
if (!priv)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
/* Clear BSS_NO_BITS from event */
eventcause &= EVENT_ID_MASK;
adapter->event_cause = eventcause;
if (skb) {
rx_info = MWIFIEX_SKB_RXCB(skb);
rx_info->bss_num = priv->bss_num;
rx_info->bss_type = priv->bss_type;
}
if (eventcause != EVENT_PS_SLEEP && eventcause != EVENT_PS_AWAKE) {
do_gettimeofday(&tstamp);
dev_dbg(adapter->dev, "event: %lu.%lu: cause: %#x\n",
tstamp.tv_sec, tstamp.tv_usec, eventcause);
} else {
/* Handle PS_SLEEP/AWAKE events on STA */
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
if (!priv)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
}
if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
ret = mwifiex_process_uap_event(priv);
else
ret = mwifiex_process_sta_event(priv);
adapter->event_cause = 0;
adapter->event_skb = NULL;
adapter->if_ops.event_complete(adapter, skb);
return ret;
}
/*
* This function is used to send synchronous command to the firmware.
*
* It allocates a wait queue for the command and waits for the command
* response.
*/
int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no,
u16 cmd_action, u32 cmd_oid, void *data_buf)
{
int ret = 0;
struct mwifiex_adapter *adapter = priv->adapter;
adapter->cmd_wait_q_required = true;
ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid,
data_buf);
return ret;
}
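/*
 * Editor's sketch: the synchronous calling convention.  The command id
 * below is real (its prepare handler appears later in this file); the
 * wrapper name is hypothetical.  GET_HW_SPEC ignores the action and OID
 * arguments, so zeros are passed for both.
 */
static int example_sync_get_hw_spec(struct mwifiex_private *priv)
{
	/* Blocks until the firmware response has been processed */
	return mwifiex_send_cmd_sync(priv, HostCmd_CMD_GET_HW_SPEC, 0, 0, NULL);
}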
/*
* This function prepares a command and sends it asynchronously to the firmware.
*
* Preparation includes -
* - Sanity tests to make sure the card is still present or the FW
* is not reset
* - Getting a new command node from the command free queue
* - Initializing the command node for default parameters
* - Filling up the non-default parameters and buffer pointers
* - Adding the command to the pending queue
*/
int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
u16 cmd_action, u32 cmd_oid, void *data_buf)
{
int ret;
struct mwifiex_adapter *adapter = priv->adapter;
struct cmd_ctrl_node *cmd_node;
struct host_cmd_ds_command *cmd_ptr;
if (!adapter) {
pr_err("PREP_CMD: adapter is NULL\n");
return -1;
}
if (adapter->is_suspended) {
dev_err(adapter->dev, "PREP_CMD: device in suspended state\n");
return -1;
}
if (adapter->surprise_removed) {
dev_err(adapter->dev, "PREP_CMD: card is removed\n");
return -1;
}
if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET) {
if (cmd_no != HostCmd_CMD_FUNC_INIT) {
dev_err(adapter->dev, "PREP_CMD: FW in reset state\n");
return -1;
}
}
/* Get a new command node */
cmd_node = mwifiex_get_cmd_node(adapter);
if (!cmd_node) {
dev_err(adapter->dev, "PREP_CMD: no free cmd node\n");
return -1;
}
/* Initialize the command node */
mwifiex_init_cmd_node(priv, cmd_node, cmd_oid, data_buf);
if (!cmd_node->cmd_skb) {
dev_err(adapter->dev, "PREP_CMD: no free cmd buf\n");
return -1;
}
memset(skb_put(cmd_node->cmd_skb, sizeof(struct host_cmd_ds_command)),
0, sizeof(struct host_cmd_ds_command));
cmd_ptr = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
cmd_ptr->command = cpu_to_le16(cmd_no);
cmd_ptr->result = 0;
/* Prepare command */
if (cmd_no) {
switch (cmd_no) {
case HostCmd_CMD_UAP_SYS_CONFIG:
case HostCmd_CMD_UAP_BSS_START:
case HostCmd_CMD_UAP_BSS_STOP:
ret = mwifiex_uap_prepare_cmd(priv, cmd_no, cmd_action,
cmd_oid, data_buf,
cmd_ptr);
break;
default:
ret = mwifiex_sta_prepare_cmd(priv, cmd_no, cmd_action,
cmd_oid, data_buf,
cmd_ptr);
break;
}
} else {
ret = mwifiex_cmd_host_cmd(priv, cmd_ptr, data_buf);
cmd_node->cmd_flag |= CMD_F_HOSTCMD;
}
/* Return error, since the command preparation failed */
if (ret) {
dev_err(adapter->dev, "PREP_CMD: cmd %#x preparation failed\n",
cmd_no);
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
return -1;
}
/* Send command */
if (cmd_no == HostCmd_CMD_802_11_SCAN) {
mwifiex_queue_scan_cmd(priv, cmd_node);
} else {
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
queue_work(adapter->workqueue, &adapter->main_work);
if (cmd_node->wait_q_enabled)
ret = mwifiex_wait_queue_complete(adapter, cmd_node);
}
return ret;
}
/*
* This function returns a command to the command free queue.
*
* The function also calls the completion callback if required, before
* cleaning the command node and re-inserting it into the free queue.
*/
void
mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node)
{
unsigned long flags;
if (!cmd_node)
return;
if (cmd_node->wait_q_enabled)
mwifiex_complete_cmd(adapter, cmd_node);
/* Clean the node */
mwifiex_clean_cmd_node(adapter, cmd_node);
/* Insert node into cmd_free_q */
spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
list_add_tail(&cmd_node->list, &adapter->cmd_free_q);
spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
}
/* This function reuses a command node. */
void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node)
{
struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data;
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
atomic_dec(&adapter->cmd_pending);
dev_dbg(adapter->dev, "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
le16_to_cpu(host_cmd->command),
atomic_read(&adapter->cmd_pending));
}
/*
* This function queues a command to the command pending queue.
*
* This in effect adds the command to the command list to be executed.
* The Exit PS command is handled specially, by always placing it at the
* front of the command queue.
*/
void
mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node, u32 add_tail)
{
struct host_cmd_ds_command *host_cmd = NULL;
u16 command;
unsigned long flags;
host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
if (!host_cmd) {
dev_err(adapter->dev, "QUEUE_CMD: host_cmd is NULL\n");
return;
}
command = le16_to_cpu(host_cmd->command);
/* The Exit_PS command always needs to be queued at the head of the queue. */
if (command == HostCmd_CMD_802_11_PS_MODE_ENH) {
struct host_cmd_ds_802_11_ps_mode_enh *pm =
&host_cmd->params.psmode_enh;
if ((le16_to_cpu(pm->action) == DIS_PS) ||
(le16_to_cpu(pm->action) == DIS_AUTO_PS)) {
if (adapter->ps_state != PS_STATE_AWAKE)
add_tail = false;
}
}
spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
if (add_tail)
list_add_tail(&cmd_node->list, &adapter->cmd_pending_q);
else
list_add(&cmd_node->list, &adapter->cmd_pending_q);
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
atomic_inc(&adapter->cmd_pending);
dev_dbg(adapter->dev, "cmd: QUEUE_CMD: cmd=%#x, cmd_pending=%d\n",
command, atomic_read(&adapter->cmd_pending));
}
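/*
 * Editor's gloss on the head-insertion case above: a DIS_PS/DIS_AUTO_PS
 * request issued while the adapter is not AWAKE must run before anything
 * already queued, otherwise the commands behind it would be handed to a
 * sleeping device.
 */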
/*
* This function executes the next command in command pending queue.
*
* This function will fail if a command is already in processing stage,
* otherwise it will dequeue the first command from the command pending
* queue and send to the firmware.
*
* If the device is currently in host sleep mode, any command, except the
* host sleep configuration command, will de-activate host sleep. For PS
* mode, the function will put the firmware back to sleep if applicable.
*/
int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter)
{
struct mwifiex_private *priv;
struct cmd_ctrl_node *cmd_node;
int ret = 0;
struct host_cmd_ds_command *host_cmd;
unsigned long cmd_flags;
unsigned long cmd_pending_q_flags;
/* Check if already in processing */
if (adapter->curr_cmd) {
dev_err(adapter->dev, "EXEC_NEXT_CMD: cmd in processing\n");
return -1;
}
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
/* Check if any command is pending */
spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_pending_q_flags);
if (list_empty(&adapter->cmd_pending_q)) {
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
cmd_pending_q_flags);
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
return 0;
}
cmd_node = list_first_entry(&adapter->cmd_pending_q,
struct cmd_ctrl_node, list);
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
cmd_pending_q_flags);
host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data);
priv = cmd_node->priv;
if (adapter->ps_state != PS_STATE_AWAKE) {
dev_err(adapter->dev, "%s: cannot send cmd in sleep state,"
" this should not happen\n", __func__);
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
return ret;
}
spin_lock_irqsave(&adapter->cmd_pending_q_lock, cmd_pending_q_flags);
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
cmd_pending_q_flags);
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
ret = mwifiex_dnld_cmd_to_fw(priv, cmd_node);
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
/* Any command sent to the firmware when host is in sleep
* mode should de-configure host sleep. We should skip the
* host sleep configuration command itself though
*/
if (priv && (host_cmd->command !=
cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH))) {
if (adapter->hs_activated) {
adapter->is_hs_configured = false;
mwifiex_hs_activated_event(priv, false);
}
}
return ret;
}
/*
* This function handles the command response.
*
* After processing, the function cleans the command node and puts
* it back to the command free queue.
*/
int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter)
{
struct host_cmd_ds_command *resp;
struct mwifiex_private *priv =
mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
int ret = 0;
uint16_t orig_cmdresp_no;
uint16_t cmdresp_no;
uint16_t cmdresp_result;
struct timeval tstamp;
unsigned long flags;
/* Now that we have a response from the FW, cancel the command timer */
del_timer(&adapter->cmd_timer);
if (!adapter->curr_cmd || !adapter->curr_cmd->resp_skb) {
resp = (struct host_cmd_ds_command *) adapter->upld_buf;
dev_err(adapter->dev, "CMD_RESP: NULL curr_cmd, %#x\n",
le16_to_cpu(resp->command));
return -1;
}
adapter->num_cmd_timeout = 0;
resp = (struct host_cmd_ds_command *) adapter->curr_cmd->resp_skb->data;
if (adapter->curr_cmd->cmd_flag & CMD_F_CANCELED) {
dev_err(adapter->dev, "CMD_RESP: %#x been canceled\n",
le16_to_cpu(resp->command));
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->curr_cmd = NULL;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
return -1;
}
if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
/* Copy original response back to response buffer */
struct mwifiex_ds_misc_cmd *hostcmd;
uint16_t size = le16_to_cpu(resp->size);
dev_dbg(adapter->dev, "info: host cmd resp size = %d\n", size);
size = min_t(u16, size, MWIFIEX_SIZE_OF_CMD_BUFFER);
if (adapter->curr_cmd->data_buf) {
hostcmd = adapter->curr_cmd->data_buf;
hostcmd->len = size;
memcpy(hostcmd->cmd, resp, size);
}
}
orig_cmdresp_no = le16_to_cpu(resp->command);
/* Get BSS number and corresponding priv */
priv = mwifiex_get_priv_by_id(adapter,
HostCmd_GET_BSS_NO(le16_to_cpu(resp->seq_num)),
HostCmd_GET_BSS_TYPE(le16_to_cpu(resp->seq_num)));
if (!priv)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
/* Clear RET_BIT from HostCmd */
resp->command = cpu_to_le16(orig_cmdresp_no & HostCmd_CMD_ID_MASK);
cmdresp_no = le16_to_cpu(resp->command);
cmdresp_result = le16_to_cpu(resp->result);
/* Save the last command response to debug log */
adapter->dbg.last_cmd_resp_index =
(adapter->dbg.last_cmd_resp_index + 1) % DBG_CMD_NUM;
adapter->dbg.last_cmd_resp_id[adapter->dbg.last_cmd_resp_index] =
orig_cmdresp_no;
do_gettimeofday(&tstamp);
dev_dbg(adapter->dev, "cmd: CMD_RESP: (%lu.%lu): 0x%x, result %d,"
" len %d, seqno 0x%x\n",
tstamp.tv_sec, tstamp.tv_usec, orig_cmdresp_no, cmdresp_result,
le16_to_cpu(resp->size), le16_to_cpu(resp->seq_num));
if (!(orig_cmdresp_no & HostCmd_RET_BIT)) {
dev_err(adapter->dev, "CMD_RESP: invalid cmd resp\n");
if (adapter->curr_cmd->wait_q_enabled)
adapter->cmd_wait_q.status = -1;
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->curr_cmd = NULL;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
return -1;
}
if (adapter->curr_cmd->cmd_flag & CMD_F_HOSTCMD) {
adapter->curr_cmd->cmd_flag &= ~CMD_F_HOSTCMD;
if ((cmdresp_result == HostCmd_RESULT_OK) &&
(cmdresp_no == HostCmd_CMD_802_11_HS_CFG_ENH))
ret = mwifiex_ret_802_11_hs_cfg(priv, resp);
} else {
/* handle response */
ret = mwifiex_process_sta_cmdresp(priv, cmdresp_no, resp);
}
/* Check init command response */
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) {
if (ret) {
dev_err(adapter->dev, "%s: cmd %#x failed during "
"initialization\n", __func__, cmdresp_no);
mwifiex_init_fw_complete(adapter);
return -1;
} else if (adapter->last_init_cmd == cmdresp_no)
adapter->hw_status = MWIFIEX_HW_STATUS_INIT_DONE;
}
if (adapter->curr_cmd) {
if (adapter->curr_cmd->wait_q_enabled)
adapter->cmd_wait_q.status = ret;
mwifiex_recycle_cmd_node(adapter, adapter->curr_cmd);
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->curr_cmd = NULL;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
}
return ret;
}
/*
* This function handles a command-send timeout.
*
* It logs the timed-out command together with the driver's debug state,
* fails any context waiting on the command with -ETIMEDOUT, and triggers
* a card reset if the interface supports one.
*/
void
mwifiex_cmd_timeout_func(unsigned long function_context)
{
struct mwifiex_adapter *adapter =
(struct mwifiex_adapter *) function_context;
struct cmd_ctrl_node *cmd_node;
struct timeval tstamp;
adapter->num_cmd_timeout++;
adapter->dbg.num_cmd_timeout++;
if (!adapter->curr_cmd) {
dev_dbg(adapter->dev, "cmd: empty curr_cmd\n");
return;
}
cmd_node = adapter->curr_cmd;
if (cmd_node) {
adapter->dbg.timeout_cmd_id =
adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
adapter->dbg.timeout_cmd_act =
adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index];
do_gettimeofday(&tstamp);
dev_err(adapter->dev,
"%s: Timeout cmd id (%lu.%lu) = %#x, act = %#x\n",
__func__, tstamp.tv_sec, tstamp.tv_usec,
adapter->dbg.timeout_cmd_id,
adapter->dbg.timeout_cmd_act);
dev_err(adapter->dev, "num_data_h2c_failure = %d\n",
adapter->dbg.num_tx_host_to_card_failure);
dev_err(adapter->dev, "num_cmd_h2c_failure = %d\n",
adapter->dbg.num_cmd_host_to_card_failure);
dev_err(adapter->dev, "num_cmd_timeout = %d\n",
adapter->dbg.num_cmd_timeout);
dev_err(adapter->dev, "num_tx_timeout = %d\n",
adapter->dbg.num_tx_timeout);
dev_err(adapter->dev, "last_cmd_index = %d\n",
adapter->dbg.last_cmd_index);
dev_err(adapter->dev, "last_cmd_id: %*ph\n",
(int)sizeof(adapter->dbg.last_cmd_id),
adapter->dbg.last_cmd_id);
dev_err(adapter->dev, "last_cmd_act: %*ph\n",
(int)sizeof(adapter->dbg.last_cmd_act),
adapter->dbg.last_cmd_act);
dev_err(adapter->dev, "last_cmd_resp_index = %d\n",
adapter->dbg.last_cmd_resp_index);
dev_err(adapter->dev, "last_cmd_resp_id: %*ph\n",
(int)sizeof(adapter->dbg.last_cmd_resp_id),
adapter->dbg.last_cmd_resp_id);
dev_err(adapter->dev, "last_event_index = %d\n",
adapter->dbg.last_event_index);
dev_err(adapter->dev, "last_event: %*ph\n",
(int)sizeof(adapter->dbg.last_event),
adapter->dbg.last_event);
dev_err(adapter->dev, "data_sent=%d cmd_sent=%d\n",
adapter->data_sent, adapter->cmd_sent);
dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
adapter->ps_mode, adapter->ps_state);
if (cmd_node->wait_q_enabled) {
adapter->cmd_wait_q.status = -ETIMEDOUT;
wake_up_interruptible(&adapter->cmd_wait_q.wait);
mwifiex_cancel_pending_ioctl(adapter);
/* reset cmd_sent flag to unblock new commands */
adapter->cmd_sent = false;
}
}
if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
mwifiex_init_fw_complete(adapter);
if (adapter->if_ops.card_reset)
adapter->if_ops.card_reset(adapter);
}
/*
* This function cancels all the pending commands.
*
* The current command, all commands in the command pending queue and all scan
* commands in the scan pending queue are cancelled. All the completion callbacks
* are called with failure status to ensure cleanup.
*/
void
mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
unsigned long flags;
/* Cancel current cmd */
if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->curr_cmd->wait_q_enabled = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
adapter->cmd_wait_q.status = -1;
mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
/* Cancel all pending command */
spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
list_for_each_entry_safe(cmd_node, tmp_node,
&adapter->cmd_pending_q, list) {
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
if (cmd_node->wait_q_enabled) {
adapter->cmd_wait_q.status = -1;
mwifiex_complete_cmd(adapter, cmd_node);
cmd_node->wait_q_enabled = false;
}
mwifiex_recycle_cmd_node(adapter, cmd_node);
spin_lock_irqsave(&adapter->cmd_pending_q_lock, flags);
}
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
/* Cancel all pending scan command */
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
list_for_each_entry_safe(cmd_node, tmp_node,
&adapter->scan_pending_q, list) {
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
cmd_node->wait_q_enabled = false;
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
}
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
}
/*
* This function cancels all pending commands that match
* the given IOCTL request.
*
* Both the current command buffer and the pending command queue are
* searched for a matching IOCTL request. The completion callback of
* the matched command is called with failure status to ensure cleanup.
* In case of scan commands, all pending commands in scan pending queue
* are cancelled.
*/
void
mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
{
struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
unsigned long cmd_flags;
unsigned long scan_pending_q_flags;
uint16_t cancel_scan_cmd = false;
if ((adapter->curr_cmd) &&
(adapter->curr_cmd->wait_q_enabled)) {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
cmd_node = adapter->curr_cmd;
cmd_node->wait_q_enabled = false;
cmd_node->cmd_flag |= CMD_F_CANCELED;
mwifiex_recycle_cmd_node(adapter, cmd_node);
mwifiex_complete_cmd(adapter, adapter->curr_cmd);
adapter->curr_cmd = NULL;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
}
/* Cancel all pending scan command */
spin_lock_irqsave(&adapter->scan_pending_q_lock,
scan_pending_q_flags);
list_for_each_entry_safe(cmd_node, tmp_node,
&adapter->scan_pending_q, list) {
list_del(&cmd_node->list);
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
scan_pending_q_flags);
cmd_node->wait_q_enabled = false;
mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
spin_lock_irqsave(&adapter->scan_pending_q_lock,
scan_pending_q_flags);
cancel_scan_cmd = true;
}
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
scan_pending_q_flags);
if (cancel_scan_cmd) {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
}
adapter->cmd_wait_q.status = -1;
}
/*
* This function sends the sleep confirm command to firmware, if
* possible.
*
* The sleep confirm command cannot be issued if command response,
* data response or event response is awaiting handling, or if we
* are in the middle of sending a command, or expecting a command
* response.
*/
void
mwifiex_check_ps_cond(struct mwifiex_adapter *adapter)
{
if (!adapter->cmd_sent &&
!adapter->curr_cmd && !IS_CARD_RX_RCVD(adapter))
mwifiex_dnld_sleep_confirm_cmd(adapter);
else
dev_dbg(adapter->dev,
"cmd: Delay Sleep Confirm (%s%s%s)\n",
(adapter->cmd_sent) ? "D" : "",
(adapter->curr_cmd) ? "C" : "",
(IS_CARD_RX_RCVD(adapter)) ? "R" : "");
}
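/*
 * Editor's gloss on the debug letters above: "D" means a command
 * download is still in flight (cmd_sent), "C" means a command is
 * awaiting its response (curr_cmd), and "R" means received data is
 * still being processed (IS_CARD_RX_RCVD).  Any of them defers the
 * sleep confirm.
 */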
/*
* This function sends a Host Sleep activated event to applications.
*
* This event is generated by the driver, with a blank event body.
*/
void
mwifiex_hs_activated_event(struct mwifiex_private *priv, u8 activated)
{
if (activated) {
if (priv->adapter->is_hs_configured) {
priv->adapter->hs_activated = true;
mwifiex_update_rxreor_flags(priv->adapter,
RXREOR_FORCE_NO_DROP);
dev_dbg(priv->adapter->dev, "event: hs_activated\n");
priv->adapter->hs_activate_wait_q_woken = true;
wake_up_interruptible(
&priv->adapter->hs_activate_wait_q);
} else {
dev_dbg(priv->adapter->dev, "event: HS not configured\n");
}
} else {
dev_dbg(priv->adapter->dev, "event: hs_deactivated\n");
priv->adapter->hs_activated = false;
}
}
/*
* This function handles the command response of a Host Sleep configuration
* command.
*
* Handling includes changing the header fields into CPU format
* and setting the current host sleep activation status in driver.
*
* In case of a host sleep status change, the function generates an event to
* notify the applications.
*/
int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_hs_cfg_enh *phs_cfg =
&resp->params.opt_hs_cfg;
uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
adapter->iface_type != MWIFIEX_USB) {
mwifiex_hs_activated_event(priv, true);
return 0;
} else {
dev_dbg(adapter->dev, "cmd: CMD_RESP: HS_CFG cmd reply"
" result=%#x, conditions=0x%x gpio=0x%x gap=0x%x\n",
resp->result, conditions,
phs_cfg->params.hs_config.gpio,
phs_cfg->params.hs_config.gap);
}
if (conditions != HS_CFG_CANCEL) {
adapter->is_hs_configured = true;
if (adapter->iface_type == MWIFIEX_USB)
mwifiex_hs_activated_event(priv, true);
} else {
adapter->is_hs_configured = false;
if (adapter->hs_activated)
mwifiex_hs_activated_event(priv, false);
}
return 0;
}
/*
* This function wakes up the adapter and generates a Host Sleep
* cancel event on receiving the power up interrupt.
*/
void
mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
{
dev_dbg(adapter->dev, "info: %s: auto cancelling host sleep"
" since there is interrupt from the firmware\n", __func__);
adapter->if_ops.wakeup(adapter);
adapter->hs_activated = false;
adapter->is_hs_configured = false;
adapter->is_suspended = false;
mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
MWIFIEX_BSS_ROLE_ANY),
false);
}
EXPORT_SYMBOL_GPL(mwifiex_process_hs_config);
/*
* This function handles the command response of a sleep confirm command.
*
* The function sets the card state to SLEEP if the response indicates success.
*/
void
mwifiex_process_sleep_confirm_resp(struct mwifiex_adapter *adapter,
u8 *pbuf, u32 upld_len)
{
struct host_cmd_ds_command *cmd = (struct host_cmd_ds_command *) pbuf;
struct mwifiex_private *priv =
mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
uint16_t result = le16_to_cpu(cmd->result);
uint16_t command = le16_to_cpu(cmd->command);
uint16_t seq_num = le16_to_cpu(cmd->seq_num);
if (!upld_len) {
dev_err(adapter->dev, "%s: cmd size is 0\n", __func__);
return;
}
/* Get BSS number and corresponding priv */
priv = mwifiex_get_priv_by_id(adapter, HostCmd_GET_BSS_NO(seq_num),
HostCmd_GET_BSS_TYPE(seq_num));
if (!priv)
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
/* Update sequence number */
seq_num = HostCmd_GET_SEQ_NO(seq_num);
/* Clear RET_BIT from HostCmd */
command &= HostCmd_CMD_ID_MASK;
if (command != HostCmd_CMD_802_11_PS_MODE_ENH) {
dev_err(adapter->dev,
"%s: rcvd unexpected resp for cmd %#x, result = %x\n",
__func__, command, result);
return;
}
if (result) {
dev_err(adapter->dev, "%s: sleep confirm cmd failed\n",
__func__);
adapter->pm_wakeup_card_req = false;
adapter->ps_state = PS_STATE_AWAKE;
return;
}
adapter->pm_wakeup_card_req = true;
if (adapter->is_hs_configured)
mwifiex_hs_activated_event(mwifiex_get_priv
(adapter, MWIFIEX_BSS_ROLE_ANY),
true);
adapter->ps_state = PS_STATE_SLEEP;
cmd->command = cpu_to_le16(command);
cmd->seq_num = cpu_to_le16(seq_num);
}
EXPORT_SYMBOL_GPL(mwifiex_process_sleep_confirm_resp);
/*
* This function prepares an enhanced power mode command.
*
* This function can be used to disable power save or to configure
* power save with auto PS or STA PS or auto deep sleep.
*
* Preparation includes -
* - Setting command ID, action and proper size
* - Setting Power Save bitmap, PS parameters TLV, PS mode TLV,
* auto deep sleep TLV (as required)
* - Ensuring correct endian-ness
*/
int mwifiex_cmd_enh_power_mode(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
u16 cmd_action, uint16_t ps_bitmap,
struct mwifiex_ds_auto_ds *auto_ds)
{
struct host_cmd_ds_802_11_ps_mode_enh *psmode_enh =
&cmd->params.psmode_enh;
u8 *tlv;
u16 cmd_size = 0;
cmd->command = cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
if (cmd_action == DIS_AUTO_PS) {
psmode_enh->action = cpu_to_le16(DIS_AUTO_PS);
psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
cmd->size = cpu_to_le16(S_DS_GEN + sizeof(psmode_enh->action) +
sizeof(psmode_enh->params.ps_bitmap));
} else if (cmd_action == GET_PS) {
psmode_enh->action = cpu_to_le16(GET_PS);
psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
cmd->size = cpu_to_le16(S_DS_GEN + sizeof(psmode_enh->action) +
sizeof(psmode_enh->params.ps_bitmap));
} else if (cmd_action == EN_AUTO_PS) {
psmode_enh->action = cpu_to_le16(EN_AUTO_PS);
psmode_enh->params.ps_bitmap = cpu_to_le16(ps_bitmap);
cmd_size = S_DS_GEN + sizeof(psmode_enh->action) +
sizeof(psmode_enh->params.ps_bitmap);
tlv = (u8 *) cmd + cmd_size;
if (ps_bitmap & BITMAP_STA_PS) {
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie_types_ps_param *ps_tlv =
(struct mwifiex_ie_types_ps_param *) tlv;
struct mwifiex_ps_param *ps_mode = &ps_tlv->param;
ps_tlv->header.type = cpu_to_le16(TLV_TYPE_PS_PARAM);
ps_tlv->header.len = cpu_to_le16(sizeof(*ps_tlv) -
sizeof(struct mwifiex_ie_types_header));
cmd_size += sizeof(*ps_tlv);
tlv += sizeof(*ps_tlv);
dev_dbg(adapter->dev, "cmd: PS Command: Enter PS\n");
ps_mode->null_pkt_interval =
cpu_to_le16(adapter->null_pkt_interval);
ps_mode->multiple_dtims =
cpu_to_le16(adapter->multiple_dtim);
ps_mode->bcn_miss_timeout =
cpu_to_le16(adapter->bcn_miss_time_out);
ps_mode->local_listen_interval =
cpu_to_le16(adapter->local_listen_interval);
ps_mode->adhoc_wake_period =
cpu_to_le16(adapter->adhoc_awake_period);
ps_mode->delay_to_ps =
cpu_to_le16(adapter->delay_to_ps);
ps_mode->mode = cpu_to_le16(adapter->enhanced_ps_mode);
}
if (ps_bitmap & BITMAP_AUTO_DS) {
struct mwifiex_ie_types_auto_ds_param *auto_ds_tlv =
(struct mwifiex_ie_types_auto_ds_param *) tlv;
u16 idletime = 0;
auto_ds_tlv->header.type =
cpu_to_le16(TLV_TYPE_AUTO_DS_PARAM);
auto_ds_tlv->header.len =
cpu_to_le16(sizeof(*auto_ds_tlv) -
sizeof(struct mwifiex_ie_types_header));
cmd_size += sizeof(*auto_ds_tlv);
tlv += sizeof(*auto_ds_tlv);
if (auto_ds)
idletime = auto_ds->idle_time;
dev_dbg(priv->adapter->dev,
"cmd: PS Command: Enter Auto Deep Sleep\n");
auto_ds_tlv->deep_sleep_timeout = cpu_to_le16(idletime);
}
cmd->size = cpu_to_le16(cmd_size);
}
return 0;
}
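/*
 * Worked example (editor's illustration): for EN_AUTO_PS with both
 * BITMAP_STA_PS and BITMAP_AUTO_DS set, the accumulation above yields
 *
 *   cmd->size = S_DS_GEN + sizeof(psmode_enh->action)
 *             + sizeof(psmode_enh->params.ps_bitmap)
 *             + sizeof(struct mwifiex_ie_types_ps_param)
 *             + sizeof(struct mwifiex_ie_types_auto_ds_param)
 *
 * with the two TLVs laid out back to back after the bitmap.
 */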
/*
* This function handles the command response of an enhanced power mode
* command.
*
* Handling includes changing the header fields into CPU format
* and setting the current enhanced power mode in driver.
*/
int mwifiex_ret_enh_power_mode(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp,
struct mwifiex_ds_pm_cfg *pm_cfg)
{
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_ps_mode_enh *ps_mode =
&resp->params.psmode_enh;
uint16_t action = le16_to_cpu(ps_mode->action);
uint16_t ps_bitmap = le16_to_cpu(ps_mode->params.ps_bitmap);
uint16_t auto_ps_bitmap =
le16_to_cpu(ps_mode->params.ps_bitmap);
dev_dbg(adapter->dev,
"info: %s: PS_MODE cmd reply result=%#x action=%#X\n",
__func__, resp->result, action);
if (action == EN_AUTO_PS) {
if (auto_ps_bitmap & BITMAP_AUTO_DS) {
dev_dbg(adapter->dev, "cmd: Enabled auto deep sleep\n");
priv->adapter->is_deep_sleep = true;
}
if (auto_ps_bitmap & BITMAP_STA_PS) {
dev_dbg(adapter->dev, "cmd: Enabled STA power save\n");
if (adapter->sleep_period.period)
dev_dbg(adapter->dev,
"cmd: set to uapsd/pps mode\n");
}
} else if (action == DIS_AUTO_PS) {
if (ps_bitmap & BITMAP_AUTO_DS) {
priv->adapter->is_deep_sleep = false;
dev_dbg(adapter->dev, "cmd: Disabled auto deep sleep\n");
}
if (ps_bitmap & BITMAP_STA_PS) {
dev_dbg(adapter->dev, "cmd: Disabled STA power save\n");
if (adapter->sleep_period.period) {
adapter->delay_null_pkt = false;
adapter->tx_lock_flag = false;
adapter->pps_uapsd_mode = false;
}
}
} else if (action == GET_PS) {
if (ps_bitmap & BITMAP_STA_PS)
adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP;
else
adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM;
dev_dbg(adapter->dev, "cmd: ps_bitmap=%#x\n", ps_bitmap);
if (pm_cfg) {
/* This section handles a get-power-save-mode request */
if (ps_bitmap & BITMAP_STA_PS)
pm_cfg->param.ps_mode = 1;
else
pm_cfg->param.ps_mode = 0;
}
}
return 0;
}
/*
* This function prepares command to get hardware specifications.
*
* Preparation includes -
* - Setting command ID, action and proper size
* - Setting permanent address parameter
* - Ensuring correct endian-ness
*/
int mwifiex_cmd_get_hw_spec(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd)
{
struct host_cmd_ds_get_hw_spec *hw_spec = &cmd->params.hw_spec;
cmd->command = cpu_to_le16(HostCmd_CMD_GET_HW_SPEC);
cmd->size =
cpu_to_le16(sizeof(struct host_cmd_ds_get_hw_spec) + S_DS_GEN);
memcpy(hw_spec->permanent_addr, priv->curr_addr, ETH_ALEN);
return 0;
}
/*
* This function handles the command response of get hardware
* specifications.
*
* Handling includes changing the header fields into CPU format
* and saving/updating the following parameters in driver -
* - Firmware capability information
* - Firmware band settings
* - Ad-hoc start band and channel
* - Ad-hoc 11n activation status
* - Firmware release number
* - Number of antennas
* - Hardware address
* - Hardware interface version
* - Firmware version
* - Region code
* - 11n capabilities
* - MCS support fields
* - MP end port
*/
int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec;
struct mwifiex_adapter *adapter = priv->adapter;
int i;
adapter->fw_cap_info = le32_to_cpu(hw_spec->fw_cap_info);
if (IS_SUPPORT_MULTI_BANDS(adapter))
adapter->fw_bands = (u8) GET_FW_DEFAULT_BANDS(adapter);
else
adapter->fw_bands = BAND_B;
adapter->config_bands = adapter->fw_bands;
if (adapter->fw_bands & BAND_A) {
if (adapter->fw_bands & BAND_GN) {
adapter->config_bands |= BAND_AN;
adapter->fw_bands |= BAND_AN;
}
if (adapter->fw_bands & BAND_AN) {
adapter->adhoc_start_band = BAND_A | BAND_AN;
adapter->adhoc_11n_enabled = true;
} else {
adapter->adhoc_start_band = BAND_A;
}
priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL_A;
} else if (adapter->fw_bands & BAND_GN) {
adapter->adhoc_start_band = BAND_G | BAND_B | BAND_GN;
priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
adapter->adhoc_11n_enabled = true;
} else if (adapter->fw_bands & BAND_G) {
adapter->adhoc_start_band = BAND_G | BAND_B;
priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
} else if (adapter->fw_bands & BAND_B) {
adapter->adhoc_start_band = BAND_B;
priv->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
}
adapter->fw_release_number = le32_to_cpu(hw_spec->fw_release_number);
adapter->number_of_antenna = le16_to_cpu(hw_spec->number_of_antenna);
if (le32_to_cpu(hw_spec->dot_11ac_dev_cap)) {
adapter->is_hw_11ac_capable = true;
/* Copy 11AC cap */
adapter->hw_dot_11ac_dev_cap =
le32_to_cpu(hw_spec->dot_11ac_dev_cap);
adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap;
adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap;
/* Copy 11AC mcs */
adapter->hw_dot_11ac_mcs_support =
le32_to_cpu(hw_spec->dot_11ac_mcs_support);
adapter->usr_dot_11ac_mcs_support =
adapter->hw_dot_11ac_mcs_support;
} else {
adapter->is_hw_11ac_capable = false;
}
dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
adapter->fw_release_number);
dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
hw_spec->permanent_addr);
dev_dbg(adapter->dev,
"info: GET_HW_SPEC: hw_if_version=%#x version=%#x\n",
le16_to_cpu(hw_spec->hw_if_version),
le16_to_cpu(hw_spec->version));
if (priv->curr_addr[0] == 0xff)
memmove(priv->curr_addr, hw_spec->permanent_addr, ETH_ALEN);
adapter->region_code = le16_to_cpu(hw_spec->region_code);
for (i = 0; i < MWIFIEX_MAX_REGION_CODE; i++)
/* Use the region code to search for the index */
if (adapter->region_code == region_code_index[i])
break;
/* If it's an unidentified region code, use the default (USA) */
if (i >= MWIFIEX_MAX_REGION_CODE) {
adapter->region_code = 0x10;
dev_dbg(adapter->dev,
"cmd: unknown region code, use default (USA)\n");
}
adapter->hw_dot_11n_dev_cap = le32_to_cpu(hw_spec->dot_11n_dev_cap);
adapter->hw_dev_mcs_support = hw_spec->dev_mcs_support;
if (adapter->if_ops.update_mp_end_port)
adapter->if_ops.update_mp_end_port(adapter,
le16_to_cpu(hw_spec->mp_end_port));
return 0;
}
| gpl-2.0 |
devttys1/linux-fslc | arch/sh/kernel/cpu/fpu.c | 2120 | 1668 | #include <linux/sched.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/fpu.h>
#include <asm/traps.h>
int init_fpu(struct task_struct *tsk)
{
if (tsk_used_math(tsk)) {
if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
unlazy_fpu(tsk, task_pt_regs(tsk));
return 0;
}
/*
* Allocate memory for the FPU and other extended state at first use.
*/
if (!tsk->thread.xstate) {
tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
GFP_KERNEL);
if (!tsk->thread.xstate)
return -ENOMEM;
}
if (boot_cpu_data.flags & CPU_HAS_FPU) {
struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
memset(fp, 0, xstate_size);
fp->fpscr = FPSCR_INIT;
} else {
struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
memset(fp, 0, xstate_size);
fp->fpscr = FPSCR_INIT;
}
set_stopped_child_used_math(tsk);
return 0;
}
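/*
 * Editor's note: init_fpu() implements lazy allocation -- a task gets an
 * xstate buffer from task_xstate_cachep only on its first FPU use, so
 * kernel threads and integer-only tasks never pay for one.
 */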
#ifdef CONFIG_SH_FPU
void __fpu_state_restore(void)
{
struct task_struct *tsk = current;
restore_fpu(tsk);
task_thread_info(tsk)->status |= TS_USEDFPU;
tsk->thread.fpu_counter++;
}
void fpu_state_restore(struct pt_regs *regs)
{
struct task_struct *tsk = current;
if (unlikely(!user_mode(regs))) {
printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
BUG();
return;
}
if (!tsk_used_math(tsk)) {
local_irq_enable();
/*
* does a slab alloc which can sleep
*/
if (init_fpu(tsk)) {
/*
* ran out of memory!
*/
do_group_exit(SIGKILL);
return;
}
local_irq_disable();
}
grab_fpu(regs);
__fpu_state_restore();
}
BUILD_TRAP_HANDLER(fpu_state_restore)
{
TRAP_HANDLER_DECL;
fpu_state_restore(regs);
}
#endif /* CONFIG_SH_FPU */
| gpl-2.0 |
W4TCH0UT/zz_sprout | fs/jffs2/fs.c | 2376 | 20253 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include "nodelist.h"
static int jffs2_flash_setup(struct jffs2_sb_info *c);
int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
struct jffs2_full_dnode *old_metadata, *new_metadata;
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
struct jffs2_raw_inode *ri;
union jffs2_device_node dev;
unsigned char *mdata = NULL;
int mdatalen = 0;
unsigned int ivalid;
uint32_t alloclen;
int ret;
int alloc_type = ALLOC_NORMAL;
jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);
/* Special cases - we don't want more than one data node
for these types on the medium at any time. So setattr
must read the original data associated with the node
(i.e. the device numbers or the target name) and write
it out again with the appropriate data attached */
if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
/* For these, we don't actually need to read the old node */
mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
mdata = (char *)&dev;
jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
__func__, mdatalen);
} else if (S_ISLNK(inode->i_mode)) {
mutex_lock(&f->sem);
mdatalen = f->metadata->size;
mdata = kmalloc(f->metadata->size, GFP_USER);
if (!mdata) {
mutex_unlock(&f->sem);
return -ENOMEM;
}
ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
if (ret) {
mutex_unlock(&f->sem);
kfree(mdata);
return ret;
}
mutex_unlock(&f->sem);
jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
__func__, mdatalen);
}
ri = jffs2_alloc_raw_inode();
if (!ri) {
if (S_ISLNK(inode->i_mode))
kfree(mdata);
return -ENOMEM;
}
ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
if (ret) {
jffs2_free_raw_inode(ri);
if (S_ISLNK(inode->i_mode))
kfree(mdata);
return ret;
}
mutex_lock(&f->sem);
ivalid = iattr->ia_valid;
ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));
ri->ino = cpu_to_je32(inode->i_ino);
ri->version = cpu_to_je32(++f->highest_version);
ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));
if (ivalid & ATTR_MODE)
ri->mode = cpu_to_jemode(iattr->ia_mode);
else
ri->mode = cpu_to_jemode(inode->i_mode);
ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));
ri->offset = cpu_to_je32(0);
ri->csize = ri->dsize = cpu_to_je32(mdatalen);
ri->compr = JFFS2_COMPR_NONE;
if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
/* It's an extension. Make it a hole node */
ri->compr = JFFS2_COMPR_ZERO;
ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
ri->offset = cpu_to_je32(inode->i_size);
} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
/* For truncate-to-zero, treat it as deletion because
it'll always be obsoleting all previous nodes */
alloc_type = ALLOC_DELETION;
}
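/*
* Illustrative: extending a 1KiB file to 4KiB via this path writes a
* single 3KiB JFFS2_COMPR_ZERO ("hole") node at offset 1KiB instead of
* real zero-filled data, so growing a file costs little more than a
* node header on the flash.
*/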
ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
if (mdatalen)
ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
else
ri->data_crc = cpu_to_je32(0);
new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
if (S_ISLNK(inode->i_mode))
kfree(mdata);
if (IS_ERR(new_metadata)) {
jffs2_complete_reservation(c);
jffs2_free_raw_inode(ri);
mutex_unlock(&f->sem);
return PTR_ERR(new_metadata);
}
/* It worked. Update the inode */
inode->i_atime = ITIME(je32_to_cpu(ri->atime));
inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
inode->i_mode = jemode_to_cpu(ri->mode);
i_uid_write(inode, je16_to_cpu(ri->uid));
i_gid_write(inode, je16_to_cpu(ri->gid));
old_metadata = f->metadata;
if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);
if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
jffs2_add_full_dnode_to_inode(c, f, new_metadata);
inode->i_size = iattr->ia_size;
inode->i_blocks = (inode->i_size + 511) >> 9;
f->metadata = NULL;
} else {
f->metadata = new_metadata;
}
if (old_metadata) {
jffs2_mark_node_obsolete(c, old_metadata->raw);
jffs2_free_full_dnode(old_metadata);
}
jffs2_free_raw_inode(ri);
mutex_unlock(&f->sem);
jffs2_complete_reservation(c);
/* We have to do the truncate_setsize() without f->sem held, since
some pages may be locked and waiting for it in readpage().
We are protected from a simultaneous write() extending i_size
back past iattr->ia_size, because do_truncate() holds the
generic inode semaphore. */
if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
truncate_setsize(inode, iattr->ia_size);
inode->i_blocks = (inode->i_size + 511) >> 9;
}
return 0;
}
int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
int rc;
rc = inode_change_ok(dentry->d_inode, iattr);
if (rc)
return rc;
rc = jffs2_do_setattr(dentry->d_inode, iattr);
if (!rc && (iattr->ia_valid & ATTR_MODE))
rc = jffs2_acl_chmod(dentry->d_inode);
return rc;
}
int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
unsigned long avail;
buf->f_type = JFFS2_SUPER_MAGIC;
buf->f_bsize = 1 << PAGE_SHIFT;
buf->f_blocks = c->flash_size >> PAGE_SHIFT;
buf->f_files = 0;
buf->f_ffree = 0;
buf->f_namelen = JFFS2_MAX_NAME_LEN;
buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
buf->f_fsid.val[1] = c->mtd->index;
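/* Note: "available" space deliberately excludes the erase blocks
* reserved for the GC/write path below, so it can be smaller than the
* raw free + dirty total. */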
spin_lock(&c->erase_completion_lock);
avail = c->dirty_size + c->free_size;
if (avail > c->sector_size * c->resv_blocks_write)
avail -= c->sector_size * c->resv_blocks_write;
else
avail = 0;
spin_unlock(&c->erase_completion_lock);
buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;
return 0;
}
void jffs2_evict_inode (struct inode *inode)
{
/* We can forget about this inode for now - drop all
* the nodelists associated with it, etc.
*/
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
__func__, inode->i_ino, inode->i_mode);
truncate_inode_pages(&inode->i_data, 0);
clear_inode(inode);
jffs2_do_clear_inode(c, f);
}
struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
struct jffs2_inode_info *f;
struct jffs2_sb_info *c;
struct jffs2_raw_inode latest_node;
union jffs2_device_node jdev;
struct inode *inode;
dev_t rdev = 0;
int ret;
jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
f = JFFS2_INODE_INFO(inode);
c = JFFS2_SB_INFO(inode->i_sb);
jffs2_init_inode_info(f);
mutex_lock(&f->sem);
ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
if (ret) {
mutex_unlock(&f->sem);
iget_failed(inode);
return ERR_PTR(ret);
}
inode->i_mode = jemode_to_cpu(latest_node.mode);
i_uid_write(inode, je16_to_cpu(latest_node.uid));
i_gid_write(inode, je16_to_cpu(latest_node.gid));
inode->i_size = je32_to_cpu(latest_node.isize);
inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));
set_nlink(inode, f->inocache->pino_nlink);
inode->i_blocks = (inode->i_size + 511) >> 9;
switch (inode->i_mode & S_IFMT) {
case S_IFLNK:
inode->i_op = &jffs2_symlink_inode_operations;
break;
case S_IFDIR:
{
struct jffs2_full_dirent *fd;
set_nlink(inode, 2); /* parent and '.' */
for (fd=f->dents; fd; fd = fd->next) {
if (fd->type == DT_DIR && fd->ino)
inc_nlink(inode);
}
/* Root dir gets i_nlink 3 for some reason */
if (inode->i_ino == 1)
inc_nlink(inode);
inode->i_op = &jffs2_dir_inode_operations;
inode->i_fop = &jffs2_dir_operations;
break;
}
case S_IFREG:
inode->i_op = &jffs2_file_inode_operations;
inode->i_fop = &jffs2_file_operations;
inode->i_mapping->a_ops = &jffs2_file_address_operations;
inode->i_mapping->nrpages = 0;
break;
case S_IFBLK:
case S_IFCHR:
/* Read the device numbers from the media */
if (f->metadata->size != sizeof(jdev.old_id) &&
f->metadata->size != sizeof(jdev.new_id)) {
pr_notice("Device node has strange size %d\n",
f->metadata->size);
goto error_io;
}
jffs2_dbg(1, "Reading device numbers from flash\n");
ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
if (ret < 0) {
/* Eep */
pr_notice("Read device numbers for inode %lu failed\n",
(unsigned long)inode->i_ino);
goto error;
}
if (f->metadata->size == sizeof(jdev.old_id))
rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
else
rdev = new_decode_dev(je32_to_cpu(jdev.new_id));
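/* fall through: block/char devices share init_special_inode() with
* sockets and FIFOs below. old_id is the legacy 16-bit kdev_t
* encoding and new_id the 32-bit one, so both sizes are accepted. */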
case S_IFSOCK:
case S_IFIFO:
inode->i_op = &jffs2_file_inode_operations;
init_special_inode(inode, inode->i_mode, rdev);
break;
default:
pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
__func__, inode->i_mode, (unsigned long)inode->i_ino);
}
mutex_unlock(&f->sem);
jffs2_dbg(1, "jffs2_read_inode() returning\n");
unlock_new_inode(inode);
return inode;
error_io:
ret = -EIO;
error:
mutex_unlock(&f->sem);
jffs2_do_clear_inode(c, f);
iget_failed(inode);
return ERR_PTR(ret);
}
void jffs2_dirty_inode(struct inode *inode, int flags)
{
struct iattr iattr;
if (!(inode->i_state & I_DIRTY_DATASYNC)) {
jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
__func__, inode->i_ino);
return;
}
jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
__func__, inode->i_ino);
iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
iattr.ia_mode = inode->i_mode;
iattr.ia_uid = inode->i_uid;
iattr.ia_gid = inode->i_gid;
iattr.ia_atime = inode->i_atime;
iattr.ia_mtime = inode->i_mtime;
iattr.ia_ctime = inode->i_ctime;
jffs2_do_setattr(inode, &iattr);
}
int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
return -EROFS;
/* We stop if it was running, then restart if it needs to.
This also catches the case where it was stopped and this
is just a remount to restart it.
Flush the writebuffer, if necessary, else we lose it */
if (!(sb->s_flags & MS_RDONLY)) {
jffs2_stop_garbage_collect_thread(c);
mutex_lock(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
mutex_unlock(&c->alloc_sem);
}
if (!(*flags & MS_RDONLY))
jffs2_start_garbage_collect_thread(c);
*flags |= MS_NOATIME;
return 0;
}
/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
struct inode *inode;
struct super_block *sb = dir_i->i_sb;
struct jffs2_sb_info *c;
struct jffs2_inode_info *f;
int ret;
jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
__func__, dir_i->i_ino, mode);
c = JFFS2_SB_INFO(sb);
inode = new_inode(sb);
if (!inode)
return ERR_PTR(-ENOMEM);
f = JFFS2_INODE_INFO(inode);
jffs2_init_inode_info(f);
mutex_lock(&f->sem);
memset(ri, 0, sizeof(*ri));
/* Set OS-specific defaults for new inodes */
ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid()));
if (dir_i->i_mode & S_ISGID) {
ri->gid = cpu_to_je16(i_gid_read(dir_i));
if (S_ISDIR(mode))
mode |= S_ISGID;
} else {
ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid()));
}
/* POSIX ACLs have to be processed now, at least partly.
The umask is only applied if there's no default ACL */
ret = jffs2_init_acl_pre(dir_i, inode, &mode);
if (ret) {
make_bad_inode(inode);
iput(inode);
return ERR_PTR(ret);
}
ret = jffs2_do_new_inode (c, f, mode, ri);
if (ret) {
make_bad_inode(inode);
iput(inode);
return ERR_PTR(ret);
}
set_nlink(inode, 1);
inode->i_ino = je32_to_cpu(ri->ino);
inode->i_mode = jemode_to_cpu(ri->mode);
i_gid_write(inode, je16_to_cpu(ri->gid));
i_uid_write(inode, je16_to_cpu(ri->uid));
inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));
inode->i_blocks = 0;
inode->i_size = 0;
if (insert_inode_locked(inode) < 0) {
make_bad_inode(inode);
iput(inode);
return ERR_PTR(-EINVAL);
}
return inode;
}
static int calculate_inocache_hashsize(uint32_t flash_size)
{
/*
* Pick an inocache hash size based on the size of the medium.
* Count how many megabytes we're dealing with, apply a hashsize twice
* that size, but rounding down to a multiple of 64. And keep to
* sensible bounds.
*/
int size_mb = flash_size / 1024 / 1024;
int hashsize = (size_mb * 2) & ~0x3f;
if (hashsize < INOCACHE_HASHSIZE_MIN)
return INOCACHE_HASHSIZE_MIN;
if (hashsize > INOCACHE_HASHSIZE_MAX)
return INOCACHE_HASHSIZE_MAX;
return hashsize;
}
int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
{
struct jffs2_sb_info *c;
struct inode *root_i;
int ret;
size_t blocks;
c = JFFS2_SB_INFO(sb);
#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
if (c->mtd->type == MTD_NANDFLASH) {
pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
return -EINVAL;
}
if (c->mtd->type == MTD_DATAFLASH) {
pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
return -EINVAL;
}
#endif
c->flash_size = c->mtd->size;
c->sector_size = c->mtd->erasesize;
blocks = c->flash_size / c->sector_size;
/*
* Size alignment check
*/
if ((c->sector_size * blocks) != c->flash_size) {
c->flash_size = c->sector_size * blocks;
pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
c->flash_size / 1024);
}
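/* Illustrative: a 1000KiB partition with 64KiB erase blocks is
* trimmed to 15 * 64KiB = 960KiB so that no partial block is used. */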
if (c->flash_size < 5*c->sector_size) {
pr_err("Too few erase blocks (%d)\n",
c->flash_size / c->sector_size);
return -EINVAL;
}
c->cleanmarker_size = sizeof(struct jffs2_unknown_node);
/* NAND (or other bizarre) flash... do setup accordingly */
ret = jffs2_flash_setup(c);
if (ret)
return ret;
c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
if (!c->inocache_list) {
ret = -ENOMEM;
goto out_wbuf;
}
jffs2_init_xattr_subsystem(c);
if ((ret = jffs2_do_mount_fs(c)))
goto out_inohash;
jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
root_i = jffs2_iget(sb, 1);
if (IS_ERR(root_i)) {
jffs2_dbg(1, "get root inode failed\n");
ret = PTR_ERR(root_i);
goto out_root;
}
ret = -ENOMEM;
jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
sb->s_root = d_make_root(root_i);
if (!sb->s_root)
goto out_root;
sb->s_maxbytes = 0xFFFFFFFF;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = JFFS2_SUPER_MAGIC;
if (!(sb->s_flags & MS_RDONLY))
jffs2_start_garbage_collect_thread(c);
return 0;
out_root:
jffs2_free_ino_caches(c);
jffs2_free_raw_node_refs(c);
if (jffs2_blocks_use_vmalloc(c))
vfree(c->blocks);
else
kfree(c->blocks);
out_inohash:
jffs2_clear_xattr_subsystem(c);
kfree(c->inocache_list);
out_wbuf:
jffs2_flash_cleanup(c);
return ret;
}
void jffs2_gc_release_inode(struct jffs2_sb_info *c,
struct jffs2_inode_info *f)
{
iput(OFNI_EDONI_2SFFJ(f));
}
struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
int inum, int unlinked)
{
struct inode *inode;
struct jffs2_inode_cache *ic;
if (unlinked) {
/* The inode has zero nlink but its nodes weren't yet marked
obsolete. This has to be because we're still waiting for
the final (close() and) iput() to happen.
There's a possibility that the final iput() could have
happened while we were contemplating. In order to ensure
that we don't cause a new read_inode() (which would fail)
for the inode in question, we use ilookup() in this case
instead of iget().
The nlink can't _become_ zero at this point because we're
holding the alloc_sem, and jffs2_do_unlink() would also
need that while decrementing nlink on any inode.
*/
inode = ilookup(OFNI_BS_2SFFJ(c), inum);
if (!inode) {
jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
inum);
spin_lock(&c->inocache_lock);
ic = jffs2_get_ino_cache(c, inum);
if (!ic) {
jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
inum);
spin_unlock(&c->inocache_lock);
return NULL;
}
if (ic->state != INO_STATE_CHECKEDABSENT) {
/* Wait for progress. Don't just loop */
jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
ic->ino, ic->state);
sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
} else {
spin_unlock(&c->inocache_lock);
}
return NULL;
}
} else {
/* Inode has links to it still; they're not going away because
jffs2_do_unlink() would need the alloc_sem and we have it.
Just iget() it, and if read_inode() is necessary that's OK.
*/
inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
if (IS_ERR(inode))
return ERR_CAST(inode);
}
if (is_bad_inode(inode)) {
pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
inum, unlinked);
/* NB. This will happen again. We need to do something appropriate here. */
iput(inode);
return ERR_PTR(-EIO);
}
return JFFS2_INODE_INFO(inode);
}
unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
struct jffs2_inode_info *f,
unsigned long offset,
unsigned long *priv)
{
struct inode *inode = OFNI_EDONI_2SFFJ(f);
struct page *pg;
pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
(void *)jffs2_do_readpage_unlock, inode);
if (IS_ERR(pg))
return (void *)pg;
*priv = (unsigned long)pg;
return kmap(pg);
}
void jffs2_gc_release_page(struct jffs2_sb_info *c,
unsigned char *ptr,
unsigned long *priv)
{
struct page *pg = (void *)*priv;
kunmap(pg);
page_cache_release(pg);
}
static int jffs2_flash_setup(struct jffs2_sb_info *c) {
int ret = 0;
if (jffs2_cleanmarker_oob(c)) {
/* NAND flash... do setup accordingly */
ret = jffs2_nand_flash_setup(c);
if (ret)
return ret;
}
/* and Dataflash */
if (jffs2_dataflash(c)) {
ret = jffs2_dataflash_setup(c);
if (ret)
return ret;
}
/* and Intel "Sibley" flash */
if (jffs2_nor_wbuf_flash(c)) {
ret = jffs2_nor_wbuf_flash_setup(c);
if (ret)
return ret;
}
/* and a UBI volume */
if (jffs2_ubivol(c)) {
ret = jffs2_ubivol_setup(c);
if (ret)
return ret;
}
return ret;
}
void jffs2_flash_cleanup(struct jffs2_sb_info *c) {
if (jffs2_cleanmarker_oob(c)) {
jffs2_nand_flash_cleanup(c);
}
/* and DataFlash */
if (jffs2_dataflash(c)) {
jffs2_dataflash_cleanup(c);
}
/* and Intel "Sibley" flash */
if (jffs2_nor_wbuf_flash(c)) {
jffs2_nor_wbuf_flash_cleanup(c);
}
/* and a UBI volume */
if (jffs2_ubivol(c)) {
jffs2_ubivol_cleanup(c);
}
}
| gpl-2.0 |
DutchDanny/SensationXL-ICS | arch/h8300/kernel/ptrace.c | 3144 | 3802 | /*
* linux/arch/h8300/kernel/ptrace.c
*
* Yoshinori Sato <ysato@users.sourceforge.jp>
*
* Based on:
* linux/arch/m68k/kernel/ptrace.c
*
* Copyright (C) 1994 by Hamish Macdonald
* Taken from linux/kernel/ptrace.c and modified for M680x0.
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of
* this archive for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/signal.h>
/* cpu depend functions */
extern long h8300_get_reg(struct task_struct *task, int regno);
extern int h8300_put_reg(struct task_struct *task, int regno, unsigned long data);
void user_disable_single_step(struct task_struct *child)
{
}
/*
* This does not yet catch signals sent when the child dies;
* that is handled in exit.c and in signal.c.
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
int regno = addr >> 2;
unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp = 0;
if ((addr & 3) || addr >= sizeof(struct user)) {
ret = -EIO;
break ;
}
ret = 0; /* Default return condition */
if (regno < H8300_REGS_NO)
tmp = h8300_get_reg(child, regno);
else {
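/* pseudo-registers 49..52 expose the text/data segment bounds */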
switch (regno) {
case 49:
tmp = child->mm->start_code;
break ;
case 50:
tmp = child->mm->start_data;
break ;
case 51:
tmp = child->mm->end_code;
break ;
case 52:
tmp = child->mm->end_data;
break ;
default:
ret = -EIO;
}
}
if (!ret)
ret = put_user(tmp, datap);
break ;
}
/* when I and D space are separate, this will have to be fixed. */
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
if ((addr & 3) || addr >= sizeof(struct user)) {
ret = -EIO;
break ;
}
if (regno == PT_ORIG_ER0) {
ret = -EIO;
break ;
}
if (regno < H8300_REGS_NO) {
ret = h8300_put_reg(child, regno, data);
break ;
}
ret = -EIO;
break ;
case PTRACE_GETREGS: { /* Get all gp regs from the child. */
int i;
unsigned long tmp;
for (i = 0; i < H8300_REGS_NO; i++) {
tmp = h8300_get_reg(child, i);
if (put_user(tmp, datap)) {
ret = -EFAULT;
break;
}
datap++;
}
ret = 0;
break;
}
case PTRACE_SETREGS: { /* Set all gp regs in the child. */
int i;
unsigned long tmp;
for (i = 0; i < H8300_REGS_NO; i++) {
if (get_user(tmp, datap)) {
ret = -EFAULT;
break;
}
h8300_put_reg(child, i, tmp);
datap++;
}
ret = 0;
break;
}
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
asmlinkage void do_syscall_trace(void)
{
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return;
if (!(current->ptrace & PT_PTRACED))
return;
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
}
| gpl-2.0 |
RenderBroken/render_kernel_motorola_msm8226 | arch/arm/mach-msm/keypad-surf-ffa.c | 3400 | 8499 | /*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2008-2009, The Linux Foundation. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/platform_device.h>
#include <linux/gpio_event.h>
#include <asm/mach-types.h>
/* don't turn this on without updating the ffa support */
#define SCAN_FUNCTION_KEYS 0
/* FFA:
36: KEYSENSE_N(0)
37: KEYSENSE_N(1)
38: KEYSENSE_N(2)
39: KEYSENSE_N(3)
40: KEYSENSE_N(4)
31: KYPD_17
32: KYPD_15
33: KYPD_13
34: KYPD_11
35: KYPD_9
41: KYPD_MEMO
*/
static unsigned int keypad_row_gpios[] = {
31, 32, 33, 34, 35, 41
#if SCAN_FUNCTION_KEYS
, 42
#endif
};
static unsigned int keypad_col_gpios[] = { 36, 37, 38, 39, 40 };
static unsigned int keypad_row_gpios_8k_ffa[] = {31, 32, 33, 34, 35, 36};
static unsigned int keypad_col_gpios_8k_ffa[] = {38, 39, 40, 41, 42};
#define KEYMAP_INDEX(row, col) ((row)*ARRAY_SIZE(keypad_col_gpios) + (col))
#define FFA_8K_KEYMAP_INDEX(row, col) ((row)* \
ARRAY_SIZE(keypad_col_gpios_8k_ffa) + (col))
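/* e.g. KEYMAP_INDEX(2, 3) == 2 * 5 + 3 == 13, since both matrices
* here have five column GPIOs */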
static const unsigned short keypad_keymap_surf[ARRAY_SIZE(keypad_col_gpios) *
ARRAY_SIZE(keypad_row_gpios)] = {
[KEYMAP_INDEX(0, 0)] = KEY_5,
[KEYMAP_INDEX(0, 1)] = KEY_9,
[KEYMAP_INDEX(0, 2)] = 229, /* SOFT1 */
[KEYMAP_INDEX(0, 3)] = KEY_6,
[KEYMAP_INDEX(0, 4)] = KEY_LEFT,
[KEYMAP_INDEX(1, 0)] = KEY_0,
[KEYMAP_INDEX(1, 1)] = KEY_RIGHT,
[KEYMAP_INDEX(1, 2)] = KEY_1,
[KEYMAP_INDEX(1, 3)] = 228, /* KEY_SHARP */
[KEYMAP_INDEX(1, 4)] = KEY_SEND,
[KEYMAP_INDEX(2, 0)] = KEY_VOLUMEUP,
[KEYMAP_INDEX(2, 1)] = KEY_HOME, /* FA */
[KEYMAP_INDEX(2, 2)] = KEY_F8, /* QCHT */
[KEYMAP_INDEX(2, 3)] = KEY_F6, /* R+ */
[KEYMAP_INDEX(2, 4)] = KEY_F7, /* R- */
[KEYMAP_INDEX(3, 0)] = KEY_UP,
[KEYMAP_INDEX(3, 1)] = KEY_CLEAR,
[KEYMAP_INDEX(3, 2)] = KEY_4,
[KEYMAP_INDEX(3, 3)] = KEY_MUTE, /* SPKR */
[KEYMAP_INDEX(3, 4)] = KEY_2,
[KEYMAP_INDEX(4, 0)] = 230, /* SOFT2 */
[KEYMAP_INDEX(4, 1)] = 232, /* KEY_CENTER */
[KEYMAP_INDEX(4, 2)] = KEY_DOWN,
[KEYMAP_INDEX(4, 3)] = KEY_BACK, /* FB */
[KEYMAP_INDEX(4, 4)] = KEY_8,
[KEYMAP_INDEX(5, 0)] = KEY_VOLUMEDOWN,
[KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */
[KEYMAP_INDEX(5, 2)] = KEY_MAIL, /* MESG */
[KEYMAP_INDEX(5, 3)] = KEY_3,
[KEYMAP_INDEX(5, 4)] = KEY_7,
#if SCAN_FUNCTION_KEYS
[KEYMAP_INDEX(6, 0)] = KEY_F5,
[KEYMAP_INDEX(6, 1)] = KEY_F4,
[KEYMAP_INDEX(6, 2)] = KEY_F3,
[KEYMAP_INDEX(6, 3)] = KEY_F2,
[KEYMAP_INDEX(6, 4)] = KEY_F1
#endif
};
static const unsigned short keypad_keymap_ffa[ARRAY_SIZE(keypad_col_gpios) *
ARRAY_SIZE(keypad_row_gpios)] = {
/*[KEYMAP_INDEX(0, 0)] = ,*/
/*[KEYMAP_INDEX(0, 1)] = ,*/
[KEYMAP_INDEX(0, 2)] = KEY_1,
[KEYMAP_INDEX(0, 3)] = KEY_SEND,
[KEYMAP_INDEX(0, 4)] = KEY_LEFT,
[KEYMAP_INDEX(1, 0)] = KEY_3,
[KEYMAP_INDEX(1, 1)] = KEY_RIGHT,
[KEYMAP_INDEX(1, 2)] = KEY_VOLUMEUP,
/*[KEYMAP_INDEX(1, 3)] = ,*/
[KEYMAP_INDEX(1, 4)] = KEY_6,
[KEYMAP_INDEX(2, 0)] = KEY_HOME, /* A */
[KEYMAP_INDEX(2, 1)] = KEY_BACK, /* B */
[KEYMAP_INDEX(2, 2)] = KEY_0,
[KEYMAP_INDEX(2, 3)] = 228, /* KEY_SHARP */
[KEYMAP_INDEX(2, 4)] = KEY_9,
[KEYMAP_INDEX(3, 0)] = KEY_UP,
[KEYMAP_INDEX(3, 1)] = 232, /* KEY_CENTER */ /* i */
[KEYMAP_INDEX(3, 2)] = KEY_4,
/*[KEYMAP_INDEX(3, 3)] = ,*/
[KEYMAP_INDEX(3, 4)] = KEY_2,
[KEYMAP_INDEX(4, 0)] = KEY_VOLUMEDOWN,
[KEYMAP_INDEX(4, 1)] = KEY_SOUND,
[KEYMAP_INDEX(4, 2)] = KEY_DOWN,
[KEYMAP_INDEX(4, 3)] = KEY_8,
[KEYMAP_INDEX(4, 4)] = KEY_5,
/*[KEYMAP_INDEX(5, 0)] = ,*/
[KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */
[KEYMAP_INDEX(5, 2)] = 230, /*SOFT2*/ /* 2 */
[KEYMAP_INDEX(5, 3)] = KEY_MENU, /* 1 */
[KEYMAP_INDEX(5, 4)] = KEY_7,
};
#define QSD8x50_FFA_KEYMAP_SIZE (ARRAY_SIZE(keypad_col_gpios_8k_ffa) * \
ARRAY_SIZE(keypad_row_gpios_8k_ffa))
static const unsigned short keypad_keymap_8k_ffa[QSD8x50_FFA_KEYMAP_SIZE] = {
[FFA_8K_KEYMAP_INDEX(0, 0)] = KEY_VOLUMEDOWN,
/*[KEYMAP_INDEX(0, 1)] = ,*/
[FFA_8K_KEYMAP_INDEX(0, 2)] = KEY_DOWN,
[FFA_8K_KEYMAP_INDEX(0, 3)] = KEY_8,
[FFA_8K_KEYMAP_INDEX(0, 4)] = KEY_5,
[FFA_8K_KEYMAP_INDEX(1, 0)] = KEY_UP,
[FFA_8K_KEYMAP_INDEX(1, 1)] = KEY_CLEAR,
[FFA_8K_KEYMAP_INDEX(1, 2)] = KEY_4,
/*[KEYMAP_INDEX(1, 3)] = ,*/
[FFA_8K_KEYMAP_INDEX(1, 4)] = KEY_2,
[FFA_8K_KEYMAP_INDEX(2, 0)] = KEY_HOME, /* A */
[FFA_8K_KEYMAP_INDEX(2, 1)] = KEY_BACK, /* B */
[FFA_8K_KEYMAP_INDEX(2, 2)] = KEY_0,
[FFA_8K_KEYMAP_INDEX(2, 3)] = 228, /* KEY_SHARP */
[FFA_8K_KEYMAP_INDEX(2, 4)] = KEY_9,
[FFA_8K_KEYMAP_INDEX(3, 0)] = KEY_3,
[FFA_8K_KEYMAP_INDEX(3, 1)] = KEY_RIGHT,
[FFA_8K_KEYMAP_INDEX(3, 2)] = KEY_VOLUMEUP,
/*[KEYMAP_INDEX(3, 3)] = ,*/
[FFA_8K_KEYMAP_INDEX(3, 4)] = KEY_6,
[FFA_8K_KEYMAP_INDEX(4, 0)] = 232, /* OK */
[FFA_8K_KEYMAP_INDEX(4, 1)] = KEY_SOUND,
[FFA_8K_KEYMAP_INDEX(4, 2)] = KEY_1,
[FFA_8K_KEYMAP_INDEX(4, 3)] = KEY_SEND,
[FFA_8K_KEYMAP_INDEX(4, 4)] = KEY_LEFT,
/*[KEYMAP_INDEX(5, 0)] = ,*/
[FFA_8K_KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */
[FFA_8K_KEYMAP_INDEX(5, 2)] = 230, /*SOFT2*/ /* 2 */
[FFA_8K_KEYMAP_INDEX(5, 3)] = 229, /* 1 */
[FFA_8K_KEYMAP_INDEX(5, 4)] = KEY_7,
};
/* SURF keypad platform device information */
static struct gpio_event_matrix_info surf_keypad_matrix_info = {
.info.func = gpio_event_matrix_func,
.keymap = keypad_keymap_surf,
.output_gpios = keypad_row_gpios,
.input_gpios = keypad_col_gpios,
.noutputs = ARRAY_SIZE(keypad_row_gpios),
.ninputs = ARRAY_SIZE(keypad_col_gpios),
.settle_time.tv64 = 40 * NSEC_PER_USEC,
.poll_time.tv64 = 20 * NSEC_PER_MSEC,
.flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_DRIVE_INACTIVE |
GPIOKPF_PRINT_UNMAPPED_KEYS
};
static struct gpio_event_info *surf_keypad_info[] = {
&surf_keypad_matrix_info.info
};
static struct gpio_event_platform_data surf_keypad_data = {
.name = "surf_keypad",
.info = surf_keypad_info,
.info_count = ARRAY_SIZE(surf_keypad_info)
};
struct platform_device keypad_device_surf = {
.name = GPIO_EVENT_DEV_NAME,
.id = -1,
.dev = {
.platform_data = &surf_keypad_data,
},
};
/* 8k FFA keypad platform device information */
static struct gpio_event_matrix_info keypad_matrix_info_8k_ffa = {
.info.func = gpio_event_matrix_func,
.keymap = keypad_keymap_8k_ffa,
.output_gpios = keypad_row_gpios_8k_ffa,
.input_gpios = keypad_col_gpios_8k_ffa,
.noutputs = ARRAY_SIZE(keypad_row_gpios_8k_ffa),
.ninputs = ARRAY_SIZE(keypad_col_gpios_8k_ffa),
.settle_time.tv64 = 40 * NSEC_PER_USEC,
.poll_time.tv64 = 20 * NSEC_PER_MSEC,
.flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_DRIVE_INACTIVE |
GPIOKPF_PRINT_UNMAPPED_KEYS
};
static struct gpio_event_info *keypad_info_8k_ffa[] = {
&keypad_matrix_info_8k_ffa.info
};
static struct gpio_event_platform_data keypad_data_8k_ffa = {
.name = "8k_ffa_keypad",
.info = keypad_info_8k_ffa,
.info_count = ARRAY_SIZE(keypad_info_8k_ffa)
};
struct platform_device keypad_device_8k_ffa = {
.name = GPIO_EVENT_DEV_NAME,
.id = -1,
.dev = {
.platform_data = &keypad_data_8k_ffa,
},
};
/* 7k FFA keypad platform device information */
static struct gpio_event_matrix_info keypad_matrix_info_7k_ffa = {
.info.func = gpio_event_matrix_func,
.keymap = keypad_keymap_ffa,
.output_gpios = keypad_row_gpios,
.input_gpios = keypad_col_gpios,
.noutputs = ARRAY_SIZE(keypad_row_gpios),
.ninputs = ARRAY_SIZE(keypad_col_gpios),
.settle_time.tv64 = 40 * NSEC_PER_USEC,
.poll_time.tv64 = 20 * NSEC_PER_MSEC,
.flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_DRIVE_INACTIVE |
GPIOKPF_PRINT_UNMAPPED_KEYS
};
static struct gpio_event_info *keypad_info_7k_ffa[] = {
&keypad_matrix_info_7k_ffa.info
};
static struct gpio_event_platform_data keypad_data_7k_ffa = {
.name = "7k_ffa_keypad",
.info = keypad_info_7k_ffa,
.info_count = ARRAY_SIZE(keypad_info_7k_ffa)
};
struct platform_device keypad_device_7k_ffa = {
.name = GPIO_EVENT_DEV_NAME,
.id = -1,
.dev = {
.platform_data = &keypad_data_7k_ffa,
},
};
| gpl-2.0 |
Algesat/BladeApex2 | arch/m68k/platform/coldfire/intc.c | 5192 | 3669 | /*
* intc.c -- support for the old ColdFire interrupt controller
*
* (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/traps.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
/*
* The mapping of irq number to a mask register bit is not one-to-one.
* The irq numbers are either based on "level" of interrupt or fixed
* for an autovector-able interrupt. So we keep a local data structure
* that maps from irq to mask register. Not all interrupts will have
* an IMR bit.
*/
unsigned char mcf_irq2imr[NR_IRQS];
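/*
* Illustrative only: platform setup code is expected to populate this
* table with one IMR bit index per wired irq; entries left at zero
* mean "no IMR bit", which intc_irq_mask()/intc_irq_unmask() below
* treat as a no-op.
*/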
/*
* Define the minimum and maximum external interrupt numbers.
* This is also used as the "level" interrupt numbers.
*/
#define EIRQ1 25
#define EIRQ7 31
/*
* In the early version 2 core ColdFire parts the IMR register was 16 bits
* in size. Version 3 (and later version 2) core parts have a 32 bit
* sized IMR register. Provide some size independent methods to access the
* IMR register.
*/
#ifdef MCFSIM_IMR_IS_16BITS
void mcf_setimr(int index)
{
u16 imr;
imr = __raw_readw(MCF_MBAR + MCFSIM_IMR);
__raw_writew(imr | (0x1 << index), MCF_MBAR + MCFSIM_IMR);
}
void mcf_clrimr(int index)
{
u16 imr;
imr = __raw_readw(MCF_MBAR + MCFSIM_IMR);
__raw_writew(imr & ~(0x1 << index), MCF_MBAR + MCFSIM_IMR);
}
void mcf_maskimr(unsigned int mask)
{
u16 imr;
imr = __raw_readw(MCF_MBAR + MCFSIM_IMR);
imr |= mask;
__raw_writew(imr, MCF_MBAR + MCFSIM_IMR);
}
#else
void mcf_setimr(int index)
{
u32 imr;
imr = __raw_readl(MCF_MBAR + MCFSIM_IMR);
__raw_writel(imr | (0x1 << index), MCF_MBAR + MCFSIM_IMR);
}
void mcf_clrimr(int index)
{
u32 imr;
imr = __raw_readl(MCF_MBAR + MCFSIM_IMR);
__raw_writel(imr & ~(0x1 << index), MCF_MBAR + MCFSIM_IMR);
}
void mcf_maskimr(unsigned int mask)
{
u32 imr;
imr = __raw_readl(MCF_MBAR + MCFSIM_IMR);
imr |= mask;
__raw_writel(imr, MCF_MBAR + MCFSIM_IMR);
}
#endif
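/*
* Usage sketch (illustrative): the accessors above hide the IMR width,
* so a caller can simply do
*
*	mcf_setimr(mcf_irq2imr[irq]);	set the bit - source masked
*	mcf_clrimr(mcf_irq2imr[irq]);	clear the bit - source enabled
*
* regardless of whether the part has a 16-bit or a 32-bit IMR, exactly
* as intc_irq_mask()/intc_irq_unmask() below do.
*/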
/*
* Interrupts can be "vectored" on the ColdFire cores that support this old
* interrupt controller. That is, the device raising the interrupt can also
* supply the vector number to interrupt through. The AVR register of the
* interrupt controller enables or disables this for each external interrupt,
* so provide generic support for this. Setting this up is out-of-band for
* the interrupt system API's, and needs to be done by the driver that
* supports this device. Very few devices actually use this.
*/
void mcf_autovector(int irq)
{
#ifdef MCFSIM_AVR
if ((irq >= EIRQ1) && (irq <= EIRQ7)) {
u8 avec;
avec = __raw_readb(MCF_MBAR + MCFSIM_AVR);
avec |= (0x1 << (irq - EIRQ1 + 1));
__raw_writeb(avec, MCF_MBAR + MCFSIM_AVR);
}
#endif
}
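/*
* Illustrative: a board driver routing external interrupt 1 through
* the autovector would call mcf_autovector(EIRQ1) once at probe time;
* as the comment above notes, very few devices actually need this.
*/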
static void intc_irq_mask(struct irq_data *d)
{
if (mcf_irq2imr[d->irq])
mcf_setimr(mcf_irq2imr[d->irq]);
}
static void intc_irq_unmask(struct irq_data *d)
{
if (mcf_irq2imr[d->irq])
mcf_clrimr(mcf_irq2imr[d->irq]);
}
static int intc_irq_set_type(struct irq_data *d, unsigned int type)
{
return 0;
}
static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
.irq_mask = intc_irq_mask,
.irq_unmask = intc_irq_unmask,
.irq_set_type = intc_irq_set_type,
};
void __init init_IRQ(void)
{
int irq;
mcf_maskimr(0xffffffff);
for (irq = 0; (irq < NR_IRQS); irq++) {
irq_set_chip(irq, &intc_irq_chip);
irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
irq_set_handler(irq, handle_level_irq);
}
}
| gpl-2.0 |
Red--Code/android_kernel_sony_msm8974ac | arch/m68k/platform/5272/intc.c | 5192 | 6116 | /*
* intc.c -- interrupt controller for the ColdFire 5272 SoC
*
* (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/traps.h>
/*
* The 5272 ColdFire interrupt controller is nothing like any other
* ColdFire interrupt controller - it truly is completely different.
* Given its age it is unlikely to be used on any other ColdFire CPU.
*/
/*
* The masking and priority setting of interrupts on the 5272 is done
* via a set of 4 "Interrupt Controller Registers" (ICR). There is a
* loose mapping of vector number to register and internal bits, but
* a table is the easiest and quickest way to map them.
*
* Note that the external interrupts are edge triggered (unlike the
* internal interrupt sources which are level triggered). Which means
* they also need acknowledging via acknowledge bits.
*/
struct irqmap {
unsigned char icr;
unsigned char index;
unsigned char ack;
};
static struct irqmap intc_irqmap[MCFINT_VECMAX - MCFINT_VECBASE] = {
/*MCF_IRQ_SPURIOUS*/ { .icr = 0, .index = 0, .ack = 0, },
/*MCF_IRQ_EINT1*/ { .icr = MCFSIM_ICR1, .index = 28, .ack = 1, },
/*MCF_IRQ_EINT2*/ { .icr = MCFSIM_ICR1, .index = 24, .ack = 1, },
/*MCF_IRQ_EINT3*/ { .icr = MCFSIM_ICR1, .index = 20, .ack = 1, },
/*MCF_IRQ_EINT4*/ { .icr = MCFSIM_ICR1, .index = 16, .ack = 1, },
/*MCF_IRQ_TIMER1*/ { .icr = MCFSIM_ICR1, .index = 12, .ack = 0, },
/*MCF_IRQ_TIMER2*/ { .icr = MCFSIM_ICR1, .index = 8, .ack = 0, },
/*MCF_IRQ_TIMER3*/ { .icr = MCFSIM_ICR1, .index = 4, .ack = 0, },
/*MCF_IRQ_TIMER4*/ { .icr = MCFSIM_ICR1, .index = 0, .ack = 0, },
/*MCF_IRQ_UART1*/ { .icr = MCFSIM_ICR2, .index = 28, .ack = 0, },
/*MCF_IRQ_UART2*/ { .icr = MCFSIM_ICR2, .index = 24, .ack = 0, },
/*MCF_IRQ_PLIP*/ { .icr = MCFSIM_ICR2, .index = 20, .ack = 0, },
/*MCF_IRQ_PLIA*/ { .icr = MCFSIM_ICR2, .index = 16, .ack = 0, },
/*MCF_IRQ_USB0*/ { .icr = MCFSIM_ICR2, .index = 12, .ack = 0, },
/*MCF_IRQ_USB1*/ { .icr = MCFSIM_ICR2, .index = 8, .ack = 0, },
/*MCF_IRQ_USB2*/ { .icr = MCFSIM_ICR2, .index = 4, .ack = 0, },
/*MCF_IRQ_USB3*/ { .icr = MCFSIM_ICR2, .index = 0, .ack = 0, },
/*MCF_IRQ_USB4*/ { .icr = MCFSIM_ICR3, .index = 28, .ack = 0, },
/*MCF_IRQ_USB5*/ { .icr = MCFSIM_ICR3, .index = 24, .ack = 0, },
/*MCF_IRQ_USB6*/ { .icr = MCFSIM_ICR3, .index = 20, .ack = 0, },
/*MCF_IRQ_USB7*/ { .icr = MCFSIM_ICR3, .index = 16, .ack = 0, },
/*MCF_IRQ_DMA*/ { .icr = MCFSIM_ICR3, .index = 12, .ack = 0, },
/*MCF_IRQ_ERX*/ { .icr = MCFSIM_ICR3, .index = 8, .ack = 0, },
/*MCF_IRQ_ETX*/ { .icr = MCFSIM_ICR3, .index = 4, .ack = 0, },
/*MCF_IRQ_ENTC*/ { .icr = MCFSIM_ICR3, .index = 0, .ack = 0, },
/*MCF_IRQ_QSPI*/ { .icr = MCFSIM_ICR4, .index = 28, .ack = 0, },
/*MCF_IRQ_EINT5*/ { .icr = MCFSIM_ICR4, .index = 24, .ack = 1, },
/*MCF_IRQ_EINT6*/ { .icr = MCFSIM_ICR4, .index = 20, .ack = 1, },
/*MCF_IRQ_SWTO*/ { .icr = MCFSIM_ICR4, .index = 16, .ack = 0, },
};
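/*
* Register protocol the functions below rely on (an inferred sketch):
* each source owns a 4-bit field at 'index' within its ICR; a write
* only latches into a field whose written nibble has bit 3 set, and
* the low 3 bits give the priority level, 0 meaning masked. Hence
* 0x8 << index masks a source, 0xd << index enables it at level 5,
* and fields written as 0x0 are left untouched by the 32-bit writel().
*/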
/*
* The act of masking the interrupt also has a side effect of 'ack'ing
* an interrupt on this irq (for the external irqs). So this mask function
* is also an ack_mask function.
*/
static void intc_irq_mask(struct irq_data *d)
{
unsigned int irq = d->irq;
if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) {
u32 v;
irq -= MCFINT_VECBASE;
v = 0x8 << intc_irqmap[irq].index;
writel(v, MCF_MBAR + intc_irqmap[irq].icr);
}
}
static void intc_irq_unmask(struct irq_data *d)
{
unsigned int irq = d->irq;
if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) {
u32 v;
irq -= MCFINT_VECBASE;
v = 0xd << intc_irqmap[irq].index;
writel(v, MCF_MBAR + intc_irqmap[irq].icr);
}
}
static void intc_irq_ack(struct irq_data *d)
{
unsigned int irq = d->irq;
/* Only external interrupts are acked */
if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) {
irq -= MCFINT_VECBASE;
if (intc_irqmap[irq].ack) {
u32 v;
v = readl(MCF_MBAR + intc_irqmap[irq].icr);
v &= (0x7 << intc_irqmap[irq].index);
v |= (0x8 << intc_irqmap[irq].index);
writel(v, MCF_MBAR + intc_irqmap[irq].icr);
}
}
}
static int intc_irq_set_type(struct irq_data *d, unsigned int type)
{
unsigned int irq = d->irq;
if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX)) {
irq -= MCFINT_VECBASE;
if (intc_irqmap[irq].ack) {
u32 v;
v = readl(MCF_MBAR + MCFSIM_PITR);
if (type == IRQ_TYPE_EDGE_FALLING)
v &= ~(0x1 << (32 - irq));
else
v |= (0x1 << (32 - irq));
writel(v, MCF_MBAR + MCFSIM_PITR);
}
}
return 0;
}
/*
* Simple flow handler to deal with the external edge triggered interrupts.
* We need to be careful with the masking/acking due to the side effects
* of masking an interrupt.
*/
static void intc_external_irq(unsigned int irq, struct irq_desc *desc)
{
irq_desc_get_chip(desc)->irq_ack(&desc->irq_data);
handle_simple_irq(irq, desc);
}
static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
.irq_mask = intc_irq_mask,
.irq_unmask = intc_irq_unmask,
.irq_mask_ack = intc_irq_mask,
.irq_ack = intc_irq_ack,
.irq_set_type = intc_irq_set_type,
};
void __init init_IRQ(void)
{
int irq, edge;
/* Mask all interrupt sources */
writel(0x88888888, MCF_MBAR + MCFSIM_ICR1);
writel(0x88888888, MCF_MBAR + MCFSIM_ICR2);
writel(0x88888888, MCF_MBAR + MCFSIM_ICR3);
writel(0x88888888, MCF_MBAR + MCFSIM_ICR4);
for (irq = 0; (irq < NR_IRQS); irq++) {
irq_set_chip(irq, &intc_irq_chip);
edge = 0;
if ((irq >= MCFINT_VECBASE) && (irq <= MCFINT_VECMAX))
edge = intc_irqmap[irq - MCFINT_VECBASE].ack;
if (edge) {
irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
irq_set_handler(irq, intc_external_irq);
} else {
irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
irq_set_handler(irq, handle_level_irq);
}
}
}
| gpl-2.0 |
tobw/linux-curie | crypto/authencesn.c | 8264 | 23982 | /*
* authencesn.c - AEAD wrapper for IPsec with extended sequence numbers,
* derived from authenc.c
*
* Copyright (C) 2010 secunet Security Networks AG
* Copyright (C) 2010 Steffen Klassert <steffen.klassert@secunet.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
struct authenc_esn_instance_ctx {
struct crypto_ahash_spawn auth;
struct crypto_skcipher_spawn enc;
};
struct crypto_authenc_esn_ctx {
unsigned int reqoff;
struct crypto_ahash *auth;
struct crypto_ablkcipher *enc;
};
struct authenc_esn_request_ctx {
unsigned int cryptlen;
unsigned int headlen;
unsigned int trailen;
struct scatterlist *sg;
struct scatterlist hsg[2];
struct scatterlist tsg[1];
struct scatterlist cipher[2];
crypto_completion_t complete;
crypto_completion_t update_complete;
crypto_completion_t update_complete2;
char tail[];
};
static void authenc_esn_request_complete(struct aead_request *req, int err)
{
if (err != -EINPROGRESS)
aead_request_complete(req, err);
}
static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
unsigned int keylen)
{
unsigned int authkeylen;
unsigned int enckeylen;
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
struct crypto_ablkcipher *enc = ctx->enc;
struct rtattr *rta = (void *)key;
struct crypto_authenc_key_param *param;
int err = -EINVAL;
if (!RTA_OK(rta, keylen))
goto badkey;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
goto badkey;
if (RTA_PAYLOAD(rta) < sizeof(*param))
goto badkey;
param = RTA_DATA(rta);
enckeylen = be32_to_cpu(param->enckeylen);
key += RTA_ALIGN(rta->rta_len);
keylen -= RTA_ALIGN(rta->rta_len);
if (keylen < enckeylen)
goto badkey;
authkeylen = keylen - enckeylen;
crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ahash_setkey(auth, key, authkeylen);
crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &
CRYPTO_TFM_RES_MASK);
if (err)
goto out;
crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
CRYPTO_TFM_RES_MASK);
out:
return err;
badkey:
crypto_aead_set_flags(authenc_esn, CRYPTO_TFM_RES_BAD_KEY_LEN);
goto out;
}
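/*
* Illustrative layout of the key blob parsed above:
*
*	+------------------------+---------------------+-------------------+
*	| rtattr + key_param     | authentication key  | encryption key    |
*	| (BE enckeylen), aligned| (authkeylen bytes)  | (enckeylen bytes) |
*	+------------------------+---------------------+-------------------+
*
* rta_type must be CRYPTO_AUTHENC_KEYA_PARAM, and authkeylen is simply
* whatever remains once enckeylen is subtracted.
*/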
static void authenc_esn_geniv_ahash_update_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
areq_ctx->cryptlen);
ahash_request_set_callback(ahreq, aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->update_complete2, req);
err = crypto_ahash_update(ahreq);
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
areq_ctx->trailen);
ahash_request_set_callback(ahreq, aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->complete, req);
err = crypto_ahash_finup(ahreq);
if (err)
goto out;
scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
areq_ctx->cryptlen,
crypto_aead_authsize(authenc_esn), 1);
out:
authenc_esn_request_complete(req, err);
}
static void authenc_esn_geniv_ahash_update_done2(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
areq_ctx->trailen);
ahash_request_set_callback(ahreq, aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->complete, req);
err = crypto_ahash_finup(ahreq);
if (err)
goto out;
scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
areq_ctx->cryptlen,
crypto_aead_authsize(authenc_esn), 1);
out:
authenc_esn_request_complete(req, err);
}
static void authenc_esn_geniv_ahash_done(struct crypto_async_request *areq,
int err)
{
struct aead_request *req = areq->data;
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
if (err)
goto out;
scatterwalk_map_and_copy(ahreq->result, areq_ctx->sg,
areq_ctx->cryptlen,
crypto_aead_authsize(authenc_esn), 1);
out:
aead_request_complete(req, err);
}
static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *areq,
int err)
{
u8 *ihash;
unsigned int authsize;
struct ablkcipher_request *abreq;
struct aead_request *req = areq->data;
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int cryptlen = req->cryptlen;
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->sg, ahreq->result,
areq_ctx->cryptlen);
ahash_request_set_callback(ahreq,
aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->update_complete2, req);
err = crypto_ahash_update(ahreq);
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
areq_ctx->trailen);
ahash_request_set_callback(ahreq, aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->complete, req);
err = crypto_ahash_finup(ahreq);
if (err)
goto out;
authsize = crypto_aead_authsize(authenc_esn);
cryptlen -= authsize;
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
abreq = aead_request_ctx(req);
ablkcipher_request_set_tfm(abreq, ctx->enc);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
out:
authenc_esn_request_complete(req, err);
}
static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *areq,
int err)
{
u8 *ihash;
unsigned int authsize;
struct ablkcipher_request *abreq;
struct aead_request *req = areq->data;
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int cryptlen = req->cryptlen;
if (err)
goto out;
ahash_request_set_crypt(ahreq, areq_ctx->tsg, ahreq->result,
areq_ctx->trailen);
ahash_request_set_callback(ahreq, aead_request_flags(req) &
CRYPTO_TFM_REQ_MAY_SLEEP,
areq_ctx->complete, req);
err = crypto_ahash_finup(ahreq);
if (err)
goto out;
authsize = crypto_aead_authsize(authenc_esn);
cryptlen -= authsize;
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
abreq = aead_request_ctx(req);
ablkcipher_request_set_tfm(abreq, ctx->enc);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
out:
authenc_esn_request_complete(req, err);
}
static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
int err)
{
u8 *ihash;
unsigned int authsize;
struct ablkcipher_request *abreq;
struct aead_request *req = areq->data;
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
unsigned int cryptlen = req->cryptlen;
if (err)
goto out;
authsize = crypto_aead_authsize(authenc_esn);
cryptlen -= authsize;
ihash = ahreq->result + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
if (err)
goto out;
abreq = aead_request_ctx(req);
ablkcipher_request_set_tfm(abreq, ctx->enc);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst,
cryptlen, req->iv);
err = crypto_ablkcipher_decrypt(abreq);
out:
authenc_esn_request_complete(req, err);
}
static u8 *crypto_authenc_esn_ahash(struct aead_request *req,
unsigned int flags)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct crypto_ahash *auth = ctx->auth;
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff);
u8 *hash = areq_ctx->tail;
int err;
hash = (u8 *)ALIGN((unsigned long)hash + crypto_ahash_alignmask(auth),
crypto_ahash_alignmask(auth) + 1);
ahash_request_set_tfm(ahreq, auth);
err = crypto_ahash_init(ahreq);
if (err)
return ERR_PTR(err);
ahash_request_set_crypt(ahreq, areq_ctx->hsg, hash, areq_ctx->headlen);
ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
areq_ctx->update_complete, req);
err = crypto_ahash_update(ahreq);
if (err)
return ERR_PTR(err);
ahash_request_set_crypt(ahreq, areq_ctx->sg, hash, areq_ctx->cryptlen);
ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
areq_ctx->update_complete2, req);
err = crypto_ahash_update(ahreq);
if (err)
return ERR_PTR(err);
ahash_request_set_crypt(ahreq, areq_ctx->tsg, hash,
areq_ctx->trailen);
ahash_request_set_callback(ahreq, aead_request_flags(req) & flags,
areq_ctx->complete, req);
err = crypto_ahash_finup(ahreq);
if (err)
return ERR_PTR(err);
return hash;
}
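/*
* Hashing order used above (illustrative): the ICV is computed over
* head || payload || trail, the caller having split the associated
* data into three sg entries so that
*
*	hsg = assoc[0] + assoc[2]	(hashed first)
*	sg  = IV + ciphertext		(hashed second)
*	tsg = assoc[1]			(hashed last)
*
* In the IPsec ESN case assoc[1] carries the high half of the extended
* sequence number, which is thereby authenticated after the payload
* without ever appearing on the wire.
*/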
static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv,
unsigned int flags)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct scatterlist *dst = req->dst;
struct scatterlist *assoc = req->assoc;
struct scatterlist *cipher = areq_ctx->cipher;
struct scatterlist *hsg = areq_ctx->hsg;
struct scatterlist *tsg = areq_ctx->tsg;
struct scatterlist *assoc1;
struct scatterlist *assoc2;
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
unsigned int cryptlen = req->cryptlen;
struct page *dstp;
u8 *vdst;
u8 *hash;
dstp = sg_page(dst);
vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;
if (ivsize) {
sg_init_table(cipher, 2);
sg_set_buf(cipher, iv, ivsize);
scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
dst = cipher;
cryptlen += ivsize;
}
if (sg_is_last(assoc))
return -EINVAL;
assoc1 = assoc + 1;
if (sg_is_last(assoc1))
return -EINVAL;
assoc2 = assoc + 2;
if (!sg_is_last(assoc2))
return -EINVAL;
sg_init_table(hsg, 2);
sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
sg_init_table(tsg, 1);
sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
areq_ctx->cryptlen = cryptlen;
areq_ctx->headlen = assoc->length + assoc2->length;
areq_ctx->trailen = assoc1->length;
areq_ctx->sg = dst;
areq_ctx->complete = authenc_esn_geniv_ahash_done;
areq_ctx->update_complete = authenc_esn_geniv_ahash_update_done;
areq_ctx->update_complete2 = authenc_esn_geniv_ahash_update_done2;
hash = crypto_authenc_esn_ahash(req, flags);
if (IS_ERR(hash))
return PTR_ERR(hash);
scatterwalk_map_and_copy(hash, dst, cryptlen,
crypto_aead_authsize(authenc_esn), 1);
return 0;
}
static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
int err)
{
struct aead_request *areq = req->data;
if (!err) {
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(areq);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct ablkcipher_request *abreq = aead_request_ctx(areq);
u8 *iv = (u8 *)(abreq + 1) +
crypto_ablkcipher_reqsize(ctx->enc);
err = crypto_authenc_esn_genicv(areq, iv, 0);
}
authenc_esn_request_complete(areq, err);
}
static int crypto_authenc_esn_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_ablkcipher *enc = ctx->enc;
struct scatterlist *dst = req->dst;
unsigned int cryptlen = req->cryptlen;
struct ablkcipher_request *abreq = (void *)(areq_ctx->tail
+ ctx->reqoff);
u8 *iv = (u8 *)abreq - crypto_ablkcipher_ivsize(enc);
int err;
ablkcipher_request_set_tfm(abreq, enc);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
crypto_authenc_esn_encrypt_done, req);
ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv);
memcpy(iv, req->iv, crypto_aead_ivsize(authenc_esn));
err = crypto_ablkcipher_encrypt(abreq);
if (err)
return err;
return crypto_authenc_esn_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
}
static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req,
int err)
{
struct aead_request *areq = req->data;
if (!err) {
struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
err = crypto_authenc_esn_genicv(areq, greq->giv, 0);
}
authenc_esn_request_complete(areq, err);
}
static int crypto_authenc_esn_givencrypt(struct aead_givcrypt_request *req)
{
struct crypto_aead *authenc_esn = aead_givcrypt_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct aead_request *areq = &req->areq;
struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
u8 *iv = req->giv;
int err;
skcipher_givcrypt_set_tfm(greq, ctx->enc);
skcipher_givcrypt_set_callback(greq, aead_request_flags(areq),
crypto_authenc_esn_givencrypt_done, areq);
skcipher_givcrypt_set_crypt(greq, areq->src, areq->dst, areq->cryptlen,
areq->iv);
skcipher_givcrypt_set_giv(greq, iv, req->seq);
err = crypto_skcipher_givencrypt(greq);
if (err)
return err;
return crypto_authenc_esn_genicv(areq, iv, CRYPTO_TFM_REQ_MAY_SLEEP);
}
static int crypto_authenc_esn_verify(struct aead_request *req)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
u8 *ohash;
u8 *ihash;
unsigned int authsize;
areq_ctx->complete = authenc_esn_verify_ahash_done;
areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP);
if (IS_ERR(ohash))
return PTR_ERR(ohash);
authsize = crypto_aead_authsize(authenc_esn);
ihash = ohash + authsize;
scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
authsize, 0);
return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
}
static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
unsigned int cryptlen)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req);
struct scatterlist *src = req->src;
struct scatterlist *assoc = req->assoc;
struct scatterlist *cipher = areq_ctx->cipher;
struct scatterlist *hsg = areq_ctx->hsg;
struct scatterlist *tsg = areq_ctx->tsg;
struct scatterlist *assoc1;
struct scatterlist *assoc2;
unsigned int ivsize = crypto_aead_ivsize(authenc_esn);
struct page *srcp;
u8 *vsrc;
srcp = sg_page(src);
vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;
if (ivsize) {
sg_init_table(cipher, 2);
sg_set_buf(cipher, iv, ivsize);
scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
src = cipher;
cryptlen += ivsize;
}
if (sg_is_last(assoc))
return -EINVAL;
assoc1 = assoc + 1;
if (sg_is_last(assoc1))
return -EINVAL;
assoc2 = assoc + 2;
if (!sg_is_last(assoc2))
return -EINVAL;
sg_init_table(hsg, 2);
sg_set_page(hsg, sg_page(assoc), assoc->length, assoc->offset);
sg_set_page(hsg + 1, sg_page(assoc2), assoc2->length, assoc2->offset);
sg_init_table(tsg, 1);
sg_set_page(tsg, sg_page(assoc1), assoc1->length, assoc1->offset);
areq_ctx->cryptlen = cryptlen;
areq_ctx->headlen = assoc->length + assoc2->length;
areq_ctx->trailen = assoc1->length;
areq_ctx->sg = src;
areq_ctx->complete = authenc_esn_verify_ahash_done;
areq_ctx->update_complete = authenc_esn_verify_ahash_update_done;
areq_ctx->update_complete2 = authenc_esn_verify_ahash_update_done2;
return crypto_authenc_esn_verify(req);
}
static int crypto_authenc_esn_decrypt(struct aead_request *req)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
struct ablkcipher_request *abreq = aead_request_ctx(req);
unsigned int cryptlen = req->cryptlen;
unsigned int authsize = crypto_aead_authsize(authenc_esn);
u8 *iv = req->iv;
int err;
if (cryptlen < authsize)
return -EINVAL;
cryptlen -= authsize;
err = crypto_authenc_esn_iverify(req, iv, cryptlen);
if (err)
return err;
ablkcipher_request_set_tfm(abreq, ctx->enc);
ablkcipher_request_set_callback(abreq, aead_request_flags(req),
req->base.complete, req->base.data);
ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv);
return crypto_ablkcipher_decrypt(abreq);
}
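/* Note the ordering above: the ICV is checked by
 * crypto_authenc_esn_iverify() before any decryption is started, so a
 * forged or truncated ciphertext is rejected without producing plaintext. */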
static int crypto_authenc_esn_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct authenc_esn_instance_ctx *ictx = crypto_instance_ctx(inst);
struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_ablkcipher *enc;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
if (IS_ERR(auth))
return PTR_ERR(auth);
enc = crypto_spawn_skcipher(&ictx->enc);
err = PTR_ERR(enc);
if (IS_ERR(enc))
goto err_free_ahash;
ctx->auth = auth;
ctx->enc = enc;
ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth) +
crypto_ahash_alignmask(auth),
crypto_ahash_alignmask(auth) + 1) +
crypto_ablkcipher_ivsize(enc);
tfm->crt_aead.reqsize = sizeof(struct authenc_esn_request_ctx) +
ctx->reqoff +
max_t(unsigned int,
crypto_ahash_reqsize(auth) +
sizeof(struct ahash_request),
sizeof(struct skcipher_givcrypt_request) +
crypto_ablkcipher_reqsize(enc));
return 0;
err_free_ahash:
crypto_free_ahash(auth);
return err;
}
static void crypto_authenc_esn_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_authenc_esn_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_ahash(ctx->auth);
crypto_free_ablkcipher(ctx->enc);
}
static struct crypto_instance *crypto_authenc_esn_alloc(struct rtattr **tb)
{
struct crypto_attr_type *algt;
struct crypto_instance *inst;
struct hash_alg_common *auth;
struct crypto_alg *auth_base;
struct crypto_alg *enc;
struct authenc_esn_instance_ctx *ctx;
const char *enc_name;
int err;
algt = crypto_get_attr_type(tb);
err = PTR_ERR(algt);
if (IS_ERR(algt))
return ERR_PTR(err);
if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
return ERR_PTR(-EINVAL);
auth = ahash_attr_alg(tb[1], CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_AHASH_MASK);
if (IS_ERR(auth))
return ERR_CAST(auth);
auth_base = &auth->base;
enc_name = crypto_attr_alg_name(tb[2]);
err = PTR_ERR(enc_name);
if (IS_ERR(enc_name))
goto out_put_auth;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
err = -ENOMEM;
if (!inst)
goto out_put_auth;
ctx = crypto_instance_ctx(inst);
err = crypto_init_ahash_spawn(&ctx->auth, auth, inst);
if (err)
goto err_free_inst;
crypto_set_skcipher_spawn(&ctx->enc, inst);
err = crypto_grab_skcipher(&ctx->enc, enc_name, 0,
crypto_requires_sync(algt->type,
algt->mask));
if (err)
goto err_drop_auth;
enc = crypto_skcipher_spawn_alg(&ctx->enc);
err = -ENAMETOOLONG;
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
"authencesn(%s,%s)", auth_base->cra_name, enc->cra_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_drop_enc;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"authencesn(%s,%s)", auth_base->cra_driver_name,
enc->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_drop_enc;
inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
inst->alg.cra_flags |= enc->cra_flags & CRYPTO_ALG_ASYNC;
inst->alg.cra_priority = enc->cra_priority *
10 + auth_base->cra_priority;
inst->alg.cra_blocksize = enc->cra_blocksize;
inst->alg.cra_alignmask = auth_base->cra_alignmask | enc->cra_alignmask;
inst->alg.cra_type = &crypto_aead_type;
inst->alg.cra_aead.ivsize = enc->cra_ablkcipher.ivsize;
inst->alg.cra_aead.maxauthsize = auth->digestsize;
inst->alg.cra_ctxsize = sizeof(struct crypto_authenc_esn_ctx);
inst->alg.cra_init = crypto_authenc_esn_init_tfm;
inst->alg.cra_exit = crypto_authenc_esn_exit_tfm;
inst->alg.cra_aead.setkey = crypto_authenc_esn_setkey;
inst->alg.cra_aead.encrypt = crypto_authenc_esn_encrypt;
inst->alg.cra_aead.decrypt = crypto_authenc_esn_decrypt;
inst->alg.cra_aead.givencrypt = crypto_authenc_esn_givencrypt;
out:
crypto_mod_put(auth_base);
return inst;
err_drop_enc:
crypto_drop_skcipher(&ctx->enc);
err_drop_auth:
crypto_drop_ahash(&ctx->auth);
err_free_inst:
kfree(inst);
out_put_auth:
inst = ERR_PTR(err);
goto out;
}
static void crypto_authenc_esn_free(struct crypto_instance *inst)
{
struct authenc_esn_instance_ctx *ctx = crypto_instance_ctx(inst);
crypto_drop_skcipher(&ctx->enc);
crypto_drop_ahash(&ctx->auth);
kfree(inst);
}
static struct crypto_template crypto_authenc_esn_tmpl = {
.name = "authencesn",
.alloc = crypto_authenc_esn_alloc,
.free = crypto_authenc_esn_free,
.module = THIS_MODULE,
};
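/* Usage sketch (illustrative, not part of the original file): once the
 * template is registered, an authencesn instance is created on demand by
 * name through the regular AEAD API.  The algorithm pair below is only an
 * example; any registered ahash/skcipher combination should work. */
static int example_alloc_authencesn(void)
{
struct crypto_aead *tfm;
tfm = crypto_alloc_aead("authencesn(hmac(sha1),cbc(aes))", 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm); /* template instantiation failed */
crypto_free_aead(tfm);
return 0;
}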
static int __init crypto_authenc_esn_module_init(void)
{
return crypto_register_template(&crypto_authenc_esn_tmpl);
}
static void __exit crypto_authenc_esn_module_exit(void)
{
crypto_unregister_template(&crypto_authenc_esn_tmpl);
}
module_init(crypto_authenc_esn_module_init);
module_exit(crypto_authenc_esn_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
| gpl-2.0 |
jorik041/KTSGS5 | arch/cris/arch-v10/kernel/dma.c | 9032 | 8000 | /* Wrapper for DMA channel allocator that updates DMA client muxing.
* Copyright 2004-2007, Axis Communications AB
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/dma.h>
#include <arch/svinto.h>
#include <arch/system.h>
/* Macro to access ETRAX 100 registers */
#define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
IO_STATE_(reg##_, field##_, _##val)
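/* Illustrative expansion: SETS(gens, R_GEN_CONFIG, dma6, serial0) pastes
 * into
 * gens = (gens & ~IO_MASK_(R_GEN_CONFIG_, dma6_))
 *      | IO_STATE_(R_GEN_CONFIG_, dma6_, _serial0);
 * i.e. it clears just the dma6 field of the shadow value and ORs in the
 * "serial0" state, leaving every other field untouched. */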
static char used_dma_channels[MAX_DMA_CHANNELS];
static const char * used_dma_channels_users[MAX_DMA_CHANNELS];
int cris_request_dma(unsigned int dmanr, const char * device_id,
unsigned options, enum dma_owner owner)
{
unsigned long flags;
unsigned long int gens;
int fail = -EINVAL;
if (dmanr >= MAX_DMA_CHANNELS) {
printk(KERN_CRIT "cris_request_dma: invalid DMA channel %u\n", dmanr);
return -EINVAL;
}
local_irq_save(flags);
if (used_dma_channels[dmanr]) {
local_irq_restore(flags);
if (options & DMA_VERBOSE_ON_ERROR) {
printk(KERN_CRIT "Failed to request DMA %i for %s, already allocated by %s\n", dmanr, device_id, used_dma_channels_users[dmanr]);
}
if (options & DMA_PANIC_ON_ERROR) {
panic("request_dma error!");
}
return -EBUSY;
}
gens = genconfig_shadow;
switch(owner)
{
case dma_eth:
if ((dmanr != NETWORK_TX_DMA_NBR) &&
(dmanr != NETWORK_RX_DMA_NBR)) {
printk(KERN_CRIT "Invalid DMA channel for eth\n");
goto bail;
}
break;
case dma_ser0:
if (dmanr == SER0_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma6, serial0);
} else if (dmanr == SER0_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma7, serial0);
} else {
printk(KERN_CRIT "Invalid DMA channel for ser0\n");
goto bail;
}
break;
case dma_ser1:
if (dmanr == SER1_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma8, serial1);
} else if (dmanr == SER1_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma9, serial1);
} else {
printk(KERN_CRIT "Invalid DMA channel for ser1\n");
goto bail;
}
break;
case dma_ser2:
if (dmanr == SER2_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma2, serial2);
} else if (dmanr == SER2_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma3, serial2);
} else {
printk(KERN_CRIT "Invalid DMA channel for ser2\n");
goto bail;
}
break;
case dma_ser3:
if (dmanr == SER3_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma4, serial3);
} else if (dmanr == SER3_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma5, serial3);
} else {
printk(KERN_CRIT "Invalid DMA channel for ser3\n");
goto bail;
}
break;
case dma_ata:
if (dmanr == ATA_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma2, ata);
} else if (dmanr == ATA_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma3, ata);
} else {
printk(KERN_CRIT "Invalid DMA channel for ata\n");
goto bail;
}
break;
case dma_ext0:
if (dmanr == EXTDMA0_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma4, extdma0);
} else if (dmanr == EXTDMA0_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma5, extdma0);
} else {
printk(KERN_CRIT "Invalid DMA channel for ext0\n");
goto bail;
}
break;
case dma_ext1:
if (dmanr == EXTDMA1_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma6, extdma1);
} else if (dmanr == EXTDMA1_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma7, extdma1);
} else {
printk(KERN_CRIT "Invalid DMA channel for ext1\n");
goto bail;
}
break;
case dma_int6:
if (dmanr == MEM2MEM_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma7, intdma6);
} else {
printk(KERN_CRIT "Invalid DMA channel for int6\n");
goto bail;
}
break;
case dma_int7:
if (dmanr == MEM2MEM_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma6, intdma7);
} else {
printk(KERN_CRIT "Invalid DMA channel for int7\n");
goto bail;
}
break;
case dma_usb:
if (dmanr == USB_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma8, usb);
} else if (dmanr == USB_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma9, usb);
} else {
printk(KERN_CRIT "Invalid DMA channel for usb\n");
goto bail;
}
break;
case dma_scsi0:
if (dmanr == SCSI0_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma2, scsi0);
} else if (dmanr == SCSI0_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma3, scsi0);
} else {
printk(KERN_CRIT "Invalid DMA channel for scsi0\n");
goto bail;
}
break;
case dma_scsi1:
if (dmanr == SCSI1_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma4, scsi1);
} else if (dmanr == SCSI1_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma5, scsi1);
} else {
printk(KERN_CRIT "Invalid DMA channel for scsi1\n");
goto bail;
}
break;
case dma_par0:
if (dmanr == PAR0_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma2, par0);
} else if (dmanr == PAR0_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma3, par0);
} else {
printk(KERN_CRIT "Invalid DMA channel for par0\n");
goto bail;
}
break;
case dma_par1:
if (dmanr == PAR1_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma4, par1);
} else if (dmanr == PAR1_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma5, par1);
} else {
printk(KERN_CRIT "Invalid DMA channel for par1\n");
goto bail;
}
break;
default:
printk(KERN_CRIT "Invalid DMA owner.\n");
goto bail;
}
used_dma_channels[dmanr] = 1;
used_dma_channels_users[dmanr] = device_id;
{
volatile int i;
genconfig_shadow = gens;
*R_GEN_CONFIG = genconfig_shadow;
/* Wait 12 cycles before doing any DMA command */
for(i = 6; i > 0; i--)
nop();
}
fail = 0;
bail:
local_irq_restore(flags);
return fail;
}
void cris_free_dma(unsigned int dmanr, const char * device_id)
{
unsigned long flags;
if (dmanr >= MAX_DMA_CHANNELS) {
printk(KERN_CRIT "cris_free_dma: invalid DMA channel %u\n", dmanr);
return;
}
local_irq_save(flags);
if (!used_dma_channels[dmanr]) {
printk(KERN_CRIT "cris_free_dma: DMA channel %u not allocated\n", dmanr);
} else if (device_id != used_dma_channels_users[dmanr]) {
printk(KERN_CRIT "cris_free_dma: DMA channel %u not allocated by device\n", dmanr);
} else {
switch(dmanr)
{
case 0:
*R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH0_CMD, cmd, *R_DMA_CH0_CMD) ==
IO_STATE_VALUE(R_DMA_CH0_CMD, cmd, reset));
break;
case 1:
*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH1_CMD, cmd, *R_DMA_CH1_CMD) ==
IO_STATE_VALUE(R_DMA_CH1_CMD, cmd, reset));
break;
case 2:
*R_DMA_CH2_CMD = IO_STATE(R_DMA_CH2_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH2_CMD, cmd, *R_DMA_CH2_CMD) ==
IO_STATE_VALUE(R_DMA_CH2_CMD, cmd, reset));
break;
case 3:
*R_DMA_CH3_CMD = IO_STATE(R_DMA_CH3_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH3_CMD, cmd, *R_DMA_CH3_CMD) ==
IO_STATE_VALUE(R_DMA_CH3_CMD, cmd, reset));
break;
case 4:
*R_DMA_CH4_CMD = IO_STATE(R_DMA_CH4_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH4_CMD, cmd, *R_DMA_CH4_CMD) ==
IO_STATE_VALUE(R_DMA_CH4_CMD, cmd, reset));
break;
case 5:
*R_DMA_CH5_CMD = IO_STATE(R_DMA_CH5_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH5_CMD, cmd, *R_DMA_CH5_CMD) ==
IO_STATE_VALUE(R_DMA_CH5_CMD, cmd, reset));
break;
case 6:
*R_DMA_CH6_CMD = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *R_DMA_CH6_CMD) ==
IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
break;
case 7:
*R_DMA_CH7_CMD = IO_STATE(R_DMA_CH7_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH7_CMD, cmd, *R_DMA_CH7_CMD) ==
IO_STATE_VALUE(R_DMA_CH7_CMD, cmd, reset));
break;
case 8:
*R_DMA_CH8_CMD = IO_STATE(R_DMA_CH8_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH8_CMD, cmd, *R_DMA_CH8_CMD) ==
IO_STATE_VALUE(R_DMA_CH8_CMD, cmd, reset));
break;
case 9:
*R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH9_CMD, cmd, *R_DMA_CH9_CMD) ==
IO_STATE_VALUE(R_DMA_CH9_CMD, cmd, reset));
break;
}
used_dma_channels[dmanr] = 0;
}
local_irq_restore(flags);
}
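/* Usage sketch (illustrative, not part of the original file).  Note that
 * cris_free_dma() compares the device_id pointer, not the string contents,
 * so the exact same pointer must be passed to request and free. */
static const char example_dma_id[] = "example-ser2";
static int example_claim_ser2_tx_dma(void)
{
int err;
err = cris_request_dma(SER2_TX_DMA_NBR, example_dma_id,
DMA_VERBOSE_ON_ERROR, dma_ser2);
if (err)
return err; /* -EBUSY if taken, -EINVAL on a bad channel/owner pair */
/* ... program descriptors and start the transfer here ... */
cris_free_dma(SER2_TX_DMA_NBR, example_dma_id);
return 0;
}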
EXPORT_SYMBOL(cris_request_dma);
EXPORT_SYMBOL(cris_free_dma);
| gpl-2.0 |
patrykk/linux-udoo | arch/cris/arch-v10/kernel/io_interface_mux.c | 9032 | 25003 | /* IO interface mux allocator for ETRAX100LX.
* Copyright 2004-2007, Axis Communications AB
*/
/* Cf. ETRAX100LX Designer's Reference, chapter 19.9 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <arch/svinto.h>
#include <asm/io.h>
#include <arch/io_interface_mux.h>
#include <arch/system.h>
#define DBG(s)
/* Macro to access ETRAX 100 registers */
#define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
IO_STATE_(reg##_, field##_, _##val)
enum io_if_group {
group_a = (1<<0),
group_b = (1<<1),
group_c = (1<<2),
group_d = (1<<3),
group_e = (1<<4),
group_f = (1<<5)
};
struct watcher
{
void (*notify)(const unsigned int gpio_in_available,
const unsigned int gpio_out_available,
const unsigned char pa_available,
const unsigned char pb_available);
struct watcher *next;
};
struct if_group
{
enum io_if_group group;
/* name - the name of the group 'A' to 'F' */
char *name;
/* used - a bit mask of all pins in the group in the order listed
* in the tables in 19.9.1 to 19.9.6. Note that no
* distinction is made between in, out and in/out pins. */
unsigned int used;
};
struct interface
{
enum cris_io_interface ioif;
/* name - the name of the interface */
char *name;
/* groups - OR'ed together io_if_group flags describing what pin groups
* the interface uses pins in. */
unsigned char groups;
/* used - set when the interface is allocated. */
unsigned char used;
char *owner;
/* group_a through group_f - bit masks describing what pins in the
* pin groups the interface uses. */
unsigned int group_a;
unsigned int group_b;
unsigned int group_c;
unsigned int group_d;
unsigned int group_e;
unsigned int group_f;
/* gpio_g_in, gpio_g_out, gpio_b - bit masks telling what pins in the
* GPIO ports the interface uses. This could be reconstructed using
* the group_X masks and a table of what pins the GPIO ports use,
* but that would be messy. */
unsigned int gpio_g_in;
unsigned int gpio_g_out;
unsigned char gpio_b;
};
static struct if_group if_groups[6] = {
{
.group = group_a,
.name = "A",
.used = 0,
},
{
.group = group_b,
.name = "B",
.used = 0,
},
{
.group = group_c,
.name = "C",
.used = 0,
},
{
.group = group_d,
.name = "D",
.used = 0,
},
{
.group = group_e,
.name = "E",
.used = 0,
},
{
.group = group_f,
.name = "F",
.used = 0,
}
};
/* The order in the array must match the order of enum
* cris_io_interface in io_interface_mux.h */
static struct interface interfaces[] = {
/* Begin Non-multiplexed interfaces */
{
.ioif = if_eth,
.name = "ethernet",
.groups = 0,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0,
.gpio_g_out = 0,
.gpio_b = 0
},
{
.ioif = if_serial_0,
.name = "serial_0",
.groups = 0,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0,
.gpio_g_out = 0,
.gpio_b = 0
},
/* End Non-multiplexed interfaces */
{
.ioif = if_serial_1,
.name = "serial_1",
.groups = group_e,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0x0f,
.group_f = 0,
.gpio_g_in = 0x00000000,
.gpio_g_out = 0x00000000,
.gpio_b = 0x00
},
{
.ioif = if_serial_2,
.name = "serial_2",
.groups = group_b,
.group_a = 0,
.group_b = 0x0f,
.group_c = 0,
.group_d = 0,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0x000000c0,
.gpio_g_out = 0x000000c0,
.gpio_b = 0x00
},
{
.ioif = if_serial_3,
.name = "serial_3",
.groups = group_c,
.group_a = 0,
.group_b = 0,
.group_c = 0x0f,
.group_d = 0,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0xc0000000,
.gpio_g_out = 0xc0000000,
.gpio_b = 0x00
},
{
.ioif = if_sync_serial_1,
.name = "sync_serial_1",
.groups = group_e | group_f,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0x0f,
.group_f = 0x10,
.gpio_g_in = 0x00000000,
.gpio_g_out = 0x00000000,
.gpio_b = 0x10
},
{
.ioif = if_sync_serial_3,
.name = "sync_serial_3",
.groups = group_c | group_f,
.group_a = 0,
.group_b = 0,
.group_c = 0x0f,
.group_d = 0,
.group_e = 0,
.group_f = 0x80,
.gpio_g_in = 0xc0000000,
.gpio_g_out = 0xc0000000,
.gpio_b = 0x80
},
{
.ioif = if_shared_ram,
.name = "shared_ram",
.groups = group_a,
.group_a = 0x7f8ff,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0x0000ff3e,
.gpio_g_out = 0x0000ff38,
.gpio_b = 0x00
},
{
.ioif = if_shared_ram_w,
.name = "shared_ram_w",
.groups = group_a | group_d,
.group_a = 0x7f8ff,
.group_b = 0,
.group_c = 0,
.group_d = 0xff,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0x00ffff3e,
.gpio_g_out = 0x00ffff38,
.gpio_b = 0x00
},
{
.ioif = if_par_0,
.name = "par_0",
.groups = group_a,
.group_a = 0x7fbff,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0x0000ff3e,
.gpio_g_out = 0x0000ff3e,
.gpio_b = 0x00
},
{
.ioif = if_par_1,
.name = "par_1",
.groups = group_d,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0x7feff,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0x3eff0000,
.gpio_g_out = 0x3eff0000,
.gpio_b = 0x00
},
{
.ioif = if_par_w,
.name = "par_w",
.groups = group_a | group_d,
.group_a = 0x7fbff,
.group_b = 0,
.group_c = 0,
.group_d = 0xff,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0x00ffff3e,
.gpio_g_out = 0x00ffff3e,
.gpio_b = 0x00
},
{
.ioif = if_scsi8_0,
.name = "scsi8_0",
.groups = group_a | group_b | group_f,
.group_a = 0x7ffff,
.group_b = 0x0f,
.group_c = 0,
.group_d = 0,
.group_e = 0,
.group_f = 0x10,
.gpio_g_in = 0x0000ffff,
.gpio_g_out = 0x0000ffff,
.gpio_b = 0x10
},
{
.ioif = if_scsi8_1,
.name = "scsi8_1",
.groups = group_c | group_d | group_f,
.group_a = 0,
.group_b = 0,
.group_c = 0x0f,
.group_d = 0x7ffff,
.group_e = 0,
.group_f = 0x80,
.gpio_g_in = 0xffff0000,
.gpio_g_out = 0xffff0000,
.gpio_b = 0x80
},
{
.ioif = if_scsi_w,
.name = "scsi_w",
.groups = group_a | group_b | group_d | group_f,
.group_a = 0x7ffff,
.group_b = 0x0f,
.group_c = 0,
.group_d = 0x601ff,
.group_e = 0,
.group_f = 0x90,
.gpio_g_in = 0x01ffffff,
.gpio_g_out = 0x07ffffff,
.gpio_b = 0x80
},
{
.ioif = if_ata,
.name = "ata",
.groups = group_a | group_b | group_c | group_d,
.group_a = 0x7ffff,
.group_b = 0x0f,
.group_c = 0x0f,
.group_d = 0x7cfff,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0xf9ffffff,
.gpio_g_out = 0xffffffff,
.gpio_b = 0x80
},
{
.ioif = if_csp,
.name = "csp",
.groups = group_f,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0,
.group_f = 0xfc,
.gpio_g_in = 0x00000000,
.gpio_g_out = 0x00000000,
.gpio_b = 0xfc
},
{
.ioif = if_i2c,
.name = "i2c",
.groups = group_f,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0,
.group_f = 0x03,
.gpio_g_in = 0x00000000,
.gpio_g_out = 0x00000000,
.gpio_b = 0x03
},
{
.ioif = if_usb_1,
.name = "usb_1",
.groups = group_e | group_f,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0x0f,
.group_f = 0x2c,
.gpio_g_in = 0x00000000,
.gpio_g_out = 0x00000000,
.gpio_b = 0x2c
},
{
.ioif = if_usb_2,
.name = "usb_2",
.groups = group_d,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0x33e00,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0x3e000000,
.gpio_g_out = 0x0c000000,
.gpio_b = 0x00
},
/* GPIO pins */
{
.ioif = if_gpio_grp_a,
.name = "gpio_a",
.groups = group_a,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0x0000ff3f,
.gpio_g_out = 0x0000ff3f,
.gpio_b = 0x00
},
{
.ioif = if_gpio_grp_b,
.name = "gpio_b",
.groups = group_b,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0x000000c0,
.gpio_g_out = 0x000000c0,
.gpio_b = 0x00
},
{
.ioif = if_gpio_grp_c,
.name = "gpio_c",
.groups = group_c,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0xc0000000,
.gpio_g_out = 0xc0000000,
.gpio_b = 0x00
},
{
.ioif = if_gpio_grp_d,
.name = "gpio_d",
.groups = group_d,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0x3fff0000,
.gpio_g_out = 0x3fff0000,
.gpio_b = 0x00
},
{
.ioif = if_gpio_grp_e,
.name = "gpio_e",
.groups = group_e,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0x00000000,
.gpio_g_out = 0x00000000,
.gpio_b = 0x00
},
{
.ioif = if_gpio_grp_f,
.name = "gpio_f",
.groups = group_f,
.group_a = 0,
.group_b = 0,
.group_c = 0,
.group_d = 0,
.group_e = 0,
.group_f = 0,
.gpio_g_in = 0x00000000,
.gpio_g_out = 0x00000000,
.gpio_b = 0xff
}
/* Array end */
};
static struct watcher *watchers = NULL;
/* The pins that are free to use in the GPIO ports. */
static unsigned int gpio_in_pins = 0xffffffff;
static unsigned int gpio_out_pins = 0xffffffff;
static unsigned char gpio_pb_pins = 0xff;
static unsigned char gpio_pa_pins = 0xff;
/* Identifiers for the owners of the GPIO pins. */
static enum cris_io_interface gpio_pa_owners[8];
static enum cris_io_interface gpio_pb_owners[8];
static enum cris_io_interface gpio_pg_owners[32];
static int cris_io_interface_init(void);
static unsigned char clear_group_from_set(const unsigned char groups, struct if_group *group)
{
return (groups & ~group->group);
}
static struct if_group *get_group(const unsigned char groups)
{
int i;
for (i = 0; i < ARRAY_SIZE(if_groups); i++) {
if (groups & if_groups[i].group) {
return &if_groups[i];
}
}
return NULL;
}
static void notify_watchers(void)
{
struct watcher *w = watchers;
DBG(printk("io_interface_mux: notifying watchers\n"));
while (NULL != w) {
w->notify((const unsigned int)gpio_in_pins,
(const unsigned int)gpio_out_pins,
(const unsigned char)gpio_pa_pins,
(const unsigned char)gpio_pb_pins);
w = w->next;
}
}
int cris_request_io_interface(enum cris_io_interface ioif, const char *device_id)
{
int set_gen_config = 0;
int set_gen_config_ii = 0;
unsigned long int gens;
unsigned long int gens_ii;
struct if_group *grp;
unsigned char group_set;
unsigned long flags;
int res = 0;
(void)cris_io_interface_init();
DBG(printk("cris_request_io_interface(%d, \"%s\")\n", ioif, device_id));
if ((ioif >= if_max_interfaces) || (ioif < 0)) {
printk(KERN_CRIT "cris_request_io_interface: Bad interface "
"%u submitted for %s\n",
ioif,
device_id);
return -EINVAL;
}
local_irq_save(flags);
if (interfaces[ioif].used) {
printk(KERN_CRIT "cris_io_interface: Cannot allocate interface "
"%s for %s, in use by %s\n",
interfaces[ioif].name,
device_id,
interfaces[ioif].owner);
res = -EBUSY;
goto exit;
}
/* Check that all required pins in the used groups are free
* before allocating. */
group_set = interfaces[ioif].groups;
while (NULL != (grp = get_group(group_set))) {
unsigned int if_group_use = 0;
switch (grp->group) {
case group_a:
if_group_use = interfaces[ioif].group_a;
break;
case group_b:
if_group_use = interfaces[ioif].group_b;
break;
case group_c:
if_group_use = interfaces[ioif].group_c;
break;
case group_d:
if_group_use = interfaces[ioif].group_d;
break;
case group_e:
if_group_use = interfaces[ioif].group_e;
break;
case group_f:
if_group_use = interfaces[ioif].group_f;
break;
default:
BUG_ON(1);
}
if (if_group_use & grp->used) {
printk(KERN_INFO "cris_request_io_interface: group "
"%s needed by %s not available\n",
grp->name, interfaces[ioif].name);
res = -EBUSY;
goto exit;
}
group_set = clear_group_from_set(group_set, grp);
}
/* Are the required GPIO pins available too? */
if (((interfaces[ioif].gpio_g_in & gpio_in_pins) !=
interfaces[ioif].gpio_g_in) ||
((interfaces[ioif].gpio_g_out & gpio_out_pins) !=
interfaces[ioif].gpio_g_out) ||
((interfaces[ioif].gpio_b & gpio_pb_pins) !=
interfaces[ioif].gpio_b)) {
printk(KERN_CRIT "cris_request_io_interface: Could not get "
"required pins for interface %u\n", ioif);
res = -EBUSY;
goto exit;
}
/* Check which registers need to be reconfigured. */
gens = genconfig_shadow;
gens_ii = gen_config_ii_shadow;
set_gen_config = 1;
switch (ioif)
{
/* Begin Non-multiplexed interfaces */
case if_eth:
/* fall through */
case if_serial_0:
set_gen_config = 0;
break;
/* End Non-multiplexed interfaces */
case if_serial_1:
set_gen_config_ii = 1;
SETS(gens_ii, R_GEN_CONFIG_II, sermode1, async);
break;
case if_serial_2:
SETS(gens, R_GEN_CONFIG, ser2, select);
break;
case if_serial_3:
SETS(gens, R_GEN_CONFIG, ser3, select);
set_gen_config_ii = 1;
SETS(gens_ii, R_GEN_CONFIG_II, sermode3, async);
break;
case if_sync_serial_1:
set_gen_config_ii = 1;
SETS(gens_ii, R_GEN_CONFIG_II, sermode1, sync);
break;
case if_sync_serial_3:
SETS(gens, R_GEN_CONFIG, ser3, select);
set_gen_config_ii = 1;
SETS(gens_ii, R_GEN_CONFIG_II, sermode3, sync);
break;
case if_shared_ram:
SETS(gens, R_GEN_CONFIG, mio, select);
break;
case if_shared_ram_w:
SETS(gens, R_GEN_CONFIG, mio_w, select);
break;
case if_par_0:
SETS(gens, R_GEN_CONFIG, par0, select);
break;
case if_par_1:
SETS(gens, R_GEN_CONFIG, par1, select);
break;
case if_par_w:
SETS(gens, R_GEN_CONFIG, par0, select);
SETS(gens, R_GEN_CONFIG, par_w, select);
break;
case if_scsi8_0:
SETS(gens, R_GEN_CONFIG, scsi0, select);
break;
case if_scsi8_1:
SETS(gens, R_GEN_CONFIG, scsi1, select);
break;
case if_scsi_w:
SETS(gens, R_GEN_CONFIG, scsi0, select);
SETS(gens, R_GEN_CONFIG, scsi0w, select);
break;
case if_ata:
SETS(gens, R_GEN_CONFIG, ata, select);
break;
case if_csp:
/* fall through */
case if_i2c:
set_gen_config = 0;
break;
case if_usb_1:
SETS(gens, R_GEN_CONFIG, usb1, select);
break;
case if_usb_2:
SETS(gens, R_GEN_CONFIG, usb2, select);
break;
case if_gpio_grp_a:
/* GPIO groups are only accounted, don't do configuration changes. */
/* fall through */
case if_gpio_grp_b:
/* fall through */
case if_gpio_grp_c:
/* fall through */
case if_gpio_grp_d:
/* fall through */
case if_gpio_grp_e:
/* fall through */
case if_gpio_grp_f:
set_gen_config = 0;
break;
default:
printk(KERN_INFO "cris_request_io_interface: Bad interface "
"%u submitted for %s\n",
ioif, device_id);
res = -EBUSY;
goto exit;
}
/* All needed I/O pins and pin groups are free, allocate. */
group_set = interfaces[ioif].groups;
while (NULL != (grp = get_group(group_set))) {
unsigned int if_group_use = 0;
switch (grp->group) {
case group_a:
if_group_use = interfaces[ioif].group_a;
break;
case group_b:
if_group_use = interfaces[ioif].group_b;
break;
case group_c:
if_group_use = interfaces[ioif].group_c;
break;
case group_d:
if_group_use = interfaces[ioif].group_d;
break;
case group_e:
if_group_use = interfaces[ioif].group_e;
break;
case group_f:
if_group_use = interfaces[ioif].group_f;
break;
default:
BUG_ON(1);
}
grp->used |= if_group_use;
group_set = clear_group_from_set(group_set, grp);
}
interfaces[ioif].used = 1;
interfaces[ioif].owner = (char*)device_id;
if (set_gen_config) {
volatile int i;
genconfig_shadow = gens;
*R_GEN_CONFIG = genconfig_shadow;
/* Wait 12 cycles before doing any DMA command */
for(i = 6; i > 0; i--)
nop();
}
if (set_gen_config_ii) {
gen_config_ii_shadow = gens_ii;
*R_GEN_CONFIG_II = gen_config_ii_shadow;
}
DBG(printk(KERN_DEBUG "GPIO pins: available before: "
"g_in=0x%08x g_out=0x%08x pb=0x%02x\n",
gpio_in_pins, gpio_out_pins, gpio_pb_pins));
DBG(printk(KERN_DEBUG
"grabbing pins: g_in=0x%08x g_out=0x%08x pb=0x%02x\n",
interfaces[ioif].gpio_g_in,
interfaces[ioif].gpio_g_out,
interfaces[ioif].gpio_b));
gpio_in_pins &= ~interfaces[ioif].gpio_g_in;
gpio_out_pins &= ~interfaces[ioif].gpio_g_out;
gpio_pb_pins &= ~interfaces[ioif].gpio_b;
DBG(printk(KERN_DEBUG "GPIO pins: available after: "
"g_in=0x%08x g_out=0x%08x pb=0x%02x\n",
gpio_in_pins, gpio_out_pins, gpio_pb_pins));
exit:
local_irq_restore(flags);
if (res == 0)
notify_watchers();
return res;
}
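/* Usage sketch (illustrative, not part of the original file): claiming a
 * whole interface checks every pin group it needs, updates R_GEN_CONFIG
 * (and R_GEN_CONFIG_II where relevant), and reserves the GPIO pins. */
static int example_claim_serial_2(void)
{
int err = cris_request_io_interface(if_serial_2, "example-driver");
if (err)
return err; /* -EBUSY if the interface or its pins are in use */
/* ... use ser2 ... */
cris_free_io_interface(if_serial_2);
return 0;
}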
void cris_free_io_interface(enum cris_io_interface ioif)
{
struct if_group *grp;
unsigned char group_set;
unsigned long flags;
(void)cris_io_interface_init();
if ((ioif >= if_max_interfaces) || (ioif < 0)) {
printk(KERN_CRIT "cris_free_io_interface: Bad interface %u\n",
ioif);
return;
}
local_irq_save(flags);
if (!interfaces[ioif].used) {
printk(KERN_CRIT "cris_free_io_interface: Freeing free interface %u\n",
ioif);
local_irq_restore(flags);
return;
}
group_set = interfaces[ioif].groups;
while (NULL != (grp = get_group(group_set))) {
unsigned int if_group_use = 0;
switch (grp->group) {
case group_a:
if_group_use = interfaces[ioif].group_a;
break;
case group_b:
if_group_use = interfaces[ioif].group_b;
break;
case group_c:
if_group_use = interfaces[ioif].group_c;
break;
case group_d:
if_group_use = interfaces[ioif].group_d;
break;
case group_e:
if_group_use = interfaces[ioif].group_e;
break;
case group_f:
if_group_use = interfaces[ioif].group_f;
break;
default:
BUG_ON(1);
}
if ((grp->used & if_group_use) != if_group_use)
BUG_ON(1);
grp->used = grp->used & ~if_group_use;
group_set = clear_group_from_set(group_set, grp);
}
interfaces[ioif].used = 0;
interfaces[ioif].owner = NULL;
DBG(printk("GPIO pins: available before: g_in=0x%08x g_out=0x%08x pb=0x%02x\n",
gpio_in_pins, gpio_out_pins, gpio_pb_pins));
DBG(printk("freeing pins: g_in=0x%08x g_out=0x%08x pb=0x%02x\n",
interfaces[ioif].gpio_g_in,
interfaces[ioif].gpio_g_out,
interfaces[ioif].gpio_b));
gpio_in_pins |= interfaces[ioif].gpio_g_in;
gpio_out_pins |= interfaces[ioif].gpio_g_out;
gpio_pb_pins |= interfaces[ioif].gpio_b;
DBG(printk("GPIO pins: available after: g_in=0x%08x g_out=0x%08x pb=0x%02x\n",
gpio_in_pins, gpio_out_pins, gpio_pb_pins));
local_irq_restore(flags);
notify_watchers();
}
/* Create a bitmask from bit 0 (inclusive) to bit stop_bit
(non-inclusive). stop_bit == 0 returns 0x0 */
static inline unsigned int create_mask(const unsigned stop_bit)
{
/* Avoid overflow */
if (stop_bit >= 32) {
return 0xffffffff;
}
return (1<<stop_bit)-1;
}
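/* Worked example: create_mask(4) == 0x0f.  The callers below combine two
 * masks to select an inclusive bit range, e.g. start_bit = 2, stop_bit = 5:
 * create_mask(6) & ~create_mask(2) == 0x3f & ~0x03 == 0x3c (bits 2..5). */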
/* port can be 'a', 'b' or 'g' */
int cris_io_interface_allocate_pins(const enum cris_io_interface ioif,
const char port,
const unsigned start_bit,
const unsigned stop_bit)
{
unsigned int i;
unsigned int mask = 0;
unsigned int tmp_mask;
unsigned long int flags;
enum cris_io_interface *owners;
(void)cris_io_interface_init();
DBG(printk("cris_io_interface_allocate_pins: if=%d port=%c start=%u stop=%u\n",
ioif, port, start_bit, stop_bit));
if (!((start_bit <= stop_bit) &&
((((port == 'a') || (port == 'b')) && (stop_bit < 8)) ||
((port == 'g') && (stop_bit < 32))))) {
return -EINVAL;
}
mask = create_mask(stop_bit + 1);
tmp_mask = create_mask(start_bit);
mask &= ~tmp_mask;
DBG(printk("cris_io_interface_allocate_pins: port=%c start=%u stop=%u mask=0x%08x\n",
port, start_bit, stop_bit, mask));
local_irq_save(flags);
switch (port) {
case 'a':
if ((gpio_pa_pins & mask) != mask) {
local_irq_restore(flags);
return -EBUSY;
}
owners = gpio_pa_owners;
gpio_pa_pins &= ~mask;
break;
case 'b':
if ((gpio_pb_pins & mask) != mask) {
local_irq_restore(flags);
return -EBUSY;
}
owners = gpio_pb_owners;
gpio_pb_pins &= ~mask;
break;
case 'g':
if (((gpio_in_pins & mask) != mask) ||
((gpio_out_pins & mask) != mask)) {
local_irq_restore(flags);
return -EBUSY;
}
owners = gpio_pg_owners;
gpio_in_pins &= ~mask;
gpio_out_pins &= ~mask;
break;
default:
local_irq_restore(flags);
return -EINVAL;
}
for (i = start_bit; i <= stop_bit; i++) {
owners[i] = ioif;
}
local_irq_restore(flags);
notify_watchers();
return 0;
}
/* port can be 'a', 'b' or 'g' */
int cris_io_interface_free_pins(const enum cris_io_interface ioif,
const char port,
const unsigned start_bit,
const unsigned stop_bit)
{
unsigned int i;
unsigned int mask = 0;
unsigned int tmp_mask;
unsigned long int flags;
enum cris_io_interface *owners;
(void)cris_io_interface_init();
if (!((start_bit <= stop_bit) &&
((((port == 'a') || (port == 'b')) && (stop_bit < 8)) ||
((port == 'g') && (stop_bit < 32))))) {
return -EINVAL;
}
mask = create_mask(stop_bit + 1);
tmp_mask = create_mask(start_bit);
mask &= ~tmp_mask;
DBG(printk("cris_io_interface_free_pins: port=%c start=%u stop=%u mask=0x%08x\n",
port, start_bit, stop_bit, mask));
local_irq_save(flags);
switch (port) {
case 'a':
if ((~gpio_pa_pins & mask) != mask) {
local_irq_restore(flags);
printk(KERN_CRIT "cris_io_interface_free_pins: Freeing free pins\n");
return -EINVAL;
}
owners = gpio_pa_owners;
break;
case 'b':
if ((~gpio_pb_pins & mask) != mask) {
local_irq_restore(flags);
printk(KERN_CRIT "cris_io_interface_free_pins: Freeing free pins\n");
return -EINVAL;
}
owners = gpio_pb_owners;
break;
case 'g':
if (((~gpio_in_pins & mask) != mask) ||
((~gpio_out_pins & mask) != mask)) {
local_irq_restore(flags);
printk(KERN_CRIT "cris_io_interface_free_pins: Freeing free pins\n");
return -EINVAL;
}
owners = gpio_pg_owners;
break;
default:
owners = NULL; /* Cannot happen. Shut up, gcc! */
}
for (i = start_bit; i <= stop_bit; i++) {
if (owners[i] != ioif) {
printk(KERN_CRIT "cris_io_interface_free_pins: Freeing unowned pins");
}
}
/* All was ok, change data. */
switch (port) {
case 'a':
gpio_pa_pins |= mask;
break;
case 'b':
gpio_pb_pins |= mask;
break;
case 'g':
gpio_in_pins |= mask;
gpio_out_pins |= mask;
break;
}
for (i = start_bit; i <= stop_bit; i++) {
owners[i] = if_unclaimed;
}
local_irq_restore(flags);
notify_watchers();
return 0;
}
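/* Usage sketch (illustrative, not part of the original file; the owner
 * token and pin range are arbitrary).  Ownership is recorded per pin, so
 * the same ioif must be passed when the pins are freed. */
static int example_claim_pg_pins(void)
{
int err;
err = cris_io_interface_allocate_pins(if_gpio_grp_d, 'g', 8, 15);
if (err)
return err; /* -EBUSY if any pin in PG8..PG15 is taken */
/* ... drive the pins ... */
return cris_io_interface_free_pins(if_gpio_grp_d, 'g', 8, 15);
}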
int cris_io_interface_register_watcher(void (*notify)(const unsigned int gpio_in_available,
const unsigned int gpio_out_available,
const unsigned char pa_available,
const unsigned char pb_available))
{
struct watcher *w;
(void)cris_io_interface_init();
if (NULL == notify) {
return -EINVAL;
}
w = kmalloc(sizeof(*w), GFP_KERNEL);
if (!w) {
return -ENOMEM;
}
w->notify = notify;
w->next = watchers;
watchers = w;
w->notify((const unsigned int)gpio_in_pins,
(const unsigned int)gpio_out_pins,
(const unsigned char)gpio_pa_pins,
(const unsigned char)gpio_pb_pins);
return 0;
}
void cris_io_interface_delete_watcher(void (*notify)(const unsigned int gpio_in_available,
const unsigned int gpio_out_available,
const unsigned char pa_available,
const unsigned char pb_available))
{
struct watcher *w = watchers, *prev = NULL;
(void)cris_io_interface_init();
while ((NULL != w) && (w->notify != notify)){
prev = w;
w = w->next;
}
if (NULL != w) {
if (NULL != prev) {
prev->next = w->next;
} else {
watchers = w->next;
}
kfree(w);
return;
}
printk(KERN_WARNING "cris_io_interface_delete_watcher: Deleting unknown watcher 0x%p\n", notify);
}
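/* Usage sketch (illustrative, not part of the original file): a watcher
 * is called once at registration and again after every allocation change,
 * receiving the currently free pin masks. */
static void example_pin_watcher(const unsigned int gpio_in_available,
const unsigned int gpio_out_available,
const unsigned char pa_available,
const unsigned char pb_available)
{
DBG(printk("free pins: g_in=0x%08x g_out=0x%08x pa=0x%02x pb=0x%02x\n",
gpio_in_available, gpio_out_available, pa_available, pb_available));
}
/* Register with cris_io_interface_register_watcher(example_pin_watcher)
 * and remove with cris_io_interface_delete_watcher(example_pin_watcher). */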
static int cris_io_interface_init(void)
{
static int first = 1;
int i;
if (!first) {
return 0;
}
first = 0;
for (i = 0; i<8; i++) {
gpio_pa_owners[i] = if_unclaimed;
gpio_pb_owners[i] = if_unclaimed;
gpio_pg_owners[i] = if_unclaimed;
}
for (; i<32; i++) {
gpio_pg_owners[i] = if_unclaimed;
}
return 0;
}
module_init(cris_io_interface_init);
EXPORT_SYMBOL(cris_request_io_interface);
EXPORT_SYMBOL(cris_free_io_interface);
EXPORT_SYMBOL(cris_io_interface_allocate_pins);
EXPORT_SYMBOL(cris_io_interface_free_pins);
EXPORT_SYMBOL(cris_io_interface_register_watcher);
EXPORT_SYMBOL(cris_io_interface_delete_watcher);
| gpl-2.0 |
monishk10/moshi_cancro | arch/cris/arch-v10/kernel/dma.c | 9032 | 8000 | /* Wrapper for DMA channel allocator that updates DMA client muxing.
* Copyright 2004-2007, Axis Communications AB
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/dma.h>
#include <arch/svinto.h>
#include <arch/system.h>
/* Macro to access ETRAX 100 registers */
#define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \
IO_STATE_(reg##_, field##_, _##val)
static char used_dma_channels[MAX_DMA_CHANNELS];
static const char * used_dma_channels_users[MAX_DMA_CHANNELS];
int cris_request_dma(unsigned int dmanr, const char * device_id,
unsigned options, enum dma_owner owner)
{
unsigned long flags;
unsigned long int gens;
int fail = -EINVAL;
if (dmanr >= MAX_DMA_CHANNELS) {
printk(KERN_CRIT "cris_request_dma: invalid DMA channel %u\n", dmanr);
return -EINVAL;
}
local_irq_save(flags);
if (used_dma_channels[dmanr]) {
local_irq_restore(flags);
if (options & DMA_VERBOSE_ON_ERROR) {
printk(KERN_CRIT "Failed to request DMA %i for %s, already allocated by %s\n", dmanr, device_id, used_dma_channels_users[dmanr]);
}
if (options & DMA_PANIC_ON_ERROR) {
panic("request_dma error!");
}
return -EBUSY;
}
gens = genconfig_shadow;
switch(owner)
{
case dma_eth:
if ((dmanr != NETWORK_TX_DMA_NBR) &&
(dmanr != NETWORK_RX_DMA_NBR)) {
printk(KERN_CRIT "Invalid DMA channel for eth\n");
goto bail;
}
break;
case dma_ser0:
if (dmanr == SER0_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma6, serial0);
} else if (dmanr == SER0_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma7, serial0);
} else {
printk(KERN_CRIT "Invalid DMA channel for ser0\n");
goto bail;
}
break;
case dma_ser1:
if (dmanr == SER1_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma8, serial1);
} else if (dmanr == SER1_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma9, serial1);
} else {
printk(KERN_CRIT "Invalid DMA channel for ser1\n");
goto bail;
}
break;
case dma_ser2:
if (dmanr == SER2_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma2, serial2);
} else if (dmanr == SER2_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma3, serial2);
} else {
printk(KERN_CRIT "Invalid DMA channel for ser2\n");
goto bail;
}
break;
case dma_ser3:
if (dmanr == SER3_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma4, serial3);
} else if (dmanr == SER3_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma5, serial3);
} else {
printk(KERN_CRIT "Invalid DMA channel for ser3\n");
goto bail;
}
break;
case dma_ata:
if (dmanr == ATA_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma2, ata);
} else if (dmanr == ATA_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma3, ata);
} else {
printk(KERN_CRIT "Invalid DMA channel for ata\n");
goto bail;
}
break;
case dma_ext0:
if (dmanr == EXTDMA0_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma4, extdma0);
} else if (dmanr == EXTDMA0_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma5, extdma0);
} else {
printk(KERN_CRIT "Invalid DMA channel for ext0\n");
goto bail;
}
break;
case dma_ext1:
if (dmanr == EXTDMA1_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma6, extdma1);
} else if (dmanr == EXTDMA1_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma7, extdma1);
} else {
printk(KERN_CRIT "Invalid DMA channel for ext1\n");
goto bail;
}
break;
case dma_int6:
if (dmanr == MEM2MEM_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma7, intdma6);
} else {
printk(KERN_CRIT "Invalid DMA channel for int6\n");
goto bail;
}
break;
case dma_int7:
if (dmanr == MEM2MEM_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma6, intdma7);
} else {
printk(KERN_CRIT "Invalid DMA channel for int7\n");
goto bail;
}
break;
case dma_usb:
if (dmanr == USB_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma8, usb);
} else if (dmanr == USB_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma9, usb);
} else {
printk(KERN_CRIT "Invalid DMA channel for usb\n");
goto bail;
}
break;
case dma_scsi0:
if (dmanr == SCSI0_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma2, scsi0);
} else if (dmanr == SCSI0_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma3, scsi0);
} else {
printk(KERN_CRIT "Invalid DMA channel for scsi0\n");
goto bail;
}
break;
case dma_scsi1:
if (dmanr == SCSI1_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma4, scsi1);
} else if (dmanr == SCSI1_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma5, scsi1);
} else {
printk(KERN_CRIT "Invalid DMA channel for scsi1\n");
goto bail;
}
break;
case dma_par0:
if (dmanr == PAR0_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma2, par0);
} else if (dmanr == PAR0_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma3, par0);
} else {
printk(KERN_CRIT "Invalid DMA channel for par0\n");
goto bail;
}
break;
case dma_par1:
if (dmanr == PAR1_TX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma4, par1);
} else if (dmanr == PAR1_RX_DMA_NBR) {
SETS(gens, R_GEN_CONFIG, dma5, par1);
} else {
printk(KERN_CRIT "Invalid DMA channel for par1\n");
goto bail;
}
break;
default:
printk(KERN_CRIT "Invalid DMA owner.\n");
goto bail;
}
used_dma_channels[dmanr] = 1;
used_dma_channels_users[dmanr] = device_id;
{
volatile int i;
genconfig_shadow = gens;
*R_GEN_CONFIG = genconfig_shadow;
/* Wait 12 cycles before doing any DMA command */
for(i = 6; i > 0; i--)
nop();
}
fail = 0;
bail:
local_irq_restore(flags);
return fail;
}
void cris_free_dma(unsigned int dmanr, const char * device_id)
{
unsigned long flags;
if (dmanr >= MAX_DMA_CHANNELS) {
printk(KERN_CRIT "cris_free_dma: invalid DMA channel %u\n", dmanr);
return;
}
local_irq_save(flags);
if (!used_dma_channels[dmanr]) {
printk(KERN_CRIT "cris_free_dma: DMA channel %u not allocated\n", dmanr);
} else if (device_id != used_dma_channels_users[dmanr]) {
printk(KERN_CRIT "cris_free_dma: DMA channel %u not allocated by device\n", dmanr);
} else {
switch(dmanr)
{
case 0:
*R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH0_CMD, cmd, *R_DMA_CH0_CMD) ==
IO_STATE_VALUE(R_DMA_CH0_CMD, cmd, reset));
break;
case 1:
*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH1_CMD, cmd, *R_DMA_CH1_CMD) ==
IO_STATE_VALUE(R_DMA_CH1_CMD, cmd, reset));
break;
case 2:
*R_DMA_CH2_CMD = IO_STATE(R_DMA_CH2_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH2_CMD, cmd, *R_DMA_CH2_CMD) ==
IO_STATE_VALUE(R_DMA_CH2_CMD, cmd, reset));
break;
case 3:
*R_DMA_CH3_CMD = IO_STATE(R_DMA_CH3_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH3_CMD, cmd, *R_DMA_CH3_CMD) ==
IO_STATE_VALUE(R_DMA_CH3_CMD, cmd, reset));
break;
case 4:
*R_DMA_CH4_CMD = IO_STATE(R_DMA_CH4_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH4_CMD, cmd, *R_DMA_CH4_CMD) ==
IO_STATE_VALUE(R_DMA_CH4_CMD, cmd, reset));
break;
case 5:
*R_DMA_CH5_CMD = IO_STATE(R_DMA_CH5_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH5_CMD, cmd, *R_DMA_CH5_CMD) ==
IO_STATE_VALUE(R_DMA_CH5_CMD, cmd, reset));
break;
case 6:
*R_DMA_CH6_CMD = IO_STATE(R_DMA_CH6_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH6_CMD, cmd, *R_DMA_CH6_CMD) ==
IO_STATE_VALUE(R_DMA_CH6_CMD, cmd, reset));
break;
case 7:
*R_DMA_CH7_CMD = IO_STATE(R_DMA_CH7_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH7_CMD, cmd, *R_DMA_CH7_CMD) ==
IO_STATE_VALUE(R_DMA_CH7_CMD, cmd, reset));
break;
case 8:
*R_DMA_CH8_CMD = IO_STATE(R_DMA_CH8_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH8_CMD, cmd, *R_DMA_CH8_CMD) ==
IO_STATE_VALUE(R_DMA_CH8_CMD, cmd, reset));
break;
case 9:
*R_DMA_CH9_CMD = IO_STATE(R_DMA_CH9_CMD, cmd, reset);
while (IO_EXTRACT(R_DMA_CH9_CMD, cmd, *R_DMA_CH9_CMD) ==
IO_STATE_VALUE(R_DMA_CH9_CMD, cmd, reset));
break;
}
used_dma_channels[dmanr] = 0;
}
local_irq_restore(flags);
}
EXPORT_SYMBOL(cris_request_dma);
EXPORT_SYMBOL(cris_free_dma);
| gpl-2.0 |